commit
ea90ec2205
@ -0,0 +1,28 @@ |
||||
{ lib, ... }: { |
||||
options.submodule = lib.mkOption { |
||||
inherit (lib.evalModules { |
||||
modules = [ |
||||
{ |
||||
options.inner = lib.mkOption { |
||||
type = lib.types.bool; |
||||
default = false; |
||||
}; |
||||
} |
||||
]; |
||||
}) type; |
||||
default = {}; |
||||
}; |
||||
|
||||
config.submodule = lib.mkMerge [ |
||||
({ lib, ... }: { |
||||
options.outer = lib.mkOption { |
||||
type = lib.types.bool; |
||||
default = false; |
||||
}; |
||||
}) |
||||
{ |
||||
inner = true; |
||||
outer = true; |
||||
} |
||||
]; |
||||
} |
@ -0,0 +1,102 @@ |
||||
# Edit this configuration file to define what should be installed on |
||||
# your system. Help is available in the configuration.nix(5) man page |
||||
# and in the NixOS manual (accessible by running ‘nixos-help’). |
||||
|
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
|
||||
{ |
||||
imports = |
||||
[ # Include the default lxd configuration. |
||||
../../../modules/virtualisation/lxc-container.nix |
||||
# Include the container-specific autogenerated configuration. |
||||
./lxd.nix |
||||
]; |
||||
|
||||
# networking.hostName = mkForce "nixos"; # Overwrite the hostname. |
||||
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant. |
||||
|
||||
# Set your time zone. |
||||
# time.timeZone = "Europe/Amsterdam"; |
||||
|
||||
# The global useDHCP flag is deprecated, therefore explicitly set to false here. |
||||
# Per-interface useDHCP will be mandatory in the future, so this generated config |
||||
# replicates the default behaviour. |
||||
networking.useDHCP = false; |
||||
networking.interfaces.eth0.useDHCP = true; |
||||
|
||||
# Configure network proxy if necessary |
||||
# networking.proxy.default = "http://user:password@proxy:port/"; |
||||
# networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain"; |
||||
|
||||
# Select internationalisation properties. |
||||
# i18n.defaultLocale = "en_US.UTF-8"; |
||||
# console = { |
||||
# font = "Lat2-Terminus16"; |
||||
# keyMap = "us"; |
||||
# }; |
||||
|
||||
# Enable the X11 windowing system. |
||||
# services.xserver.enable = true; |
||||
|
||||
# Configure keymap in X11 |
||||
# services.xserver.layout = "us"; |
||||
# services.xserver.xkbOptions = "eurosign:e"; |
||||
|
||||
# Enable CUPS to print documents. |
||||
# services.printing.enable = true; |
||||
|
||||
# Enable sound. |
||||
# sound.enable = true; |
||||
# hardware.pulseaudio.enable = true; |
||||
|
||||
# Enable touchpad support (enabled default in most desktopManager). |
||||
# services.xserver.libinput.enable = true; |
||||
|
||||
# Define a user account. Don't forget to set a password with ‘passwd’. |
||||
# users.users.jane = { |
||||
# isNormalUser = true; |
||||
# extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user. |
||||
# }; |
||||
|
||||
# List packages installed in system profile. To search, run: |
||||
# $ nix search wget |
||||
# environment.systemPackages = with pkgs; [ |
||||
# vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default. |
||||
# wget |
||||
# firefox |
||||
# ]; |
||||
|
||||
# Some programs need SUID wrappers, can be configured further or are |
||||
# started in user sessions. |
||||
# programs.mtr.enable = true; |
||||
# programs.gnupg.agent = { |
||||
# enable = true; |
||||
# enableSSHSupport = true; |
||||
# }; |
||||
|
||||
# List services that you want to enable: |
||||
|
||||
# Enable the OpenSSH daemon. |
||||
# services.openssh.enable = true; |
||||
|
||||
# Open ports in the firewall. |
||||
# networking.firewall.allowedTCPPorts = [ ... ]; |
||||
# networking.firewall.allowedUDPPorts = [ ... ]; |
||||
# Or disable the firewall altogether. |
||||
# networking.firewall.enable = false; |
||||
|
||||
# This value determines the NixOS release from which the default |
||||
# settings for stateful data, like file locations and database versions |
||||
# on your system were taken. It’s perfectly fine and recommended to leave |
||||
# this value at the release version of the first install of this system. |
||||
# Before changing this value read the documentation for this option |
||||
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html). |
||||
system.stateVersion = "21.05"; # Did you read the comment? |
||||
|
||||
# As this is intended as a standalone image, undo some of the minimal profile stuff |
||||
documentation.enable = true; |
||||
documentation.nixos.enable = true; |
||||
environment.noXlibs = false; |
||||
} |
@ -0,0 +1,34 @@ |
||||
{ lib, config, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
{ |
||||
imports = [ |
||||
../../../modules/virtualisation/lxc-container.nix |
||||
]; |
||||
|
||||
virtualisation.lxc.templates.nix = { |
||||
enable = true; |
||||
target = "/etc/nixos/lxd.nix"; |
||||
template = ./nix.tpl; |
||||
when = [ "create" "copy" ]; |
||||
}; |
||||
|
||||
# copy the config for nixos-rebuild |
||||
system.activationScripts.config = '' |
||||
if [ ! -e /etc/nixos/configuration.nix ]; then |
||||
mkdir -p /etc/nixos |
||||
cat ${./lxd-image-inner.nix} > /etc/nixos/configuration.nix |
||||
sed 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix |
||||
fi |
||||
''; |
||||
|
||||
# Network |
||||
networking.useDHCP = false; |
||||
networking.interfaces.eth0.useDHCP = true; |
||||
|
||||
# As this is intended as a standalone image, undo some of the minimal profile stuff |
||||
documentation.enable = true; |
||||
documentation.nixos.enable = true; |
||||
environment.noXlibs = false; |
||||
} |
@ -0,0 +1,9 @@ |
||||
{ lib, config, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY |
||||
|
||||
{ |
||||
networking.hostName = "{{ container.name }}"; |
||||
} |
@ -0,0 +1,12 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
let |
||||
cfg = config.hardware.flirc; |
||||
in |
||||
{ |
||||
options.hardware.flirc.enable = lib.mkEnableOption "software to configure a Flirc USB device"; |
||||
|
||||
config = lib.mkIf cfg.enable { |
||||
environment.systemPackages = [ pkgs.flirc ]; |
||||
services.udev.packages = [ pkgs.flirc ]; |
||||
}; |
||||
} |
@ -0,0 +1,18 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.hardware.gkraken; |
||||
in |
||||
{ |
||||
options.hardware.gkraken = { |
||||
enable = mkEnableOption "gkraken's udev rules for NZXT AIO liquid coolers"; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
services.udev.packages = with pkgs; [ |
||||
gkraken |
||||
]; |
||||
}; |
||||
} |
@ -0,0 +1,122 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.odoo; |
||||
format = pkgs.formats.ini {}; |
||||
in |
||||
{ |
||||
options = { |
||||
services.odoo = { |
||||
enable = mkEnableOption "odoo"; |
||||
|
||||
package = mkOption { |
||||
type = types.package; |
||||
default = pkgs.odoo; |
||||
defaultText = literalExpression "pkgs.odoo"; |
||||
description = "Odoo package to use."; |
||||
}; |
||||
|
||||
addons = mkOption { |
||||
type = with types; listOf package; |
||||
default = []; |
||||
example = literalExpression "[ pkgs.odoo_enterprise ]"; |
||||
description = "Odoo addons."; |
||||
}; |
||||
|
||||
settings = mkOption { |
||||
type = format.type; |
||||
default = {}; |
||||
description = '' |
||||
Odoo configuration settings. For more details see <link xlink:href="https://www.odoo.com/documentation/15.0/administration/install/deploy.html"/> |
||||
''; |
||||
}; |
||||
|
||||
domain = mkOption { |
||||
type = with types; nullOr str; |
||||
description = "Domain to host Odoo with nginx"; |
||||
default = null; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf (cfg.enable) (let |
||||
cfgFile = format.generate "odoo.cfg" cfg.settings; |
||||
in { |
||||
services.nginx = mkIf (cfg.domain != null) { |
||||
upstreams = { |
||||
odoo.servers = { |
||||
"127.0.0.1:8069" = {}; |
||||
}; |
||||
|
||||
odoochat.servers = { |
||||
"127.0.0.1:8072" = {}; |
||||
}; |
||||
}; |
||||
|
||||
virtualHosts."${cfg.domain}" = { |
||||
extraConfig = '' |
||||
proxy_read_timeout 720s; |
||||
proxy_connect_timeout 720s; |
||||
proxy_send_timeout 720s; |
||||
|
||||
proxy_set_header X-Forwarded-Host $host; |
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
||||
proxy_set_header X-Forwarded-Proto $scheme; |
||||
proxy_set_header X-Real-IP $remote_addr; |
||||
''; |
||||
|
||||
locations = { |
||||
"/longpolling" = { |
||||
proxyPass = "http://odoochat"; |
||||
}; |
||||
|
||||
"/" = { |
||||
proxyPass = "http://odoo"; |
||||
extraConfig = '' |
||||
proxy_redirect off; |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
services.odoo.settings.options = { |
||||
proxy_mode = cfg.domain != null; |
||||
}; |
||||
|
||||
users.users.odoo = { |
||||
isSystemUser = true; |
||||
group = "odoo"; |
||||
}; |
||||
users.groups.odoo = {}; |
||||
|
||||
systemd.services.odoo = { |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network.target" "postgresql.service" ]; |
||||
|
||||
# pg_dump |
||||
path = [ config.services.postgresql.package ]; |
||||
|
||||
requires = [ "postgresql.service" ]; |
||||
script = "HOME=$STATE_DIRECTORY ${cfg.package}/bin/odoo ${optionalString (cfg.addons != []) "--addons-path=${concatMapStringsSep "," escapeShellArg cfg.addons}"} -c ${cfgFile}"; |
||||
|
||||
serviceConfig = { |
||||
DynamicUser = true; |
||||
User = "odoo"; |
||||
StateDirectory = "odoo"; |
||||
}; |
||||
}; |
||||
|
||||
services.postgresql = { |
||||
enable = true; |
||||
|
||||
ensureUsers = [{ |
||||
name = "odoo"; |
||||
ensurePermissions = { "DATABASE odoo" = "ALL PRIVILEGES"; }; |
||||
}]; |
||||
ensureDatabases = [ "odoo" ]; |
||||
}; |
||||
}); |
||||
} |
@ -0,0 +1,171 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
|
||||
cfg = config.hardware.rasdaemon; |
||||
|
||||
in |
||||
{ |
||||
options.hardware.rasdaemon = { |
||||
|
||||
enable = mkEnableOption "RAS logging daemon"; |
||||
|
||||
record = mkOption { |
||||
type = types.bool; |
||||
default = true; |
||||
description = "record events via sqlite3, required for ras-mc-ctl"; |
||||
}; |
||||
|
||||
mainboard = mkOption { |
||||
type = types.lines; |
||||
default = ""; |
||||
description = "Custom mainboard description, see <citerefentry><refentrytitle>ras-mc-ctl</refentrytitle><manvolnum>8</manvolnum></citerefentry> for more details."; |
||||
example = '' |
||||
vendor = ASRock |
||||
model = B450M Pro4 |
||||
|
||||
# it should default to such values from |
||||
# /sys/class/dmi/id/board_[vendor|name] |
||||
# alternatively one can supply a script |
||||
# that returns the same format as above |
||||
|
||||
script = <path to script> |
||||
''; |
||||
}; |
||||
|
||||
# TODO, accept `rasdaemon.labels = " ";` or `rasdaemon.labels = { dell = " "; asrock = " "; };' |
||||
|
||||
labels = mkOption { |
||||
type = types.lines; |
||||
default = ""; |
||||
description = "Additional memory module label descriptions to be placed in /etc/ras/dimm_labels.d/labels"; |
||||
example = '' |
||||
# vendor and model may be shown by 'ras-mc-ctl --mainboard' |
||||
vendor: ASRock |
||||
product: To Be Filled By O.E.M. |
||||
model: B450M Pro4 |
||||
# these labels are names for the motherboard slots |
||||
# the numbers may be shown by `ras-mc-ctl --error-count` |
||||
# they are mc:csrow:channel |
||||
DDR4_A1: 0.2.0; DDR4_B1: 0.2.1; |
||||
DDR4_A2: 0.3.0; DDR4_B2: 0.3.1; |
||||
''; |
||||
}; |
||||
|
||||
config = mkOption { |
||||
type = types.lines; |
||||
default = ""; |
||||
description = '' |
||||
rasdaemon configuration, currently only used for CE PFA |
||||
for details, read rasdaemon.outPath/etc/sysconfig/rasdaemon's comments |
||||
''; |
||||
example = '' |
||||
# defaults from included config |
||||
PAGE_CE_REFRESH_CYCLE="24h" |
||||
PAGE_CE_THRESHOLD="50" |
||||
PAGE_CE_ACTION="soft" |
||||
''; |
||||
}; |
||||
|
||||
extraModules = mkOption { |
||||
type = types.listOf types.str; |
||||
default = []; |
||||
description = "extra kernel modules to load"; |
||||
example = [ "i7core_edac" ]; |
||||
}; |
||||
|
||||
testing = mkEnableOption "error injection infrastructure"; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
|
||||
environment.etc = { |
||||
"ras/mainboard" = { |
||||
enable = cfg.mainboard != ""; |
||||
text = cfg.mainboard; |
||||
}; |
||||
# TODO, handle multiple cfg.labels.brand = " "; |
||||
"ras/dimm_labels.d/labels" = { |
||||
enable = cfg.labels != ""; |
||||
text = cfg.labels; |
||||
}; |
||||
"sysconfig/rasdaemon" = { |
||||
enable = cfg.config != ""; |
||||
text = cfg.config; |
||||
}; |
||||
}; |
||||
environment.systemPackages = [ pkgs.rasdaemon ] |
||||
++ optionals (cfg.testing) (with pkgs.error-inject; [ |
||||
edac-inject |
||||
mce-inject |
||||
aer-inject |
||||
]); |
||||
|
||||
boot.initrd.kernelModules = cfg.extraModules |
||||
++ optionals (cfg.testing) [ |
||||
# edac_core and amd64_edac should get loaded automatically |
||||
# i7core_edac may not be, and may not be required, but should load successfully |
||||
"edac_core" |
||||
"amd64_edac" |
||||
"i7core_edac" |
||||
"mce-inject" |
||||
"aer-inject" |
||||
]; |
||||
|
||||
boot.kernelPatches = optionals (cfg.testing) [{ |
||||
name = "rasdaemon-tests"; |
||||
patch = null; |
||||
extraConfig = '' |
||||
EDAC_DEBUG y |
||||
X86_MCE_INJECT y |
||||
|
||||
PCIEPORTBUS y |
||||
PCIEAER y |
||||
PCIEAER_INJECT y |
||||
''; |
||||
}]; |
||||
|
||||
# i tried to set up a group for this |
||||
# but rasdaemon needs higher permissions? |
||||
# `rasdaemon: Can't locate a mounted debugfs` |
||||
|
||||
# most of this taken from src/misc/ |
||||
systemd.services = { |
||||
rasdaemon = { |
||||
description = "the RAS logging daemon"; |
||||
documentation = [ "man:rasdaemon(1)" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "syslog.target" ]; |
||||
|
||||
serviceConfig = { |
||||
StateDirectory = optionalString (cfg.record) "rasdaemon"; |
||||
|
||||
ExecStart = "${pkgs.rasdaemon}/bin/rasdaemon --foreground" |
||||
+ optionalString (cfg.record) " --record"; |
||||
ExecStop = "${pkgs.rasdaemon}/bin/rasdaemon --disable"; |
||||
Restart = "on-abort"; |
||||
|
||||
# src/misc/rasdaemon.service.in shows this: |
||||
# ExecStartPost = ${pkgs.rasdaemon}/bin/rasdaemon --enable |
||||
# but that results in unpredictable existence of the database |
||||
# and everything seems to be enabled without this... |
||||
}; |
||||
}; |
||||
ras-mc-ctl = mkIf (cfg.labels != "") { |
||||
description = "register DIMM labels on startup"; |
||||
documentation = [ "man:ras-mc-ctl(8)" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
serviceConfig = { |
||||
Type = "oneshot"; |
||||
ExecStart = "${pkgs.rasdaemon}/bin/ras-mc-ctl --register-labels"; |
||||
RemainAfterExit = true; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
meta.maintainers = [ maintainers.evils ]; |
||||
|
||||
} |
@ -0,0 +1,105 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
let |
||||
cfg = config.services.signald; |
||||
dataDir = "/var/lib/signald"; |
||||
defaultUser = "signald"; |
||||
in |
||||
{ |
||||
options.services.signald = { |
||||
enable = mkEnableOption "the signald service"; |
||||
|
||||
user = mkOption { |
||||
type = types.str; |
||||
default = defaultUser; |
||||
description = "User under which signald runs."; |
||||
}; |
||||
|
||||
group = mkOption { |
||||
type = types.str; |
||||
default = defaultUser; |
||||
description = "Group under which signald runs."; |
||||
}; |
||||
|
||||
socketPath = mkOption { |
||||
type = types.str; |
||||
default = "/run/signald/signald.sock"; |
||||
description = "Path to the signald socket"; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
users.users = optionalAttrs (cfg.user == defaultUser) { |
||||
${defaultUser} = { |
||||
group = cfg.group; |
||||
isSystemUser = true; |
||||
}; |
||||
}; |
||||
|
||||
users.groups = optionalAttrs (cfg.group == defaultUser) { |
||||
${defaultUser} = { }; |
||||
}; |
||||
|
||||
systemd.services.signald = { |
||||
description = "A daemon for interacting with the Signal Private Messenger"; |
||||
wants = [ "network.target" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network.target" ]; |
||||
|
||||
serviceConfig = { |
||||
User = cfg.user; |
||||
Group = cfg.group; |
||||
ExecStart = "${pkgs.signald}/bin/signald -d ${dataDir} -s ${cfg.socketPath}"; |
||||
Restart = "on-failure"; |
||||
StateDirectory = "signald"; |
||||
RuntimeDirectory = "signald"; |
||||
StateDirectoryMode = "0750"; |
||||
RuntimeDirectoryMode = "0750"; |
||||
|
||||
BindReadOnlyPaths = [ |
||||
"/nix/store" |
||||
"-/etc/resolv.conf" |
||||
"-/etc/nsswitch.conf" |
||||
"-/etc/hosts" |
||||
"-/etc/localtime" |
||||
]; |
||||
CapabilityBoundingSet = ""; |
||||
# ProtectClock= adds DeviceAllow=char-rtc r |
||||
DeviceAllow = ""; |
||||
# Use a static user so other applications can access the files |
||||
#DynamicUser = true; |
||||
LockPersonality = true; |
||||
# Needed for java |
||||
#MemoryDenyWriteExecute = true; |
||||
NoNewPrivileges = true; |
||||
PrivateDevices = true; |
||||
PrivateMounts = true; |
||||
# Needs network access |
||||
#PrivateNetwork = true; |
||||
PrivateTmp = true; |
||||
PrivateUsers = true; |
||||
ProcSubset = "pid"; |
||||
ProtectClock = true; |
||||
ProtectHome = true; |
||||
ProtectHostname = true; |
||||
# Would re-mount paths ignored by temporary root |
||||
#ProtectSystem = "strict"; |
||||
ProtectControlGroups = true; |
||||
ProtectKernelLogs = true; |
||||
ProtectKernelModules = true; |
||||
ProtectKernelTunables = true; |
||||
ProtectProc = "invisible"; |
||||
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ]; |
||||
RestrictNamespaces = true; |
||||
RestrictRealtime = true; |
||||
RestrictSUIDSGID = true; |
||||
SystemCallArchitectures = "native"; |
||||
SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ]; |
||||
TemporaryFileSystem = "/:ro"; |
||||
# Does not work well with the temporary root |
||||
#UMask = "0066"; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
@ -0,0 +1,71 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
|
||||
let |
||||
cfg = config.services.xmrig; |
||||
|
||||
json = pkgs.formats.json { }; |
||||
configFile = json.generate "config.json" cfg.settings; |
||||
in |
||||
|
||||
with lib; |
||||
|
||||
{ |
||||
options = { |
||||
services.xmrig = { |
||||
enable = mkEnableOption "XMRig Mining Software"; |
||||
|
||||
package = mkOption { |
||||
type = types.package; |
||||
default = pkgs.xmrig; |
||||
example = literalExpression "pkgs.xmrig-mo"; |
||||
description = "XMRig package to use."; |
||||
}; |
||||
|
||||
settings = mkOption { |
||||
default = { }; |
||||
type = json.type; |
||||
example = literalExpression '' |
||||
{ |
||||
autosave = true; |
||||
cpu = true; |
||||
opencl = false; |
||||
cuda = false; |
||||
pools = [ |
||||
{ |
||||
url = "pool.supportxmr.com:443"; |
||||
user = "your-wallet"; |
||||
keepalive = true; |
||||
tls = true; |
||||
} |
||||
] |
||||
} |
||||
''; |
||||
description = '' |
||||
XMRig configuration. Refer to |
||||
<link xlink:href="https://xmrig.com/docs/miner/config"/> |
||||
for details on supported values. |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
systemd.services.xmrig = { |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network.target" ]; |
||||
description = "XMRig Mining Software Service"; |
||||
serviceConfig = { |
||||
ExecStartPre = "${cfg.package}/bin/xmrig --config=${configFile} --dry-run"; |
||||
ExecStart = "${cfg.package}/bin/xmrig --config=${configFile}"; |
||||
DynamicUser = true; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
meta = with lib; { |
||||
description = "XMRig Mining Software Service"; |
||||
license = licenses.gpl3Only; |
||||
maintainers = with maintainers; [ ratsclub ]; |
||||
}; |
||||
} |
@ -0,0 +1,417 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.jibri; |
||||
|
||||
# Copied from the jitsi-videobridge.nix file. |
||||
toHOCON = x: |
||||
if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}") |
||||
else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}" |
||||
else if isList x then "[${ concatMapStringsSep "," toHOCON x }]" |
||||
else builtins.toJSON x; |
||||
|
||||
# We're passing passwords in environment variables that have names generated |
||||
# from an attribute name, which may not be a valid bash identifier. |
||||
toVarName = s: "XMPP_PASSWORD_" + stringAsChars (c: if builtins.match "[A-Za-z0-9]" c != null then c else "_") s; |
||||
|
||||
defaultJibriConfig = { |
||||
id = ""; |
||||
single-use-mode = false; |
||||
|
||||
api = { |
||||
http.external-api-port = 2222; |
||||
http.internal-api-port = 3333; |
||||
|
||||
xmpp.environments = flip mapAttrsToList cfg.xmppEnvironments (name: env: { |
||||
inherit name; |
||||
|
||||
xmpp-server-hosts = env.xmppServerHosts; |
||||
xmpp-domain = env.xmppDomain; |
||||
control-muc = { |
||||
domain = env.control.muc.domain; |
||||
room-name = env.control.muc.roomName; |
||||
nickname = env.control.muc.nickname; |
||||
}; |
||||
|
||||
control-login = { |
||||
domain = env.control.login.domain; |
||||
username = env.control.login.username; |
||||
password.__hocon_envvar = toVarName "${name}_control"; |
||||
}; |
||||
|
||||
call-login = { |
||||
domain = env.call.login.domain; |
||||
username = env.call.login.username; |
||||
password.__hocon_envvar = toVarName "${name}_call"; |
||||
}; |
||||
|
||||
strip-from-room-domain = env.stripFromRoomDomain; |
||||
usage-timeout = env.usageTimeout; |
||||
trust-all-xmpp-certs = env.disableCertificateVerification; |
||||
}); |
||||
}; |
||||
|
||||
recording = { |
||||
recordings-directory = "/tmp/recordings"; |
||||
finalize-script = "${cfg.finalizeScript}"; |
||||
}; |
||||
|
||||
streaming.rtmp-allow-list = [ ".*" ]; |
||||
|
||||
chrome.flags = [ |
||||
"--use-fake-ui-for-media-stream" |
||||
"--start-maximized" |
||||
"--kiosk" |
||||
"--enabled" |
||||
"--disable-infobars" |
||||
"--autoplay-policy=no-user-gesture-required" |
||||
] |
||||
++ lists.optional cfg.ignoreCert |
||||
"--ignore-certificate-errors"; |
||||
|
||||
|
||||
stats.enable-stats-d = true; |
||||
webhook.subscribers = [ ]; |
||||
|
||||
jwt-info = { }; |
||||
|
||||
call-status-checks = { |
||||
no-media-timout = "30 seconds"; |
||||
all-muted-timeout = "10 minutes"; |
||||
default-call-empty-timout = "30 seconds"; |
||||
}; |
||||
}; |
||||
# Allow overriding leaves of the default config despite types.attrs not doing any merging. |
||||
jibriConfig = recursiveUpdate defaultJibriConfig cfg.config; |
||||
configFile = pkgs.writeText "jibri.conf" (toHOCON { jibri = jibriConfig; }); |
||||
in |
||||
{ |
||||
options.services.jibri = with types; { |
||||
enable = mkEnableOption "Jitsi BRoadcasting Infrastructure. Currently Jibri must be run on a host that is also running <option>services.jitsi-meet.enable</option>, so for most use cases it will be simpler to run <option>services.jitsi-meet.jibri.enable</option>"; |
||||
config = mkOption { |
||||
type = attrs; |
||||
default = { }; |
||||
description = '' |
||||
Jibri configuration. |
||||
See <link xlink:href="https://github.com/jitsi/jibri/blob/master/src/main/resources/reference.conf" /> |
||||
for default configuration with comments. |
||||
''; |
||||
}; |
||||
|
||||
finalizeScript = mkOption { |
||||
type = types.path; |
||||
default = pkgs.writeScript "finalize_recording.sh" '' |
||||
#!/bin/sh |
||||
|
||||
RECORDINGS_DIR=$1 |
||||
|
||||
echo "This is a dummy finalize script" > /tmp/finalize.out |
||||
echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out |
||||
echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out |
||||
echo "or storage provider, etc.) in this script" >> /tmp/finalize.out |
||||
|
||||
exit 0 |
||||
''; |
||||
defaultText = literalExpression '' |
||||
pkgs.writeScript "finalize_recording.sh" '''''' |
||||
#!/bin/sh |
||||
|
||||
RECORDINGS_DIR=$1 |
||||
|
||||
echo "This is a dummy finalize script" > /tmp/finalize.out |
||||
echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out |
||||
echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out |
||||
echo "or storage provider, etc.) in this script" >> /tmp/finalize.out |
||||
|
||||
exit 0 |
||||
''''''; |
||||
''; |
||||
example = literalExpression '' |
||||
pkgs.writeScript "finalize_recording.sh" '''''' |
||||
#!/bin/sh |
||||
RECORDINGS_DIR=$1 |
||||
${pkgs.rclone}/bin/rclone copy $RECORDINGS_DIR RCLONE_REMOTE:jibri-recordings/ -v --log-file=/var/log/jitsi/jibri/recording-upload.txt |
||||
exit 0 |
||||
''''''; |
||||
''; |
||||
description = '' |
||||
This script runs when jibri finishes recording a video of a conference. |
||||
''; |
||||
}; |
||||
|
||||
ignoreCert = mkOption { |
||||
type = bool; |
||||
default = false; |
||||
example = true; |
||||
description = '' |
||||
Whether to enable the flag "--ignore-certificate-errors" for the Chromium browser opened by Jibri. |
||||
Intended for use in automated tests or anywhere else where using a verified cert for Jitsi-Meet is not possible. |
||||
''; |
||||
}; |
||||
|
||||
xmppEnvironments = mkOption { |
||||
description = '' |
||||
XMPP servers to connect to. |
||||
''; |
||||
example = literalExpression '' |
||||
"jitsi-meet" = { |
||||
xmppServerHosts = [ "localhost" ]; |
||||
xmppDomain = config.services.jitsi-meet.hostName; |
||||
|
||||
control.muc = { |
||||
domain = "internal.''${config.services.jitsi-meet.hostName}"; |
||||
roomName = "JibriBrewery"; |
||||
nickname = "jibri"; |
||||
}; |
||||
|
||||
control.login = { |
||||
domain = "auth.''${config.services.jitsi-meet.hostName}"; |
||||
username = "jibri"; |
||||
passwordFile = "/var/lib/jitsi-meet/jibri-auth-secret"; |
||||
}; |
||||
|
||||
call.login = { |
||||
domain = "recorder.''${config.services.jitsi-meet.hostName}"; |
||||
username = "recorder"; |
||||
passwordFile = "/var/lib/jitsi-meet/jibri-recorder-secret"; |
||||
}; |
||||
|
||||
usageTimeout = "0"; |
||||
disableCertificateVerification = true; |
||||
stripFromRoomDomain = "conference."; |
||||
}; |
||||
''; |
||||
default = { }; |
||||
type = attrsOf (submodule ({ name, ... }: { |
||||
options = { |
||||
xmppServerHosts = mkOption { |
||||
type = listOf str; |
||||
example = [ "xmpp.example.org" ]; |
||||
description = '' |
||||
Hostnames of the XMPP servers to connect to. |
||||
''; |
||||
}; |
||||
xmppDomain = mkOption { |
||||
type = str; |
||||
example = "xmpp.example.org"; |
||||
description = '' |
||||
The base XMPP domain. |
||||
''; |
||||
}; |
||||
control.muc.domain = mkOption { |
||||
type = str; |
||||
description = '' |
||||
The domain part of the MUC to connect to for control. |
||||
''; |
||||
}; |
||||
control.muc.roomName = mkOption { |
||||
type = str; |
||||
default = "JibriBrewery"; |
||||
description = '' |
||||
The room name of the MUC to connect to for control. |
||||
''; |
||||
}; |
||||
control.muc.nickname = mkOption { |
||||
type = str; |
||||
default = "jibri"; |
||||
description = '' |
||||
The nickname for this Jibri instance in the MUC. |
||||
''; |
||||
}; |
||||
control.login.domain = mkOption { |
||||
type = str; |
||||
description = '' |
||||
The domain part of the JID for this Jibri instance. |
||||
''; |
||||
}; |
||||
control.login.username = mkOption { |
||||
type = str; |
||||
default = "jvb"; |
||||
description = '' |
||||
User part of the JID. |
||||
''; |
||||
}; |
||||
control.login.passwordFile = mkOption { |
||||
type = str; |
||||
example = "/run/keys/jibri-xmpp1"; |
||||
description = '' |
||||
File containing the password for the user. |
||||
''; |
||||
}; |
||||
|
||||
call.login.domain = mkOption { |
||||
type = str; |
||||
example = "recorder.xmpp.example.org"; |
||||
description = '' |
||||
The domain part of the JID for the recorder. |
||||
''; |
||||
}; |
||||
call.login.username = mkOption { |
||||
type = str; |
||||
default = "recorder"; |
||||
description = '' |
||||
User part of the JID for the recorder. |
||||
''; |
||||
}; |
||||
call.login.passwordFile = mkOption { |
||||
type = str; |
||||
example = "/run/keys/jibri-recorder-xmpp1"; |
||||
description = '' |
||||
File containing the password for the user. |
||||
''; |
||||
}; |
||||
disableCertificateVerification = mkOption { |
||||
type = bool; |
||||
default = false; |
||||
description = '' |
||||
Whether to skip validation of the server's certificate. |
||||
''; |
||||
}; |
||||
|
||||
stripFromRoomDomain = mkOption { |
||||
type = str; |
||||
default = "0"; |
||||
example = "conference."; |
||||
description = '' |
||||
The prefix to strip from the room's JID domain to derive the call URL. |
||||
''; |
||||
}; |
||||
usageTimeout = mkOption { |
||||
type = str; |
||||
default = "0"; |
||||
example = "1 hour"; |
||||
description = '' |
||||
The duration that the Jibri session can be. |
||||
A value of zero means indefinitely. |
||||
''; |
||||
}; |
||||
}; |
||||
|
||||
config = |
||||
let |
||||
nick = mkDefault (builtins.replaceStrings [ "." ] [ "-" ] ( |
||||
config.networking.hostName + optionalString (config.networking.domain != null) ".${config.networking.domain}" |
||||
)); |
||||
in |
||||
{ |
||||
call.login.username = nick; |
||||
control.muc.nickname = nick; |
||||
}; |
||||
})); |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
users.groups.jibri = { }; |
||||
users.groups.plugdev = { }; |
||||
users.users.jibri = { |
||||
isSystemUser = true; |
||||
group = "jibri"; |
||||
home = "/var/lib/jibri"; |
||||
extraGroups = [ "jitsi-meet" "adm" "audio" "video" "plugdev" ]; |
||||
}; |
||||
|
||||
systemd.services.jibri-xorg = { |
||||
description = "Jitsi Xorg Process"; |
||||
|
||||
after = [ "network.target" ]; |
||||
wantedBy = [ "jibri.service" "jibri-icewm.service" ]; |
||||
|
||||
preStart = '' |
||||
cp --no-preserve=mode,ownership ${pkgs.jibri}/etc/jitsi/jibri/* /var/lib/jibri |
||||
mv /var/lib/jibri/{,.}asoundrc |
||||
''; |
||||
|
||||
environment.DISPLAY = ":0"; |
||||
serviceConfig = { |
||||
Type = "simple"; |
||||
|
||||
User = "jibri"; |
||||
Group = "jibri"; |
||||
KillMode = "process"; |
||||
Restart = "on-failure"; |
||||
RestartPreventExitStatus = 255; |
||||
|
||||
StateDirectory = "jibri"; |
||||
|
||||
ExecStart = "${pkgs.xorg.xorgserver}/bin/Xorg -nocursor -noreset +extension RANDR +extension RENDER -config ${pkgs.jibri}/etc/jitsi/jibri/xorg-video-dummy.conf -logfile /dev/null :0"; |
||||
}; |
||||
}; |
||||
|
||||
systemd.services.jibri-icewm = { |
||||
description = "Jitsi Window Manager"; |
||||
|
||||
requires = [ "jibri-xorg.service" ]; |
||||
after = [ "jibri-xorg.service" ]; |
||||
wantedBy = [ "jibri.service" ]; |
||||
|
||||
environment.DISPLAY = ":0"; |
||||
serviceConfig = { |
||||
Type = "simple"; |
||||
|
||||
User = "jibri"; |
||||
Group = "jibri"; |
||||
Restart = "on-failure"; |
||||
RestartPreventExitStatus = 255; |
||||
|
||||
StateDirectory = "jibri"; |
||||
|
||||
ExecStart = "${pkgs.icewm}/bin/icewm-session"; |
||||
}; |
||||
}; |
||||
|
||||
systemd.services.jibri = { |
||||
description = "Jibri Process"; |
||||
|
||||
requires = [ "jibri-icewm.service" "jibri-xorg.service" ]; |
||||
after = [ "network.target" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
|
||||
path = with pkgs; [ chromedriver chromium ffmpeg-full ]; |
||||
|
||||
script = (concatStrings (mapAttrsToList |
||||
(name: env: '' |
||||
export ${toVarName "${name}_control"}=$(cat ${env.control.login.passwordFile}) |
||||
export ${toVarName "${name}_call"}=$(cat ${env.call.login.passwordFile}) |
||||
'') |
||||
cfg.xmppEnvironments)) |
||||
+ '' |
||||
${pkgs.jre8_headless}/bin/java -Djava.util.logging.config.file=${./logging.properties-journal} -Dconfig.file=${configFile} -jar ${pkgs.jibri}/opt/jitsi/jibri/jibri.jar --config /var/lib/jibri/jibri.json |
||||
''; |
||||
|
||||
environment.HOME = "/var/lib/jibri"; |
||||
|
||||
serviceConfig = { |
||||
Type = "simple"; |
||||
|
||||
User = "jibri"; |
||||
Group = "jibri"; |
||||
Restart = "always"; |
||||
RestartPreventExitStatus = 255; |
||||
|
||||
StateDirectory = "jibri"; |
||||
}; |
||||
}; |
||||
|
||||
systemd.tmpfiles.rules = [ |
||||
"d /var/log/jitsi/jibri 755 jibri jibri" |
||||
]; |
||||
|
||||
|
||||
|
||||
# Configure Chromium to not show the "Chrome is being controlled by automatic test software" message. |
||||
environment.etc."chromium/policies/managed/managed_policies.json".text = builtins.toJSON { CommandLineFlagSecurityWarningsEnabled = false; }; |
||||
warnings = [ "All security warnings for Chromium have been disabled. This is necessary for Jibri, but it also impacts all other uses of Chromium on this system." ]; |
||||
|
||||
boot = { |
||||
extraModprobeConfig = '' |
||||
options snd-aloop enable=1,1,1,1,1,1,1,1 |
||||
''; |
||||
kernelModules = [ "snd-aloop" ]; |
||||
}; |
||||
}; |
||||
|
||||
meta.maintainers = lib.teams.jitsi.members; |
||||
} |
@ -0,0 +1,32 @@ |
||||
handlers = java.util.logging.FileHandler |
||||
|
||||
java.util.logging.FileHandler.level = FINE |
||||
java.util.logging.FileHandler.pattern = /var/log/jitsi/jibri/log.%g.txt |
||||
java.util.logging.FileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter |
||||
java.util.logging.FileHandler.count = 10 |
||||
java.util.logging.FileHandler.limit = 10000000 |
||||
|
||||
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.level = FINE |
||||
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.pattern = /var/log/jitsi/jibri/ffmpeg.%g.txt |
||||
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter |
||||
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.count = 10 |
||||
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.limit = 10000000 |
||||
|
||||
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.level = FINE |
||||
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.pattern = /var/log/jitsi/jibri/pjsua.%g.txt |
||||
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter |
||||
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.count = 10 |
||||
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.limit = 10000000 |
||||
|
||||
org.jitsi.jibri.selenium.util.BrowserFileHandler.level = FINE |
||||
org.jitsi.jibri.selenium.util.BrowserFileHandler.pattern = /var/log/jitsi/jibri/browser.%g.txt |
||||
org.jitsi.jibri.selenium.util.BrowserFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter |
||||
org.jitsi.jibri.selenium.util.BrowserFileHandler.count = 10 |
||||
org.jitsi.jibri.selenium.util.BrowserFileHandler.limit = 10000000 |
||||
|
||||
org.jitsi.level = FINE |
||||
org.jitsi.jibri.config.level = INFO |
||||
|
||||
org.glassfish.level = INFO |
||||
org.osgi.level = INFO |
||||
org.jitsi.xmpp.level = INFO |
@ -0,0 +1,138 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.lxd-image-server; |
||||
format = pkgs.formats.toml {}; |
||||
|
||||
location = "/var/www/simplestreams"; |
||||
in |
||||
{ |
||||
options = { |
||||
services.lxd-image-server = { |
||||
enable = mkEnableOption "lxd-image-server"; |
||||
|
||||
group = mkOption { |
||||
type = types.str; |
||||
description = "Group assigned to the user and the webroot directory."; |
||||
default = "nginx"; |
||||
example = "www-data"; |
||||
}; |
||||
|
||||
settings = mkOption { |
||||
type = format.type; |
||||
description = '' |
||||
Configuration for lxd-image-server. |
||||
|
||||
Example see <link xlink:href="https://github.com/Avature/lxd-image-server/blob/master/config.toml"/>. |
||||
''; |
||||
default = {}; |
||||
}; |
||||
|
||||
nginx = { |
||||
enable = mkEnableOption "nginx"; |
||||
domain = mkOption { |
||||
type = types.str; |
||||
description = "Domain to use for nginx virtual host."; |
||||
example = "images.example.org"; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
config = mkMerge [ |
||||
(mkIf (cfg.enable) { |
||||
users.users.lxd-image-server = { |
||||
isSystemUser = true; |
||||
group = cfg.group; |
||||
}; |
||||
users.groups.${cfg.group} = {}; |
||||
|
||||
environment.etc."lxd-image-server/config.toml".source = format.generate "config.toml" cfg.settings; |
||||
|
||||
services.logrotate.paths.lxd-image-server = { |
||||
path = "/var/log/lxd-image-server/lxd-image-server.log"; |
||||
frequency = "daily"; |
||||
keep = 21; |
||||
user = "lxd-image-server"; |
||||
group = cfg.group; |
||||
extraConfig = '' |
||||
missingok |
||||
compress |
||||
delaycompress |
||||
copytruncate |
||||
notifempty |
||||
''; |
||||
}; |
||||
|
||||
systemd.tmpfiles.rules = [ |
||||
"d /var/www/simplestreams 0755 lxd-image-server ${cfg.group}" |
||||
]; |
||||
|
||||
systemd.services.lxd-image-server = { |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network.target" ]; |
||||
|
||||
description = "LXD Image Server"; |
||||
|
||||
script = '' |
||||
${pkgs.lxd-image-server}/bin/lxd-image-server init |
||||
${pkgs.lxd-image-server}/bin/lxd-image-server watch |
||||
''; |
||||
|
||||
serviceConfig = { |
||||
User = "lxd-image-server"; |
||||
Group = cfg.group; |
||||
DynamicUser = true; |
||||
LogsDirectory = "lxd-image-server"; |
||||
RuntimeDirectory = "lxd-image-server"; |
||||
ExecReload = "${pkgs.lxd-image-server}/bin/lxd-image-server reload"; |
||||
ReadWritePaths = [ location ]; |
||||
}; |
||||
}; |
||||
}) |
||||
# this is seperate so it can be enabled on mirrored hosts |
||||
(mkIf (cfg.nginx.enable) { |
||||
# https://github.com/Avature/lxd-image-server/blob/master/resources/nginx/includes/lxd-image-server.pkg.conf |
||||
services.nginx.virtualHosts = { |
||||
"${cfg.nginx.domain}" = { |
||||
forceSSL = true; |
||||
enableACME = mkDefault true; |
||||
|
||||
root = location; |
||||
|
||||
locations = { |
||||
"/streams/v1/" = { |
||||
index = "index.json"; |
||||
}; |
||||
|
||||
# Serve json files with content type header application/json |
||||
"~ \.json$" = { |
||||
extraConfig = '' |
||||
add_header Content-Type application/json; |
||||
''; |
||||
}; |
||||
|
||||
"~ \.tar.xz$" = { |
||||
extraConfig = '' |
||||
add_header Content-Type application/octet-stream; |
||||
''; |
||||
}; |
||||
|
||||
"~ \.tar.gz$" = { |
||||
extraConfig = '' |
||||
add_header Content-Type application/octet-stream; |
||||
''; |
||||
}; |
||||
|
||||
# Deny access to document root and the images folder |
||||
"~ ^/(images/)?$" = { |
||||
return = "403"; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
}) |
||||
]; |
||||
} |
@ -0,0 +1,102 @@ |
||||
# Mosquitto {#module-services-mosquitto} |
||||
|
||||
Mosquitto is a MQTT broker often used for IoT or home automation data transport. |
||||
|
||||
## Quickstart {#module-services-mosquitto-quickstart} |
||||
|
||||
A minimal configuration for Mosquitto is |
||||
|
||||
```nix |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
acl = [ "pattern readwrite #" ]; |
||||
omitPasswordAuth = true; |
||||
settings.allow_anonymous = true; |
||||
} ]; |
||||
}; |
||||
``` |
||||
|
||||
This will start a broker on port 1883, listening on all interfaces of the machine, allowing |
||||
read/write access to all topics to any user without password requirements. |
||||
|
||||
User authentication can be configured with the `users` key of listeners. A config that gives |
||||
full read access to a user `monitor` and restricted write access to a user `service` could look |
||||
like |
||||
|
||||
```nix |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
users = { |
||||
monitor = { |
||||
acl = [ "read #" ]; |
||||
password = "monitor"; |
||||
}; |
||||
service = { |
||||
acl = [ "write service/#" ]; |
||||
password = "service"; |
||||
}; |
||||
}; |
||||
} ]; |
||||
}; |
||||
``` |
||||
|
||||
TLS authentication is configured by setting TLS-related options of the listener: |
||||
|
||||
```nix |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
port = 8883; # port change is not required, but helpful to avoid mistakes |
||||
# ... |
||||
settings = { |
||||
cafile = "/path/to/mqtt.ca.pem"; |
||||
certfile = "/path/to/mqtt.pem"; |
||||
keyfile = "/path/to/mqtt.key"; |
||||
}; |
||||
} ]; |
||||
``` |
||||
|
||||
## Configuration {#module-services-mosquitto-config} |
||||
|
||||
The Mosquitto configuration has four distinct types of settings: |
||||
the global settings of the daemon, listeners, plugins, and bridges. |
||||
Bridges and listeners are part of the global configuration, plugins are part of listeners. |
||||
Users of the broker are configured as parts of listeners rather than globally, allowing |
||||
configurations in which a given user is only allowed to log in to the broker using specific |
||||
listeners (eg to configure an admin user with full access to all topics, but restricted to |
||||
localhost). |
||||
|
||||
Almost all options of Mosquitto are available for configuration at their appropriate levels, some |
||||
as NixOS options written in camel case, the remainders under `settings` with their exact names in |
||||
the Mosquitto config file. The exceptions are `acl_file` (which is always set according to the |
||||
`acl` attributes of a listener and its users) and `per_listener_settings` (which is always set to |
||||
`true`). |
||||
|
||||
### Password authentication {#module-services-mosquitto-config-passwords} |
||||
|
||||
Mosquitto can be run in two modes, with a password file or without. Each listener has its own |
||||
password file, and different listeners may use different password files. Password file generation |
||||
can be disabled by setting `omitPasswordAuth = true` for a listener; in this case it is necessary |
||||
to either set `settings.allow_anonymous = true` to allow all logins, or to configure other |
||||
authentication methods like TLS client certificates with `settings.use_identity_as_username = true`. |
||||
|
||||
The default is to generate a password file for each listener from the users configured to that |
||||
listener. Users with no configured password will not be added to the password file and thus |
||||
will not be able to use the broker. |
||||
|
||||
### ACL format {#module-services-mosquitto-config-acl} |
||||
|
||||
Every listener has a Mosquitto `acl_file` attached to it. This ACL is configured via two |
||||
attributes of the config: |
||||
|
||||
* the `acl` attribute of the listener configures pattern ACL entries and topic ACL entries |
||||
for anonymous users. Each entry must be prefixed with `pattern` or `topic` to distinguish |
||||
between these two cases. |
||||
* the `acl` attribute of every user configures in the listener configured the ACL for that |
||||
given user. Only topic ACLs are supported by Mosquitto in this setting, so no prefix is |
||||
required or allowed. |
||||
|
||||
The default ACL for a listener is empty, disallowing all accesses from all clients. To configure |
||||
a completely open ACL, set `acl = [ "pattern readwrite #" ]` in the listener. |
@ -0,0 +1,147 @@ |
||||
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mosquitto"> |
||||
<title>Mosquitto</title> |
||||
<para> |
||||
Mosquitto is a MQTT broker often used for IoT or home automation |
||||
data transport. |
||||
</para> |
||||
<section xml:id="module-services-mosquitto-quickstart"> |
||||
<title>Quickstart</title> |
||||
<para> |
||||
A minimal configuration for Mosquitto is |
||||
</para> |
||||
<programlisting language="bash"> |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
acl = [ "pattern readwrite #" ]; |
||||
omitPasswordAuth = true; |
||||
settings.allow_anonymous = true; |
||||
} ]; |
||||
}; |
||||
</programlisting> |
||||
<para> |
||||
This will start a broker on port 1883, listening on all interfaces |
||||
of the machine, allowing read/write access to all topics to any |
||||
user without password requirements. |
||||
</para> |
||||
<para> |
||||
User authentication can be configured with the |
||||
<literal>users</literal> key of listeners. A config that gives |
||||
full read access to a user <literal>monitor</literal> and |
||||
restricted write access to a user <literal>service</literal> could |
||||
look like |
||||
</para> |
||||
<programlisting language="bash"> |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
users = { |
||||
monitor = { |
||||
acl = [ "read #" ]; |
||||
password = "monitor"; |
||||
}; |
||||
service = { |
||||
acl = [ "write service/#" ]; |
||||
password = "service"; |
||||
}; |
||||
}; |
||||
} ]; |
||||
}; |
||||
</programlisting> |
||||
<para> |
||||
TLS authentication is configured by setting TLS-related options of |
||||
the listener: |
||||
</para> |
||||
<programlisting language="bash"> |
||||
services.mosquitto = { |
||||
enable = true; |
||||
listeners = [ { |
||||
port = 8883; # port change is not required, but helpful to avoid mistakes |
||||
# ... |
||||
settings = { |
||||
cafile = "/path/to/mqtt.ca.pem"; |
||||
certfile = "/path/to/mqtt.pem"; |
||||
keyfile = "/path/to/mqtt.key"; |
||||
}; |
||||
} ]; |
||||
</programlisting> |
||||
</section> |
||||
<section xml:id="module-services-mosquitto-config"> |
||||
<title>Configuration</title> |
||||
<para> |
||||
The Mosquitto configuration has four distinct types of settings: |
||||
the global settings of the daemon, listeners, plugins, and |
||||
bridges. Bridges and listeners are part of the global |
||||
configuration, plugins are part of listeners. Users of the broker |
||||
are configured as parts of listeners rather than globally, |
||||
allowing configurations in which a given user is only allowed to |
||||
log in to the broker using specific listeners (eg to configure an |
||||
admin user with full access to all topics, but restricted to |
||||
localhost). |
||||
</para> |
||||
<para> |
||||
Almost all options of Mosquitto are available for configuration at |
||||
their appropriate levels, some as NixOS options written in camel |
||||
case, the remainders under <literal>settings</literal> with their |
||||
exact names in the Mosquitto config file. The exceptions are |
||||
<literal>acl_file</literal> (which is always set according to the |
||||
<literal>acl</literal> attributes of a listener and its users) and |
||||
<literal>per_listener_settings</literal> (which is always set to |
||||
<literal>true</literal>). |
||||
</para> |
||||
<section xml:id="module-services-mosquitto-config-passwords"> |
||||
<title>Password authentication</title> |
||||
<para> |
||||
Mosquitto can be run in two modes, with a password file or |
||||
without. Each listener has its own password file, and different |
||||
listeners may use different password files. Password file |
||||
generation can be disabled by setting |
||||
<literal>omitPasswordAuth = true</literal> for a listener; in |
||||
this case it is necessary to either set |
||||
<literal>settings.allow_anonymous = true</literal> to allow all |
||||
logins, or to configure other authentication methods like TLS |
||||
client certificates with |
||||
<literal>settings.use_identity_as_username = true</literal>. |
||||
</para> |
||||
<para> |
||||
The default is to generate a password file for each listener |
||||
from the users configured to that listener. Users with no |
||||
configured password will not be added to the password file and |
||||
thus will not be able to use the broker. |
||||
</para> |
||||
</section> |
||||
<section xml:id="module-services-mosquitto-config-acl"> |
||||
<title>ACL format</title> |
||||
<para> |
||||
Every listener has a Mosquitto <literal>acl_file</literal> |
||||
attached to it. This ACL is configured via two attributes of the |
||||
config: |
||||
</para> |
||||
<itemizedlist spacing="compact"> |
||||
<listitem> |
||||
<para> |
||||
the <literal>acl</literal> attribute of the listener |
||||
configures pattern ACL entries and topic ACL entries for |
||||
anonymous users. Each entry must be prefixed with |
||||
<literal>pattern</literal> or <literal>topic</literal> to |
||||
distinguish between these two cases. |
||||
</para> |
||||
</listitem> |
||||
<listitem> |
||||
<para> |
||||
the <literal>acl</literal> attribute of every user |
||||
configures in the listener configured the ACL for that given |
||||
user. Only topic ACLs are supported by Mosquitto in this |
||||
setting, so no prefix is required or allowed. |
||||
</para> |
||||
</listitem> |
||||
</itemizedlist> |
||||
<para> |
||||
The default ACL for a listener is empty, disallowing all |
||||
accesses from all clients. To configure a completely open ACL, |
||||
set <literal>acl = [ "pattern readwrite #" ]</literal> |
||||
in the listener. |
||||
</para> |
||||
</section> |
||||
</section> |
||||
</chapter> |
@ -0,0 +1,290 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
with lib; |
||||
let |
||||
python = pkgs.python3Packages.python; |
||||
cfg = config.services.seafile; |
||||
settingsFormat = pkgs.formats.ini { }; |
||||
|
||||
ccnetConf = settingsFormat.generate "ccnet.conf" cfg.ccnetSettings; |
||||
|
||||
seafileConf = settingsFormat.generate "seafile.conf" cfg.seafileSettings; |
||||
|
||||
seahubSettings = pkgs.writeText "seahub_settings.py" '' |
||||
FILE_SERVER_ROOT = '${cfg.ccnetSettings.General.SERVICE_URL}/seafhttp' |
||||
DATABASES = { |
||||
'default': { |
||||
'ENGINE': 'django.db.backends.sqlite3', |
||||
'NAME': '${seahubDir}/seahub.db', |
||||
} |
||||
} |
||||
MEDIA_ROOT = '${seahubDir}/media/' |
||||
THUMBNAIL_ROOT = '${seahubDir}/thumbnail/' |
||||
|
||||
with open('${seafRoot}/.seahubSecret') as f: |
||||
SECRET_KEY = f.readline().rstrip() |
||||
|
||||
${cfg.seahubExtraConf} |
||||
''; |
||||
|
||||
seafRoot = "/var/lib/seafile"; # hardcode it due to dynamicuser |
||||
ccnetDir = "${seafRoot}/ccnet"; |
||||
dataDir = "${seafRoot}/data"; |
||||
seahubDir = "${seafRoot}/seahub"; |
||||
|
||||
in { |
||||
|
||||
###### Interface |
||||
|
||||
options.services.seafile = { |
||||
enable = mkEnableOption "Seafile server"; |
||||
|
||||
ccnetSettings = mkOption { |
||||
type = types.submodule { |
||||
freeformType = settingsFormat.type; |
||||
|
||||
options = { |
||||
General = { |
||||
SERVICE_URL = mkOption { |
||||
type = types.str; |
||||
example = "https://www.example.com"; |
||||
description = '' |
||||
Seahub public URL. |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
default = { }; |
||||
description = '' |
||||
Configuration for ccnet, see |
||||
<link xlink:href="https://manual.seafile.com/config/ccnet-conf/"/> |
||||
for supported values. |
||||
''; |
||||
}; |
||||
|
||||
seafileSettings = mkOption { |
||||
type = types.submodule { |
||||
freeformType = settingsFormat.type; |
||||
|
||||
options = { |
||||
fileserver = { |
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 8082; |
||||
description = '' |
||||
The tcp port used by seafile fileserver. |
||||
''; |
||||
}; |
||||
host = mkOption { |
||||
type = types.str; |
||||
default = "127.0.0.1"; |
||||
example = "0.0.0.0"; |
||||
description = '' |
||||
The binding address used by seafile fileserver. |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
default = { }; |
||||
description = '' |
||||
Configuration for seafile-server, see |
||||
<link xlink:href="https://manual.seafile.com/config/seafile-conf/"/> |
||||
for supported values. |
||||
''; |
||||
}; |
||||
|
||||
workers = mkOption { |
||||
type = types.int; |
||||
default = 4; |
||||
example = 10; |
||||
description = '' |
||||
The number of gunicorn worker processes for handling requests. |
||||
''; |
||||
}; |
||||
|
||||
adminEmail = mkOption { |
||||
example = "john@example.com"; |
||||
type = types.str; |
||||
description = '' |
||||
Seafile Seahub Admin Account Email. |
||||
''; |
||||
}; |
||||
|
||||
initialAdminPassword = mkOption { |
||||
example = "someStrongPass"; |
||||
type = types.str; |
||||
description = '' |
||||
Seafile Seahub Admin Account initial password. |
||||
Should be change via Seahub web front-end. |
||||
''; |
||||
}; |
||||
|
||||
seafilePackage = mkOption { |
||||
type = types.package; |
||||
description = "Which package to use for the seafile server."; |
||||
default = pkgs.seafile-server; |
||||
}; |
||||
|
||||
seahubExtraConf = mkOption { |
||||
default = ""; |
||||
type = types.lines; |
||||
description = '' |
||||
Extra config to append to `seahub_settings.py` file. |
||||
Refer to <link xlink:href="https://manual.seafile.com/config/seahub_settings_py/" /> |
||||
for all available options. |
||||
''; |
||||
}; |
||||
}; |
||||
|
||||
###### Implementation |
||||
|
||||
config = mkIf cfg.enable { |
||||
|
||||
environment.etc."seafile/ccnet.conf".source = ccnetConf; |
||||
environment.etc."seafile/seafile.conf".source = seafileConf; |
||||
environment.etc."seafile/seahub_settings.py".source = seahubSettings; |
||||
|
||||
systemd.targets.seafile = { |
||||
wantedBy = [ "multi-user.target" ]; |
||||
description = "Seafile components"; |
||||
}; |
||||
|
||||
systemd.services = let |
||||
securityOptions = { |
||||
ProtectHome = true; |
||||
PrivateUsers = true; |
||||
PrivateDevices = true; |
||||
ProtectClock = true; |
||||
ProtectHostname = true; |
||||
ProtectProc = "invisible"; |
||||
ProtectKernelModules = true; |
||||
ProtectKernelTunables = true; |
||||
ProtectKernelLogs = true; |
||||
ProtectControlGroups = true; |
||||
RestrictNamespaces = true; |
||||
LockPersonality = true; |
||||
RestrictRealtime = true; |
||||
RestrictSUIDSGID = true; |
||||
MemoryDenyWriteExecute = true; |
||||
SystemCallArchitectures = "native"; |
||||
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" ]; |
||||
}; |
||||
in { |
||||
seaf-server = { |
||||
description = "Seafile server"; |
||||
partOf = [ "seafile.target" ]; |
||||
after = [ "network.target" ]; |
||||
wantedBy = [ "seafile.target" ]; |
||||
restartTriggers = [ ccnetConf seafileConf ]; |
||||
serviceConfig = securityOptions // { |
||||
User = "seafile"; |
||||
Group = "seafile"; |
||||
DynamicUser = true; |
||||
StateDirectory = "seafile"; |
||||
RuntimeDirectory = "seafile"; |
||||
LogsDirectory = "seafile"; |
||||
ConfigurationDirectory = "seafile"; |
||||
ExecStart = '' |
||||
${cfg.seafilePackage}/bin/seaf-server \ |
||||
--foreground \ |
||||
-F /etc/seafile \ |
||||
-c ${ccnetDir} \ |
||||
-d ${dataDir} \ |
||||
-l /var/log/seafile/server.log \ |
||||
-P /run/seafile/server.pid \ |
||||
-p /run/seafile |
||||
''; |
||||
}; |
||||
preStart = '' |
||||
if [ ! -f "${seafRoot}/server-setup" ]; then |
||||
mkdir -p ${dataDir}/library-template |
||||
mkdir -p ${ccnetDir}/{GroupMgr,misc,OrgMgr,PeerMgr} |
||||
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/GroupMgr/groupmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/groupmgr.sql" |
||||
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/misc/config.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/config.sql" |
||||
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/OrgMgr/orgmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/org.sql" |
||||
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/PeerMgr/usermgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/user.sql" |
||||
${pkgs.sqlite}/bin/sqlite3 ${dataDir}/seafile.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/seafile.sql" |
||||
echo "${cfg.seafilePackage.version}-sqlite" > "${seafRoot}"/server-setup |
||||
fi |
||||
# checking for upgrades and handling them |
||||
# WARNING: needs to be extended to actually handle major version migrations |
||||
installedMajor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f1) |
||||
installedMinor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f2) |
||||
pkgMajor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f1) |
||||
pkgMinor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f2) |
||||
if [ $installedMajor != $pkgMajor ] || [ $installedMinor != $pkgMinor ]; then |
||||
echo "Unsupported upgrade" >&2 |
||||
exit 1 |
||||
fi |
||||
''; |
||||
}; |
||||
|
||||
seahub = let |
||||
penv = (pkgs.python3.withPackages (ps: with ps; [ gunicorn seahub ])); |
||||
in { |
||||
description = "Seafile Server Web Frontend"; |
||||
wantedBy = [ "seafile.target" ]; |
||||
partOf = [ "seafile.target" ]; |
||||
after = [ "network.target" "seaf-server.service" ]; |
||||
requires = [ "seaf-server.service" ]; |
||||
restartTriggers = [ seahubSettings ]; |
||||
environment = { |
||||
PYTHONPATH = |
||||
"${pkgs.python3Packages.seahub}/thirdpart:${pkgs.python3Packages.seahub}:${penv}/${python.sitePackages}"; |
||||
DJANGO_SETTINGS_MODULE = "seahub.settings"; |
||||
CCNET_CONF_DIR = ccnetDir; |
||||
SEAFILE_CONF_DIR = dataDir; |
||||
SEAFILE_CENTRAL_CONF_DIR = "/etc/seafile"; |
||||
SEAFILE_RPC_PIPE_PATH = "/run/seafile"; |
||||
SEAHUB_LOG_DIR = "/var/log/seafile"; |
||||
}; |
||||
serviceConfig = securityOptions // { |
||||
User = "seafile"; |
||||
Group = "seafile"; |
||||
DynamicUser = true; |
||||
RuntimeDirectory = "seahub"; |
||||
StateDirectory = "seafile"; |
||||
LogsDirectory = "seafile"; |
||||
ConfigurationDirectory = "seafile"; |
||||
ExecStart = '' |
||||
${penv}/bin/gunicorn seahub.wsgi:application \ |
||||
--name seahub \ |
||||
--workers ${toString cfg.workers} \ |
||||
--log-level=info \ |
||||
--preload \ |
||||
--timeout=1200 \ |
||||
--limit-request-line=8190 \ |
||||
--bind unix:/run/seahub/gunicorn.sock |
||||
''; |
||||
}; |
||||
preStart = '' |
||||
mkdir -p ${seahubDir}/media |
||||
# Link all media except avatars |
||||
for m in `find ${pkgs.python3Packages.seahub}/media/ -maxdepth 1 -not -name "avatars"`; do |
||||
ln -sf $m ${seahubDir}/media/ |
||||
done |
||||
if [ ! -e "${seafRoot}/.seahubSecret" ]; then |
||||
${penv}/bin/python ${pkgs.python3Packages.seahub}/tools/secret_key_generator.py > ${seafRoot}/.seahubSecret |
||||
chmod 400 ${seafRoot}/.seahubSecret |
||||
fi |
||||
if [ ! -f "${seafRoot}/seahub-setup" ]; then |
||||
# avatars directory should be writable |
||||
install -D -t ${seahubDir}/media/avatars/ ${pkgs.python3Packages.seahub}/media/avatars/default.png |
||||
install -D -t ${seahubDir}/media/avatars/groups ${pkgs.python3Packages.seahub}/media/avatars/groups/default.png |
||||
# init database |
||||
${pkgs.python3Packages.seahub}/manage.py migrate |
||||
# create admin account |
||||
${pkgs.expect}/bin/expect -c 'spawn ${pkgs.python3Packages.seahub}/manage.py createsuperuser --email=${cfg.adminEmail}; expect "Password: "; send "${cfg.initialAdminPassword}\r"; expect "Password (again): "; send "${cfg.initialAdminPassword}\r"; expect "Superuser created successfully."' |
||||
echo "${pkgs.python3Packages.seahub.version}-sqlite" > "${seafRoot}/seahub-setup" |
||||
fi |
||||
if [ $(cat "${seafRoot}/seahub-setup" | cut -d"-" -f1) != "${pkgs.python3Packages.seahub.version}" ]; then |
||||
# update database |
||||
${pkgs.python3Packages.seahub}/manage.py migrate |
||||
echo "${pkgs.python3Packages.seahub.version}-sqlite" > "${seafRoot}/seahub-setup" |
||||
fi |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
@ -0,0 +1,139 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
let |
||||
|
||||
cfg = config.services.code-server; |
||||
defaultUser = "code-server"; |
||||
defaultGroup = defaultUser; |
||||
|
||||
in { |
||||
###### interface |
||||
options = { |
||||
services.code-server = { |
||||
enable = mkEnableOption "code-server"; |
||||
|
||||
package = mkOption { |
||||
default = pkgs.code-server; |
||||
defaultText = "pkgs.code-server"; |
||||
description = "Which code-server derivation to use."; |
||||
type = types.package; |
||||
}; |
||||
|
||||
extraPackages = mkOption { |
||||
default = [ ]; |
||||
description = "Packages that are available in the PATH of code-server."; |
||||
example = "[ pkgs.go ]"; |
||||
type = types.listOf types.package; |
||||
}; |
||||
|
||||
extraEnvironment = mkOption { |
||||
type = types.attrsOf types.str; |
||||
description = |
||||
"Additional environment variables to passed to code-server."; |
||||
default = { }; |
||||
example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; }; |
||||
}; |
||||
|
||||
extraArguments = mkOption { |
||||
default = [ "--disable-telemetry" ]; |
||||
description = "Additional arguments that passed to code-server"; |
||||
example = ''[ "--verbose" ]''; |
||||
type = types.listOf types.str; |
||||
}; |
||||
|
||||
host = mkOption { |
||||
default = "127.0.0.1"; |
||||
description = "The host-ip to bind to."; |
||||
type = types.str; |
||||
}; |
||||
|
||||
port = mkOption { |
||||
default = 4444; |
||||
description = "The port where code-server runs."; |
||||
type = types.port; |
||||
}; |
||||
|
||||
auth = mkOption { |
||||
default = "password"; |
||||
description = "The type of authentication to use."; |
||||
type = types.enum [ "none" "password" ]; |
||||
}; |
||||
|
||||
hashedPassword = mkOption { |
||||
default = ""; |
||||
description = |
||||
"Create the password with: 'echo -n 'thisismypassword' | npx argon2-cli -e'."; |
||||
type = types.str; |
||||
}; |
||||
|
||||
user = mkOption { |
||||
default = defaultUser; |
||||
example = "yourUser"; |
||||
description = '' |
||||
The user to run code-server as. |
||||
By default, a user named <literal>${defaultUser}</literal> will be created. |
||||
''; |
||||
type = types.str; |
||||
}; |
||||
|
||||
group = mkOption { |
||||
default = defaultGroup; |
||||
example = "yourGroup"; |
||||
description = '' |
||||
The group to run code-server under. |
||||
By default, a group named <literal>${defaultGroup}</literal> will be created. |
||||
''; |
||||
type = types.str; |
||||
}; |
||||
|
||||
extraGroups = mkOption { |
||||
default = [ ]; |
||||
description = |
||||
"An array of additional groups for the <literal>${defaultUser}</literal> user."; |
||||
example = [ "docker" ]; |
||||
type = types.listOf types.str; |
||||
}; |
||||
|
||||
}; |
||||
}; |
||||
|
||||
###### implementation |
||||
config = mkIf cfg.enable { |
||||
systemd.services.code-server = { |
||||
description = "VSCode server"; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network-online.target" ]; |
||||
path = cfg.extraPackages; |
||||
environment = { |
||||
HASHED_PASSWORD = cfg.hashedPassword; |
||||
} // cfg.extraEnvironment; |
||||
serviceConfig = { |
||||
ExecStart = "${cfg.package}/bin/code-server --bind-addr ${cfg.host}:${toString cfg.port} --auth ${cfg.auth} " + builtins.concatStringsSep " " cfg.extraArguments; |
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; |
||||
RuntimeDirectory = cfg.user; |
||||
User = cfg.user; |
||||
Group = cfg.group; |
||||
Restart = "on-failure"; |
||||
}; |
||||
|
||||
}; |
||||
|
||||
users.users."${cfg.user}" = mkMerge [ |
||||
(mkIf (cfg.user == defaultUser) { |
||||
isNormalUser = true; |
||||
description = "code-server user"; |
||||
inherit (cfg) group; |
||||
}) |
||||
{ |
||||
packages = cfg.extraPackages; |
||||
inherit (cfg) extraGroups; |
||||
} |
||||
]; |
||||
|
||||
users.groups."${defaultGroup}" = mkIf (cfg.group == defaultGroup) { }; |
||||
|
||||
}; |
||||
|
||||
meta.maintainers = with maintainers; [ stackshadow ]; |
||||
} |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue