Merge commit 'c935f5e0add2cf0ae650d072c8357533e21b0c35'

main
Katharina Fey 2 years ago
commit ea90ec2205
Signed by: kookie
GPG Key ID: 90734A9E619C8A6C
  1. infra/libkookie/nixpkgs/unstable/.github/PULL_REQUEST_TEMPLATE.md (8 lines changed)
  2. infra/libkookie/nixpkgs/unstable/README.md (4 lines changed)
  3. infra/libkookie/nixpkgs/unstable/doc/contributing/coding-conventions.chapter.md (2 lines changed)
  4. infra/libkookie/nixpkgs/unstable/doc/languages-frameworks/crystal.section.md (4 lines changed)
  5. infra/libkookie/nixpkgs/unstable/doc/languages-frameworks/dotnet.section.md (6 lines changed)
  6. infra/libkookie/nixpkgs/unstable/doc/languages-frameworks/python.section.md (4 lines changed)
  7. infra/libkookie/nixpkgs/unstable/doc/languages-frameworks/ruby.section.md (2 lines changed)
  8. infra/libkookie/nixpkgs/unstable/doc/languages-frameworks/rust.section.md (145 lines changed)
  9. infra/libkookie/nixpkgs/unstable/doc/stdenv/cross-compilation.chapter.md (4 lines changed)
  10. infra/libkookie/nixpkgs/unstable/doc/stdenv/stdenv.chapter.md (33 lines changed)
  11. infra/libkookie/nixpkgs/unstable/doc/using/overlays.chapter.md (2 lines changed)
  12. infra/libkookie/nixpkgs/unstable/lib/attrsets.nix (2 lines changed)
  13. infra/libkookie/nixpkgs/unstable/lib/customisation.nix (5 lines changed)
  14. infra/libkookie/nixpkgs/unstable/lib/lists.nix (7 lines changed)
  15. infra/libkookie/nixpkgs/unstable/lib/modules.nix (62 lines changed)
  16. infra/libkookie/nixpkgs/unstable/lib/options.nix (10 lines changed)
  17. infra/libkookie/nixpkgs/unstable/lib/strings.nix (2 lines changed)
  18. infra/libkookie/nixpkgs/unstable/lib/systems/platforms.nix (8 lines changed)
  19. infra/libkookie/nixpkgs/unstable/lib/tests/modules.sh (7 lines changed)
  20. infra/libkookie/nixpkgs/unstable/lib/tests/modules/declare-submodule-via-evalModules.nix (28 lines changed)
  21. infra/libkookie/nixpkgs/unstable/lib/types.nix (56 lines changed)
  22. infra/libkookie/nixpkgs/unstable/maintainers/maintainer-list.nix (205 lines changed)
  23. infra/libkookie/nixpkgs/unstable/maintainers/scripts/find-tarballs.nix (2 lines changed)
  24. infra/libkookie/nixpkgs/unstable/maintainers/scripts/update.py (2 lines changed)
  25. infra/libkookie/nixpkgs/unstable/maintainers/team-list.nix (2 lines changed)
  26. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/administration/cleaning-store.chapter.md (4 lines changed)
  27. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/development/writing-nixos-tests.section.md (14 lines changed)
  28. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/from_md/administration/cleaning-store.chapter.xml (3 lines changed)
  29. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/from_md/development/writing-nixos-tests.section.xml (24 lines changed)
  30. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/from_md/installation/installing.chapter.xml (5 lines changed)
  31. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/from_md/release-notes/rl-2111.section.xml (287 lines changed)
  32. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/installation/installing.chapter.md (5 lines changed)
  33. infra/libkookie/nixpkgs/unstable/nixos/doc/manual/release-notes/rl-2111.section.md (71 lines changed)
  34. infra/libkookie/nixpkgs/unstable/nixos/lib/eval-config.nix (2 lines changed)
  35. infra/libkookie/nixpkgs/unstable/nixos/lib/make-disk-image.nix (88 lines changed)
  36. infra/libkookie/nixpkgs/unstable/nixos/lib/test-driver/test-driver.py (102 lines changed)
  37. infra/libkookie/nixpkgs/unstable/nixos/lib/testing-python.nix (30 lines changed)
  38. infra/libkookie/nixpkgs/unstable/nixos/maintainers/scripts/lxd/lxd-image-inner.nix (102 lines changed)
  39. infra/libkookie/nixpkgs/unstable/nixos/maintainers/scripts/lxd/lxd-image.nix (34 lines changed)
  40. infra/libkookie/nixpkgs/unstable/nixos/maintainers/scripts/lxd/nix.tpl (9 lines changed)
  41. infra/libkookie/nixpkgs/unstable/nixos/modules/config/console.nix (2 lines changed)
  42. infra/libkookie/nixpkgs/unstable/nixos/modules/hardware/flirc.nix (12 lines changed)
  43. infra/libkookie/nixpkgs/unstable/nixos/modules/hardware/gkraken.nix (18 lines changed)
  44. infra/libkookie/nixpkgs/unstable/nixos/modules/hardware/printers.nix (9 lines changed)
  45. infra/libkookie/nixpkgs/unstable/nixos/modules/hardware/video/nvidia.nix (4 lines changed)
  46. infra/libkookie/nixpkgs/unstable/nixos/modules/installer/tools/nixos-enter.sh (29 lines changed)
  47. infra/libkookie/nixpkgs/unstable/nixos/modules/module-list.nix (11 lines changed)
  48. infra/libkookie/nixpkgs/unstable/nixos/modules/programs/file-roller.nix (17 lines changed)
  49. infra/libkookie/nixpkgs/unstable/nixos/modules/programs/neovim.nix (2 lines changed)
  50. infra/libkookie/nixpkgs/unstable/nixos/modules/security/acme.nix (10 lines changed)
  51. infra/libkookie/nixpkgs/unstable/nixos/modules/security/ca.nix (19 lines changed)
  52. infra/libkookie/nixpkgs/unstable/nixos/modules/security/pam.nix (6 lines changed)
  53. infra/libkookie/nixpkgs/unstable/nixos/modules/services/audio/roon-server.nix (2 lines changed)
  54. infra/libkookie/nixpkgs/unstable/nixos/modules/services/backup/borgbackup.nix (43 lines changed)
  55. infra/libkookie/nixpkgs/unstable/nixos/modules/services/backup/restic.nix (18 lines changed)
  56. infra/libkookie/nixpkgs/unstable/nixos/modules/services/cluster/hadoop/conf.nix (35 lines changed)
  57. infra/libkookie/nixpkgs/unstable/nixos/modules/services/cluster/hadoop/default.nix (118 lines changed)
  58. infra/libkookie/nixpkgs/unstable/nixos/modules/services/cluster/hadoop/hdfs.nix (186 lines changed)
  59. infra/libkookie/nixpkgs/unstable/nixos/modules/services/cluster/hadoop/yarn.nix (102 lines changed)
  60. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/daemon/client-rt.conf.json (0 lines changed)
  61. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/daemon/client.conf.json (0 lines changed)
  62. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/daemon/jack.conf.json (0 lines changed)
  63. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/daemon/pipewire-pulse.conf.json (0 lines changed)
  64. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/daemon/pipewire.conf.json (0 lines changed)
  65. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/media-session/alsa-monitor.conf.json (0 lines changed)
  66. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/media-session/bluez-monitor.conf.json (0 lines changed)
  67. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/media-session/media-session.conf.json (0 lines changed)
  68. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/media-session/v4l2-monitor.conf.json (0 lines changed)
  69. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/pipewire-media-session.nix (20 lines changed)
  70. infra/libkookie/nixpkgs/unstable/nixos/modules/services/desktops/pipewire/pipewire.nix (11 lines changed)
  71. infra/libkookie/nixpkgs/unstable/nixos/modules/services/development/blackfire.nix (13 lines changed)
  72. infra/libkookie/nixpkgs/unstable/nixos/modules/services/development/blackfire.xml (3 lines changed)
  73. infra/libkookie/nixpkgs/unstable/nixos/modules/services/finance/odoo.nix (122 lines changed)
  74. infra/libkookie/nixpkgs/unstable/nixos/modules/services/hardware/fancontrol.nix (1 line changed)
  75. infra/libkookie/nixpkgs/unstable/nixos/modules/services/hardware/power-profiles-daemon.nix (2 lines changed)
  76. infra/libkookie/nixpkgs/unstable/nixos/modules/services/hardware/rasdaemon.nix (171 lines changed)
  77. infra/libkookie/nixpkgs/unstable/nixos/modules/services/misc/signald.nix (105 lines changed)
  78. infra/libkookie/nixpkgs/unstable/nixos/modules/services/misc/xmrig.nix (71 lines changed)
  79. infra/libkookie/nixpkgs/unstable/nixos/modules/services/monitoring/grafana.nix (8 lines changed)
  80. infra/libkookie/nixpkgs/unstable/nixos/modules/services/monitoring/prometheus/default.nix (97 lines changed)
  81. infra/libkookie/nixpkgs/unstable/nixos/modules/services/monitoring/unifi-poller.nix (80 lines changed)
  82. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/ddclient.nix (61 lines changed)
  83. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/jibri/default.nix (417 lines changed)
  84. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/jibri/logging.properties-journal (32 lines changed)
  85. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/lxd-image-server.nix (138 lines changed)
  86. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/mosquitto.md (102 lines changed)
  87. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/mosquitto.nix (703 lines changed)
  88. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/mosquitto.xml (147 lines changed)
  89. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/networkmanager.nix (7 lines changed)
  90. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/seafile.nix (290 lines changed)
  91. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/smokeping.nix (48 lines changed)
  92. infra/libkookie/nixpkgs/unstable/nixos/modules/services/networking/unifi.nix (80 lines changed)
  93. infra/libkookie/nixpkgs/unstable/nixos/modules/services/security/step-ca.nix (2 lines changed)
  94. infra/libkookie/nixpkgs/unstable/nixos/modules/services/security/yubikey-agent.nix (8 lines changed)
  95. infra/libkookie/nixpkgs/unstable/nixos/modules/services/torrent/transmission.nix (320 lines changed)
  96. infra/libkookie/nixpkgs/unstable/nixos/modules/services/video/epgstation/default.nix (2 lines changed)
  97. infra/libkookie/nixpkgs/unstable/nixos/modules/services/web-apps/bookstack.nix (2 lines changed)
  98. infra/libkookie/nixpkgs/unstable/nixos/modules/services/web-apps/code-server.nix (139 lines changed)
  99. infra/libkookie/nixpkgs/unstable/nixos/modules/services/web-apps/dokuwiki.nix (2 lines changed)
  100. infra/libkookie/nixpkgs/unstable/nixos/modules/services/web-apps/ihatemoney/default.nix (18 lines changed)

Some files were not shown because too many files have changed in this diff.

@@ -21,9 +21,13 @@ Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-
- [ ] x86_64-darwin
- [ ] aarch64-darwin
- [ ] For non-Linux: Is `sandbox = true` set in `nix.conf`? (See [Nix manual](https://nixos.org/manual/nix/stable/#sec-conf-file))
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested, as applicable:
- [NixOS test(s)](https://nixos.org/manual/nixos/unstable/index.html#sec-nixos-tests) (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- and/or [package tests](https://nixos.org/manual/nixpkgs/unstable/#sec-package-tests)
- or, for functions and "core" functionality, tests in [lib/tests](https://github.com/NixOS/nixpkgs/blob/master/lib/tests) or [pkgs/test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/test)
- made sure NixOS tests are [linked](https://nixos.org/manual/nixpkgs/unstable/#ssec-nixos-tests-linking) to the relevant packages
- [ ] Tested compilation of all packages that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Tested basic functionality of all binary files (usually in `./result/bin/`)
- [21.11 Release Notes (or backporting 21.05 Release notes)](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#generating-2111-release-notes)
- [ ] (Package updates) Added a release notes entry if the change is major or breaking
- [ ] (Module updates) Added a release notes entry if the change is significant

@@ -8,7 +8,7 @@
</p>
[Nixpkgs](https://github.com/nixos/nixpkgs) is a collection of over
60,000 software packages that can be installed with the
80,000 software packages that can be installed with the
[Nix](https://nixos.org/nix/) package manager. It also implements
[NixOS](https://nixos.org/nixos/), a purely-functional Linux distribution.
@@ -53,7 +53,7 @@ system, [Hydra](https://hydra.nixos.org/).
Artifacts successfully built with Hydra are published to cache at
https://cache.nixos.org/. When successful build and test criteria are
met, the Nixpkgs expressions are distributed via [Nix
channels](https://nixos.org/nix/manual/#sec-channels).
channels](https://nixos.org/manual/nix/stable/package-management/channels.html).
# Contributing

@@ -181,7 +181,7 @@
rev = "${version}";
```
- Filling lists condionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
- Building lists conditionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
```nix
buildInputs = lib.optional stdenv.isDarwin iconv;

@@ -4,12 +4,12 @@
This section uses [Mint](https://github.com/mint-lang/mint) as an example for how to build a Crystal package.
If the Crystal project has any dependencies, the first step is to get a `shards.nix` file encoding those. Get a copy of the project and go to its root directory such that its `shard.lock` file is in the current directory, then run `crystal2nix` in it
If the Crystal project has any dependencies, the first step is to get a `shards.nix` file encoding those. Get a copy of the project and go to its root directory such that its `shard.lock` file is in the current directory. Executable projects should usually commit the `shard.lock` file, but sometimes that's not the case, which means you need to generate it yourself. With an existing `shard.lock` file, `crystal2nix` can be run.
```bash
$ git clone https://github.com/mint-lang/mint
$ cd mint
$ git checkout 0.5.0
$ if [ ! -f shard.lock ]; then nix-shell -p shards --run "shards lock"; fi
$ nix-shell -p crystal2nix --run crystal2nix
```

@@ -77,9 +77,13 @@ To package Dotnet applications, you can use `buildDotnetModule`. This has simila
* `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
* `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime or an aspnetcore runtime.
* `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. By default, this is set to the `projectFile` attribute.
* `disabledTests` is used to disable running specific unit tests. This gets passed as: `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all unit test frameworks.
* `dotnetRestoreFlags` can be used to pass flags to `dotnet restore`.
* `dotnetBuildFlags` can be used to pass flags to `dotnet build`.
* `dotnetTestFlags` can be used to pass flags to `dotnet test`.
* `dotnetInstallFlags` can be used to pass flags to `dotnet install`.
* `dotnetFlags` can be used to pass flags to all of the above phases.
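A minimal sketch of how several of these attributes fit together; the package name, repository, project files, and hash below are hypothetical placeholders:

```nix
{ lib, buildDotnetModule, fetchFromGitHub, dotnetCorePackages }:

buildDotnetModule rec {
  pname = "some-dotnet-app";   # hypothetical package
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "example";
    repo = pname;
    rev = "v${version}";
    sha256 = lib.fakeSha256;   # substitute the real hash
  };

  projectFile = "src/SomeDotnetApp.csproj";
  testProjectFile = "tests/SomeDotnetApp.Tests.csproj";
  nugetDeps = ./deps.nix;

  dotnet-sdk = dotnetCorePackages.sdk_5_0;
  dotnet-runtime = dotnetCorePackages.aspnetcore_5_0;  # an aspnetcore runtime, as noted above

  dotnetBuildFlags = [ "--no-self-contained" ];
  disabledTests = [ "SomeNamespace.SomeClass.FlakyTest" ];
}
```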

@@ -765,7 +765,7 @@ and in this case the `python38` interpreter is automatically used.
### Interpreters {#interpreters}
Versions 2.7, 3.6, 3.7, 3.8 and 3.9 of the CPython interpreter are available as
respectively `python27`, `python36`, `python37`, `python38` and `python39`. The
respectively `python27`, `python37`, `python38` and `python39`. The
aliases `python2` and `python3` correspond to respectively `python27` and
`python39`. The attribute `python` maps to `python2`. The PyPy interpreters
compatible with Python 2.7 and 3 are available as `pypy27` and `pypy3`, with
@@ -830,10 +830,10 @@ attribute set is created for each available Python interpreter. The available
sets are
* `pkgs.python27Packages`
* `pkgs.python36Packages`
* `pkgs.python37Packages`
* `pkgs.python38Packages`
* `pkgs.python39Packages`
* `pkgs.python310Packages`
* `pkgs.pypyPackages`
and the aliases

@@ -8,7 +8,7 @@ In the Nixpkgs tree, Ruby packages can be found throughout, depending on what th
There are two main approaches for using Ruby with gems. One is to use a specifically locked `Gemfile` for an application that has very strict dependencies. The other is to depend on the common gems, which we'll explain further down, and rely on them being updated regularly.
The interpreters have common attributes, namely `gems`, and `withPackages`. So you can refer to `ruby.gems.nokogiri`, or `ruby_2_6.gems.nokogiri` to get the Nokogiri gem already compiled and ready to use.
The interpreters have common attributes, namely `gems`, and `withPackages`. So you can refer to `ruby.gems.nokogiri`, or `ruby_2_7.gems.nokogiri` to get the Nokogiri gem already compiled and ready to use.
Since not all gems have executables like `nokogiri`, it's usually more convenient to use the `withPackages` function like this: `ruby.withPackages (p: with p; [ nokogiri ])`. This will also make sure that the Ruby in your environment will be able to find the gem and it can be used in your Ruby code (for example via `ruby` or `irb` executables) via `require "nokogiri"` as usual.
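As a small sketch of that pattern, a development shell pulling in Ruby with Nokogiri could look like this (any gem referenced this way must exist in the corresponding `rubyPackages` set):

```nix
with import <nixpkgs> {};

mkShell {
  # ruby.withPackages builds a Ruby able to `require` the listed gems
  buildInputs = [ (ruby.withPackages (ps: with ps; [ nokogiri ])) ];
}
```

Running `nix-shell` on this file then gives a shell in which `ruby -e 'require "nokogiri"'` succeeds.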

@@ -13,7 +13,7 @@ into your `configuration.nix` or bring them into scope with `nix-shell -p rustc
For other versions such as daily builds (beta and nightly),
use either `rustup` from nixpkgs (which will manage the rust installation in your home directory),
or use Mozilla's [Rust nightlies overlay](#using-the-rust-nightlies-overlay).
or use a community maintained [Rust overlay](#using-community-rust-overlays).
## Compiling Rust applications with Cargo {#compiling-rust-applications-with-cargo}
@@ -411,7 +411,7 @@ you of the correct hash.
`rustPlatform` provides the following hooks to automate Cargo builds:
* `cargoSetupHook`: configure Cargo to use depenencies vendored
* `cargoSetupHook`: configure Cargo to use dependencies vendored
through `fetchCargoTarball`. This hook uses the `cargoDeps`
environment variable to find the vendored dependencies. If a project
already vendors its dependencies, the variable `cargoVendorDir` can
@@ -426,7 +426,7 @@ you of the correct hash.
* `maturinBuildHook`: use [Maturin](https://github.com/PyO3/maturin)
to build a Python wheel. Similar to `cargoBuildHook`, the optional
variable `buildAndTestSubdir` can be used to build a crate in a
Cargo workspace. Additional maturin flags can be passed through
Cargo workspace. Additional Maturin flags can be passed through
`maturinBuildFlags`.
* `cargoCheckHook`: run tests using Cargo. The build type for checks
can be set using `cargoCheckType`. Additional flags can be passed to
@@ -447,7 +447,7 @@ dependencies. The build itself is then performed by
The following example outlines how the `tokenizers` Python package is
built. Since the Python package is in the `source/bindings/python`
directory of the *tokenizers* project's source archive, we use
directory of the `tokenizers` project's source archive, we use
`sourceRoot` to point the tooling to this directory:
```nix
@@ -672,7 +672,7 @@ Some crates require external libraries. For crates from
`defaultCrateOverrides` package in nixpkgs itself.
Starting from that file, one can add more overrides, to add features
or build inputs by overriding the hello crate in a seperate file.
or build inputs by overriding the hello crate in a separate file.
```nix
with import <nixpkgs> {};
@@ -729,7 +729,7 @@ with import <nixpkgs> {};
Actually, the overrides introduced in the previous section are more
general. A number of other parameters can be overridden:
- The version of rustc used to compile the crate:
- The version of `rustc` used to compile the crate:
```nix
(hello {}).override { rust = pkgs.rust; };
@@ -742,7 +742,7 @@ general. A number of other parameters can be overridden:
(hello {}).override { release = false; };
```
- Whether to print the commands sent to rustc when building
- Whether to print the commands sent to `rustc` when building
(equivalent to `--verbose` in cargo):
```nix
@@ -871,76 +871,107 @@ rustc 1.26.0-nightly (188e693b3 2018-03-26)
To see that you are using nightly.
## Using the Rust nightlies overlay {#using-the-rust-nightlies-overlay}
## Using community Rust overlays {#using-community-rust-overlays}
Mozilla provides an overlay for nixpkgs to bring a nightly version of Rust into scope.
This overlay can _also_ be used to install recent unstable or stable versions
of Rust, if desired.
There are two community maintained approaches to Rust toolchain management:
- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
- [fenix](https://github.com/nix-community/fenix)
### Rust overlay installation {#rust-overlay-installation}
You can use this overlay by either changing your local nixpkgs configuration,
or by adding the overlay declaratively in a nix expression, e.g. in `configuration.nix`.
For more information see [the manual on installing overlays](#sec-overlays-install).
#### Imperative rust overlay installation {#imperative-rust-overlay-installation}
Oxalica's overlay allows you to select a particular Rust version and components.
See [their documentation](https://github.com/oxalica/rust-overlay#rust-overlay) for more
detailed usage.
Clone [nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla),
and create a symbolic link to the file
[rust-overlay.nix](https://github.com/mozilla/nixpkgs-mozilla/blob/master/rust-overlay.nix)
in the `~/.config/nixpkgs/overlays` directory.
Fenix is an alternative to `rustup` and can also be used as an overlay.
```ShellSession
$ git clone https://github.com/mozilla/nixpkgs-mozilla.git
$ mkdir -p ~/.config/nixpkgs/overlays
$ ln -s $(pwd)/nixpkgs-mozilla/rust-overlay.nix ~/.config/nixpkgs/overlays/rust-overlay.nix
```
Both oxalica's overlay and fenix integrate better with Nix and its binary-cache optimizations.
Because of this and their ergonomics, either of those community projects
should be preferred over Mozilla's Rust overlay (`nixpkgs-mozilla`).
### Declarative rust overlay installation {#declarative-rust-overlay-installation}
Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
### How to select a specific `rustc` and toolchain version {#how-to-select-a-specific-rustc-and-toolchain-version}
You can consume the oxalica overlay and use it to grab a specific Rust toolchain version.
Here is an example `shell.nix` showing how to grab the current stable toolchain:
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz))
# Further overlays go here
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
};
};
}
}:
pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
rust-bin.stable.latest.minimal
];
}
```
Note that this will fetch the latest overlay version when rebuilding your system.
You can try this out by:
1. Saving that to `shell.nix`
2. Executing `nix-shell --pure --command 'rustc --version'`
### Rust overlay usage {#rust-overlay-usage}
As of writing, this prints out `rustc 1.56.0 (09c42c458 2021-10-18)`.
The overlay contains attribute sets corresponding to different versions of the rust toolchain, such as:
### How to use an overlay toolchain in a derivation {#how-to-use-an-overlay-toolchain-in-a-derivation}
* `latest.rustChannels.stable`
* `latest.rustChannels.nightly`
* a function `rustChannelOf`, called as `(rustChannelOf { date = "2018-04-11"; channel = "nightly"; })`, or...
* `(nixpkgs.rustChannelOf { rustToolchain = ./rust-toolchain; })` if you have a local `rust-toolchain` file (see https://github.com/mozilla/nixpkgs-mozilla#using-in-nix-expressions for an example)
You can also use an overlay's Rust toolchain with `buildRustPackage`.
The below snippet demonstrates invoking `buildRustPackage` with an oxalica overlay selected Rust toolchain:
```nix
with import <nixpkgs> {
overlays = [
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
};
Each of these contain packages such as `rust`, which contains your usual rust development tools with the respective toolchain chosen.
For example, you might want to add `latest.rustChannels.stable.rust` to the list of packages in your configuration.
rustPlatform.buildRustPackage rec {
pname = "ripgrep";
version = "12.1.1";
nativeBuildInputs = [
rust-bin.stable.latest.minimal
];
src = fetchFromGitHub {
owner = "BurntSushi";
repo = "ripgrep";
rev = version;
sha256 = "1hqps7l5qrjh9f914r5i6kmcz6f1yb951nv4lby0cjnp5l253kps";
};
Imperatively, the latest stable version can be installed with the following command:
cargoSha256 = "03wf9r2csi6jpa7v5sw5lpxkrk4wfzwmzx7k3991q3bdjzcwnnwp";
```ShellSession
$ nix-env -Ai nixpkgs.latest.rustChannels.stable.rust
meta = with lib; {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
};
}
```
Or using the attribute with nix-shell:
Follow the steps below to try that snippet.
1. Create a new directory
1. Save the above snippet as `default.nix` in that directory
1. Change into that directory and run `nix-build`
```ShellSession
$ nix-shell -p nixpkgs.latest.rustChannels.stable.rust
```
### Rust overlay installation {#rust-overlay-installation}
You can use this overlay by either changing your local nixpkgs configuration,
or by adding the overlay declaratively in a nix expression, e.g. in `configuration.nix`.
For more information see [the manual on installing overlays](#sec-overlays-install).
### Declarative Rust overlay installation {#declarative-rust-overlay-installation}
Substitute the `nixpkgs` prefix with `nixos` on NixOS.
To install the beta or nightly channel, "stable" should be substituted by
"nightly" or "beta", or
use the function provided by this overlay to pull a version based on a
build date.
This snippet shows how to use oxalica's Rust overlay.
Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
The overlay automatically updates itself as it uses the same source as
[rustup](https://www.rustup.rs/).
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (builtins.fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
# Further overlays go here
];
};
};
```
Note that this will fetch the latest overlay version when rebuilding your system.

@@ -158,9 +158,9 @@ One would think that `localSystem` and `crossSystem` overlap horribly with the t
### Implementation of dependencies {#ssec-cross-dependency-implementation}
The categories of dependencies developed in [](#ssec-cross-dependency-categorization) are specified as lists of derivations given to `mkDerivation`, as documented in [](#ssec-stdenv-dependencies). In short, each list of dependencies for "host → target" of "foo → bar" is called `depsFooBar`, with exceptions for backwards compatibility that `depsBuildHost` is instead called `nativeBuildInputs` and `depsHostTarget` is instead called `buildInputs`. Nixpkgs is now structured so that each `depsFooBar` is automatically taken from `pkgsFooBar`. (These `pkgsFooBar`s are quite new, so there is no special case for `nativeBuildInputs` and `buildInputs`.) For example, `pkgsBuildHost.gcc` should be used at build-time, while `pkgsHostTarget.gcc` should be used at run-time.
The categories of dependencies developed in [](#ssec-cross-dependency-categorization) are specified as lists of derivations given to `mkDerivation`, as documented in [](#ssec-stdenv-dependencies). In short, each list of dependencies for "host → target" is called `deps<host><target>` (where the `host` and `target` values are either `build`, `host`, or `target`), with exceptions for backwards compatibility that `depsBuildHost` is instead called `nativeBuildInputs` and `depsHostTarget` is instead called `buildInputs`. Nixpkgs is now structured so that each `deps<host><target>` is automatically taken from `pkgs<host><target>`. (These `pkgs<host><target>`s are quite new, so there is no special case for `nativeBuildInputs` and `buildInputs`.) For example, `pkgsBuildHost.gcc` should be used at build-time, while `pkgsHostTarget.gcc` should be used at run-time.
Now, for most of Nixpkgs's history, there were no `pkgsFooBar` attributes, and most packages have not been refactored to use it explicitly. Prior to those, there were just `buildPackages`, `pkgs`, and `targetPackages`. Those are now redefined as aliases to `pkgsBuildHost`, `pkgsHostTarget`, and `pkgsTargetTarget`. It is acceptable, even recommended, to use them for libraries to show that the host platform is irrelevant.
Now, for most of Nixpkgs's history, there were no `pkgs<host><target>` attributes, and most packages have not been refactored to use it explicitly. Prior to those, there were just `buildPackages`, `pkgs`, and `targetPackages`. Those are now redefined as aliases to `pkgsBuildHost`, `pkgsHostTarget`, and `pkgsTargetTarget`. It is acceptable, even recommended, to use them for libraries to show that the host platform is irrelevant.
But before that, there was just `pkgs`, even though both `buildInputs` and `nativeBuildInputs` existed. \[Cross barely worked, and those were implemented with some hacks on `mkDerivation` to override dependencies.\] What this means is the vast majority of packages do not use any explicit package set to populate their dependencies, just using whatever `callPackage` gives them even if they do correctly sort their dependencies into the multiple lists described above. And indeed, asking that users both sort their dependencies, _and_ take them from the right attribute set, is both too onerous and redundant, so the recommended approach (for now) is to continue just categorizing by list and not using an explicit package set.

@@ -116,15 +116,27 @@ On Linux, `stdenv` also includes the `patchelf` utility.
## Specifying dependencies {#ssec-stdenv-dependencies}
As described in the Nix manual, almost any `*.drv` store path in a derivation’s attribute set will induce a dependency on that derivation. `mkDerivation`, however, takes a few attributes intended to, between them, include all the dependencies of a package. This is done both for structure and consistency, but also so that certain other setup can take place. For example, certain dependencies need their bin directories added to the `PATH`. That is built-in, but other setup is done via a pluggable mechanism that works in conjunction with these dependency attributes. See [](#ssec-setup-hooks) for details.
As described in the Nix manual, almost any `*.drv` store path in a derivation’s attribute set will induce a dependency on that derivation. `mkDerivation`, however, takes a few attributes intended to include all the dependencies of a package. This is done both for structure and consistency, but also so that certain other setup can take place. For example, certain dependencies need their bin directories added to the `PATH`. That is built-in, but other setup is done via a pluggable mechanism that works in conjunction with these dependency attributes. See [](#ssec-setup-hooks) for details.
Dependencies can be broken down along three axes: their host and target platforms relative to the new derivation’s, and whether they are propagated. The platform distinctions are motivated by cross compilation; see [](#chap-cross) for exactly what each platform means. [^footnote-stdenv-ignored-build-platform] But even if one is not cross compiling, the platforms imply whether or not the dependency is needed at run-time or build-time, a concept that makes perfect sense outside of cross compilation. By default, the run-time/build-time distinction is just a hint for mental clarity, but with `strictDeps` set it is mostly enforced even in the native case.
The extension of `PATH` with dependencies, alluded to above, proceeds according to the relative platforms alone. The process is carried out only for dependencies whose host platform matches the new derivation’s build platform i.e. dependencies which run on the platform where the new derivation will be built. [^footnote-stdenv-native-dependencies-in-path] For each dependency \<dep\> of those dependencies, `dep/bin`, if present, is added to the `PATH` environment variable.
The dependency is propagated when it forces some of its other-transitive (non-immediate) downstream dependencies to also take it on as an immediate dependency. Nix itself already takes a package’s transitive dependencies into account, but this propagation ensures nixpkgs-specific infrastructure like setup hooks (mentioned above) also are run as if the propagated dependency.
A dependency is said to be **propagated** when some of its other-transitive (non-immediate) downstream dependencies also need it as an immediate dependency.
[^footnote-stdenv-propagated-dependencies]
It is important to note that dependencies are not necessarily propagated as the same sort of dependency that they were before, but rather as the corresponding sort so that the platform rules still line up. The exact rules for dependency propagation can be given by assigning to each dependency two integers based one how its host and target platforms are offset from the depending derivation’s platforms. Those offsets are given below in the descriptions of each dependency list attribute. Algorithmically, we traverse propagated inputs, accumulating every propagated dependency’s propagated dependencies and adjusting them to account for the “shift in perspective” described by the current dependency’s platform offsets. This results in sort a transitive closure of the dependency relation, with the offsets being approximately summed when two dependency links are combined. We also prune transitive dependencies whose combined offsets go out-of-bounds, which can be viewed as a filter over that transitive closure removing dependencies that are blatantly absurd.
It is important to note that dependencies are not necessarily propagated as the same sort of dependency that they were before, but rather as the corresponding sort so that the platform rules still line up. To determine the exact rules for dependency propagation, we start by assigning to each dependency a pair of ternary numbers (`-1` for `build`, `0` for `host`, and `1` for `target`), representing how its host and target platforms, respectively, are "offset" from the depending derivation’s platforms. The following table summarizes the different combinations that can be obtained:
| `host → target` | attribute name | offset |
| ------------------- | ------------------- | -------- |
| `build → build` | `depsBuildBuild` | `-1, -1` |
| `build → host` | `nativeBuildInputs` | `-1, 0` |
| `build → target` | `depsBuildTarget` | `-1, 1` |
| `host → host` | `depsHostHost` | `0, 0` |
| `host → target` | `buildInputs` | `0, 1` |
| `target → target` | `depsTargetTarget` | `1, 1` |
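To make the table concrete, here is a sketch of a derivation using three of these attributes; the package itself is hypothetical and builds nothing:

```nix
{ stdenv, buildPackages, cmake, openssl }:

stdenv.mkDerivation {
  pname = "offsets-demo";   # hypothetical, for illustration only
  version = "0.0.1";

  # build → build (-1, -1): a tool that produces other build-time tools
  depsBuildBuild = [ buildPackages.stdenv.cc ];
  # build → host (-1, 0): ordinary build tools
  nativeBuildInputs = [ cmake ];
  # host → target (0, 1): ordinary libraries
  buildInputs = [ openssl ];

  dontUnpack = true;
  installPhase = "touch $out";
}
```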
Algorithmically, we traverse propagated inputs, accumulating every propagated dependency’s propagated dependencies and adjusting them to account for the “shift in perspective” described by the current dependency’s platform offsets. This results in a sort of transitive closure of the dependency relation, with the offsets being approximately summed when two dependency links are combined. For example, if `P` lists `D` in `nativeBuildInputs` (offsets `-1, 0`) and `D` propagates a dependency `E` from its own `buildInputs` (offsets `0, 1` relative to `D`), the offsets sum to `-1, 0`, so `E` behaves as one of `P`’s `nativeBuildInputs`. We also prune transitive dependencies whose combined offsets go out-of-bounds, which can be viewed as a filter over that transitive closure removing dependencies that are blatantly absurd.
We can define the process precisely with [Natural Deduction](https://en.wikipedia.org/wiki/Natural_deduction) using the inference rules. This probably seems a bit obtuse, but so is the bash code that actually implements it! [^footnote-stdenv-find-inputs-location] They’re confusing in very different ways so… hopefully if something doesn’t make sense in one presentation, it will in the other!
@@ -179,37 +191,37 @@ Overall, the unifying theme here is that propagation shouldn’t be introducing
#### `depsBuildBuild` {#var-stdenv-depsBuildBuild}
A list of dependencies whose host and target platforms are the new derivation’s build platform. This means a `-1` host and `-1` target offset from the new derivation’s platforms. These are programs and libraries used at build time that produce programs and libraries also used at build time. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it in `nativeBuildInputs` instead. The most common use of this `buildPackages.stdenv.cc`, the default C compiler for this role. That example crops up more than one might think in old commonly used C libraries.
A list of dependencies whose host and target platforms are the new derivation’s build platform. These are programs and libraries used at build time that produce programs and libraries also used at build time. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it in `nativeBuildInputs` instead. The most common use of this is `buildPackages.stdenv.cc`, the default C compiler for this role. That example crops up more than one might think in old commonly used C libraries.
Since these packages are able to be run at build-time, they are always added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
#### `nativeBuildInputs` {#var-stdenv-nativeBuildInputs}
A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s host platform. This means a `-1` host offset and `0` target offset from the new derivation’s platforms. These are programs and libraries used at build-time that, if they are a compiler or similar tool, produce code to run at run-time—i.e. tools used to build the new derivation. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild` or `depsBuildTarget`. This could be called `depsBuildHost` but `nativeBuildInputs` is used for historical continuity.
A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s host platform. These are programs and libraries used at build-time that, if they are a compiler or similar tool, produce code to run at run-time—i.e. tools used to build the new derivation. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild` or `depsBuildTarget`. This could be called `depsBuildHost` but `nativeBuildInputs` is used for historical continuity.
Since these packages are able to be run at build-time, they are added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
#### `depsBuildTarget` {#var-stdenv-depsBuildTarget}
A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s target platform. This means a `-1` host offset and `1` target offset from the new derivation’s platforms. These are programs used at build time that produce code to run with code produced by the depending package. Most commonly, these are tools used to build the runtime or standard library that the currently-being-built compiler will inject into any code it compiles. In many cases, the currently-being-built-compiler is itself employed for that task, but when that compiler won’t run (i.e. its build and host platform differ) this is not possible. Other times, the compiler relies on some other tool, like binutils, that is always built separately so that the dependency is unconditional.
A list of dependencies whose host platform is the new derivation’s build platform, and target platform is the new derivation’s target platform. These are programs used at build time that produce code to run with code produced by the depending package. Most commonly, these are tools used to build the runtime or standard library that the currently-being-built compiler will inject into any code it compiles. In many cases, the currently-being-built-compiler is itself employed for that task, but when that compiler won’t run (i.e. its build and host platform differ) this is not possible. Other times, the compiler relies on some other tool, like binutils, that is always built separately so that the dependency is unconditional.
This is a somewhat confusing concept to wrap one’s head around, and for good reason. As the only dependency type where the platform offsets are not adjacent integers, it requires thinking of a bootstrapping stage *two* away from the current one. It and its use-case go hand in hand and are both considered poor form: try to not need this sort of dependency, and try to avoid building standard libraries and runtimes in the same derivation as the compiler produces code using them. Instead strive to build those like a normal library, using the newly-built compiler just as a normal library would. In short, do not use this attribute unless you are packaging a compiler and are sure it is needed.
This is a somewhat confusing concept to wrap one’s head around, and for good reason. As the only dependency type where the platform offsets, `-1` and `1`, are not adjacent integers, it requires thinking of a bootstrapping stage *two* away from the current one. It and its use-case go hand in hand and are both considered poor form: try to not need this sort of dependency, and try to avoid building standard libraries and runtimes in the same derivation as the compiler produces code using them. Instead strive to build those like a normal library, using the newly-built compiler just as a normal library would. In short, do not use this attribute unless you are packaging a compiler and are sure it is needed.
Since these packages are able to run at build time, they are added to the `PATH`, as described above. But since these packages are only guaranteed to be able to run then, they shouldn’t persist as run-time dependencies. This isn’t currently enforced, but could be in the future.
#### `depsHostHost` {#var-stdenv-depsHostHost}
A list of dependencies whose host and target platforms match the new derivation’s host platform. This means a `0` host offset and `0` target offset from the new derivation’s host platform. These are packages used at run-time to generate code also used at run-time. In practice, this would usually be tools used by compilers for macros or a metaprogramming system, or libraries used by the macros or metaprogramming code itself. It’s always preferable to use a `depsBuildBuild` dependency in the derivation being built over a `depsHostHost` on the tool doing the building for this purpose.
A list of dependencies whose host and target platforms match the new derivation’s host platform. In practice, this would usually be tools used by compilers for macros or a metaprogramming system, or libraries used by the macros or metaprogramming code itself. It’s always preferable to use a `depsBuildBuild` dependency in the derivation being built over a `depsHostHost` on the tool doing the building for this purpose.
#### `buildInputs` {#var-stdenv-buildInputs}
A list of dependencies whose host platform and target platform match the new derivation’s. This means a `0` host offset and a `1` target offset from the new derivation’s host platform. This would be called `depsHostTarget` but for historical continuity. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild`.
A list of dependencies whose host platform and target platform match the new derivation’s. This would be called `depsHostTarget` but for historical continuity. If the dependency doesn’t care about the target platform (i.e. isn’t a compiler or similar tool), put it here, rather than in `depsBuildBuild`.
These are often programs and libraries used by the new derivation at *run*-time, but that isn’t always the case. For example, the machine code in a statically-linked library is only used at run-time, but the derivation containing the library is only needed at build-time. Even in the dynamic case, the library may also be needed at build-time to appease the linker.
#### `depsTargetTarget` {#var-stdenv-depsTargetTarget}
A list of dependencies whose host platform matches the new derivation’s target platform. This means a `1` offset from the new derivation’s platforms. These are packages that run on the target platform, e.g. the standard library or run-time deps of standard library that a compiler insists on knowing about. It’s poor form in almost all cases for a package to depend on another from a future stage \[future stage corresponding to positive offset\]. Do not use this attribute unless you are packaging a compiler and are sure it is needed.
A list of dependencies whose host platform matches the new derivation’s target platform. These are packages that run on the target platform, e.g. the standard library or run-time deps of standard library that a compiler insists on knowing about. It’s poor form in almost all cases for a package to depend on another from a future stage \[future stage corresponding to positive offset\]. Do not use this attribute unless you are packaging a compiler and are sure it is needed.
#### `depsBuildBuildPropagated` {#var-stdenv-depsBuildBuildPropagated}
@@ -1228,6 +1240,7 @@ If the libraries lack `-fPIE`, you will get the error `recompile with -fPIE`.
[^footnote-stdenv-ignored-build-platform]: The build platform is ignored because it is a mere implementation detail of the package satisfying the dependency: As a general programming principle, dependencies are always *specified* as interfaces, not concrete implementation.
[^footnote-stdenv-native-dependencies-in-path]: Currently, this means for native builds all dependencies are put on the `PATH`. But in the future that may not be the case for sake of matching cross: the platforms would be assumed to be unique for native and cross builds alike, so only the `depsBuild*` and `nativeBuildInputs` would be added to the `PATH`.
[^footnote-stdenv-propagated-dependencies]: Nix itself already takes a package’s transitive dependencies into account, but this propagation ensures that nixpkgs-specific infrastructure like setup hooks (mentioned above) is also run as if the propagated dependency were an immediate one.
[^footnote-stdenv-find-inputs-location]: The `findInputs` function, currently residing in `pkgs/stdenv/generic/setup.sh`, implements the propagation logic.
[^footnote-stdenv-sys-lib-search-path]: It clears the `sys_lib_*search_path` variables in the Libtool script to prevent Libtool from using libraries in `/usr/lib` and such.
[^footnote-stdenv-build-time-guessing-impurity]: Eventually these will be passed building natively as well, to improve determinism: build-time guessing, as is done today, is a risk of impurity.

@@ -112,7 +112,7 @@ self: super:
This overlay uses Intel's MKL library for both BLAS and LAPACK interfaces. Note that the same can be accomplished at runtime using `LD_LIBRARY_PATH` of `libblas.so.3` and `liblapack.so.3`. For instance:
```ShellSession
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib:$LD_LIBRARY_PATH nix-shell -p octave --run octave
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH nix-shell -p octave --run octave
```
Intel MKL requires an `openmp` implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra is not building and distributing pre-compiled binaries using it.

@@ -487,7 +487,7 @@ rec {
=> "/nix/store/9rz8gxhzf8sw4kf2j2f1grr49w8zx5vj-openssl-1.0.1r-dev"
*/
getOutput = output: pkg:
if pkg.outputUnspecified or false
if ! pkg ? outputSpecified || ! pkg.outputSpecified
then pkg.${output} or pkg.out or pkg
else pkg;

@@ -145,14 +145,14 @@ rec {
let
outputs = drv.outputs or [ "out" ];
commonAttrs = (removeAttrs drv [ "outputUnspecified" ]) //
(builtins.listToAttrs outputsList) //
commonAttrs = drv // (builtins.listToAttrs outputsList) //
({ all = map (x: x.value) outputsList; }) // passthru;
outputToAttrListElement = outputName:
{ name = outputName;
value = commonAttrs // {
inherit (drv.${outputName}) type outputName;
outputSpecified = true;
drvPath = assert condition; drv.${outputName}.drvPath;
outPath = assert condition; drv.${outputName}.outPath;
};
@@ -160,7 +160,6 @@ rec {
outputsList = map outputToAttrListElement outputs;
in commonAttrs // {
outputUnspecified = true;
drvPath = assert condition; drv.drvPath;
outPath = assert condition; drv.outPath;
};

@@ -642,7 +642,7 @@ rec {
unique [ 3 2 3 4 ]
=> [ 3 2 4 ]
*/
unique = foldl' (acc: e: if elem e acc then acc else acc ++ [ e ]) [];
/* Intersects list 'e' and another list. O(nm) complexity.
@@ -663,9 +663,6 @@ rec {
/* Test if two lists have no common element.
It should be slightly more efficient than (intersectLists a b == [])
*/
mutuallyExclusive = a: b:
(builtins.length a) == 0 ||
(!(builtins.elem (builtins.head a) b) &&
mutuallyExclusive (builtins.tail a) b);
mutuallyExclusive = a: b: length a == 0 || !(any (x: elem x a) b);
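  /* Usage sketch:
       mutuallyExclusive [ 1 2 ] [ 3 4 ]  => true
       mutuallyExclusive [ 1 2 ] [ 2 3 ]  => false
     The rewritten definition is equivalent to the recursive one but delegates
     the scan to `any`/`elem`.
  */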
}

@@ -52,15 +52,39 @@ in
rec {
/* Evaluate a set of modules. The result is a set of two
attributes: options: the nested set of all option declarations,
and config: the nested set of all option values.
/*
Evaluate a set of modules. The result is a set with the attributes:
options: The nested set of all option declarations,
config: The nested set of all option values.
type: A module system type representing the module set as a submodule,
to be extended by configuration from the containing module set.
extendModules: A function similar to evalModules but building on top
of the module set. Its arguments, modules and specialArgs are
added to the existing values.
Using extendModules a few times has no performance impact as long
as you only reference the final options and config.
If you do reference multiple config (or options) from before and
after extendModules, performance is the same as with multiple
evalModules invocations, because the new modules' ability to
override existing configuration fundamentally requires a new
fixpoint to be constructed.
_module: A portion of the configuration tree which is elided from
config. It contains some values that are mostly internal to the
module system implementation.
!!! Please think twice before adding to this argument list! The more
that is specified here instead of in the modules themselves the harder
it is to transparently move a set of modules to be a submodule of another
config (as the proper arguments need to be replicated at each call to
evalModules) and the less declarative the module set is. */
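/* Usage sketch (the option and values are hypothetical, not part of this file):

     let
       eval = lib.evalModules {
         modules = [
           { options.foo = lib.mkOption { type = lib.types.int; default = 1; }; }
         ];
       };
       extended = eval.extendModules {
         modules = [ { foo = 2; } ];
       };
     in extended.config.foo   # => 2
*/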
evalModules = { modules
evalModules = evalModulesArgs@
{ modules
, prefix ? []
, # This should only be used for special arguments that need to be evaluated
# when resolving module structure (like in imports). For everything else,
@@ -120,7 +144,9 @@ rec {
};
config = {
_module.args = args;
_module.args = {
inherit extendModules;
} // args;
};
};
@@ -183,10 +209,28 @@ rec {
else throw baseMsg
else null;
result = builtins.seq checkUnmatched {
inherit options;
config = removeAttrs config [ "_module" ];
inherit (config) _module;
checked = builtins.seq checkUnmatched;
extendModules = extendArgs@{
modules ? [],
specialArgs ? {},
prefix ? [],
}:
evalModules (evalModulesArgs // {
modules = evalModulesArgs.modules ++ modules;
specialArgs = evalModulesArgs.specialArgs or {} // specialArgs;
prefix = extendArgs.prefix or evalModulesArgs.prefix;
});
type = lib.types.submoduleWith {
inherit modules specialArgs;
};
result = {
options = checked options;
config = checked (removeAttrs config [ "_module" ]);
_module = checked (config._module);
inherit extendModules type;
};
in result;

@@ -74,7 +74,7 @@ rec {
apply ? null,
# Whether the option is for NixOS developers only.
internal ? null,
# Whether the option shows up in the manual.
# Whether the option shows up in the manual. Default: true. Use false to hide the option and any sub-options from submodules. Use "shallow" to hide only sub-options.
visible ? null,
# Whether the option can be set only once
readOnly ? null,
@@ -180,7 +180,10 @@ rec {
description = opt.description or (lib.warn "Option `${name}' has no description." "This option has no description.");
declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false;
visible = opt.visible or true;
visible =
if (opt?visible && opt.visible == "shallow")
then true
else opt.visible or true;
readOnly = opt.readOnly or false;
type = opt.type.description or null;
}
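/* Usage sketch (hypothetical option): with `visible = "shallow"`, the option
   itself is documented but its sub-options are omitted from the manual:

     settings = lib.mkOption {
       type = lib.types.submodule {
         options.port = lib.mkOption { type = lib.types.port; default = 8080; };
       };
       default = { };
       visible = "shallow";
     };
*/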
@@ -192,8 +195,9 @@ rec {
subOptions =
let ss = opt.type.getSubOptions opt.loc;
in if ss != {} then optionAttrSetToDocList' opt.loc ss else [];
subOptionsVisible = docOption.visible && opt.visible or null != "shallow";
in
[ docOption ] ++ optionals docOption.visible subOptions) (collect isOption options);
[ docOption ] ++ optionals subOptionsVisible subOptions) (collect isOption options);
/* This function recursively removes all derivation attributes from

@@ -369,7 +369,7 @@ rec {
Example:
escapeXML ''"test" 'test' < & >''
=> "\\[\\^a-z]\\*"
=> "&quot;test&quot; &apos;test&apos; &lt; &amp; &gt;"
*/
escapeXML = builtins.replaceStrings
["\"" "'" "<" ">" "&"]

@@ -20,15 +20,17 @@ rec {
name = "PowerNV";
baseConfig = "powernv_defconfig";
target = "zImage";
installTarget = "install";
file = "vmlinux";
target = "vmlinux";
autoModules = true;
# avoid driver/FS trouble arising from unusual page size
extraConfig = ''
PPC_64K_PAGES n
PPC_4K_PAGES y
IPV6 y
ATA_BMDMA y
ATA_SFF y
VIRTIO_MENU y
'';
};
};

@@ -179,6 +179,13 @@ checkConfigOutput "true" config.submodule.outer ./declare-submoduleWith-modules.
# which evaluates all the modules defined by the type)
checkConfigOutput "submodule" options.submodule.type.description ./declare-submoduleWith-modules.nix
## submodules can be declared using (evalModules {...}).type
checkConfigOutput "true" config.submodule.inner ./declare-submodule-via-evalModules.nix
checkConfigOutput "true" config.submodule.outer ./declare-submodule-via-evalModules.nix
# Should also be able to evaluate the type name (which evaluates freeformType,
# which evaluates all the modules defined by the type)
checkConfigOutput "submodule" options.submodule.type.description ./declare-submodule-via-evalModules.nix
## Paths should be allowed as values and work as expected
checkConfigOutput "true" config.submodule.enable ./declare-submoduleWith-path.nix

@@ -0,0 +1,28 @@
{ lib, ... }: {
options.submodule = lib.mkOption {
inherit (lib.evalModules {
modules = [
{
options.inner = lib.mkOption {
type = lib.types.bool;
default = false;
};
}
];
}) type;
default = {};
};
config.submodule = lib.mkMerge [
({ lib, ... }: {
options.outer = lib.mkOption {
type = lib.types.bool;
default = false;
};
})
{
inner = true;
outer = true;
}
];
}

@@ -505,17 +505,36 @@ rec {
then setFunctionArgs (args: unify (value args)) (functionArgs value)
else unify (if shorthandOnlyDefinesConfig then { config = value; } else value);
allModules = defs: modules ++ imap1 (n: { value, file }:
allModules = defs: imap1 (n: { value, file }:
if isAttrs value || isFunction value then
# Annotate the value with the location of its definition for better error messages
coerce (lib.modules.unifyModuleSyntax file "${toString file}-${toString n}") value
else value
) defs;
freeformType = (evalModules {
inherit modules specialArgs;
args.name = "name";
})._module.freeformType;
base = evalModules {
inherit specialArgs;
modules = [{
# This is a work-around for the fact that some sub-modules,
# such as the one included in an attribute set, expect an "args"
# attribute to be given to the sub-module. As the option
# evaluation does not have any specific attribute name yet, we
# provide a default for the documentation and the freeform type.
#
# This is necessary as some option declaration might use the
# "name" attribute given as argument of the submodule and use it
# as the default of option declarations.
#
# We use lookalike unicode single angle quotation marks because
# of the docbook transformation the options receive. In all uses
# &gt; and &lt; wouldn't be encoded correctly so the encoded values
# would be used, and use of `<` and `>` would break the XML document.
# It shouldn't cause an issue since this is cosmetic for the manual.
_module.args.name = lib.mkOptionDefault "name";
}] ++ modules;
};
freeformType = base._module.freeformType;
in
mkOptionType rec {
@@ -523,32 +542,13 @@ rec {
description = freeformType.description or name;
check = x: isAttrs x || isFunction x || path.check x;
merge = loc: defs:
(evalModules {
modules = allModules defs;
inherit specialArgs;
args.name = last loc;
(base.extendModules {
modules = [ { _module.args.name = last loc; } ] ++ allModules defs;
prefix = loc;
}).config;
emptyValue = { value = {}; };
getSubOptions = prefix: (evalModules
{ inherit modules prefix specialArgs;
# This is a work-around due to the fact that some sub-modules,
# such as the one included in an attribute set, expects a "args"
# attribute to be given to the sub-module. As the option
# evaluation does not have any specific attribute name, we
# provide a default one for the documentation.
#
# This is mandatory as some option declaration might use the
# "name" attribute given as argument of the submodule and use it
# as the default of option declarations.
#
# Using lookalike unicode single angle quotation marks because
# of the docbook transformation the options receive. In all uses
# &gt; and &lt; wouldn't be encoded correctly so the encoded values
# would be used, and use of `<` and `>` would break the XML document.
# It shouldn't cause an issue since this is cosmetic for the manual.
args.name = "name";
}).options // optionalAttrs (freeformType != null) {
getSubOptions = prefix: (base.extendModules
{ inherit prefix; }).options // optionalAttrs (freeformType != null) {
# Expose the sub options of the freeform type. Note that the option
# discovery doesn't care about the attribute name used here, so this
# is just to avoid conflicts with potential options from the submodule

@ -159,6 +159,7 @@
};
abbe = {
email = "ashish.is@lostca.se";
matrix = "@abbe:badti.me";
github = "wahjava";
githubId = 2255192;
name = "Ashish SHUKLA";
@ -741,6 +742,7 @@
angustrau = {
name = "Angus Trau";
email = "nix@angus.ws";
matrix = "@angustrau:matrix.org";
github = "angustrau";
githubId = 13267947;
};
@ -1079,6 +1081,12 @@
githubId = 354741;
name = "Austin Butler";
};
autophagy = {
email = "mail@autophagy.io";
github = "autophagy";
githubId = 12958979;
name = "Mika Naylor";
};
avaq = {
email = "nixpkgs@account.avaq.it";
github = "avaq";
@ -1144,6 +1152,12 @@
githubId = 56650223;
name = "Artturi N";
};
ayazhafiz = {
email = "ayaz.hafiz.1@gmail.com";
github = "ayazhafiz";
githubId = 262763;
name = "Ayaz Hafiz";
};
b4dm4n = {
email = "fabianm88@gmail.com";
github = "B4dM4n";
@ -1650,6 +1664,7 @@
};
bryanasdev000 = {
email = "bryanasdev000@gmail.com";
matrix = "@bryanasdev000:matrix.org";
github = "bryanasdev000";
githubId = 53131727;
name = "Bryan Albuquerque";
@ -2136,6 +2151,12 @@
githubId = 199180;
name = "Claes Wallin";
};
cleeyv = {
email = "cleeyv@riseup.net";
github = "cleeyv";
githubId = 71959829;
name = "Cleeyv";
};
cleverca22 = {
email = "cleverca22@gmail.com";
matrix = "@cleverca22:matrix.org";
@ -2872,6 +2893,12 @@
githubId = 28980797;
name = "David Leslie";
};
dlip = {
email = "dane@lipscombe.com.au";
github = "dlip";
githubId = 283316;
name = "Dane Lipscombe";
};
dmalikov = {
email = "malikov.d.y@gmail.com";
github = "dmalikov";
@ -3115,6 +3142,7 @@
};
earvstedt = {
email = "erik.arvstedt@gmail.com";
matrix = "@erikarvstedt:matrix.org";
github = "erikarvstedt";
githubId = 36110478;
name = "Erik Arvstedt";
@ -3207,12 +3235,6 @@
githubId = 119483;
name = "Matthew Brown";
};
eduardosm = {
email = "esm@eduardosm.net";
github = "eduardosm";
githubId = 761151;
name = "Eduardo Sánchez Muñoz";
};
eduarrrd = {
email = "e.bachmakov@gmail.com";
github = "eduarrrd";
@ -3257,6 +3279,7 @@
};
ekleog = {
email = "leo@gaspard.io";
matrix = "@leo:gaspard.ninja";
github = "ekleog";
githubId = 411447;
name = "Leo Gaspard";
@ -3627,10 +3650,10 @@
};
expipiplus1 = {
email = "nix@monoid.al";
matrix = "@joe:monoid.al";
matrix = "@ellie:monoid.al";
github = "expipiplus1";
githubId = 857308;
name = "Joe Hermaszewski";
name = "Ellie Hermaszewska";
};
extends = {
email = "sharosari@gmail.com";
@ -3726,6 +3749,13 @@
githubId = 541748;
name = "Felipe Espinoza";
};
fedx-sudo = {
email = "fedx-sudo@pm.me";
github = "Fedx-sudo";
githubId = 66258975;
name = "Fedx sudo";
matrix = "fedx:matrix.org";
};
fehnomenal = {
email = "fehnomenal@fehn.systems";
github = "fehnomenal";
@ -4076,6 +4106,12 @@
githubId = 20208;
name = "Rok Garbas";
};
gardspirito = {
name = "gardspirito";
email = "nyxoroso@gmail.com";
github = "gardspirito";
githubId = 29687558;
};
garrison = {
email = "jim@garrison.cc";
github = "garrison";
@ -4165,6 +4201,12 @@
githubId = 1713676;
name = "Luis G. Torres";
};
GKasparov = {
email = "mizozahr@gmail.com";
github = "GKasparov";
githubId = 60962839;
name = "Mazen Zahr";
};
gleber = {
email = "gleber.p@gmail.com";
github = "gleber";
@ -4816,6 +4858,7 @@
};
ilkecan = {
email = "ilkecan@protonmail.com";
matrix = "@ilkecan:matrix.org";
github = "ilkecan";
githubId = 40234257;
name = "ilkecan bozdogan";
@ -5754,6 +5797,16 @@
githubId = 20658981;
name = "Jarosław Wygoda";
};
jyooru = {
email = "joel@joel.tokyo";
github = "jyooru";
githubId = 63786778;
name = "Joel";
keys = [{
longkeyid = "rsa4096/18550BD205E9EF64";
fingerprint = "9148 DC9E F4D5 3EB6 A30E 8EF0 1855 0BD2 05E9 EF64";
}];
};
jyp = {
email = "jeanphilippe.bernardy@gmail.com";
github = "jyp";
@ -6025,6 +6078,12 @@
githubId = 8260207;
name = "Karthik Iyengar";
};
kjeremy = {
email = "kjeremy@gmail.com";
name = "Jeremy Kolb";
github = "kjeremy";
githubId = 4325700;
};
kkallio = {
email = "tierpluspluslists@gmail.com";
name = "Karn Kallio";
@ -6209,6 +6268,12 @@
githubId = 278013;
name = "Tomasz Kontusz";
};
kubukoz = {
email = "kubukoz@gmail.com";
github = "kubukoz";
githubId = 894884;
name = "Jakub Kozłowski";
};
kurnevsky = {
email = "kurnevsky@gmail.com";
github = "kurnevsky";
@ -6345,6 +6410,12 @@
githubId = 1104419;
name = "Lucas Hoffmann";
};
lucasew = {
email = "lucas59356@gmail.com";
github = "lucasew";
githubId = 15693688;
name = "Lucas Eduardo Wendt";
};
lde = {
email = "lilian.deloche@puck.fr";
github = "lde";
@ -7184,6 +7255,12 @@
githubId = 51356;
name = "Mathieu Boespflug";
};
mbprtpmnr = {
name = "mbprtpmnr";
email = "mbprtpmnr@pm.me";
github = "mbprtpmnr";
githubId = 88109321;
};
mbrgm = {
email = "marius@yeai.de";
github = "mbrgm";
@ -7646,6 +7723,7 @@
mohe2015 = {
name = "Moritz Hedtke";
email = "Moritz.Hedtke@t-online.de";
matrix = "@moritz.hedtke:matrix.org";
github = "mohe2015";
githubId = 13287984;
keys = [{
@ -8249,6 +8327,12 @@
githubId = 810877;
name = "Tom Doggett";
};
noisersup = {
email = "patryk@kwiatek.xyz";
github = "noisersup";
githubId = 42322511;
name = "Patryk Kwiatek";
};
nomeata = {
email = "mail@joachim-breitner.de";
github = "nomeata";
@ -8627,6 +8711,7 @@
};
pamplemousse = {
email = "xav.maso@gmail.com";
matrix = "@pamplemouss_:matrix.org";
github = "Pamplemousse";
githubId = 2647236;
name = "Xavier Maso";
@ -8776,6 +8861,7 @@
};
peterhoeg = {
email = "peter@hoeg.com";
matrix = "@peter:hoeg.com";
github = "peterhoeg";
githubId = 722550;
name = "Peter Hoeg";
@ -8840,6 +8926,12 @@
githubId = 421510;
name = "Noé Rubinstein";
};
photex = {
email = "photex@gmail.com";
github = "photex";
githubId = 301903;
name = "Chip Collier";
};
phreedom = {
email = "phreedom@yandex.ru";
github = "phreedom";
@ -9308,6 +9400,12 @@
githubId = 52847440;
name = "Ryan Burns";
};
r3dl3g = {
email = "redleg@rothfuss-web.de";
github = "r3dl3g";
githubId = 35229674;
name = "Armin Rothfuss";
};
raboof = {
email = "arnout@bzzt.net";
matrix = "@raboof:matrix.org";
@ -9646,6 +9744,7 @@
};
rnhmjoj = {
email = "rnhmjoj@inventati.org";
matrix = "@rnhmjoj:maxwell.ydns.eu";
github = "rnhmjoj";
githubId = 2817565;
name = "Michele Guerini Rocco";
@ -9747,6 +9846,7 @@
};
roosemberth = {
email = "roosembert.palacios+nixpkgs@posteo.ch";
matrix = "@roosemberth:orbstheorem.ch";
github = "roosemberth";
githubId = 3621083;
name = "Roosembert (Roosemberth) Palacios";
@ -9801,6 +9901,12 @@
githubId = 373566;
name = "Ronuk Raval";
};
rski = {
name = "rski";
email = "rom.skiad+nix@gmail.com";
github = "rski";
githubId = 2960312;
};
rszibele = {
email = "richard@szibele.com";
github = "rszibele";
@ -9887,6 +9993,7 @@
};
ryantm = {
email = "ryan@ryantm.com";
matrix = "@ryantm:matrix.org";
github = "ryantm";
githubId = 4804;
name = "Ryan Mulligan";
@ -10711,6 +10818,12 @@
github = "staccato";
githubId = 86573128;
};
stackshadow = {
email = "stackshadow@evilbrain.de";
github = "stackshadow";
githubId = 7512804;
name = "Martin Langlotz";
};
steell = {
email = "steve@steellworks.com";
github = "Steell";
@ -10788,6 +10901,12 @@
githubId = 1181362;
name = "Stefan Junker";
};
stevenroose = {
email = "github@stevenroose.org";
github = "stevenroose";
githubId = 853468;
name = "Steven Roose";
};
stianlagstad = {
email = "stianlagstad@gmail.com";
github = "stianlagstad";
@ -10854,13 +10973,6 @@
githubId = 2666479;
name = "Y Nguyen";
};
superherointj = {
name = "Sérgio G.";
email = "5861043+superherointj@users.noreply.github.com";
matrix = "@superherointj:matrix.org";
github = "superherointj";
githubId = 5861043;
};
SuperSandro2000 = {
email = "sandro.jaeckel@gmail.com";
matrix = "@sandro:supersandro.de";
@ -11200,6 +11312,17 @@
githubId = 1141680;
name = "Thane Gill";
};
thblt = {
name = "Thibault Polge";
email = "thibault@thb.lt";
matrix = "@thbltp:matrix.org";
github = "thblt";
githubId = 2453136;
keys = [{
longkeyid = "rsa4096/0x63A44817A52EAB7B";
fingerprint = "D2A2 F0A1 E7A8 5E6F B711 DEE5 63A4 4817 A52E AB7B";
}];
};
TheBrainScrambler = {
email = "esthromeris@riseup.net";
github = "TheBrainScrambler";
@ -11668,6 +11791,13 @@
fingerprint = "EE59 5E29 BB5B F2B3 5ED2 3F1C D276 FF74 6700 7335";
}];
};
uniquepointer = {
email = "uniquepointer@mailbox.org";
matrix = "@uniquepointer:matrix.org";
github = "uniquepointer";
githubId = 71751817;
name = "uniquepointer";
};
unode = {
email = "alves.rjc@gmail.com";
matrix = "@renato_alves:matrix.org";
@ -12054,6 +12184,22 @@
githubId = 9002575;
name = "Weihua Lu";
};
welteki = {
email = "welteki@pm.me";
github = "welteki";
githubId = 16267532;
name = "Han Verstraete";
keys = [{
longkeyid = "rsa4096/0x11F7BAEA856743FF";
fingerprint = "2145 955E 3F5E 0C95 3458 41B5 11F7 BAEA 8567 43FF";
}];
};
wentasah = {
name = "Michal Sojka";
email = "wsh@2x.cz";
github = "wentasah";
githubId = 140542;
};
wheelsandmetal = {
email = "jakob@schmutz.co.uk";
github = "wheelsandmetal";
@ -12452,6 +12598,16 @@
fingerprint = "9270 66BD 8125 A45B 4AC4 0326 6180 7181 F60E FCB2";
}];
};
yuu = {
email = "yuuyin@protonmail.com";
github = "yuuyins";
githubId = 86538850;
name = "Yuu Yin";
keys = [{
longkeyid = "rsa4096/0x416F303B43C20AC3";
fingerprint = "9F19 3AE8 AA25 647F FC31 46B5 416F 303B 43C2 0AC3";
}];
};
yvesf = {
email = "yvesf+nix@xapek.org";
github = "yvesf";
@ -12876,6 +13032,12 @@
fingerprint = "61AE D40F 368B 6F26 9DAE 3892 6861 6B2D 8AC4 DCC5";
}];
};
zbioe = {
name = "Iury Fukuda";
email = "zbioe@protonmail.com";
github = "zbioe";
githubId = 7332055;
};
zenithal = {
name = "zenithal";
email = "i@zenithal.me";
@ -12909,17 +13071,4 @@
github = "zupo";
githubId = 311580;
};
rski = {
name = "rski";
email = "rom.skiad+nix@gmail.com";
github = "rski";
githubId = 2960312;
};
mbprtpmnr = {
name = "mbprtpmnr";
email = "mbprtpmnr@pm.me";
github = "mbprtpmnr";
githubId = 88109321;
};
}

@ -37,7 +37,7 @@ let
keyDrv = drv: if canEval drv.drvPath then { key = drv.drvPath; value = drv; } else { };
immediateDependenciesOf = drv:
concatLists (mapAttrsToList (n: v: derivationsIn v) (removeAttrs drv ["meta" "passthru"]));
concatLists (mapAttrsToList (n: v: derivationsIn v) (removeAttrs drv (["meta" "passthru"] ++ optionals (drv?passthru) (attrNames drv.passthru))));
derivationsIn = x:
if !canEval x then []

@ -114,7 +114,7 @@ async def check_changes(package: Dict, worktree: str, update_info: str):
changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))
if 'files' not in changes[0]:
changed_files_process = await check_subprocess('git', 'diff', '--name-only', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files = (await changed_files_process.stdout.read()).splitlines()
changes[0]['files'] = changed_files

@ -145,6 +145,7 @@ with lib.maintainers; {
jitsi = {
members = [
cleeyv
petabyteboy
ryantm
yuka
@ -199,7 +200,6 @@ with lib.maintainers; {
openstack = {
members = [
angustrau
superherointj
SuperSandro2000
];
scope = "Maintain the ecosystem around OpenStack";

@ -58,5 +58,5 @@ a while to finish.
## NixOS Boot Entries {#sect-nixos-gc-boot-entries}
If your `/boot` partition runs out of space, after clearing old profiles
you must rebuild your system with `nixos-rebuild` to update the `/boot`
partition and clear space.
you must rebuild your system with `nixos-rebuild boot` or `nixos-rebuild
switch` to update the `/boot` partition and clear space.
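For example, either of the following is sufficient (`boot` only installs
the new boot entries, while `switch` also activates the new
configuration):
```ShellSession
$ nixos-rebuild boot
$ nixos-rebuild switch
```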

@ -159,6 +159,17 @@ The following methods are available on machine objects:
`execute`
: Execute a shell command, returning a list `(status, stdout)`.
If the command detaches, it must close stdout, as `execute` will wait
for this to consume all output reliably. This can be achieved by
redirecting stdout to stderr `>&2`, to `/dev/console`, `/dev/null` or
a file. Examples of detaching commands are `sleep 365d &`, where the
shell forks a new process that can write to stdout, and `xclip -i`, where
the `xclip` command itself forks without closing stdout.
Takes an optional parameter `check_return` that defaults to `True`.
Setting this parameter to `False` skips the return code check
and returns -1 instead. This can be used for commands that shut down
the VM and would therefore break the pipe that would be used for
retrieving the return code.
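A minimal sketch of both behaviours in a test script (`my-daemon` is a
hypothetical command, not part of the framework):
```py
# Detaching command: redirect stdout so `execute` can return reliably.
machine.execute("my-daemon >&2 &")
# Command that kills the VM: skip the return code check and accept -1.
machine.execute("poweroff", check_return=False)
```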
`succeed`
@ -174,6 +185,9 @@ The following methods are available on machine objects:
- Dereferencing unset variables fails the command.
- It will wait for stdout to be closed. See `execute` for the
implications.
`fail`
: Like `succeed`, but raising an exception if the command returns a zero

@ -64,7 +64,8 @@ $ nix-store --optimise
<para>
If your <literal>/boot</literal> partition runs out of space,
after clearing old profiles you must rebuild your system with
<literal>nixos-rebuild</literal> to update the
<literal>nixos-rebuild boot</literal> or
<literal>nixos-rebuild switch</literal> to update the
<literal>/boot</literal> partition and clear space.
</para>
</section>

@ -266,7 +266,23 @@ start_all()
<listitem>
<para>
Execute a shell command, returning a list
<literal>(status, stdout)</literal>.
<literal>(status, stdout)</literal>. If the command detaches,
it must close stdout, as <literal>execute</literal> will wait
for this to consume all output reliably. This can be achieved
by redirecting stdout to stderr <literal>&gt;&amp;2</literal>,
to <literal>/dev/console</literal>,
<literal>/dev/null</literal> or a file. Examples of detaching
commands are <literal>sleep 365d &amp;</literal>, where the
shell forks a new process that can write to stdout, and
<literal>xclip -i</literal>, where the
<literal>xclip</literal> command itself forks without closing
stdout. Takes an optional parameter
<literal>check_return</literal> that defaults to
<literal>True</literal>. Setting this parameter to
<literal>False</literal> skips the return code check
and returns -1 instead. This can be used for commands that shut
down the VM and would therefore break the pipe that would be
used for retrieving the return code.
</para>
</listitem>
</varlistentry>
@ -300,6 +316,12 @@ start_all()
Dereferencing unset variables fails the command.
</para>
</listitem>
<listitem>
<para>
It will wait for stdout to be closed. See
<literal>execute</literal> for the implications.
</para>
</listitem>
</itemizedlist>
</listitem>
</varlistentry>

@ -25,8 +25,11 @@
<para>
You are logged-in automatically as <literal>nixos</literal>. The
<literal>nixos</literal> user account has an empty password so you
can use <literal>sudo</literal> without a password.
can use <literal>sudo</literal> without a password:
</para>
<programlisting>
$ sudo -i
</programlisting>
<para>
If you downloaded the graphical ISO image, you can run
<literal>systemctl start display-manager</literal> to start the

@ -1,5 +1,5 @@
<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-release-21.11">
<title>Release 21.11 (“?”, 2021.11/??)</title>
<title>Release 21.11 (“Porcupine”, 2021.11/??)</title>
<para>
In addition to numerous new and upgraded packages, this release has
the following highlights:
@ -15,6 +15,12 @@
<section xml:id="sec-release-21.11-highlights">
<title>Highlights</title>
<itemizedlist>
<listitem>
<para>
<literal>iptables</literal> now uses the
<literal>nf_tables</literal> backend.
</para>
</listitem>
<listitem>
<para>
PHP now defaults to PHP 8.0, updated from 7.4.
@ -44,6 +50,29 @@
guide</link> is available.
</para>
</listitem>
<listitem>
<para>
Improvements have been made to the Hadoop module and package:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
HDFS and YARN now support production-ready highly
available deployments with automatic failover.
</para>
</listitem>
<listitem>
<para>
Hadoop now defaults to Hadoop 3, updated from 2.
</para>
</listitem>
<listitem>
<para>
JournalNode, ZKFS and HTTPFS services have been added.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Activation scripts can now opt in to be run when running
@ -94,6 +123,25 @@
Notes</link> for details.
</para>
</listitem>
<listitem>
<para>
LXD support was greatly improved:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
building LXD images from configurations is now directly
possible with just nixpkgs
</para>
</listitem>
<listitem>
<para>
Hydra is now building NixOS LXD images that can be used
standalone with full nixos-rebuild support
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-new-services">
@ -130,6 +178,14 @@
<link xlink:href="options.html#opt-services.geoipupdate.enable">services.geoipupdate</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/jitsi/jibri">Jibri</link>,
a service for recording or streaming a Jitsi Meet conference.
Available as
<link xlink:href="options.html#opt-services.jibri.enable">services.jibri</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://www.isc.org/kea/">Kea</link>, ISCs
@ -144,6 +200,14 @@
<link xlink:href="options.html#opt-services.owncast">services.owncast</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://joinpeertube.org/">PeerTube</link>,
developed by Framasoft, is the free and decentralized
alternative to video platforms. Available at
<link xlink:href="options.html#opt-services.peertube">services.peertube</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://sr.ht">sourcehut</link>, a
@ -357,11 +421,57 @@
<link linkend="opt-services.multipath.enable">services.multipath</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://www.seafile.com/en/home/">seafile</link>,
an open source file syncing &amp; sharing software. Available
as
<link xlink:href="options.html#opt-services.seafile.enable">services.seafile</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/mchehab/rasdaemon">rasdaemon</link>,
a hardware error logging daemon. Available as
<link linkend="opt-hardware.rasdaemon.enable">hardware.rasdaemon</link>.
</para>
</listitem>
<listitem>
<para>
The <literal>code-server</literal> module is now available.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/xmrig/xmrig">xmrig</link>,
a high performance, open source, cross platform RandomX,
KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and
RandomX benchmark.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-incompatibilities">
<title>Backward Incompatibilities</title>
<itemizedlist>
<listitem>
<para>
The NixOS VM test framework,
<literal>pkgs.nixosTest</literal>/<literal>make-test-python.nix</literal>,
now requires detaching commands such as
<literal>succeed(&quot;foo &amp;&quot;)</literal> and
<literal>succeed(&quot;foo | xclip -i&quot;)</literal> to
close stdout. This can be done with a redirect such as
<literal>succeed(&quot;foo &gt;&amp;2 &amp;&quot;)</literal>.
This breaking change was necessitated by a race condition
causing tests to fail or hang. It applies to all methods that
invoke commands on the nodes, including
<literal>execute</literal>, <literal>succeed</literal>,
<literal>fail</literal>,
<literal>wait_until_succeeds</literal>,
<literal>wait_until_fails</literal>.
</para>
</listitem>
<listitem>
<para>
The <literal>services.wakeonlan</literal> option was removed,
@ -379,6 +489,14 @@
nobody/nogroup, which is unsafe.
</para>
</listitem>
<listitem>
<para>
Since <literal>iptables</literal> now uses the
<literal>nf_tables</literal> backend and
<literal>ipset</literal> doesn’t support it, some applications
(ferm, shorewall, firehol) may have limited functionality.
</para>
</listitem>
<listitem>
<para>
The <literal>paperless</literal> module and package have been
@ -525,6 +643,17 @@ Superuser created successfully.
<link xlink:href="options.html#opt-services.geoipupdate.enable">services.geoipupdate</link>.
</para>
</listitem>
<listitem>
<para>
<literal>ihatemoney</literal> has been updated to version
5.1.1
(<link xlink:href="https://github.com/spiral-project/ihatemoney/blob/5.1.1/CHANGELOG.rst">release
notes</link>). If you serve ihatemoney by HTTP rather than
HTTPS, you must set
<link xlink:href="options.html#opt-services.ihatemoney.secureCookie">services.ihatemoney.secureCookie</link>
to <literal>false</literal>.
</para>
</listitem>
<listitem>
<para>
PHP 7.3 is no longer supported due to upstream not supporting
@ -1129,6 +1258,43 @@ Superuser created successfully.
would be parsed as 3 parameters.
</para>
</listitem>
<listitem>
<para>
The <literal>coursier</literal> package’s binary was renamed
from <literal>coursier</literal> to <literal>cs</literal>.
Completions which haven’t worked for a while should now work
with the renamed binary. To keep using
<literal>coursier</literal>, you can create a shell alias.
</para>
</listitem>
<listitem>
<para>
The <literal>services.mosquitto</literal> module has been
rewritten to support multiple listeners and per-listener
configuration. Module configurations from previous releases
will no longer work and must be updated.
</para>
</listitem>
<listitem>
<para>
Nextcloud 20 (<literal>pkgs.nextcloud20</literal>) has been
dropped because it was EOLed by upstream in 2021-10.
</para>
</listitem>
<listitem>
<para>
The <literal>virtualisation.pathsInNixDB</literal> option was
renamed
<link xlink:href="options.html#opt-virtualisation.additionalPaths"><literal>virtualisation.additionalPaths</literal></link>.
</para>
</listitem>
<listitem>
<para>
The <literal>services.ddclient.password</literal> option was
removed, and replaced with
<literal>services.ddclient.passwordFile</literal>.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-notable-changes">
@ -1152,25 +1318,31 @@ Superuser created successfully.
<para>
In NixOS virtual machines (QEMU), the
<literal>virtualisation</literal> module has been updated with
new options to configure:
new options:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
IPv4 port forwarding
(<link xlink:href="options.html#opt-virtualisation.forwardPorts"><literal>virtualisation.forwardPorts</literal></link>),
<link xlink:href="options.html#opt-virtualisation.forwardPorts"><literal>forwardPorts</literal></link>
to configure IPv4 port forwarding,
</para>
</listitem>
<listitem>
<para>
shared host directories
(<link xlink:href="options.html#opt-virtualisation.sharedDirectories"><literal>virtualisation.sharedDirectories</literal></link>),
<link xlink:href="options.html#opt-virtualisation.sharedDirectories"><literal>sharedDirectories</literal></link>
to set up shared host directories,
</para>
</listitem>
<listitem>
<para>
screen resolution
(<link xlink:href="options.html#opt-virtualisation.resolution"><literal>virtualisation.resolution</literal></link>).
<link xlink:href="options.html#opt-virtualisation.resolution"><literal>resolution</literal></link>
to set the screen resolution,
</para>
</listitem>
<listitem>
<para>
<link xlink:href="options.html#opt-virtualisation.useNixStoreImage"><literal>useNixStoreImage</literal></link>
to use a disk image for the Nix store instead of 9P.
</para>
</listitem>
</itemizedlist>
@ -1423,6 +1595,23 @@ Superuser created successfully.
option.
</para>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.smokeping.host">services.smokeping.host</link>
option was added and defaulted to
<literal>localhost</literal>. Before,
<literal>smokeping</literal> listened to all interfaces by
default. NixOS defaults generally aim to provide
non-Internet-exposed defaults for databases and internal
monitoring tools, see e.g.
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/100192">#100192</link>.
Further, the systemd service for <literal>smokeping</literal>
got reworked defaults for increased operational stability, see
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/144127">PR
#144127</link> for details.
</para>
</listitem>
<listitem>
<para>
The
@ -1596,6 +1785,88 @@ Superuser created successfully.
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
The <literal>cawbird</literal> Twitter client now uses its own
API keys to count as a different application than upstream
builds. This is done to evade application-level rate limiting.
While existing accounts continue to work, users may want to
remove and re-register their account in the client to enjoy a
better user experience and benefit from this change.
</para>
</listitem>
<listitem>
<para>
A new option
<literal>services.prometheus.enableReload</literal> has been
added which can be enabled to reload the prometheus service
when its config file changes instead of restarting.
</para>
</listitem>
<listitem>
<para>
Dokuwiki now supports Caddy! However:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
the nginx option has been removed; in the new
configuration, please use
<literal>dokuwiki.webserver = &quot;nginx&quot;</literal>
instead.
</para>
</listitem>
<listitem>
<para>
The <quote>${hostname}</quote> option has been deprecated;
please use
<literal>dokuwiki.sites = [ &quot;${hostname}&quot; ]</literal>
instead.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.unifi.enable">services.unifi</link>
module has been reworked, solving a number of issues. This
leads to several user-facing changes:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
The <literal>services.unifi.dataDir</literal> option is
removed and the data is now always located under
<literal>/var/lib/unifi/data</literal>. This is done to
make better use of the systemd state directory, making
the service restart more reliable.
</para>
</listitem>
<listitem>
<para>
The unifi logs can now be found under:
<literal>/var/log/unifi</literal> instead of
<literal>/var/lib/unifi/logs</literal>.
</para>
</listitem>
<listitem>
<para>
The unifi run directory can now be found under:
<literal>/run/unifi</literal> instead of
<literal>/var/lib/unifi/run</literal>.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
<literal>security.pam.services.&lt;name&gt;.makeHomeDir</literal>
now uses <literal>umask=0077</literal> instead of
<literal>umask=0022</literal> when creating the home
directory.
</para>
</listitem>
</itemizedlist>
</section>
</section>

@ -15,7 +15,10 @@ finished booting, it should have detected most of your hardware.
The NixOS manual is available by running `nixos-help`.
You are logged-in automatically as `nixos`. The `nixos` user account has
an empty password so you can use `sudo` without a password.
an empty password so you can use `sudo` without a password:
```ShellSession
$ sudo -i
```
If you downloaded the graphical ISO image, you can run `systemctl
start display-manager` to start the desktop environment. If you want

@ -1,4 +1,4 @@
# Release 21.11 (“?”, 2021.11/??) {#sec-release-21.11}
# Release 21.11 (“Porcupine”, 2021.11/??) {#sec-release-21.11}
In addition to numerous new and upgraded packages, this release has the following highlights:
@ -6,6 +6,8 @@ In addition to numerous new and upgraded packages, this release has the followin
## Highlights {#sec-release-21.11-highlights}
- `iptables` now uses the `nf_tables` backend.
- PHP now defaults to PHP 8.0, updated from 7.4.
- kOps now defaults to 1.21.1, which uses containerd as the default runtime.
@ -16,6 +18,11 @@ In addition to numerous new and upgraded packages, this release has the followin
- spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
- Improvements have been made to the Hadoop module and package:
- HDFS and YARN now support production-ready highly available deployments with automatic failover.
- Hadoop now defaults to Hadoop 3, updated from 2.
- JournalNode, ZKFS and HTTPFS services have been added.
- Activation scripts can now opt in to be run when running `nixos-rebuild dry-activate` and detect the dry activation by reading `$NIXOS_ACTION`.
This allows activation scripts to output what they would change if the activation was really run.
The users/modules activation script supports this and outputs some of its actions.
@ -33,6 +40,10 @@ In addition to numerous new and upgraded packages, this release has the followin
- GNOME has been upgraded to 41. Please take a look at their [Release Notes](https://help.gnome.org/misc/release-notes/41.0/) for details.
- LXD support was greatly improved:
- building LXD images from configurations is now directly possible with just nixpkgs
- Hydra is now building NixOS LXD images that can be used standalone with full nixos-rebuild support
## New Services {#sec-release-21.11-new-services}
- [btrbk](https://digint.ch/btrbk/index.html), a backup tool for btrfs subvolumes, taking advantage of btrfs specific capabilities to create atomic snapshots and transfer them incrementally to your backup locations. Available as [services.btrbk](options.html#opt-services.brtbk.instances).
@ -43,10 +54,14 @@ In addition to numerous new and upgraded packages, this release has the followin
- [geoipupdate](https://github.com/maxmind/geoipupdate), a GeoIP database updater from MaxMind. Available as [services.geoipupdate](options.html#opt-services.geoipupdate.enable).
- [Jibri](https://github.com/jitsi/jibri), a service for recording or streaming a Jitsi Meet conference. Available as [services.jibri](options.html#opt-services.jibri.enable).
- [Kea](https://www.isc.org/kea/), ISCs 2nd generation DHCP and DDNS server suite. Available at [services.kea](options.html#opt-services.kea).
- [owncast](https://owncast.online/), self-hosted video live streaming solution. Available at [services.owncast](options.html#opt-services.owncast).
- [PeerTube](https://joinpeertube.org/), developed by Framasoft, is the free and decentralized alternative to video platforms. Available at [services.peertube](options.html#opt-services.peertube).
- [sourcehut](https://sr.ht), a collection of tools useful for software development. Available as [services.sourcehut](options.html#opt-services.sourcehut.enable).
- [ucarp](https://download.pureftpd.org/pub/ucarp/README), a userspace implementation of the Common Address Redundancy Protocol (CARP). Available as [networking.ucarp](options.html#opt-networking.ucarp.enable).
@ -110,13 +125,27 @@ In addition to numerous new and upgraded packages, this release has the followin
- [multipath](https://github.com/opensvc/multipath-tools), the device mapper multipath (DM-MP) daemon. Available as [services.multipath](#opt-services.multipath.enable).
- [seafile](https://www.seafile.com/en/home/), an open source file syncing & sharing software. Available as [services.seafile](options.html#opt-services.seafile.enable).
- [rasdaemon](https://github.com/mchehab/rasdaemon), a hardware error logging daemon. Available as [hardware.rasdaemon](#opt-hardware.rasdaemon.enable).
- The `code-server` module is now available.
- [xmrig](https://github.com/xmrig/xmrig), a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and RandomX benchmark.
## Backward Incompatibilities {#sec-release-21.11-incompatibilities}
- The NixOS VM test framework, `pkgs.nixosTest`/`make-test-python.nix`, now requires detaching commands such as `succeed("foo &")` and `succeed("foo | xclip -i")` to close stdout.
This can be done with a redirect such as `succeed("foo >&2 &")`. This breaking change was necessitated by a race condition causing tests to fail or hang.
It applies to all methods that invoke commands on the nodes, including `execute`, `succeed`, `fail`, `wait_until_succeeds`, `wait_until_fails`.
- The `services.wakeonlan` option was removed, and replaced with `networking.interfaces.<name>.wakeOnLan`.
- The `security.wrappers` option now requires to always specify an owner, group and whether the setuid/setgid bit should be set.
This is motivated by the fact that before NixOS 21.11, specifying either setuid or setgid but not owner/group resulted in wrappers owned by nobody/nogroup, which is unsafe.
- Since `iptables` now uses the `nf_tables` backend and `ipset` doesn't support it, some applications (ferm, shorewall, firehol) may have limited functionality.
- The `paperless` module and package have been removed. All users should migrate to the
successor `paperless-ng` instead. The Paperless project [has been
archived](https://github.com/the-paperless-project/paperless/commit/9b0063c9731f7c5f65b1852cb8caff97f5e40ba4)
@ -196,6 +225,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.geoip-updater` was broken and has been replaced by [services.geoipupdate](options.html#opt-services.geoipupdate.enable).
- `ihatemoney` has been updated to version 5.1.1 ([release notes](https://github.com/spiral-project/ihatemoney/blob/5.1.1/CHANGELOG.rst)). If you serve ihatemoney by HTTP rather than HTTPS, you must set [services.ihatemoney.secureCookie](options.html#opt-services.ihatemoney.secureCookie) to `false`.
- PHP 7.3 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 21.11 release.
- Those making use of `buildBazelPackage` will need to regenerate the fetch hashes (preferred), or set `fetchConfigured = false;`.
@ -349,16 +380,29 @@ In addition to numerous new and upgraded packages, this release has the followin
- `boot.kernelParams` now only accepts one command line parameter per string. This change is aimed to reduce common mistakes like "param = 12", which would be parsed as 3 parameters.
- The `coursier` package's binary was renamed from `coursier` to `cs`. Completions which haven't worked for a while should now work with the renamed binary. To keep using `coursier`, you can create a shell alias.
- The `services.mosquitto` module has been rewritten to support multiple listeners and per-listener configuration.
Module configurations from previous releases will no longer work and must be updated; a minimal sketch of the new layout is shown after this list.
- Nextcloud 20 (`pkgs.nextcloud20`) has been dropped because it was EOLed by upstream in 2021-10.
- The `virtualisation.pathsInNixDB` option was renamed
[`virtualisation.additionalPaths`](options.html#opt-virtualisation.additionalPaths).
- The `services.ddclient.password` option was removed, and replaced with `services.ddclient.passwordFile`.
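As referenced above, a minimal sketch of the new per-listener
`services.mosquitto` layout (the user name, password and topic are
illustrative assumptions, not module defaults):
```nix
{
  services.mosquitto = {
    enable = true;
    listeners = [{
      port = 1883;
      users.alice = {
        password = "changeme";            # demo credential only
        acl = [ "readwrite sensors/#" ];  # per-user topic permissions
      };
    }];
  };
}
```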
## Other Notable Changes {#sec-release-21.11-notable-changes}
- The linux kernel package infrastructure was moved out of `all-packages.nix`, and restructured. Linux related functions and attributes now live under the `pkgs.linuxKernel` attribute set.
In particular the versioned `linuxPackages_*` package sets (such as `linuxPackages_5_4`) and kernels from `pkgs` were moved there and now live under `pkgs.linuxKernel.packages.*`. The unversioned ones (such as `linuxPackages_latest`) remain untouched.
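For example (a minimal sketch), pinning a versioned kernel now reads:
```nix
{ pkgs, ... }: {
  # versioned package sets now live under pkgs.linuxKernel.packages.*
  boot.kernelPackages = pkgs.linuxKernel.packages.linux_5_4;
}
```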
- In NixOS virtual machines (QEMU), the `virtualisation` module has been updated with new options to configure:
- IPv4 port forwarding ([`virtualisation.forwardPorts`](options.html#opt-virtualisation.forwardPorts)),
- shared host directories ([`virtualisation.sharedDirectories`](options.html#opt-virtualisation.sharedDirectories)),
- screen resolution ([`virtualisation.resolution`](options.html#opt-virtualisation.resolution)).
- In NixOS virtual machines (QEMU), the `virtualisation` module has been updated with new options (a combined sketch follows this list):
- [`forwardPorts`](options.html#opt-virtualisation.forwardPorts) to configure IPv4 port forwarding,
- [`sharedDirectories`](options.html#opt-virtualisation.sharedDirectories) to set up shared host directories,
- [`resolution`](options.html#opt-virtualisation.resolution) to set the screen resolution,
- [`useNixStoreImage`](options.html#opt-virtualisation.useNixStoreImage) to use a disk image for the Nix store instead of 9P.
In addition, the default [`msize`](options.html#opt-virtualisation.msize) parameter in 9P filesystems (including /nix/store and all shared directories) has been increased to 16K for improved performance.
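A combined sketch of the new options (the ports, paths and resolution
are illustrative assumptions):
```nix
{
  virtualisation.forwardPorts = [
    { from = "host"; host.port = 2222; guest.port = 22; }  # SSH into the VM
  ];
  virtualisation.sharedDirectories.projects = {
    source = "/home/demo/projects";  # host directory
    target = "/mnt/projects";        # mount point in the guest
  };
  virtualisation.resolution = { x = 1280; y = 720; };
  virtualisation.useNixStoreImage = true;  # disk image instead of 9P
}
```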
@ -420,6 +464,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [networking.wireless.iwd](options.html#opt-networking.wireless.iwd.enable) module has a new [networking.wireless.iwd.settings](options.html#opt-networking.wireless.iwd.settings) option.
- The [services.smokeping.host](options.html#opt-services.smokeping.host) option was added and defaulted to `localhost`. Before, `smokeping` listened to all interfaces by default. NixOS defaults generally aim to provide non-Internet-exposed defaults for databases and internal monitoring tools, see e.g. [#100192](https://github.com/NixOS/nixpkgs/issues/100192). Further, the systemd service for `smokeping` got reworked defaults for increased operational stability, see [PR #144127](https://github.com/NixOS/nixpkgs/pull/144127) for details.
- The [services.syncoid.enable](options.html#opt-services.syncoid.enable) module now properly drops ZFS permissions after usage. Before it delegated permissions to whole pools instead of datasets and didn't clean up after execution. You can manually look this up for your pools by running `zfs allow your-pool-name` and use `zfs unallow syncoid your-pool-name` to clean this up.
- Zfs: `latestCompatibleLinuxPackages` is now exported on the zfs package. One can use `boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;` to always track the latest compatible kernel with a given version of zfs.
@ -457,3 +503,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- `virtualisation.libvirtd.qemu*` options (e.g.: `virtualisation.libvirtd.qemuRunAsRoot`) were moved to [`virtualisation.libvirtd.qemu`](options.html#opt-virtualisation.libvirtd.qemu) submodule,
- software TPM1/TPM2 support (e.g.: Windows 11 guests) ([`virtualisation.libvirtd.qemu.swtpm`](options.html#opt-virtualisation.libvirtd.qemu.swtpm)),
- custom OVMF package (e.g.: `pkgs.OVMFFull` with HTTP, CSM and Secure Boot support) ([`virtualisation.libvirtd.qemu.ovmf.package`](options.html#opt-virtualisation.libvirtd.qemu.ovmf.package)).
- The `cawbird` Twitter client now uses its own API keys to count as a different application than upstream builds. This is done to evade application-level rate limiting. While existing accounts continue to work, users may want to remove and re-register their account in the client to enjoy a better user experience and benefit from this change.
- A new option `services.prometheus.enableReload` has been added which can be enabled to reload the prometheus service when its config file changes instead of restarting.
- Dokuwiki now supports Caddy! However:
- the nginx option has been removed; in the new configuration, please use `dokuwiki.webserver = "nginx"` instead.
- The "${hostname}" option has been deprecated; please use `dokuwiki.sites = [ "${hostname}" ]` instead.
- The [services.unifi](options.html#opt-services.unifi.enable) module has been reworked, solving a number of issues. This leads to several user-facing changes:
- The `services.unifi.dataDir` option is removed and the data is now always located under `/var/lib/unifi/data`. This is done to make better use of the systemd state directory, making the service restart more reliable.
- The unifi logs can now be found under: `/var/log/unifi` instead of `/var/lib/unifi/logs`.
- The unifi run directory can now be found under: `/run/unifi` instead of `/var/lib/unifi/run`.
- `security.pam.services.<name>.makeHomeDir` now uses `umask=0077` instead of `umask=0022` when creating the home directory.

@ -61,7 +61,7 @@ in rec {
args = extraArgs;
specialArgs =
{ modulesPath = builtins.toString ../modules; } // specialArgs;
}) config options _module;
}) config options _module type;
# These are the extra arguments passed to every module. In
# particular, Nixpkgs is passed through the "pkgs" argument.

@ -44,11 +44,14 @@
# most likely fails as GRUB will probably refuse to install.
partitionTableType ? "legacy"
, # Whether to invoke `switch-to-configuration boot` during image creation
installBootLoader ? true
, # The root file system type.
fsType ? "ext4"
, # Filesystem label
label ? "nixos"
label ? if onlyNixStore then "nix-store" else "nixos"
, # The initial NixOS configuration file to be copied to
# /etc/nixos/configuration.nix.
@ -57,10 +60,24 @@
, # Shell code executed after the VM has finished.
postVM ? ""
, # Copy the contents of the Nix store to the root of the image and
# skip further setup. Incompatible with `contents`,
# `installBootLoader` and `configFile`.
onlyNixStore ? false
, name ? "nixos-disk-image"
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
format ? "raw"
, # Whether a nix channel based on the current source tree should be
# made available inside the image. Useful for interactive use of nix
# utils, but changes the hash of the image when the sources are
# updated.
copyChannel ? true
, # Additional store paths to copy to the image's store.
additionalPaths ? []
}:
assert partitionTableType == "legacy" || partitionTableType == "legacy+gpt" || partitionTableType == "efi" || partitionTableType == "hybrid" || partitionTableType == "none";
@ -71,6 +88,7 @@ assert lib.all
(attrs: ((attrs.user or null) == null)
== ((attrs.group or null) == null))
contents;
assert onlyNixStore -> contents == [] && configFile == null && !installBootLoader;
with lib;
@ -163,7 +181,14 @@ let format' = format; in let
users = map (x: x.user or "''") contents;
groups = map (x: x.group or "''") contents;
closureInfo = pkgs.closureInfo { rootPaths = [ config.system.build.toplevel channelSources ]; };
basePaths = [ config.system.build.toplevel ]
++ lib.optional copyChannel channelSources;
additionalPaths' = subtractLists basePaths additionalPaths;
closureInfo = pkgs.closureInfo {
rootPaths = basePaths ++ additionalPaths';
};
blockSize = toString (4 * 1024); # ext4fs block size (not block device sector size)
@ -251,7 +276,13 @@ let format' = format; in let
chmod 755 "$TMPDIR"
echo "running nixos-install..."
nixos-install --root $root --no-bootloader --no-root-passwd \
--system ${config.system.build.toplevel} --channel ${channelSources} --substituters ""
--system ${config.system.build.toplevel} \
${if copyChannel then "--channel ${channelSources}" else "--no-channel-copy"} \
--substituters ""
${optionalString (additionalPaths' != []) ''
nix copy --to $root --no-check-sigs ${concatStringsSep " " additionalPaths'}
''}
diskImage=nixos.raw
@ -320,25 +351,29 @@ let format' = format; in let
''}
echo "copying staging root to image..."
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* / ||
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} \
-t ${fsType} \
-i $diskImage \
$root${optionalString onlyNixStore builtins.storeDir}/* / ||
(echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
'';
in pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name
{ preVM = prepareImage;
moveOrConvertImage = ''
${if format == "raw" then ''
mv $diskImage $out/${filename}
'' else ''
${pkgs.qemu}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
''}
diskImage=$out/${filename}
'';
buildImage = pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name {
preVM = prepareImage;
buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
postVM = ''
${if format == "raw" then ''
mv $diskImage $out/${filename}
'' else ''
${pkgs.qemu}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
''}
diskImage=$out/${filename}
${postVM}
'';
postVM = moveOrConvertImage + postVM;
memSize = 1024;
}
''
} ''
export PATH=${binPath}:$PATH
rootDisk=${if partitionTableType != "none" then "/dev/vda${rootPartition}" else "/dev/vda"}
@ -368,11 +403,13 @@ in pkgs.vmTools.runInLinuxVM (
cp ${configFile} /mnt/etc/nixos/configuration.nix
''}
# Set up core system link, GRUB, etc.
NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
${lib.optionalString installBootLoader ''
# Set up core system link, GRUB, etc.
NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
# The above scripts will generate a random machine-id and we don't want to bake a single ID into all our images
rm -f $mountPoint/etc/machine-id
# The above scripts will generate a random machine-id and we don't want to bake a single ID into all our images
rm -f $mountPoint/etc/machine-id
''}
# Set the ownerships of the contents. The modes are set in preVM.
# No globbing on targets, so no need to set -f
@ -398,4 +435,9 @@ in pkgs.vmTools.runInLinuxVM (
tune2fs -T now -c 0 -i 0 $rootDisk
''}
''
)
);
in
if onlyNixStore then
pkgs.runCommand name {}
(prepareImage + moveOrConvertImage + postVM)
else buildImage
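A minimal sketch of calling the builder with the new parameters (the
import path and surrounding arguments are assumptions about the call
site):
```nix
import <nixpkgs/nixos/lib/make-disk-image.nix> {
  inherit pkgs lib config;
  format = "qcow2";
  onlyNixStore = true;       # image contains only the Nix store
  installBootLoader = false; # required when onlyNixStore is set
  copyChannel = false;       # keep the image hash source-independent
  additionalPaths = [ ];     # extra store paths to copy into the image
}
```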

@ -6,9 +6,8 @@ from xml.sax.saxutils import XMLGenerator
from colorama import Style
import queue
import io
import _thread
import threading
import argparse
import atexit
import base64
import codecs
import os
@ -405,13 +404,14 @@ class Machine:
keep_vm_state: bool
allow_reboot: bool
process: Optional[subprocess.Popen] = None
pid: Optional[int] = None
monitor: Optional[socket.socket] = None
shell: Optional[socket.socket] = None
process: Optional[subprocess.Popen]
pid: Optional[int]
monitor: Optional[socket.socket]
shell: Optional[socket.socket]
serial_thread: Optional[threading.Thread]
booted: bool = False
connected: bool = False
booted: bool
connected: bool
# Store last serial console lines for use
# of wait_for_console_text
last_lines: Queue = Queue()
@ -444,6 +444,15 @@ class Machine:
self.cleanup_statedir()
self.state_dir.mkdir(mode=0o700, exist_ok=True)
self.process = None
self.pid = None
self.monitor = None
self.shell = None
self.serial_thread = None
self.booted = False
self.connected = False
@staticmethod
def create_startcommand(args: Dict[str, str]) -> StartCommand:
rootlog.warning(
@ -572,24 +581,40 @@ class Machine:
+ "'{}' but it is in state ‘{}".format(require_state, state)
)
def execute(self, command: str) -> Tuple[int, str]:
def _next_newline_closed_block_from_shell(self) -> str:
assert self.shell
output_buffer = []
while True:
# This receives up to 4096 bytes from the socket
chunk = self.shell.recv(4096)
if not chunk:
# Probably a broken pipe, return the output we have
break
decoded = chunk.decode()
output_buffer += [decoded]
if decoded[-1] == "\n":
break
return "".join(output_buffer)
def execute(self, command: str, check_return: bool = True) -> Tuple[int, str]:
self.connect()
out_command = "( set -euo pipefail; {} ); echo '|!=EOF' $?\n".format(command)
out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
assert self.shell
self.shell.send(out_command.encode())
output = ""
status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
# Get the output
output = base64.b64decode(self._next_newline_closed_block_from_shell())
while True:
chunk = self.shell.recv(4096).decode(errors="ignore")
match = status_code_pattern.match(chunk)
if match:
output += match[1]
status_code = int(match[2])
return (status_code, output)
output += chunk
if not check_return:
return (-1, output.decode())
# Get the return code
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
rc = int(self._next_newline_closed_block_from_shell().strip())
return (rc, output.decode())
def shell_interact(self) -> None:
"""Allows you to interact with the guest shell
@ -921,7 +946,8 @@ class Machine:
self.last_lines.put(line)
self.log_serial(line)
_thread.start_new_thread(process_serial_output, ())
self.serial_thread = threading.Thread(target=process_serial_output)
self.serial_thread.start()
self.wait_for_monitor_prompt()
@ -1021,9 +1047,12 @@ class Machine:
assert self.process
assert self.shell
assert self.monitor
assert self.serial_thread
self.process.terminate()
self.shell.close()
self.monitor.close()
self.serial_thread.join()
class VLan:
@ -1114,11 +1143,13 @@ class Driver:
for cmd in cmd(start_scripts)
]
@atexit.register
def clean_up() -> None:
with rootlog.nested("clean up"):
for machine in self.machines:
machine.release()
def __enter__(self) -> "Driver":
return self
def __exit__(self, *_: Any) -> None:
with rootlog.nested("cleanup"):
for machine in self.machines:
machine.release()
def subtest(self, name: str) -> Iterator[None]:
"""Group logs under a given test name"""
@ -1293,14 +1324,13 @@ if __name__ == "__main__":
if not args.keep_vm_state:
rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")
driver = Driver(
with Driver(
args.start_scripts, args.vlans, args.testscript.read_text(), args.keep_vm_state
)
if args.interactive:
ptpython.repl.embed(driver.test_symbols(), {})
else:
tic = time.time()
driver.run_tests()
toc = time.time()
rootlog.info(f"test script finished in {(toc-tic):.2f}s")
) as driver:
if args.interactive:
ptpython.repl.embed(driver.test_symbols(), {})
else:
tic = time.time()
driver.run_tests()
toc = time.time()
rootlog.info(f"test script finished in {(toc-tic):.2f}s")

@ -209,11 +209,41 @@ rec {
let
nodes = qemu_pkg:
let
testScript' =
# Call the test script with the computed nodes.
if lib.isFunction testScript
then testScript { nodes = nodes qemu_pkg; }
else testScript;
build-vms = import ./build-vms.nix {
inherit system lib pkgs minimal specialArgs;
extraConfigurations = extraConfigurations ++ [(
{ config, ... }:
{
virtualisation.qemu.package = qemu_pkg;
# Make sure all derivations referenced by the test
# script are available on the nodes. When the store is
# accessed through 9p, this isn't important, since
# everything in the store is available to the guest,
# but when building a root image it is, as all paths
# that should be available to the guest have to be
# copied to the image.
virtualisation.additionalPaths =
lib.optional
# A testScript may evaluate nodes, which has caused
# infinite recursions. The demand cycle involves:
# testScript -->
# nodes -->
# toplevel -->
# additionalPaths -->
# hasContext testScript' -->
# testScript (ad infinitum)
# If we don't need to build an image, we can break this
# cycle by short-circuiting when useNixStoreImage is false.
(config.virtualisation.useNixStoreImage && builtins.hasContext testScript')
(pkgs.writeStringReferencesToFile testScript');
# Ensure we do not use aliases. Ideally this is only set
# when the test framework is used by Nixpkgs NixOS tests.
nixpkgs.config.allowAliases = false;

@ -0,0 +1,102 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running ‘nixos-help’).
{ config, pkgs, lib, ... }:
with lib;
{
imports =
[ # Include the default lxd configuration.
../../../modules/virtualisation/lxc-container.nix
# Include the container-specific autogenerated configuration.
./lxd.nix
];
# networking.hostName = mkForce "nixos"; # Overwrite the hostname.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
# Set your time zone.
# time.timeZone = "Europe/Amsterdam";
# The global useDHCP flag is deprecated, therefore explicitly set to false here.
# Per-interface useDHCP will be mandatory in the future, so this generated config
# replicates the default behaviour.
networking.useDHCP = false;
networking.interfaces.eth0.useDHCP = true;
# Configure network proxy if necessary
# networking.proxy.default = "http://user:password@proxy:port/";
# networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# Select internationalisation properties.
# i18n.defaultLocale = "en_US.UTF-8";
# console = {
# font = "Lat2-Terminus16";
# keyMap = "us";
# };
# Enable the X11 windowing system.
# services.xserver.enable = true;
# Configure keymap in X11
# services.xserver.layout = "us";
# services.xserver.xkbOptions = "eurosign:e";
# Enable CUPS to print documents.
# services.printing.enable = true;
# Enable sound.
# sound.enable = true;
# hardware.pulseaudio.enable = true;
# Enable touchpad support (enabled default in most desktopManager).
# services.xserver.libinput.enable = true;
# Define a user account. Don't forget to set a password with ‘passwd’.
# users.users.jane = {
# isNormalUser = true;
# extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
# };
# List packages installed in system profile. To search, run:
# $ nix search wget
# environment.systemPackages = with pkgs; [
# vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
# wget
# firefox
# ];
# Some programs need SUID wrappers, can be configured further or are
# started in user sessions.
# programs.mtr.enable = true;
# programs.gnupg.agent = {
# enable = true;
# enableSSHSupport = true;
# };
# List services that you want to enable:
# Enable the OpenSSH daemon.
# services.openssh.enable = true;
# Open ports in the firewall.
# networking.firewall.allowedTCPPorts = [ ... ];
# networking.firewall.allowedUDPPorts = [ ... ];
# Or disable the firewall altogether.
# networking.firewall.enable = false;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "21.05"; # Did you read the comment?
# As this is intended as a standalone image, undo some of the minimal profile stuff
documentation.enable = true;
documentation.nixos.enable = true;
environment.noXlibs = false;
}

@ -0,0 +1,34 @@
{ lib, config, pkgs, ... }:
with lib;
{
imports = [
../../../modules/virtualisation/lxc-container.nix
];
virtualisation.lxc.templates.nix = {
enable = true;
target = "/etc/nixos/lxd.nix";
template = ./nix.tpl;
when = [ "create" "copy" ];
};
# copy the config for nixos-rebuild
system.activationScripts.config = ''
if [ ! -e /etc/nixos/configuration.nix ]; then
mkdir -p /etc/nixos
cat ${./lxd-image-inner.nix} > /etc/nixos/configuration.nix
sed 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
fi
'';
# Network
networking.useDHCP = false;
networking.interfaces.eth0.useDHCP = true;
# As this is intended as a standalone image, undo some of the minimal profile stuff
documentation.enable = true;
documentation.nixos.enable = true;
environment.noXlibs = false;
}

@ -0,0 +1,9 @@
{ lib, config, pkgs, ... }:
with lib;
# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY
{
networking.hostName = "{{ container.name }}";
}

@ -116,7 +116,7 @@ in
{ console.keyMap = with config.services.xserver;
mkIf cfg.useXkbConfig
(pkgs.runCommand "xkb-console-keymap" { preferLocalBuild = true; } ''
'${pkgs.ckbcomp}/bin/ckbcomp' \
'${pkgs.buildPackages.ckbcomp}/bin/ckbcomp' \
${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
"-I${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
} \

@ -0,0 +1,12 @@
{ config, lib, pkgs, ... }:
let
cfg = config.hardware.flirc;
in
{
options.hardware.flirc.enable = lib.mkEnableOption "software to configure a Flirc USB device";
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.flirc ];
services.udev.packages = [ pkgs.flirc ];
};
}

@ -0,0 +1,18 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.gkraken;
in
{
options.hardware.gkraken = {
enable = mkEnableOption "gkraken's udev rules for NZXT AIO liquid coolers";
};
config = mkIf cfg.enable {
services.udev.packages = with pkgs; [
gkraken
];
};
}

@ -116,19 +116,14 @@ in {
description = "Ensure NixOS-configured CUPS printers";
wantedBy = [ "multi-user.target" ];
requires = [ cupsUnit ];
# in contrast to cups.socket, for cups.service, this is actually not enough,
# as the cups service reports its activation before clients can actually interact with it.
# Because of this, commands like `lpinfo -v` will report a bad file descriptor
# due to the missing UNIX socket without sufficient sleep time.
after = [ cupsUnit ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
# sleep 10 is required to wait until cups.service is actually initialized and has created its UNIX socket file
script = (optionalString (!config.services.printing.startWhenNeeded) "sleep 10\n")
+ (concatMapStringsSep "\n" ensurePrinter cfg.ensurePrinters)
script = concatMapStringsSep "\n" ensurePrinter cfg.ensurePrinters
+ optionalString (cfg.ensureDefaultPrinter != null) (ensureDefaultPrinter cfg.ensureDefaultPrinter);
};
};

@ -284,6 +284,10 @@ in
source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
};
# 'nvidia_x11' installs its files to /run/opengl-driver/...
environment.etc."egl/egl_external_platform.d".source =
"/run/opengl-driver/share/egl/egl_external_platform.d/";
hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out;

@ -60,6 +60,35 @@ chmod 0755 "$mountPoint/dev" "$mountPoint/sys"
mount --rbind /dev "$mountPoint/dev"
mount --rbind /sys "$mountPoint/sys"
# modified from https://github.com/archlinux/arch-install-scripts/blob/bb04ab435a5a89cd5e5ee821783477bc80db797f/arch-chroot.in#L26-L52
chroot_add_resolv_conf() {
local chrootdir=$1 resolv_conf=$1/etc/resolv.conf
[[ -e /etc/resolv.conf ]] || return 0
# Handle resolv.conf as a symlink to somewhere else.
if [[ -L $chrootdir/etc/resolv.conf ]]; then
# readlink(1) should always give us *something* since we know at this point
# it's a symlink. For simplicity, ignore the case of nested symlinks.
# We also ignore the possibility of `../`s escaping the root.
resolv_conf=$(readlink "$chrootdir/etc/resolv.conf")
if [[ $resolv_conf = /* ]]; then
resolv_conf=$chrootdir$resolv_conf
else
resolv_conf=$chrootdir/etc/$resolv_conf
fi
fi
# ensure file exists to bind mount over
if [[ ! -f $resolv_conf ]]; then
install -Dm644 /dev/null "$resolv_conf" || return 1
fi
mount --bind /etc/resolv.conf "$resolv_conf"
}
chroot_add_resolv_conf "$mountPoint" || echo "ERROR: failed to set up resolv.conf"
(
# If silent, write both stdout and stderr of activation script to /dev/null
# otherwise, write both streams to stderr of this process

@ -48,6 +48,8 @@
./hardware/corectrl.nix
./hardware/digitalbitbox.nix
./hardware/device-tree.nix
./hardware/gkraken.nix
./hardware/flirc.nix
./hardware/i2c.nix
./hardware/sensor/hddtemp.nix
./hardware/sensor/iio.nix
@ -389,6 +391,7 @@
./services/display-managers/greetd.nix
./services/editors/emacs.nix
./services/editors/infinoted.nix
./services/finance/odoo.nix
./services/games/crossfire-server.nix
./services/games/deliantra-server.nix
./services/games/factorio.nix
@ -419,6 +422,7 @@
./services/hardware/pcscd.nix
./services/hardware/pommed.nix
./services/hardware/power-profiles-daemon.nix
./services/hardware/rasdaemon.nix
./services/hardware/ratbagd.nix
./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix
@ -583,6 +587,7 @@
./services/misc/safeeyes.nix
./services/misc/sdrplay.nix
./services/misc/sickbeard.nix
./services/misc/signald.nix
./services/misc/siproxd.nix
./services/misc/snapper.nix
./services/misc/sonarr.nix
@ -755,6 +760,7 @@
./services/networking/iscsi/root-initiator.nix
./services/networking/iscsi/target.nix
./services/networking/iwd.nix
./services/networking/jibri/default.nix
./services/networking/jicofo.nix
./services/networking/jitsi-videobridge.nix
./services/networking/kea.nix
@ -766,6 +772,7 @@
./services/networking/libreswan.nix
./services/networking/lldpd.nix
./services/networking/logmein-hamachi.nix
./services/networking/lxd-image-server.nix
./services/networking/mailpile.nix
./services/networking/magic-wormhole-mailbox-server.nix
./services/networking/matterbridge.nix
@ -836,6 +843,7 @@
./services/networking/rpcbind.nix
./services/networking/rxe.nix
./services/networking/sabnzbd.nix
./services/networking/seafile.nix
./services/networking/searx.nix
./services/networking/skydns.nix
./services/networking/shadowsocks.nix
@ -966,6 +974,7 @@
./services/web-apps/atlassian/jira.nix
./services/web-apps/bookstack.nix
./services/web-apps/calibre-web.nix
./services/web-apps/code-server.nix
./services/web-apps/convos.nix
./services/web-apps/cryptpad.nix
./services/web-apps/dex.nix
@ -988,6 +997,7 @@
./services/web-apps/jitsi-meet.nix
./services/web-apps/keycloak.nix
./services/web-apps/lemmy.nix
./services/web-apps/invidious.nix
./services/web-apps/limesurvey.nix
./services/web-apps/mastodon.nix
./services/web-apps/mattermost.nix
@ -998,6 +1008,7 @@
./services/web-apps/nexus.nix
./services/web-apps/node-red.nix
./services/web-apps/pict-rs.nix
./services/web-apps/peertube.nix
./services/web-apps/plantuml-server.nix
./services/web-apps/plausible.nix
./services/web-apps/pgpkeyserver-lite.nix

@ -4,7 +4,9 @@
with lib;
{
let cfg = config.programs.file-roller;
in {
# Added 2019-08-09
imports = [
@ -21,6 +23,13 @@ with lib;
enable = mkEnableOption "File Roller, an archive manager for GNOME";
package = mkOption {
type = types.package;
default = pkgs.gnome.file-roller;
defaultText = literalExpression "pkgs.gnome.file-roller";
description = "File Roller derivation to use.";
};
};
};
@ -28,11 +37,11 @@ with lib;
###### implementation
config = mkIf config.programs.file-roller.enable {
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.gnome.file-roller ];
environment.systemPackages = [ cfg.package ];
services.dbus.packages = [ pkgs.gnome.file-roller ];
services.dbus.packages = [ cfg.package ];
};

@ -139,7 +139,7 @@ in {
environment.systemPackages = [
cfg.finalPackage
];
environment.variables = { EDITOR = mkOverride 900 "nvim"; };
environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "nvim");
programs.neovim.finalPackage = pkgs.wrapNeovim cfg.package {
inherit (cfg) viAlias vimAlias;

@ -192,6 +192,14 @@ let
++ data.extraLegoRenewFlags
);
# We need to collect all the ACME webroots to grant them write
# access in the systemd service.
webroots =
lib.remove null
(lib.unique
(builtins.map
(certAttrs: certAttrs.webroot)
(lib.attrValues config.security.acme.certs)));
in {
inherit accountHash cert selfsignedDeps;
@ -288,6 +296,8 @@ let
"acme/.lego/accounts/${accountHash}"
];
ReadWritePaths = commonServiceConfig.ReadWritePaths ++ webroots;
# Needs to be space separated, but can't use a multiline string because that'll include newlines
BindPaths = [
"${accountDir}:/tmp/accounts"

@ -8,12 +8,10 @@ let
cacertPackage = pkgs.cacert.override {
blacklist = cfg.caCertificateBlacklist;
extraCertificateFiles = cfg.certificateFiles;
extraCertificateStrings = cfg.certificates;
};
caCertificates = pkgs.runCommand "ca-certificates.crt" {
files = cfg.certificateFiles ++ [ (builtins.toFile "extra.crt" (concatStringsSep "\n" cfg.certificates)) ];
preferLocalBuild = true;
} "awk 1 $files > $out"; # awk ensures a newline between each pair of consecutive files
caBundle = "${cacertPackage}/etc/ssl/certs/ca-bundle.crt";
in
@ -74,16 +72,17 @@ in
config = {
security.pki.certificateFiles = [ "${cacertPackage}/etc/ssl/certs/ca-bundle.crt" ];
# NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility.
environment.etc."ssl/certs/ca-certificates.crt".source = caCertificates;
environment.etc."ssl/certs/ca-certificates.crt".source = caBundle;
# Old NixOS compatibility.
environment.etc."ssl/certs/ca-bundle.crt".source = caCertificates;
environment.etc."ssl/certs/ca-bundle.crt".source = caBundle;
# CentOS/Fedora compatibility.
environment.etc."pki/tls/certs/ca-bundle.crt".source = caCertificates;
environment.etc."pki/tls/certs/ca-bundle.crt".source = caBundle;
# P11-Kit trust source.
environment.etc."ssl/trust-source".source = "${cacertPackage.p11kit}/etc/ssl/trust-source";
};

@ -428,7 +428,7 @@ let
${optionalString config.security.pam.enableEcryptfs
"auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap"}
${optionalString cfg.pamMount
"auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
"auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so disable_interactive"}
${optionalString cfg.enableKwallet
("auth optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5")}
@ -483,13 +483,13 @@ let
if config.boot.isContainer then "optional" else "required"
} pam_loginuid.so"}
${optionalString cfg.makeHomeDir
"session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=0022"}
"session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=0077"}
${optionalString cfg.updateWtmp
"session required ${pkgs.pam}/lib/security/pam_lastlog.so silent"}
${optionalString config.security.pam.enableEcryptfs
"session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"}
${optionalString cfg.pamMount
"session optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
"session optional ${pkgs.pam_mount}/lib/security/pam_mount.so disable_interactive"}
${optionalString use_ldap
"session optional ${pam_ldap}/lib/security/pam_ldap.so"}
${optionalString config.services.sssd.enable

@ -42,7 +42,7 @@ in {
environment.ROON_DATAROOT = "/var/lib/${name}";
serviceConfig = {
ExecStart = "${pkgs.roon-server}/start.sh";
ExecStart = "${pkgs.roon-server}/bin/RoonServer";
LimitNOFILE = 8192;
User = cfg.user;
Group = cfg.group;

@ -42,12 +42,16 @@ let
${cfg.postInit}
fi
'' + ''
borg create $extraArgs \
--compression ${cfg.compression} \
--exclude-from ${mkExcludeFile cfg} \
$extraCreateArgs \
"::$archiveName$archiveSuffix" \
${escapeShellArgs cfg.paths}
(
set -o pipefail
${optionalString (cfg.dumpCommand != null) ''${escapeShellArg cfg.dumpCommand} | \''}
borg create $extraArgs \
--compression ${cfg.compression} \
--exclude-from ${mkExcludeFile cfg} \
$extraCreateArgs \
"::$archiveName$archiveSuffix" \
${if cfg.paths == null then "-" else escapeShellArgs cfg.paths}
)
'' + optionalString cfg.appendFailedSuffix ''
borg rename $extraArgs \
"::$archiveName$archiveSuffix" "$archiveName"
@ -182,6 +186,14 @@ let
+ " without at least one public key";
};
mkSourceAssertions = name: cfg: {
assertion = count isNull [ cfg.dumpCommand cfg.paths ] == 1;
message = ''
Exactly one of borgbackup.jobs.${name}.paths or borgbackup.jobs.${name}.dumpCommand
must be set.
'';
};
mkRemovableDeviceAssertions = name: cfg: {
assertion = !(isLocalPath cfg.repo) -> !cfg.removableDevice;
message = ''
@ -240,11 +252,25 @@ in {
options = {
paths = mkOption {
type = with types; coercedTo str lib.singleton (listOf str);
description = "Path(s) to back up.";
type = with types; nullOr (coercedTo str lib.singleton (listOf str));
default = null;
description = ''
Path(s) to back up.
Mutually exclusive with <option>dumpCommand</option>.
'';
example = "/home/user";
};
dumpCommand = mkOption {
type = with types; nullOr path;
default = null;
description = ''
Back up the stdout of this program instead of filesystem paths.
Mutually exclusive with <option>paths</option>.
'';
example = "/path/to/createZFSsend.sh";
};
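# A usage sketch (hypothetical job name and script path); the stdout of
# dumpCommand is piped into `borg create`, see the pipeline above:
#   services.borgbackup.jobs.zfs-send = {
#     dumpCommand = "/path/to/createZFSsend.sh";
#     repo = "/var/backup/zfs";
#     encryption.mode = "none";
#   };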
repo = mkOption {
type = types.str;
description = "Remote or local repository to back up to.";
@ -657,6 +683,7 @@ in {
assertions =
mapAttrsToList mkPassAssertion jobs
++ mapAttrsToList mkKeysAssertion repos
++ mapAttrsToList mkSourceAssertions jobs
++ mapAttrsToList mkRemovableDeviceAssertions jobs;
system.activationScripts = mapAttrs' mkActivationScript jobs;

@ -11,7 +11,7 @@ in
description = ''
Periodic backups to create with Restic.
'';
type = types.attrsOf (types.submodule ({ name, ... }: {
type = types.attrsOf (types.submodule ({ config, name, ... }: {
options = {
passwordFile = mkOption {
type = types.str;
@ -21,6 +21,17 @@ in
example = "/etc/nixos/restic-password";
};
environmentFile = mkOption {
type = with types; nullOr str;
# added on 2021-08-28, s3CredentialsFile should
# be removed in the future (+ remember the warning)
default = config.s3CredentialsFile;
description = ''
File containing the credentials to access the repository, in the
format of an EnvironmentFile as described by systemd.exec(5)
'';
};
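# Migration sketch (hypothetical backup name and path): the file uses the
# EnvironmentFile format, e.g. AWS_ACCESS_KEY_ID=... lines for S3, exactly
# as the deprecated s3CredentialsFile did:
#   services.restic.backups.remote.environmentFile = "/etc/nixos/restic.env";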
s3CredentialsFile = mkOption {
type = with types; nullOr str;
default = null;
@ -212,6 +223,7 @@ in
};
config = {
warnings = mapAttrsToList (n: v: "services.restic.backups.${n}.s3CredentialsFile is deprecated, please use services.restic.backups.${n}.environmentFile instead.") (filterAttrs (n: v: v.s3CredentialsFile != null) config.services.restic.backups);
systemd.services =
mapAttrs' (name: backup:
let
@ -251,8 +263,8 @@ in
RuntimeDirectory = "restic-backups-${name}";
CacheDirectory = "restic-backups-${name}";
CacheDirectoryMode = "0700";
} // optionalAttrs (backup.s3CredentialsFile != null) {
EnvironmentFile = backup.s3CredentialsFile;
} // optionalAttrs (backup.environmentFile != null) {
EnvironmentFile = backup.environmentFile;
};
} // optionalAttrs (backup.initialize || backup.dynamicFilesFrom != null) {
preStart = ''

@ -1,4 +1,4 @@
{ hadoop, pkgs }:
{ cfg, pkgs, lib }:
let
propertyXml = name: value: ''
<property>
@ -13,19 +13,32 @@ let
${builtins.concatStringsSep "\n" (pkgs.lib.mapAttrsToList propertyXml properties)}
</configuration>
'';
cfgLine = name: value: ''
${name}=${builtins.toString value}
'';
cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
# generated by NixOS
${builtins.concatStringsSep "" (pkgs.lib.mapAttrsToList cfgLine properties)}
'';
userFunctions = ''
hadoop_verify_logdir() {
echo Skipping verification of log directory
}
'';
hadoopEnv = ''
export HADOOP_LOG_DIR=/tmp/hadoop/$USER
'';
in
pkgs.buildEnv {
name = "hadoop-conf";
paths = [
(siteXml "core-site.xml" hadoop.coreSite)
(siteXml "hdfs-site.xml" hadoop.hdfsSite)
(siteXml "mapred-site.xml" hadoop.mapredSite)
(siteXml "yarn-site.xml" hadoop.yarnSite)
(pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions)
];
}
pkgs.runCommand "hadoop-conf" {} ''
mkdir -p $out/
cp ${siteXml "core-site.xml" cfg.coreSite}/* $out/
cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
cp ${cfg.log4jProperties} $out/log4j.properties
${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") cfg.extraConfDirs}
''

@ -1,5 +1,7 @@
{ config, lib, pkgs, ...}:
let
cfg = config.services.hadoop;
in
with lib;
{
imports = [ ./yarn.nix ./hdfs.nix ];
@ -13,40 +15,125 @@ with lib;
"fs.defaultFS" = "hdfs://localhost";
}
'';
description = "Hadoop core-site.xml definition";
description = ''
Hadoop core-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
'';
};
hdfsSite = mkOption {
default = {};
default = {
"dfs.namenode.rpc-bind-host" = "0.0.0.0";
};
type = types.attrsOf types.anything;
example = literalExpression ''
{
"dfs.nameservices" = "namenode1";
}
'';
description = "Hadoop hdfs-site.xml definition";
description = ''
Hadoop hdfs-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
'';
};
mapredSite = mkOption {
default = {};
default = {
"mapreduce.framework.name" = "yarn";
"yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
"mapreduce.map.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
"mapreduce.reduce.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
};
type = types.attrsOf types.anything;
example = literalExpression ''
{
"mapreduce.map.cpu.vcores" = "1";
options.services.hadoop.mapredSite.default // {
"mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
}
'';
description = "Hadoop mapred-site.xml definition";
description = ''
Hadoop mapred-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
'';
};
yarnSite = mkOption {
default = {};
default = {
"yarn.nodemanager.admin-env" = "PATH=$PATH";
"yarn.nodemanager.aux-services" = "mapreduce_shuffle";
"yarn.nodemanager.aux-services.mapreduce_shuffle.class" = "org.apache.hadoop.mapred.ShuffleHandler";
"yarn.nodemanager.bind-host" = "0.0.0.0";
"yarn.nodemanager.container-executor.class" = "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
"yarn.nodemanager.env-whitelist" = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,LANG,TZ";
"yarn.nodemanager.linux-container-executor.group" = "hadoop";
"yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
"yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
"yarn.resourcemanager.bind-host" = "0.0.0.0";
"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
};
type = types.attrsOf types.anything;
example = literalExpression ''
options.services.hadoop.yarnSite.default // {
"yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
}
'';
description = ''
Hadoop yarn-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
'';
};
httpfsSite = mkOption {
default = { };
type = types.attrsOf types.anything;
example = literalExpression ''
{
"yarn.resourcemanager.ha.id" = "resourcemanager1";
"hadoop.http.max.threads" = 500;
}
'';
description = "Hadoop yarn-site.xml definition";
description = ''
Hadoop httpfs-site.xml definition
<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-hdfs-httpfs/httpfs-default.html"/>
'';
};
log4jProperties = mkOption {
default = "${cfg.package}/lib/${cfg.package.untarDir}/etc/hadoop/log4j.properties";
type = types.path;
example = literalExpression ''
"''${pkgs.hadoop}/lib/''${pkgs.hadoop.untarDir}/etc/hadoop/log4j.properties";
'';
description = "log4j.properties file added to HADOOP_CONF_DIR";
};
containerExecutorCfg = mkOption {
default = {
# must be the same as yarn.nodemanager.linux-container-executor.group in yarnSite
"yarn.nodemanager.linux-container-executor.group"="hadoop";
"min.user.id"=1000;
"feature.terminal.enabled"=1;
};
type = types.attrsOf types.anything;
example = literalExpression ''
options.services.hadoop.containerExecutorCfg.default // {
"feature.terminal.enabled" = 0;
}
'';
description = ''
Yarn container-executor.cfg definition
<link xlink:href="https://hadoop.apache.org/docs/r2.7.2/hadoop-yarn/hadoop-yarn-site/SecureContainer.html"/>
'';
};
extraConfDirs = mkOption {
default = [];
type = types.listOf types.path;
example = literalExpression ''
[
./extraHDFSConfs
./extraYARNConfs
]
'';
description = "Directories containing additional config files to be added to HADOOP_CONF_DIR";
};
package = mkOption {
@ -60,10 +147,17 @@ with lib;
config = mkMerge [
(mkIf (builtins.hasAttr "yarn" config.users.users ||
builtins.hasAttr "hdfs" config.users.users) {
builtins.hasAttr "hdfs" config.users.users ||
builtins.hasAttr "httpfs" config.users.users) {
users.groups.hadoop = {
gid = config.ids.gids.hadoop;
};
environment = {
systemPackages = [ cfg.package ];
etc."hadoop-conf".source = let
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
in "${hadoopConf}";
};
})
];

@ -1,66 +1,190 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.hadoop;
hadoopConf = import ./conf.nix { hadoop = cfg; pkgs = pkgs; };
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
restartIfChanged = mkOption {
type = types.bool;
description = ''
Automatically restart the service on config change.
This can be set to false to defer restarts on clusters running critical applications.
Please consider the security implications of inadvertently running an older version,
and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
'';
default = false;
};
in
with lib;
{
options.services.hadoop.hdfs = {
namenode.enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop HDFS NameNode
'';
namenode = {
enable = mkEnableOption "Whether to run the HDFS NameNode";
formatOnInit = mkOption {
type = types.bool;
default = false;
description = ''
Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
For HA clusters, initialization involves multiple steps across multiple nodes. Follow the guide at
<link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
to initialize an HA cluster manually.
'';
};
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for namenode
'';
};
};
datanode = {
enable = mkEnableOption "Whether to run the HDFS DataNode";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for datanode
'';
};
};
journalnode = {
enable = mkEnableOption "Whether to run the HDFS JournalNode";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for journalnode
'';
};
};
zkfc = {
enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
inherit restartIfChanged;
};
datanode.enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop HDFS DataNode
'';
httpfs = {
enable = mkEnableOption "Whether to run the HDFS HTTPfs server";
tempPath = mkOption {
type = types.path;
default = "/tmp/hadoop/httpfs";
description = ''
HTTPFS_TEMP path used by HTTPFS
'';
};
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for HTTPFS
'';
};
};
};
config = mkMerge [
(mkIf cfg.hdfs.namenode.enabled {
(mkIf cfg.hdfs.namenode.enable {
systemd.services.hdfs-namenode = {
description = "Hadoop HDFS NameNode";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.namenode) restartIfChanged;
environment = {
HADOOP_HOME = "${cfg.package}";
};
preStart = ''
preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
'';
'');
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-namenode";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
9870 # namenode.http-address
8020 # namenode.rpc-address
8022 # namenode.servicerpc-address
]);
})
(mkIf cfg.hdfs.datanode.enabled {
(mkIf cfg.hdfs.datanode.enable {
systemd.services.hdfs-datanode = {
description = "Hadoop HDFS DataNode";
wantedBy = [ "multi-user.target" ];
environment = {
HADOOP_HOME = "${cfg.package}";
};
inherit (cfg.hdfs.datanode) restartIfChanged;
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-datanode";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
9864 # datanode.http.address
9866 # datanode.address
9867 # datanode.ipc.address
]);
})
(mkIf cfg.hdfs.journalnode.enable {
systemd.services.hdfs-journalnode = {
description = "Hadoop HDFS JournalNode";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.journalnode) restartIfChanged;
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-journalnode";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
8480 # dfs.journalnode.http-address
8485 # dfs.journalnode.rpc-address
]);
})
(mkIf cfg.hdfs.zkfc.enable {
systemd.services.hdfs-zkfc = {
description = "Hadoop HDFS ZooKeeper failover controller";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.zkfc) restartIfChanged;
serviceConfig = {
User = "hdfs";
SyslogIdentifier = "hdfs-zkfc";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
Restart = "always";
};
};
})
(mkIf cfg.hdfs.httpfs.enable {
systemd.services.hdfs-httpfs = {
description = "Hadoop httpfs";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.httpfs) restartIfChanged;
environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
preStart = ''
mkdir -p $HTTPFS_TEMP
'';
serviceConfig = {
User = "httpfs";
SyslogIdentifier = "hdfs-httpfs";
ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
14000 # httpfs.http.port
]);
})
(mkIf (
cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled
cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
) {
users.users.hdfs = {
description = "Hadoop HDFS user";
@ -68,6 +192,12 @@ with lib;
uid = config.ids.uids.hdfs;
};
})
(mkIf cfg.hdfs.httpfs.enable {
users.users.httpfs = {
description = "Hadoop HTTPFS user";
group = "hadoop";
isSystemUser = true;
};
})
];
}
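# A single-node HDFS sketch using the renamed options above (relies on the
# new site-config defaults; formatOnInit is only safe for ephemeral clusters):
#   services.hadoop.hdfs = {
#     namenode = { enable = true; formatOnInit = true; };
#     datanode.enable = true;
#   };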

@ -1,30 +1,56 @@
{ config, lib, pkgs, ...}:
with lib;
let
cfg = config.services.hadoop;
hadoopConf = import ./conf.nix { hadoop = cfg; pkgs = pkgs; };
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
restartIfChanged = mkOption {
type = types.bool;
description = ''
Automatically restart the service on config change.
This can be set to false to defer restarts on clusters running critical applications.
Please consider the security implications of inadvertently running an older version,
and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
'';
default = false;
};
in
with lib;
{
options.services.hadoop.yarn = {
resourcemanager.enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop YARN ResourceManager
'';
resourcemanager = {
enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for resourcemanager
'';
};
};
nodemanager.enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run the Hadoop YARN NodeManager
'';
nodemanager = {
enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
inherit restartIfChanged;
addBinBash = mkOption {
type = types.bool;
default = true;
description = ''
Add /bin/bash. This is needed by the linux container executor's launch script.
'';
};
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
Open firewall ports for nodemanager.
Because containers can listen on any ephemeral port, TCP ports 1024-65535 will be opened.
'';
};
};
};
config = mkMerge [
(mkIf (
cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
) {
users.users.yarn = {
@ -34,40 +60,68 @@ with lib;
};
})
(mkIf cfg.yarn.resourcemanager.enabled {
(mkIf cfg.yarn.resourcemanager.enable {
systemd.services.yarn-resourcemanager = {
description = "Hadoop YARN ResourceManager";
wantedBy = [ "multi-user.target" ];
environment = {
HADOOP_HOME = "${cfg.package}";
};
inherit (cfg.yarn.resourcemanager) restartIfChanged;
serviceConfig = {
User = "yarn";
SyslogIdentifier = "yarn-resourcemanager";
ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
" resourcemanager";
Restart = "always";
};
};
networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
8088 # resourcemanager.webapp.address
8030 # resourcemanager.scheduler.address
8031 # resourcemanager.resource-tracker.address
8032 # resourcemanager.address
8033 # resourcemanager.admin.address
]);
})
(mkIf cfg.yarn.nodemanager.enabled {
(mkIf cfg.yarn.nodemanager.enable {
# Needed because yarn hardcodes /bin/bash in container start scripts
# These scripts can't be patched, they are generated at runtime
systemd.tmpfiles.rules = [
(mkIf cfg.yarn.nodemanager.addBinBash "L /bin/bash - - - - /run/current-system/sw/bin/bash")
];
systemd.services.yarn-nodemanager = {
description = "Hadoop YARN NodeManager";
wantedBy = [ "multi-user.target" ];
inherit (cfg.yarn.nodemanager) restartIfChanged;
environment = {
HADOOP_HOME = "${cfg.package}";
};
preStart = ''
# create log dir
mkdir -p /var/log/hadoop/yarn/nodemanager
chown yarn:hadoop /var/log/hadoop/yarn/nodemanager
# set up setuid container executor binary
rm -rf /run/wrappers/yarn-nodemanager/ || true
mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop}
cp ${cfg.package}/lib/${cfg.package.untarDir}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
cp ${hadoopConf}/container-executor.cfg /run/wrappers/yarn-nodemanager/etc/hadoop/
'';
serviceConfig = {
User = "yarn";
SyslogIdentifier = "yarn-nodemanager";
PermissionsStartOnly = true;
ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
" nodemanager";
Restart = "always";
};
};
networking.firewall.allowedTCPPortRanges = [
(mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
];
})
];

@ -13,10 +13,10 @@ let
# Use upstream config files passed through spa-json-dump as the base
# Patched here as necessary for them to work with this module
defaults = {
alsa-monitor = (builtins.fromJSON (builtins.readFile ./alsa-monitor.conf.json));
bluez-monitor = (builtins.fromJSON (builtins.readFile ./bluez-monitor.conf.json));
media-session = (builtins.fromJSON (builtins.readFile ./media-session.conf.json));
v4l2-monitor = (builtins.fromJSON (builtins.readFile ./v4l2-monitor.conf.json));
alsa-monitor = lib.importJSON ./media-session/alsa-monitor.conf.json;
bluez-monitor = lib.importJSON ./media-session/bluez-monitor.conf.json;
media-session = lib.importJSON ./media-session/media-session.conf.json;
v4l2-monitor = lib.importJSON ./media-session/v4l2-monitor.conf.json;
};
configs = {
@ -43,8 +43,8 @@ in {
package = mkOption {
type = types.package;
default = pkgs.pipewire.mediaSession;
defaultText = literalExpression "pkgs.pipewire.mediaSession";
default = pkgs.pipewire-media-session;
defaultText = literalExpression "pkgs.pipewire-media-session";
description = ''
The pipewire-media-session derivation to use.
'';
@ -55,7 +55,7 @@ in {
type = json.type;
description = ''
Configuration for the media session core. For details see
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/media-session.conf
https://gitlab.freedesktop.org/pipewire/media-session/-/blob/${cfg.package.version}/src/daemon/media-session.d/media-session.conf
'';
default = {};
};
@ -64,7 +64,7 @@ in {
type = json.type;
description = ''
Configuration for the alsa monitor. For details see
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/alsa-monitor.conf
https://gitlab.freedesktop.org/pipewire/media-session/-/blob/${cfg.package.version}/src/daemon/media-session.d/alsa-monitor.conf
'';
default = {};
};
@ -73,7 +73,7 @@ in {
type = json.type;
description = ''
Configuration for the bluez5 monitor. For details see
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/bluez-monitor.conf
https://gitlab.freedesktop.org/pipewire/media-session/-/blob/${cfg.package.version}/src/daemon/media-session.d/bluez-monitor.conf
'';
default = {};
};
@ -82,7 +82,7 @@ in {
type = json.type;
description = ''
Configuration for the V4L2 monitor. For details see
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/v4l2-monitor.conf
https://gitlab.freedesktop.org/pipewire/media-session/-/blob/${cfg.package.version}/src/daemon/media-session.d/v4l2-monitor.conf
'';
default = {};
};

@ -22,12 +22,11 @@ let
# Use upstream config files passed through spa-json-dump as the base
# Patched here as necessary for them to work with this module
defaults = {
client = builtins.fromJSON (builtins.readFile ./client.conf.json);
client-rt = builtins.fromJSON (builtins.readFile ./client-rt.conf.json);
jack = builtins.fromJSON (builtins.readFile ./jack.conf.json);
# Remove session manager invocation from the upstream generated file, it points to the wrong path
pipewire = builtins.fromJSON (builtins.readFile ./pipewire.conf.json);
pipewire-pulse = builtins.fromJSON (builtins.readFile ./pipewire-pulse.conf.json);
client = lib.importJSON ./daemon/client.conf.json;
client-rt = lib.importJSON ./daemon/client-rt.conf.json;
jack = lib.importJSON ./daemon/jack.conf.json;
pipewire = lib.importJSON ./daemon/pipewire.conf.json;
pipewire-pulse = lib.importJSON ./daemon/pipewire-pulse.conf.json;
};
configs = {

@ -19,7 +19,7 @@ in {
enable = lib.mkEnableOption "Blackfire profiler agent";
settings = lib.mkOption {
description = ''
See https://blackfire.io/docs/configuration/agent
See https://blackfire.io/docs/up-and-running/configuration/agent
'';
type = lib.types.submodule {
freeformType = with lib.types; attrsOf str;
@ -53,13 +53,8 @@ in {
services.blackfire-agent.settings.socket = "unix:///run/${agentSock}";
systemd.services.blackfire-agent = {
description = "Blackfire agent";
serviceConfig = {
ExecStart = "${pkgs.blackfire}/bin/blackfire-agent";
RuntimeDirectory = "blackfire";
};
};
systemd.packages = [
pkgs.blackfire
];
};
}

@ -28,13 +28,14 @@ in {
enable = true;
settings = {
# You will need to get credentials at https://blackfire.io/my/settings/credentials
# You can also use other options described in https://blackfire.io/docs/configuration/agent
# You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent
server-id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX";
server-token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
};
};
# Make the agent run on start-up.
# (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138)
# Alternately, you can start it manually with `systemctl start blackfire-agent`.
systemd.services.blackfire-agent.wantedBy = [ "phpfpm-foo.service" ];
}</programlisting>

@ -0,0 +1,122 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.odoo;
format = pkgs.formats.ini {};
in
{
options = {
services.odoo = {
enable = mkEnableOption "odoo";
package = mkOption {
type = types.package;
default = pkgs.odoo;
defaultText = literalExpression "pkgs.odoo";
description = "Odoo package to use.";
};
addons = mkOption {
type = with types; listOf package;
default = [];
example = literalExpression "[ pkgs.odoo_enterprise ]";
description = "Odoo addons.";
};
settings = mkOption {
type = format.type;
default = {};
description = ''
Odoo configuration settings. For more details see <link xlink:href="https://www.odoo.com/documentation/15.0/administration/install/deploy.html"/>
'';
};
domain = mkOption {
type = with types; nullOr str;
description = "Domain to host Odoo with nginx";
default = null;
};
};
};
config = mkIf (cfg.enable) (let
cfgFile = format.generate "odoo.cfg" cfg.settings;
in {
services.nginx = mkIf (cfg.domain != null) {
upstreams = {
odoo.servers = {
"127.0.0.1:8069" = {};
};
odoochat.servers = {
"127.0.0.1:8072" = {};
};
};
virtualHosts."${cfg.domain}" = {
extraConfig = ''
proxy_read_timeout 720s;
proxy_connect_timeout 720s;
proxy_send_timeout 720s;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
'';
locations = {
"/longpolling" = {
proxyPass = "http://odoochat";
};
"/" = {
proxyPass = "http://odoo";
extraConfig = ''
proxy_redirect off;
'';
};
};
};
};
services.odoo.settings.options = {
proxy_mode = cfg.domain != null;
};
users.users.odoo = {
isSystemUser = true;
group = "odoo";
};
users.groups.odoo = {};
systemd.services.odoo = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "postgresql.service" ];
# pg_dump
path = [ config.services.postgresql.package ];
requires = [ "postgresql.service" ];
script = "HOME=$STATE_DIRECTORY ${cfg.package}/bin/odoo ${optionalString (cfg.addons != []) "--addons-path=${concatMapStringsSep "," escapeShellArg cfg.addons}"} -c ${cfgFile}";
serviceConfig = {
DynamicUser = true;
User = "odoo";
StateDirectory = "odoo";
};
};
services.postgresql = {
enable = true;
ensureUsers = [{
name = "odoo";
ensurePermissions = { "DATABASE odoo" = "ALL PRIVILEGES"; };
}];
ensureDatabases = [ "odoo" ];
};
});
}
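# Minimal usage sketch (hypothetical domain; PostgreSQL is provisioned by the
# module itself):
#   services.odoo = {
#     enable = true;
#     domain = "odoo.example.com"; # also creates the nginx vhost above
#   };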

@ -38,6 +38,7 @@ in
after = [ "lm_sensors.service" ];
serviceConfig = {
Restart = "on-failure";
ExecStart = "${pkgs.lm_sensors}/sbin/fancontrol ${configFile}";
};
};

@ -42,6 +42,8 @@ in
}
];
environment.systemPackages = [ package ];
services.dbus.packages = [ package ];
services.udev.packages = [ package ];

@ -0,0 +1,171 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.rasdaemon;
in
{
options.hardware.rasdaemon = {
enable = mkEnableOption "RAS logging daemon";
record = mkOption {
type = types.bool;
default = true;
description = "record events via sqlite3, required for ras-mc-ctl";
};
mainboard = mkOption {
type = types.lines;
default = "";
description = "Custom mainboard description, see <citerefentry><refentrytitle>ras-mc-ctl</refentrytitle><manvolnum>8</manvolnum></citerefentry> for more details.";
example = ''
vendor = ASRock
model = B450M Pro4
# it should default to such values from
# /sys/class/dmi/id/board_[vendor|name]
# alternatively one can supply a script
# that returns the same format as above
script = <path to script>
'';
};
# TODO, accept `rasdaemon.labels = " ";` or `rasdaemon.labels = { dell = " "; asrock = " "; };'
labels = mkOption {
type = types.lines;
default = "";
description = "Additional memory module label descriptions to be placed in /etc/ras/dimm_labels.d/labels";
example = ''
# vendor and model may be shown by 'ras-mc-ctl --mainboard'
vendor: ASRock
product: To Be Filled By O.E.M.
model: B450M Pro4
# these labels are names for the motherboard slots
# the numbers may be shown by `ras-mc-ctl --error-count`
# they are mc:csrow:channel
DDR4_A1: 0.2.0; DDR4_B1: 0.2.1;
DDR4_A2: 0.3.0; DDR4_B2: 0.3.1;
'';
};
config = mkOption {
type = types.lines;
default = "";
description = ''
rasdaemon configuration, currently only used for CE PFA.
For details, read the comments in rasdaemon.outPath/etc/sysconfig/rasdaemon.
'';
example = ''
# defaults from included config
PAGE_CE_REFRESH_CYCLE="24h"
PAGE_CE_THRESHOLD="50"
PAGE_CE_ACTION="soft"
'';
};
extraModules = mkOption {
type = types.listOf types.str;
default = [];
description = "extra kernel modules to load";
example = [ "i7core_edac" ];
};
testing = mkEnableOption "error injection infrastructure";
};
config = mkIf cfg.enable {
environment.etc = {
"ras/mainboard" = {
enable = cfg.mainboard != "";
text = cfg.mainboard;
};
# TODO, handle multiple cfg.labels.brand = " ";
"ras/dimm_labels.d/labels" = {
enable = cfg.labels != "";
text = cfg.labels;
};
"sysconfig/rasdaemon" = {
enable = cfg.config != "";
text = cfg.config;
};
};
environment.systemPackages = [ pkgs.rasdaemon ]
++ optionals (cfg.testing) (with pkgs.error-inject; [
edac-inject
mce-inject
aer-inject
]);
boot.initrd.kernelModules = cfg.extraModules
++ optionals (cfg.testing) [
# edac_core and amd64_edac should get loaded automatically
# i7core_edac may not be, and may not be required, but should load successfully
"edac_core"
"amd64_edac"
"i7core_edac"
"mce-inject"
"aer-inject"
];
boot.kernelPatches = optionals (cfg.testing) [{
name = "rasdaemon-tests";
patch = null;
extraConfig = ''
EDAC_DEBUG y
X86_MCE_INJECT y
PCIEPORTBUS y
PCIEAER y
PCIEAER_INJECT y
'';
}];
# A dedicated group was tried here, but rasdaemon appears to need higher
# privileges (it otherwise fails with `rasdaemon: Can't locate a mounted debugfs`).
# Most of this is taken from src/misc/ in the rasdaemon sources.
systemd.services = {
rasdaemon = {
description = "the RAS logging daemon";
documentation = [ "man:rasdaemon(1)" ];
wantedBy = [ "multi-user.target" ];
after = [ "syslog.target" ];
serviceConfig = {
StateDirectory = optionalString (cfg.record) "rasdaemon";
ExecStart = "${pkgs.rasdaemon}/bin/rasdaemon --foreground"
+ optionalString (cfg.record) " --record";
ExecStop = "${pkgs.rasdaemon}/bin/rasdaemon --disable";
Restart = "on-abort";
# src/misc/rasdaemon.service.in shows this:
# ExecStartPost = ${pkgs.rasdaemon}/bin/rasdaemon --enable
# but that results in unpredictable existence of the database
# and everything seems to be enabled without this...
};
};
ras-mc-ctl = mkIf (cfg.labels != "") {
description = "register DIMM labels on startup";
documentation = [ "man:ras-mc-ctl(8)" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.rasdaemon}/bin/ras-mc-ctl --register-labels";
RemainAfterExit = true;
};
};
};
};
meta.maintainers = [ maintainers.evils ];
}
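# Minimal usage sketch; with the default record = true, error counters can
# then be inspected with `ras-mc-ctl --error-count`:
#   hardware.rasdaemon.enable = true;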

@ -0,0 +1,105 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.signald;
dataDir = "/var/lib/signald";
defaultUser = "signald";
in
{
options.services.signald = {
enable = mkEnableOption "the signald service";
user = mkOption {
type = types.str;
default = defaultUser;
description = "User under which signald runs.";
};
group = mkOption {
type = types.str;
default = defaultUser;
description = "Group under which signald runs.";
};
socketPath = mkOption {
type = types.str;
default = "/run/signald/signald.sock";
description = "Path to the signald socket";
};
};
config = mkIf cfg.enable {
users.users = optionalAttrs (cfg.user == defaultUser) {
${defaultUser} = {
group = cfg.group;
isSystemUser = true;
};
};
users.groups = optionalAttrs (cfg.group == defaultUser) {
${defaultUser} = { };
};
systemd.services.signald = {
description = "A daemon for interacting with the Signal Private Messenger";
wants = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
User = cfg.user;
Group = cfg.group;
ExecStart = "${pkgs.signald}/bin/signald -d ${dataDir} -s ${cfg.socketPath}";
Restart = "on-failure";
StateDirectory = "signald";
RuntimeDirectory = "signald";
StateDirectoryMode = "0750";
RuntimeDirectoryMode = "0750";
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/hosts"
"-/etc/localtime"
];
CapabilityBoundingSet = "";
# ProtectClock= adds DeviceAllow=char-rtc r
DeviceAllow = "";
# Use a static user so other applications can access the files
#DynamicUser = true;
LockPersonality = true;
# Needed for java
#MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
# Needs network access
#PrivateNetwork = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectHome = true;
ProtectHostname = true;
# Would re-mount paths ignored by temporary root
#ProtectSystem = "strict";
ProtectControlGroups = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];
TemporaryFileSystem = "/:ro";
# Does not work well with the temporary root
#UMask = "0066";
};
};
};
}
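# Usage sketch; clients speak JSON over the UNIX socket (the socat call is an
# assumption for illustration, not part of this module):
#   services.signald.enable = true;
#   # then, interactively: socat - UNIX-CONNECT:/run/signald/signald.sock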

@ -0,0 +1,71 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.xmrig;
json = pkgs.formats.json { };
configFile = json.generate "config.json" cfg.settings;
in
with lib;
{
options = {
services.xmrig = {
enable = mkEnableOption "XMRig Mining Software";
package = mkOption {
type = types.package;
default = pkgs.xmrig;
example = literalExpression "pkgs.xmrig-mo";
description = "XMRig package to use.";
};
settings = mkOption {
default = { };
type = json.type;
example = literalExpression ''
{
autosave = true;
cpu = true;
opencl = false;
cuda = false;
pools = [
{
url = "pool.supportxmr.com:443";
user = "your-wallet";
keepalive = true;
tls = true;
}
]
}
'';
description = ''
XMRig configuration. Refer to
<link xlink:href="https://xmrig.com/docs/miner/config"/>
for details on supported values.
'';
};
};
};
config = mkIf cfg.enable {
systemd.services.xmrig = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "XMRig Mining Software Service";
serviceConfig = {
ExecStartPre = "${cfg.package}/bin/xmrig --config=${configFile} --dry-run";
ExecStart = "${cfg.package}/bin/xmrig --config=${configFile}";
DynamicUser = true;
};
};
};
meta = with lib; {
description = "XMRig Mining Software Service";
license = licenses.gpl3Only;
maintainers = with maintainers; [ ratsclub ];
};
}

@ -677,15 +677,13 @@ in {
RuntimeDirectory = "grafana";
RuntimeDirectoryMode = "0755";
# Hardening
CapabilityBoundingSet = [ "" ];
AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
CapabilityBoundingSet = if (cfg.port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
@ -701,6 +699,8 @@ in {
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
# Upstream grafana is not setting SystemCallFilter for compatibility
# reasons, see https://github.com/grafana/grafana/pull/40176
SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
UMask = "0027";
};

@ -7,6 +7,30 @@ let
workingDir = "/var/lib/" + cfg.stateDir;
prometheusYmlOut = "${workingDir}/prometheus-substituted.yaml";
writeConfig = pkgs.writeShellScriptBin "write-prometheus-config" ''
PATH="${makeBinPath (with pkgs; [ coreutils envsubst ])}"
touch '${prometheusYmlOut}'
chmod 600 '${prometheusYmlOut}'
envsubst -o '${prometheusYmlOut}' -i '${prometheusYml}'
'';
triggerReload = pkgs.writeShellScriptBin "trigger-reload-prometheus" ''
PATH="${makeBinPath (with pkgs; [ systemd ])}"
if systemctl -q is-active prometheus.service; then
systemctl reload prometheus.service
fi
'';
reload = pkgs.writeShellScriptBin "reload-prometheus" ''
PATH="${makeBinPath (with pkgs; [ systemd coreutils gnugrep ])}"
cursor=$(journalctl --show-cursor -n0 | grep -oP "cursor: \K.*")
kill -HUP $MAINPID
journalctl -u prometheus.service --after-cursor="$cursor" -f \
| grep -m 1 "Completed loading of configuration file" > /dev/null
'';
# a wrapper that verifies that the configuration is valid
promtoolCheck = what: name: file:
if cfg.checkConfig then
@ -47,7 +71,11 @@ let
cmdlineArgs = cfg.extraFlags ++ [
"--storage.tsdb.path=${workingDir}/data/"
"--config.file=/run/prometheus/prometheus-substituted.yaml"
"--config.file=${
if cfg.enableReload
then prometheusYmlOut
else "/run/prometheus/prometheus-substituted.yaml"
}"
"--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
"--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
"--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
@ -731,6 +759,25 @@ in {
'';
};
enableReload = mkOption {
default = false;
type = types.bool;
description = ''
Reload prometheus when the configuration file changes (instead of restarting it).
The following property holds: switching to a configuration
(<literal>switch-to-configuration</literal>) that changes the prometheus
configuration only finishes successfully when prometheus has finished
loading the new configuration.
Note that prometheus will also get reloaded when the location of the
<option>environmentFile</option> changes but not when its contents
change. So when you change its contents, make sure to reload prometheus
manually or include the hash of <option>environmentFile</option> in its
name.
'';
};
environmentFile = mkOption {
type = types.nullOr types.path;
default = null;
@ -928,7 +975,7 @@ in {
systemd.services.prometheus = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart = ''
preStart = mkIf (!cfg.enableReload) ''
${lib.getBin pkgs.envsubst}/bin/envsubst -o "/run/prometheus/prometheus-substituted.yaml" \
-i "${prometheusYml}"
'';
@ -936,9 +983,10 @@ in {
ExecStart = "${cfg.package}/bin/prometheus" +
optionalString (length cmdlineArgs != 0) (" \\\n " +
concatStringsSep " \\\n " cmdlineArgs);
ExecReload = mkIf cfg.enableReload "+${reload}/bin/reload-prometheus";
User = "prometheus";
Restart = "always";
EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
EnvironmentFile = mkIf (cfg.environmentFile != null && !cfg.enableReload) [ cfg.environmentFile ];
RuntimeDirectory = "prometheus";
RuntimeDirectoryMode = "0700";
WorkingDirectory = workingDir;
@ -946,5 +994,48 @@ in {
StateDirectoryMode = "0700";
};
};
systemd.services.prometheus-config-write = mkIf cfg.enableReload {
wantedBy = [ "prometheus.service" ];
before = [ "prometheus.service" ];
serviceConfig = {
Type = "oneshot";
User = "prometheus";
StateDirectory = cfg.stateDir;
StateDirectoryMode = "0700";
EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
ExecStart = "${writeConfig}/bin/write-prometheus-config";
};
};
# prometheus-config-reload will activate after prometheus. However, what we
# don't want is that on startup it immediately reloads prometheus because
# prometheus itself might have just started.
#
# Instead we only want to reload prometheus when the config file has
# changed. So on startup prometheus-config-reload will just output a
# harmless message and then stay active (RemainAfterExit).
#
# Then, when the config file has changed, switch-to-configuration notices
# that this service has changed and needs to be reloaded
# (reloadIfChanged). The reload command then actually writes the new config
# and reloads prometheus.
systemd.services.prometheus-config-reload = mkIf cfg.enableReload {
wantedBy = [ "prometheus.service" ];
after = [ "prometheus.service" ];
reloadIfChanged = true;
serviceConfig = {
Type = "oneshot";
User = "prometheus";
StateDirectory = cfg.stateDir;
StateDirectoryMode = "0700";
EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
RemainAfterExit = true;
TimeoutSec = 60;
ExecStart = "${pkgs.logger}/bin/logger 'prometheus-config-reload will only reload prometheus when reloaded itself.'";
ExecReload = [
"${writeConfig}/bin/write-prometheus-config"
"+${triggerReload}/bin/trigger-reload-prometheus"
];
};
};
};
}

@ -6,7 +6,7 @@ let
cfg = config.services.unifi-poller;
configFile = pkgs.writeText "unifi-poller.json" (generators.toJSON {} {
inherit (cfg) poller influxdb prometheus unifi;
inherit (cfg) poller influxdb loki prometheus unifi;
});
in {
@ -118,6 +118,61 @@ in {
};
};
loki = {
url = mkOption {
type = types.str;
default = "";
description = ''
URL of the Loki host.
'';
};
user = mkOption {
type = types.str;
default = "";
description = ''
Username for Loki.
'';
};
pass = mkOption {
type = types.path;
default = pkgs.writeText "unifi-poller-loki-default.password" "";
defaultText = "unifi-poller-influxdb-default.password";
description = ''
Path of a file containing the password for Loki.
This file needs to be readable by the unifi-poller user.
'';
apply = v: "file://${v}";
};
verify_ssl = mkOption {
type = types.bool;
default = false;
description = ''
Verify Loki's certificate.
'';
};
tenant_id = mkOption {
type = types.str;
default = "";
description = ''
Tenant ID to use in Loki.
'';
};
interval = mkOption {
type = types.str;
default = "2m";
description = ''
How often the events are polled and pushed to Loki.
'';
};
timeout = mkOption {
type = types.str;
default = "10s";
description = ''
Should be increased in case of timeout errors.
'';
};
};
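# Usage sketch shipping UniFi data to Loki (hypothetical local endpoint),
# combined with the per-controller save_events/save_alarms switches below:
#   services.unifi-poller.loki.url = "http://127.0.0.1:3100";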
unifi = let
controllerOptions = {
user = mkOption {
@ -157,7 +212,28 @@ in {
type = types.bool;
default = false;
description = ''
Collect and save data from the intrusion detection system to influxdb.
Collect and save data from the intrusion detection system to influxdb and Loki.
'';
};
save_events = mkOption {
type = types.bool;
default = false;
description = ''
Collect and save data from UniFi events to influxdb and Loki.
'';
};
save_alarms = mkOption {
type = types.bool;
default = false;
description = ''
Collect and save data from UniFi alarms to influxdb and Loki.
'';
};
save_anomalies = mkOption {
type = types.bool;
default = false;
description = ''
Collect and save data from UniFi anomalies to influxdb and Loki.
'';
};
save_dpi = mkOption {

@ -4,14 +4,16 @@ let
cfg = config.services.ddclient;
boolToStr = bool: if bool then "yes" else "no";
dataDir = "/var/lib/ddclient";
StateDirectory = builtins.baseNameOf dataDir;
RuntimeDirectory = StateDirectory;
configText = ''
configFile' = pkgs.writeText "ddclient.conf" ''
# This file can be used as a template for configFile or is automatically generated by Nix options.
cache=${dataDir}/ddclient.cache
foreground=YES
use=${cfg.use}
login=${cfg.username}
password=${cfg.password}
password=
protocol=${cfg.protocol}
${lib.optionalString (cfg.script != "") "script=${cfg.script}"}
${lib.optionalString (cfg.server != "") "server=${cfg.server}"}
@ -24,6 +26,17 @@ let
${cfg.extraConfig}
${lib.concatStringsSep "," cfg.domains}
'';
configFile = if (cfg.configFile != null) then cfg.configFile else configFile';
preStart = ''
install ${configFile} /run/${RuntimeDirectory}/ddclient.conf
${lib.optionalString (cfg.configFile == null) (if (cfg.passwordFile != null) then ''
password=$(head -n 1 ${cfg.passwordFile})
sed -i "s/^password=$/password=$password/" /run/${RuntimeDirectory}/ddclient.conf
'' else ''
sed -i '/^password=$/d' /run/${RuntimeDirectory}/ddclient.conf
'')}
'';
in
@ -37,6 +50,7 @@ with lib;
let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
in if value != "" then [ value ] else []))
(mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
(mkRemovedOptionModule [ "services" "ddclient" "password" ] "Use services.ddclient.passwordFile instead.")
];
###### interface
@ -53,6 +67,15 @@ with lib;
'';
};
package = mkOption {
type = package;
default = pkgs.ddclient;
defaultText = "pkgs.ddclient";
description = ''
The ddclient executable package run by the service.
'';
};
domains = mkOption {
default = [ "" ];
type = listOf str;
@ -69,11 +92,11 @@ with lib;
'';
};
password = mkOption {
default = "";
type = str;
passwordFile = mkOption {
default = null;
type = nullOr str;
description = ''
Password. WARNING: The password becomes world readable in the Nix store.
A file containing the password.
'';
};
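# Usage sketch (hypothetical path); the secret stays out of the Nix store and
# is substituted into the config at service start, see preStart above:
#   services.ddclient.passwordFile = "/run/keys/ddclient.password";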
@ -87,12 +110,11 @@ with lib;
};
configFile = mkOption {
default = "/etc/ddclient.conf";
type = path;
default = null;
type = nullOr path;
description = ''
Path to configuration file.
When set to the default '/etc/ddclient.conf' it will be populated with the various other options in this module. When it is changed (for example: '/root/nixos/secrets/ddclient.conf') the file read directly to configure ddclient. This is a source of impurity.
The purpose of this is to avoid placing secrets into the store.
When set this overrides the generated configuration from module options.
'';
example = "/root/nixos/secrets/ddclient.conf";
};
@ -184,25 +206,20 @@ with lib;
###### implementation
config = mkIf config.services.ddclient.enable {
environment.etc."ddclient.conf" = {
enable = cfg.configFile == "/etc/ddclient.conf";
mode = "0600";
text = configText;
};
systemd.services.ddclient = {
description = "Dynamic DNS Client";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
restartTriggers = [ config.environment.etc."ddclient.conf".source ];
restartTriggers = optional (cfg.configFile != null) cfg.configFile;
serviceConfig = rec {
serviceConfig = {
DynamicUser = true;
RuntimeDirectory = StateDirectory;
StateDirectory = builtins.baseNameOf dataDir;
RuntimeDirectoryMode = "0700";
inherit RuntimeDirectory;
inherit StateDirectory;
Type = "oneshot";
ExecStartPre = "!${lib.getBin pkgs.coreutils}/bin/install -m666 ${cfg.configFile} /run/${RuntimeDirectory}/ddclient.conf";
ExecStart = "${lib.getBin pkgs.ddclient}/bin/ddclient -file /run/${RuntimeDirectory}/ddclient.conf";
ExecStartPre = "!${pkgs.writeShellScript "ddclient-prestart" preStart}";
ExecStart = "${lib.getBin cfg.package}/bin/ddclient -file /run/${RuntimeDirectory}/ddclient.conf";
};
};

@ -0,0 +1,417 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.jibri;
# Copied from the jitsi-videobridge.nix file.
toHOCON = x:
if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}")
else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}"
else if isList x then "[${ concatMapStringsSep "," toHOCON x }]"
else builtins.toJSON x;
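# For example, toHOCON { a = 1; b.__hocon_envvar = "SECRET"; }
# evaluates to the string '{"a":1,"b":${SECRET}}'.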
# We're passing passwords in environment variables that have names generated
# from an attribute name, which may not be a valid bash identifier.
toVarName = s: "XMPP_PASSWORD_" + stringAsChars (c: if builtins.match "[A-Za-z0-9]" c != null then c else "_") s;
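# For example, toVarName "jitsi-meet_control" evaluates to
# "XMPP_PASSWORD_jitsi_meet_control".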
defaultJibriConfig = {
id = "";
single-use-mode = false;
api = {
http.external-api-port = 2222;
http.internal-api-port = 3333;
xmpp.environments = flip mapAttrsToList cfg.xmppEnvironments (name: env: {
inherit name;
xmpp-server-hosts = env.xmppServerHosts;
xmpp-domain = env.xmppDomain;
control-muc = {
domain = env.control.muc.domain;
room-name = env.control.muc.roomName;
nickname = env.control.muc.nickname;
};
control-login = {
domain = env.control.login.domain;
username = env.control.login.username;
password.__hocon_envvar = toVarName "${name}_control";
};
call-login = {
domain = env.call.login.domain;
username = env.call.login.username;
password.__hocon_envvar = toVarName "${name}_call";
};
strip-from-room-domain = env.stripFromRoomDomain;
usage-timeout = env.usageTimeout;
trust-all-xmpp-certs = env.disableCertificateVerification;
});
};
recording = {
recordings-directory = "/tmp/recordings";
finalize-script = "${cfg.finalizeScript}";
};
streaming.rtmp-allow-list = [ ".*" ];
chrome.flags = [
"--use-fake-ui-for-media-stream"
"--start-maximized"
"--kiosk"
"--enabled"
"--disable-infobars"
"--autoplay-policy=no-user-gesture-required"
]
++ lists.optional cfg.ignoreCert
"--ignore-certificate-errors";
stats.enable-stats-d = true;
webhook.subscribers = [ ];
jwt-info = { };
call-status-checks = {
no-media-timeout = "30 seconds";
all-muted-timeout = "10 minutes";
default-call-empty-timeout = "30 seconds";
};
};
# Allow overriding leaves of the default config despite types.attrs not doing any merging.
jibriConfig = recursiveUpdate defaultJibriConfig cfg.config;
configFile = pkgs.writeText "jibri.conf" (toHOCON { jibri = jibriConfig; });
in
{
options.services.jibri = with types; {
enable = mkEnableOption "Jitsi BRoadcasting Infrastructure. Currently Jibri must be run on a host that is also running <option>services.jitsi-meet.enable</option>, so for most use cases it will be simpler to run <option>services.jitsi-meet.jibri.enable</option>";
config = mkOption {
type = attrs;
default = { };
description = ''
Jibri configuration.
See <link xlink:href="https://github.com/jitsi/jibri/blob/master/src/main/resources/reference.conf" />
for default configuration with comments.
'';
};
finalizeScript = mkOption {
type = types.path;
default = pkgs.writeScript "finalize_recording.sh" ''
#!/bin/sh
RECORDINGS_DIR=$1
echo "This is a dummy finalize script" > /tmp/finalize.out
echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out
echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out
echo "or storage provider, etc.) in this script" >> /tmp/finalize.out
exit 0
'';
defaultText = literalExpression ''
pkgs.writeScript "finalize_recording.sh" ''''''
#!/bin/sh
RECORDINGS_DIR=$1
echo "This is a dummy finalize script" > /tmp/finalize.out
echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out
echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out
echo "or storage provider, etc.) in this script" >> /tmp/finalize.out
exit 0
'''''';
'';
example = literalExpression ''
pkgs.writeScript "finalize_recording.sh" ''''''
#!/bin/sh
RECORDINGS_DIR=$1
${pkgs.rclone}/bin/rclone copy $RECORDINGS_DIR RCLONE_REMOTE:jibri-recordings/ -v --log-file=/var/log/jitsi/jibri/recording-upload.txt
exit 0
'''''';
'';
description = ''
This script runs when jibri finishes recording a video of a conference.
'';
};
ignoreCert = mkOption {
type = bool;
default = false;
example = true;
description = ''
Whether to enable the flag "--ignore-certificate-errors" for the Chromium browser opened by Jibri.
Intended for use in automated tests or anywhere else where using a verified cert for Jitsi-Meet is not possible.
'';
};
xmppEnvironments = mkOption {
description = ''
XMPP servers to connect to.
'';
example = literalExpression ''
"jitsi-meet" = {
xmppServerHosts = [ "localhost" ];
xmppDomain = config.services.jitsi-meet.hostName;
control.muc = {
domain = "internal.''${config.services.jitsi-meet.hostName}";
roomName = "JibriBrewery";
nickname = "jibri";
};
control.login = {
domain = "auth.''${config.services.jitsi-meet.hostName}";
username = "jibri";
passwordFile = "/var/lib/jitsi-meet/jibri-auth-secret";
};
call.login = {
domain = "recorder.''${config.services.jitsi-meet.hostName}";
username = "recorder";
passwordFile = "/var/lib/jitsi-meet/jibri-recorder-secret";
};
usageTimeout = "0";
disableCertificateVerification = true;
stripFromRoomDomain = "conference.";
};
'';
default = { };
type = attrsOf (submodule ({ name, ... }: {
options = {
xmppServerHosts = mkOption {
type = listOf str;
example = [ "xmpp.example.org" ];
description = ''
Hostnames of the XMPP servers to connect to.
'';
};
xmppDomain = mkOption {
type = str;
example = "xmpp.example.org";
description = ''
The base XMPP domain.
'';
};
control.muc.domain = mkOption {
type = str;
description = ''
The domain part of the MUC to connect to for control.
'';
};
control.muc.roomName = mkOption {
type = str;
default = "JibriBrewery";
description = ''
The room name of the MUC to connect to for control.
'';
};
control.muc.nickname = mkOption {
type = str;
default = "jibri";
description = ''
The nickname for this Jibri instance in the MUC.
'';
};
control.login.domain = mkOption {
type = str;
description = ''
The domain part of the JID for this Jibri instance.
'';
};
control.login.username = mkOption {
type = str;
default = "jvb";
description = ''
User part of the JID.
'';
};
control.login.passwordFile = mkOption {
type = str;
example = "/run/keys/jibri-xmpp1";
description = ''
File containing the password for the user.
'';
};
call.login.domain = mkOption {
type = str;
example = "recorder.xmpp.example.org";
description = ''
The domain part of the JID for the recorder.
'';
};
call.login.username = mkOption {
type = str;
default = "recorder";
description = ''
User part of the JID for the recorder.
'';
};
call.login.passwordFile = mkOption {
type = str;
example = "/run/keys/jibri-recorder-xmpp1";
description = ''
File containing the password for the user.
'';
};
disableCertificateVerification = mkOption {
type = bool;
default = false;
description = ''
Whether to skip validation of the server's certificate.
'';
};
stripFromRoomDomain = mkOption {
type = str;
default = "0";
example = "conference.";
description = ''
The prefix to strip from the room's JID domain to derive the call URL.
'';
};
usageTimeout = mkOption {
type = str;
default = "0";
example = "1 hour";
description = ''
The maximum duration that a Jibri session may last.
A value of zero means the session can run indefinitely.
'';
};
};
config =
let
nick = mkDefault (builtins.replaceStrings [ "." ] [ "-" ] (
config.networking.hostName + optionalString (config.networking.domain != null) ".${config.networking.domain}"
));
in
{
call.login.username = nick;
control.muc.nickname = nick;
};
}));
};
};
config = mkIf cfg.enable {
users.groups.jibri = { };
users.groups.plugdev = { };
users.users.jibri = {
isSystemUser = true;
group = "jibri";
home = "/var/lib/jibri";
extraGroups = [ "jitsi-meet" "adm" "audio" "video" "plugdev" ];
};
systemd.services.jibri-xorg = {
description = "Jitsi Xorg Process";
after = [ "network.target" ];
wantedBy = [ "jibri.service" "jibri-icewm.service" ];
preStart = ''
cp --no-preserve=mode,ownership ${pkgs.jibri}/etc/jitsi/jibri/* /var/lib/jibri
mv /var/lib/jibri/{,.}asoundrc
'';
environment.DISPLAY = ":0";
serviceConfig = {
Type = "simple";
User = "jibri";
Group = "jibri";
KillMode = "process";
Restart = "on-failure";
RestartPreventExitStatus = 255;
StateDirectory = "jibri";
ExecStart = "${pkgs.xorg.xorgserver}/bin/Xorg -nocursor -noreset +extension RANDR +extension RENDER -config ${pkgs.jibri}/etc/jitsi/jibri/xorg-video-dummy.conf -logfile /dev/null :0";
};
};
systemd.services.jibri-icewm = {
description = "Jitsi Window Manager";
requires = [ "jibri-xorg.service" ];
after = [ "jibri-xorg.service" ];
wantedBy = [ "jibri.service" ];
environment.DISPLAY = ":0";
serviceConfig = {
Type = "simple";
User = "jibri";
Group = "jibri";
Restart = "on-failure";
RestartPreventExitStatus = 255;
StateDirectory = "jibri";
ExecStart = "${pkgs.icewm}/bin/icewm-session";
};
};
systemd.services.jibri = {
description = "Jibri Process";
requires = [ "jibri-icewm.service" "jibri-xorg.service" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ chromedriver chromium ffmpeg-full ];
script = (concatStrings (mapAttrsToList
(name: env: ''
export ${toVarName "${name}_control"}=$(cat ${env.control.login.passwordFile})
export ${toVarName "${name}_call"}=$(cat ${env.call.login.passwordFile})
'')
cfg.xmppEnvironments))
+ ''
${pkgs.jre8_headless}/bin/java -Djava.util.logging.config.file=${./logging.properties-journal} -Dconfig.file=${configFile} -jar ${pkgs.jibri}/opt/jitsi/jibri/jibri.jar --config /var/lib/jibri/jibri.json
'';
environment.HOME = "/var/lib/jibri";
serviceConfig = {
Type = "simple";
User = "jibri";
Group = "jibri";
Restart = "always";
RestartPreventExitStatus = 255;
StateDirectory = "jibri";
};
};
systemd.tmpfiles.rules = [
"d /var/log/jitsi/jibri 755 jibri jibri"
];
# Configure Chromium to not show the "Chrome is being controlled by automatic test software" message.
environment.etc."chromium/policies/managed/managed_policies.json".text = builtins.toJSON { CommandLineFlagSecurityWarningsEnabled = false; };
warnings = [ "All security warnings for Chromium have been disabled. This is necessary for Jibri, but it also impacts all other uses of Chromium on this system." ];
boot = {
extraModprobeConfig = ''
options snd-aloop enable=1,1,1,1,1,1,1,1
'';
kernelModules = [ "snd-aloop" ];
};
};
meta.maintainers = lib.teams.jitsi.members;
}

@ -0,0 +1,32 @@
handlers = java.util.logging.FileHandler
java.util.logging.FileHandler.level = FINE
java.util.logging.FileHandler.pattern = /var/log/jitsi/jibri/log.%g.txt
java.util.logging.FileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
java.util.logging.FileHandler.count = 10
java.util.logging.FileHandler.limit = 10000000
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.level = FINE
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.pattern = /var/log/jitsi/jibri/ffmpeg.%g.txt
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.count = 10
org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.limit = 10000000
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.level = FINE
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.pattern = /var/log/jitsi/jibri/pjsua.%g.txt
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.count = 10
org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.limit = 10000000
org.jitsi.jibri.selenium.util.BrowserFileHandler.level = FINE
org.jitsi.jibri.selenium.util.BrowserFileHandler.pattern = /var/log/jitsi/jibri/browser.%g.txt
org.jitsi.jibri.selenium.util.BrowserFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
org.jitsi.jibri.selenium.util.BrowserFileHandler.count = 10
org.jitsi.jibri.selenium.util.BrowserFileHandler.limit = 10000000
org.jitsi.level = FINE
org.jitsi.jibri.config.level = INFO
org.glassfish.level = INFO
org.osgi.level = INFO
org.jitsi.xmpp.level = INFO

@ -0,0 +1,138 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.lxd-image-server;
format = pkgs.formats.toml {};
location = "/var/www/simplestreams";
in
{
options = {
services.lxd-image-server = {
enable = mkEnableOption "lxd-image-server";
group = mkOption {
type = types.str;
description = "Group assigned to the user and the webroot directory.";
default = "nginx";
example = "www-data";
};
settings = mkOption {
type = format.type;
description = ''
Configuration for lxd-image-server.
For an example, see <link xlink:href="https://github.com/Avature/lxd-image-server/blob/master/config.toml"/>.
'';
default = {};
};
nginx = {
enable = mkEnableOption "nginx";
domain = mkOption {
type = types.str;
description = "Domain to use for nginx virtual host.";
example = "images.example.org";
};
};
};
};
config = mkMerge [
(mkIf (cfg.enable) {
users.users.lxd-image-server = {
isSystemUser = true;
group = cfg.group;
};
users.groups.${cfg.group} = {};
environment.etc."lxd-image-server/config.toml".source = format.generate "config.toml" cfg.settings;
services.logrotate.paths.lxd-image-server = {
path = "/var/log/lxd-image-server/lxd-image-server.log";
frequency = "daily";
keep = 21;
user = "lxd-image-server";
group = cfg.group;
extraConfig = ''
missingok
compress
delaycompress
copytruncate
notifempty
'';
};
systemd.tmpfiles.rules = [
"d /var/www/simplestreams 0755 lxd-image-server ${cfg.group}"
];
systemd.services.lxd-image-server = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "LXD Image Server";
script = ''
${pkgs.lxd-image-server}/bin/lxd-image-server init
${pkgs.lxd-image-server}/bin/lxd-image-server watch
'';
serviceConfig = {
User = "lxd-image-server";
Group = cfg.group;
DynamicUser = true;
LogsDirectory = "lxd-image-server";
RuntimeDirectory = "lxd-image-server";
ExecReload = "${pkgs.lxd-image-server}/bin/lxd-image-server reload";
ReadWritePaths = [ location ];
};
};
})
# This is separate so it can be enabled on mirrored hosts
(mkIf (cfg.nginx.enable) {
# https://github.com/Avature/lxd-image-server/blob/master/resources/nginx/includes/lxd-image-server.pkg.conf
services.nginx.virtualHosts = {
"${cfg.nginx.domain}" = {
forceSSL = true;
enableACME = mkDefault true;
root = location;
locations = {
"/streams/v1/" = {
index = "index.json";
};
# Serve json files with content type header application/json
"~ \.json$" = {
extraConfig = ''
add_header Content-Type application/json;
'';
};
"~ \.tar.xz$" = {
extraConfig = ''
add_header Content-Type application/octet-stream;
'';
};
"~ \.tar.gz$" = {
extraConfig = ''
add_header Content-Type application/octet-stream;
'';
};
# Deny access to document root and the images folder
"~ ^/(images/)?$" = {
return = "403";
};
};
};
};
})
];
}

@ -0,0 +1,102 @@
# Mosquitto {#module-services-mosquitto}
Mosquitto is a MQTT broker often used for IoT or home automation data transport.
## Quickstart {#module-services-mosquitto-quickstart}
A minimal configuration for Mosquitto is
```nix
services.mosquitto = {
enable = true;
listeners = [ {
acl = [ "pattern readwrite #" ];
omitPasswordAuth = true;
settings.allow_anonymous = true;
} ];
};
```
This will start a broker on port 1883, listening on all interfaces of the machine, allowing
read/write access to all topics to any user without password requirements.
User authentication can be configured with the `users` key of listeners. A config that gives
full read access to a user `monitor` and restricted write access to a user `service` could look
like
```nix
services.mosquitto = {
enable = true;
listeners = [ {
users = {
monitor = {
acl = [ "read #" ];
password = "monitor";
};
service = {
acl = [ "write service/#" ];
password = "service";
};
};
} ];
};
```
TLS authentication is configured by setting TLS-related options of the listener:
```nix
services.mosquitto = {
enable = true;
listeners = [ {
port = 8883; # port change is not required, but helpful to avoid mistakes
# ...
settings = {
cafile = "/path/to/mqtt.ca.pem";
certfile = "/path/to/mqtt.pem";
keyfile = "/path/to/mqtt.key";
};
} ];
};
```
## Configuration {#module-services-mosquitto-config}
The Mosquitto configuration has four distinct types of settings:
the global settings of the daemon, listeners, plugins, and bridges.
Bridges and listeners are part of the global configuration, plugins are part of listeners.
Users of the broker are configured as parts of listeners rather than globally, allowing
configurations in which a given user is only allowed to log in to the broker using specific
listeners (e.g. to configure an admin user with full access to all topics, but restricted to
localhost).
Almost all options of Mosquitto are available for configuration at their appropriate levels, some
as NixOS options written in camel case, the remainder under `settings` with their exact names in
the Mosquitto config file. The exceptions are `acl_file` (which is always set according to the
`acl` attributes of a listener and its users) and `per_listener_settings` (which is always set to
`true`).
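For example (a minimal sketch; the values are hypothetical), a single listener can combine both levels:
```nix
services.mosquitto.listeners = [ {
  omitPasswordAuth = true;          # camel-case NixOS option
  settings.max_connections = 100;   # passed through verbatim to mosquitto.conf
} ];
```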
### Password authentication {#module-services-mosquitto-config-passwords}
Mosquitto can be run in two modes, with a password file or without. Each listener has its own
password file, and different listeners may use different password files. Password file generation
can be disabled by setting `omitPasswordAuth = true` for a listener; in this case it is necessary
to either set `settings.allow_anonymous = true` to allow all logins, or to configure other
authentication methods like TLS client certificates with `settings.use_identity_as_username = true`.
The default is to generate a password file for each listener from the users configured for that
listener. Users with no configured password will not be added to the password file and thus
will not be able to use the broker.
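A sketch combining the password options (the file path is hypothetical):
```nix
services.mosquitto.listeners = [ {
  users = {
    alice.hashedPasswordFile = "/run/keys/mosquitto-alice";
    bob.password = "changeme";  # ends up in the world-readable Nix store; testing only
  };
} ];
```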
### ACL format {#module-services-mosquitto-config-acl}
Every listener has a Mosquitto `acl_file` attached to it. This ACL is configured via two
attributes of the config:
* the `acl` attribute of the listener configures pattern ACL entries and topic ACL entries
for anonymous users. Each entry must be prefixed with `pattern` or `topic` to distinguish
between these two cases.
* the `acl` attribute of every user configured in the listener sets the ACL for that
given user. Only topic ACLs are supported by Mosquitto in this setting, so no prefix is
required or allowed.
The default ACL for a listener is empty, disallowing all accesses from all clients. To configure
a completely open ACL, set `acl = [ "pattern readwrite #" ]` in the listener.
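Putting both together, a listener that gives anonymous clients read-only access to a status
topic while a `logger` user may write to it could look like (a sketch, assuming anonymous
logins are acceptable):
```nix
services.mosquitto.listeners = [ {
  settings.allow_anonymous = true;
  acl = [ "topic read status/#" ];   # pattern/topic entries for anonymous clients
  users.logger = {
    acl = [ "readwrite status/#" ];  # topic ACL, so no prefix here
    password = "logger";
  };
} ];
```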

@ -5,215 +5,553 @@ with lib;
let
cfg = config.services.mosquitto;
listenerConf = optionalString cfg.ssl.enable ''
listener ${toString cfg.ssl.port} ${cfg.ssl.host}
cafile ${cfg.ssl.cafile}
certfile ${cfg.ssl.certfile}
keyfile ${cfg.ssl.keyfile}
'';
passwordConf = optionalString cfg.checkPasswords ''
password_file ${cfg.dataDir}/passwd
'';
mosquittoConf = pkgs.writeText "mosquitto.conf" ''
acl_file ${aclFile}
persistence true
allow_anonymous ${boolToString cfg.allowAnonymous}
listener ${toString cfg.port} ${cfg.host}
${passwordConf}
${listenerConf}
${cfg.extraConf}
'';
userAcl = (concatStringsSep "\n\n" (mapAttrsToList (n: c:
"user ${n}\n" + (concatStringsSep "\n" c.acl)) cfg.users
));
aclFile = pkgs.writeText "mosquitto.acl" ''
${cfg.aclExtraConf}
${userAcl}
'';
# Note that mosquitto config parsing is very simplistic as of May 2021:
# it will often e.g. strtok() a line, check the first two tokens, and ignore the rest.
# There's no escaping available either, so we have to prevent any from being necessary.
str = types.strMatching "[^\r\n]*" // {
description = "single-line string";
};
path = types.addCheck types.path (p: str.check "${p}");
configKey = types.strMatching "[^\r\n\t ]+";
optionType = with types; oneOf [ str path bool int ] // {
description = "string, path, bool, or integer";
};
optionToString = v:
if isBool v then boolToString v
else if path.check v then "${v}"
else toString v;
assertKeysValid = prefix: valid: config:
mapAttrsToList
(n: _: {
assertion = valid ? ${n};
message = "Invalid config key ${prefix}.${n}.";
})
config;
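# For example, assertKeysValid "x" { a = true; } { b = 1; } yields a single
# failing assertion with the message "Invalid config key x.b.".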
formatFreeform = { prefix ? "" }: mapAttrsToList (n: v: "${prefix}${n} ${optionToString v}");
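# For example, formatFreeform { prefix = "auth_opt_"; } { db = "/tmp/db"; }
# evaluates to [ "auth_opt_db /tmp/db" ].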
userOptions = with types; submodule {
options = {
password = mkOption {
type = uniq (nullOr str);
default = null;
description = ''
Specifies the (clear text) password for the MQTT User.
'';
};
in
passwordFile = mkOption {
type = uniq (nullOr types.path);
example = "/path/to/file";
default = null;
description = ''
Specifies the path to a file containing the
clear text password for the MQTT user.
'';
};
{
hashedPassword = mkOption {
type = uniq (nullOr str);
default = null;
description = ''
Specifies the hashed password for the MQTT User.
To generate hashed password install <literal>mosquitto</literal>
package and use <literal>mosquitto_passwd</literal>.
'';
};
###### Interface
hashedPasswordFile = mkOption {
type = uniq (nullOr types.path);
example = "/path/to/file";
default = null;
description = ''
Specifies the path to a file containing the
hashed password for the MQTT user.
To generate hashed password install <literal>mosquitto</literal>
package and use <literal>mosquitto_passwd</literal>.
'';
};
options = {
services.mosquitto = {
enable = mkEnableOption "the MQTT Mosquitto broker";
acl = mkOption {
type = listOf str;
example = [ "read A/B" "readwrite A/#" ];
default = [];
description = ''
Control client access to topics on the broker.
'';
};
};
};
host = mkOption {
default = "127.0.0.1";
example = "0.0.0.0";
type = types.str;
userAsserts = prefix: users:
mapAttrsToList
(n: _: {
assertion = builtins.match "[^:\r\n]+" n != null;
message = "Invalid user name ${n} in ${prefix}";
})
users
++ mapAttrsToList
(n: u: {
assertion = count (s: s != null) [
u.password u.passwordFile u.hashedPassword u.hashedPasswordFile
] <= 1;
message = "Cannot set more than one password option for user ${n} in ${prefix}";
}) users;
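# Generates a shell script that rebuilds the password file at `path`:
# plain-text entries are written first and hashed in place with
# `mosquitto_passwd -U`, then pre-hashed entries are appended verbatim.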
makePasswordFile = users: path:
let
makeLines = store: file:
mapAttrsToList
(n: u: "addLine ${escapeShellArg n} ${escapeShellArg u.${store}}")
(filterAttrs (_: u: u.${store} != null) users)
++ mapAttrsToList
(n: u: "addFile ${escapeShellArg n} ${escapeShellArg "${u.${file}}"}")
(filterAttrs (_: u: u.${file} != null) users);
plainLines = makeLines "password" "passwordFile";
hashedLines = makeLines "hashedPassword" "hashedPasswordFile";
in
pkgs.writeScript "make-mosquitto-passwd"
(''
#! ${pkgs.runtimeShell}
set -eu
file=${escapeShellArg path}
rm -f "$file"
touch "$file"
addLine() {
echo "$1:$2" >> "$file"
}
addFile() {
if [ $(wc -l <"$2") -gt 1 ]; then
echo "invalid mosquitto password file $2" >&2
return 1
fi
echo "$1:$(cat "$2")" >> "$file"
}
''
+ concatStringsSep "\n"
(plainLines
++ optional (plainLines != []) ''
${pkgs.mosquitto}/bin/mosquitto_passwd -U "$file"
''
++ hashedLines));
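# The resulting ACL file lists the listener-level entries first, followed by
# a "user <name>" block with one "topic <entry>" line per ACL entry for each user.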
makeACLFile = idx: users: supplement:
pkgs.writeText "mosquitto-acl-${toString idx}.conf"
(concatStringsSep
"\n"
(flatten [
supplement
(mapAttrsToList
(n: u: [ "user ${n}" ] ++ map (t: "topic ${t}") u.acl)
users)
]));
authPluginOptions = with types; submodule {
options = {
plugin = mkOption {
type = path;
description = ''
Host to listen on without SSL.
Plugin path to load, should be a <literal>.so</literal> file.
'';
};
port = mkOption {
default = 1883;
type = types.int;
denySpecialChars = mkOption {
type = bool;
description = ''
Port on which to listen without SSL.
Automatically disallow all clients using <literal>#</literal>
or <literal>+</literal> in their name/id.
'';
default = true;
};
ssl = {
enable = mkEnableOption "SSL listener";
options = mkOption {
type = attrsOf optionType;
description = ''
Options for the auth plugin. Each key turns into a <literal>auth_opt_*</literal>
line in the config.
'';
default = {};
};
};
};
cafile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to PEM encoded CA certificates.";
};
authAsserts = prefix: auth:
mapAttrsToList
(n: _: {
assertion = configKey.check n;
message = "Invalid auth plugin key ${prefix}.${n}";
})
auth;
formatAuthPlugin = plugin:
[
"auth_plugin ${plugin.plugin}"
"auth_plugin_deny_special_chars ${optionToString plugin.denySpecialChars}"
]
++ formatFreeform { prefix = "auth_opt_"; } plugin.options;
freeformListenerKeys = {
allow_anonymous = 1;
allow_zero_length_clientid = 1;
auto_id_prefix = 1;
cafile = 1;
capath = 1;
certfile = 1;
ciphers = 1;
"ciphers_tls1.3" = 1;
crlfile = 1;
dhparamfile = 1;
http_dir = 1;
keyfile = 1;
max_connections = 1;
max_qos = 1;
max_topic_alias = 1;
mount_point = 1;
protocol = 1;
psk_file = 1;
psk_hint = 1;
require_certificate = 1;
socket_domain = 1;
tls_engine = 1;
tls_engine_kpass_sha1 = 1;
tls_keyform = 1;
tls_version = 1;
use_identity_as_username = 1;
use_subject_as_username = 1;
use_username_as_clientid = 1;
};
certfile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to PEM encoded server certificate.";
};
listenerOptions = with types; submodule {
options = {
port = mkOption {
type = port;
description = ''
Port to listen on. Must be set to 0 to listen on a unix domain socket.
'';
default = 1883;
};
keyfile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Path to PEM encoded server key.";
};
address = mkOption {
type = nullOr str;
description = ''
Address to listen on. Listen on <literal>0.0.0.0</literal>/<literal>::</literal>
when unset.
'';
default = null;
};
host = mkOption {
default = "0.0.0.0";
example = "localhost";
type = types.str;
description = ''
Host to listen on with SSL.
'';
};
authPlugins = mkOption {
type = listOf authPluginOptions;
description = ''
Authentication plugins to attach to this listener.
Refer to the <link xlink:href="https://mosquitto.org/man/mosquitto-conf-5.html">
mosquitto.conf documentation</link> for details on authentication plugins.
'';
default = [];
};
port = mkOption {
default = 8883;
type = types.int;
description = ''
Port on which to listen with SSL.
'';
};
users = mkOption {
type = attrsOf userOptions;
example = { john = { password = "123456"; acl = [ "readwrite john/#" ]; }; };
description = ''
A set of users and their passwords and ACLs.
'';
default = {};
};
dataDir = mkOption {
default = "/var/lib/mosquitto";
type = types.path;
omitPasswordAuth = mkOption {
type = bool;
description = ''
The data directory.
Omits password checking, allowing anyone to log in with any user name unless
other mandatory authentication methods (e.g. TLS client certificates) are configured.
'';
default = false;
};
users = mkOption {
type = types.attrsOf (types.submodule {
options = {
password = mkOption {
type = with types; uniq (nullOr str);
default = null;
description = ''
Specifies the (clear text) password for the MQTT User.
'';
};
acl = mkOption {
type = listOf str;
description = ''
Additional ACL items to prepend to the generated ACL file.
'';
example = [ "pattern read #" "topic readwrite anon/report/#" ];
default = [];
};
passwordFile = mkOption {
type = with types; uniq (nullOr str);
example = "/path/to/file";
default = null;
description = ''
Specifies the path to a file containing the
clear text password for the MQTT user.
'';
};
settings = mkOption {
type = submodule {
freeformType = attrsOf optionType;
};
description = ''
Additional settings for this listener.
'';
default = {};
};
};
};
hashedPassword = mkOption {
type = with types; uniq (nullOr str);
default = null;
description = ''
Specifies the hashed password for the MQTT User.
To generate hashed password install <literal>mosquitto</literal>
package and use <literal>mosquitto_passwd</literal>.
'';
};
listenerAsserts = prefix: listener:
assertKeysValid prefix freeformListenerKeys listener.settings
++ userAsserts prefix listener.users
++ imap0
(i: v: authAsserts "${prefix}.authPlugins.${toString i}" v)
listener.authPlugins;
formatListener = idx: listener:
[
"listener ${toString listener.port} ${toString listener.address}"
"acl_file ${makeACLFile idx listener.users listener.acl}"
]
++ optional (! listener.omitPasswordAuth) "password_file ${cfg.dataDir}/passwd-${toString idx}"
++ formatFreeform {} listener.settings
++ concatMap formatAuthPlugin listener.authPlugins;
freeformBridgeKeys = {
bridge_alpn = 1;
bridge_attempt_unsubscribe = 1;
bridge_bind_address = 1;
bridge_cafile = 1;
bridge_capath = 1;
bridge_certfile = 1;
bridge_identity = 1;
bridge_insecure = 1;
bridge_keyfile = 1;
bridge_max_packet_size = 1;
bridge_outgoing_retain = 1;
bridge_protocol_version = 1;
bridge_psk = 1;
bridge_require_ocsp = 1;
bridge_tls_version = 1;
cleansession = 1;
idle_timeout = 1;
keepalive_interval = 1;
local_cleansession = 1;
local_clientid = 1;
local_password = 1;
local_username = 1;
notification_topic = 1;
notifications = 1;
notifications_local_only = 1;
remote_clientid = 1;
remote_password = 1;
remote_username = 1;
restart_timeout = 1;
round_robin = 1;
start_type = 1;
threshold = 1;
try_private = 1;
};
hashedPasswordFile = mkOption {
type = with types; uniq (nullOr str);
example = "/path/to/file";
default = null;
bridgeOptions = with types; submodule {
options = {
addresses = mkOption {
type = listOf (submodule {
options = {
address = mkOption {
type = str;
description = ''
Specifies the path to a file containing the
hashed password for the MQTT user.
To generate hashed password install <literal>mosquitto</literal>
package and use <literal>mosquitto_passwd</literal>.
Address of the remote MQTT broker.
'';
};
acl = mkOption {
type = types.listOf types.str;
example = [ "topic read A/B" "topic A/#" ];
port = mkOption {
type = port;
description = ''
Control client access to topics on the broker.
Port of the remote MQTT broker.
'';
default = 1883;
};
};
});
example = { john = { password = "123456"; acl = [ "topic readwrite john/#" ]; }; };
default = [];
description = ''
A set of users and their passwords and ACLs.
Remote endpoints for the bridge.
'';
};
allowAnonymous = mkOption {
default = false;
type = types.bool;
topics = mkOption {
type = listOf str;
description = ''
Allow clients to connect without authentication.
Topic patterns to be shared between the two brokers.
Refer to the <link xlink:href="https://mosquitto.org/man/mosquitto-conf-5.html">
mosquitto.conf documentation</link> for details on the format.
'';
default = [];
example = [ "# both 2 local/topic/ remote/topic/" ];
};
checkPasswords = mkOption {
default = false;
example = true;
type = types.bool;
settings = mkOption {
type = submodule {
freeformType = attrsOf optionType;
};
description = ''
Refuse connection when clients provide incorrect passwords.
Additional settings for this bridge.
'';
default = {};
};
};
};
extraConf = mkOption {
default = "";
type = types.lines;
description = ''
Extra config to append to `mosquitto.conf` file.
'';
};
bridgeAsserts = prefix: bridge:
assertKeysValid prefix freeformBridgeKeys bridge.settings
++ [ {
assertion = length bridge.addresses > 0;
message = "Bridge ${prefix} needs remote broker addresses";
} ];
formatBridge = name: bridge:
[
"connection ${name}"
"addresses ${concatMapStringsSep " " (a: "${a.address}:${toString a.port}") bridge.addresses}"
]
++ map (t: "topic ${t}") bridge.topics
++ formatFreeform {} bridge.settings;
freeformGlobalKeys = {
allow_duplicate_messages = 1;
autosave_interval = 1;
autosave_on_changes = 1;
check_retain_source = 1;
connection_messages = 1;
log_facility = 1;
log_timestamp = 1;
log_timestamp_format = 1;
max_inflight_bytes = 1;
max_inflight_messages = 1;
max_keepalive = 1;
max_packet_size = 1;
max_queued_bytes = 1;
max_queued_messages = 1;
memory_limit = 1;
message_size_limit = 1;
persistence_file = 1;
persistence_location = 1;
persistent_client_expiration = 1;
pid_file = 1;
queue_qos0_messages = 1;
retain_available = 1;
set_tcp_nodelay = 1;
sys_interval = 1;
upgrade_outgoing_qos = 1;
websockets_headers_size = 1;
websockets_log_level = 1;
};
aclExtraConf = mkOption {
default = "";
type = types.lines;
description = ''
Extra config to prepend to the ACL file.
'';
};
globalOptions = with types; {
enable = mkEnableOption "the MQTT Mosquitto broker";
bridges = mkOption {
type = attrsOf bridgeOptions;
default = {};
description = ''
Bridges to build to other MQTT brokers.
'';
};
listeners = mkOption {
type = listOf listenerOptions;
default = [];
description = ''
Listeners to configure on this broker.
'';
};
includeDirs = mkOption {
type = listOf path;
description = ''
Directories to be scanned for further config files to include.
Directories will be processed in the order given,
<literal>*.conf</literal> files in the directory will be
read in case-sensitive alphabetical order.
'';
default = [];
};
logDest = mkOption {
type = listOf (either path (enum [ "stdout" "stderr" "syslog" "topic" "dlt" ]));
description = ''
Destinations to send log messages to.
'';
default = [ "stderr" ];
};
logType = mkOption {
type = listOf (enum [ "debug" "error" "warning" "notice" "information"
"subscribe" "unsubscribe" "websockets" "none" "all" ]);
description = ''
Types of messages to log.
'';
default = [];
};
persistence = mkOption {
type = bool;
description = ''
Enable persistent storage of subscriptions and messages.
'';
default = true;
};
dataDir = mkOption {
default = "/var/lib/mosquitto";
type = types.path;
description = ''
The data directory.
'';
};
settings = mkOption {
type = submodule {
freeformType = attrsOf optionType;
};
description = ''
Global configuration options for the mosquitto broker.
'';
default = {};
};
};
globalAsserts = prefix: cfg:
flatten [
(assertKeysValid prefix freeformGlobalKeys cfg.settings)
(imap0 (n: l: listenerAsserts "${prefix}.listener.${toString n}" l) cfg.listeners)
(mapAttrsToList (n: b: bridgeAsserts "${prefix}.bridge.${n}" b) cfg.bridges)
];
formatGlobal = cfg:
[
"per_listener_settings true"
"persistence ${optionToString cfg.persistence}"
]
++ map
(d: if path.check d then "log_dest file ${d}" else "log_dest ${d}")
cfg.logDest
++ map (t: "log_type ${t}") cfg.logType
++ formatFreeform {} cfg.settings
++ concatLists (imap0 formatListener cfg.listeners)
++ concatLists (mapAttrsToList formatBridge cfg.bridges)
++ map (d: "include_dir ${d}") cfg.includeDirs;
configFile = pkgs.writeText "mosquitto.conf"
(concatStringsSep "\n" (formatGlobal cfg));
in
{
###### Interface
options.services.mosquitto = globalOptions;
###### Implementation
config = mkIf cfg.enable {
assertions = mapAttrsToList (name: cfg: {
assertion = length (filter (s: s != null) (with cfg; [
password passwordFile hashedPassword hashedPasswordFile
])) <= 1;
message = "Cannot set more than one password option";
}) cfg.users;
assertions = globalAsserts "services.mosquitto" cfg;
systemd.services.mosquitto = {
description = "Mosquitto MQTT Broker Daemon";
@ -227,7 +565,7 @@ in
RuntimeDirectory = "mosquitto";
WorkingDirectory = cfg.dataDir;
Restart = "on-failure";
ExecStart = "${pkgs.mosquitto}/bin/mosquitto -c ${mosquittoConf}";
ExecStart = "${pkgs.mosquitto}/bin/mosquitto -c ${configFile}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
# Hardening
@ -252,12 +590,34 @@ in
ReadWritePaths = [
cfg.dataDir
"/tmp" # mosquitto_passwd creates files in /tmp before moving them
];
ReadOnlyPaths = with cfg.ssl; lib.optionals (enable) [
certfile
keyfile
cafile
];
] ++ filter path.check cfg.logDest;
ReadOnlyPaths =
map (p: "${p}")
(cfg.includeDirs
++ filter
(v: v != null)
(flatten [
(map
(l: [
(l.settings.psk_file or null)
(l.settings.http_dir or null)
(l.settings.cafile or null)
(l.settings.capath or null)
(l.settings.certfile or null)
(l.settings.crlfile or null)
(l.settings.dhparamfile or null)
(l.settings.keyfile or null)
])
cfg.listeners)
(mapAttrsToList
(_: b: [
(b.settings.bridge_cafile or null)
(b.settings.bridge_capath or null)
(b.settings.bridge_certfile or null)
(b.settings.bridge_keyfile or null)
])
cfg.bridges)
]));
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_UNIX" # for sd_notify() call
@ -275,20 +635,12 @@ in
];
UMask = "0077";
};
preStart = ''
rm -f ${cfg.dataDir}/passwd
touch ${cfg.dataDir}/passwd
'' + concatStringsSep "\n" (
mapAttrsToList (n: c:
if c.hashedPasswordFile != null then
"echo '${n}:'$(cat '${c.hashedPasswordFile}') >> ${cfg.dataDir}/passwd"
else if c.passwordFile != null then
"${pkgs.mosquitto}/bin/mosquitto_passwd -b ${cfg.dataDir}/passwd ${n} $(cat '${c.passwordFile}')"
else if c.hashedPassword != null then
"echo '${n}:${c.hashedPassword}' >> ${cfg.dataDir}/passwd"
else optionalString (c.password != null)
"${pkgs.mosquitto}/bin/mosquitto_passwd -b ${cfg.dataDir}/passwd ${n} '${c.password}'"
) cfg.users);
preStart =
concatStringsSep
"\n"
(imap0
(idx: listener: makePasswordFile listener.users "${cfg.dataDir}/passwd-${toString idx}")
cfg.listeners);
};
users.users.mosquitto = {
@ -302,4 +654,11 @@ in
users.groups.mosquitto.gid = config.ids.gids.mosquitto;
};
meta = {
maintainers = with lib.maintainers; [ pennae ];
# Don't edit the docbook xml directly, edit the md and generate it:
# `pandoc mosquitto.md -t docbook --top-level-division=chapter --extract-media=media -f markdown+smart > mosquitto.xml`
doc = ./mosquitto.xml;
};
}

@ -0,0 +1,147 @@
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mosquitto">
<title>Mosquitto</title>
<para>
Mosquitto is a MQTT broker often used for IoT or home automation
data transport.
</para>
<section xml:id="module-services-mosquitto-quickstart">
<title>Quickstart</title>
<para>
A minimal configuration for Mosquitto is
</para>
<programlisting language="bash">
services.mosquitto = {
enable = true;
listeners = [ {
acl = [ &quot;pattern readwrite #&quot; ];
omitPasswordAuth = true;
settings.allow_anonymous = true;
} ];
};
</programlisting>
<para>
This will start a broker on port 1883, listening on all interfaces
of the machine, allowing read/write access to all topics to any
user without password requirements.
</para>
<para>
User authentication can be configured with the
<literal>users</literal> key of listeners. A config that gives
full read access to a user <literal>monitor</literal> and
restricted write access to a user <literal>service</literal> could
look like
</para>
<programlisting language="bash">
services.mosquitto = {
enable = true;
listeners = [ {
users = {
monitor = {
acl = [ &quot;read #&quot; ];
password = &quot;monitor&quot;;
};
service = {
acl = [ &quot;write service/#&quot; ];
password = &quot;service&quot;;
};
};
} ];
};
</programlisting>
<para>
TLS authentication is configured by setting TLS-related options of
the listener:
</para>
<programlisting language="bash">
services.mosquitto = {
enable = true;
listeners = [ {
port = 8883; # port change is not required, but helpful to avoid mistakes
# ...
settings = {
cafile = &quot;/path/to/mqtt.ca.pem&quot;;
certfile = &quot;/path/to/mqtt.pem&quot;;
keyfile = &quot;/path/to/mqtt.key&quot;;
};
} ];
};
</programlisting>
</section>
<section xml:id="module-services-mosquitto-config">
<title>Configuration</title>
<para>
The Mosquitto configuration has four distinct types of settings:
the global settings of the daemon, listeners, plugins, and
bridges. Bridges and listeners are part of the global
configuration, plugins are part of listeners. Users of the broker
are configured as parts of listeners rather than globally,
allowing configurations in which a given user is only allowed to
log in to the broker using specific listeners (eg to configure an
admin user with full access to all topics, but restricted to
localhost).
</para>
<para>
Almost all options of Mosquitto are available for configuration at
their appropriate levels, some as NixOS options written in camel
case, the remainders under <literal>settings</literal> with their
exact names in the Mosquitto config file. The exceptions are
<literal>acl_file</literal> (which is always set according to the
<literal>acl</literal> attributes of a listener and its users) and
<literal>per_listener_settings</literal> (which is always set to
<literal>true</literal>).
</para>
<section xml:id="module-services-mosquitto-config-passwords">
<title>Password authentication</title>
<para>
Mosquitto can be run in two modes, with a password file or
without. Each listener has its own password file, and different
listeners may use different password files. Password file
generation can be disabled by setting
<literal>omitPasswordAuth = true</literal> for a listener; in
this case it is necessary to either set
<literal>settings.allow_anonymous = true</literal> to allow all
logins, or to configure other authentication methods like TLS
client certificates with
<literal>settings.use_identity_as_username = true</literal>.
</para>
<para>
The default is to generate a password file for each listener
from the users configured to that listener. Users with no
configured password will not be added to the password file and
thus will not be able to use the broker.
</para>
</section>
<section xml:id="module-services-mosquitto-config-acl">
<title>ACL format</title>
<para>
Every listener has a Mosquitto <literal>acl_file</literal>
attached to it. This ACL is configured via two attributes of the
config:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
the <literal>acl</literal> attribute of the listener
configures pattern ACL entries and topic ACL entries for
anonymous users. Each entry must be prefixed with
<literal>pattern</literal> or <literal>topic</literal> to
distinguish between these two cases.
</para>
</listitem>
<listitem>
<para>
the <literal>acl</literal> attribute of every user
configured in the listener sets the ACL for that given
user. Only topic ACLs are supported by Mosquitto in this
setting, so no prefix is required or allowed.
</para>
</listitem>
</itemizedlist>
<para>
The default ACL for a listener is empty, disallowing all
accesses from all clients. To configure a completely open ACL,
set <literal>acl = [ &quot;pattern readwrite #&quot; ]</literal>
in the listener.
</para>
</section>
</section>
</chapter>

@ -502,13 +502,6 @@ in {
systemd.services.ModemManager.aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
# override unit as recommended by upstream - see https://github.com/NixOS/nixpkgs/issues/88089
# TODO: keep an eye on modem-manager releases as this will eventually be added to the upstream unit
systemd.services.ModemManager.serviceConfig.ExecStart = [
""
"${pkgs.modemmanager}/sbin/ModemManager --filter-policy=STRICT"
];
systemd.services.NetworkManager-dispatcher = {
wantedBy = [ "network.target" ];
restartTriggers = [ configFile overrideNameserversScript ];

@ -0,0 +1,290 @@
{ config, lib, pkgs, ... }:
with lib;
let
python = pkgs.python3Packages.python;
cfg = config.services.seafile;
settingsFormat = pkgs.formats.ini { };
ccnetConf = settingsFormat.generate "ccnet.conf" cfg.ccnetSettings;
seafileConf = settingsFormat.generate "seafile.conf" cfg.seafileSettings;
seahubSettings = pkgs.writeText "seahub_settings.py" ''
FILE_SERVER_ROOT = '${cfg.ccnetSettings.General.SERVICE_URL}/seafhttp'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '${seahubDir}/seahub.db',
}
}
MEDIA_ROOT = '${seahubDir}/media/'
THUMBNAIL_ROOT = '${seahubDir}/thumbnail/'
with open('${seafRoot}/.seahubSecret') as f:
SECRET_KEY = f.readline().rstrip()
${cfg.seahubExtraConf}
'';
seafRoot = "/var/lib/seafile"; # hardcode it due to dynamicuser
ccnetDir = "${seafRoot}/ccnet";
dataDir = "${seafRoot}/data";
seahubDir = "${seafRoot}/seahub";
in {
###### Interface
options.services.seafile = {
enable = mkEnableOption "Seafile server";
ccnetSettings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
General = {
SERVICE_URL = mkOption {
type = types.str;
example = "https://www.example.com";
description = ''
Seahub public URL.
'';
};
};
};
};
default = { };
description = ''
Configuration for ccnet, see
<link xlink:href="https://manual.seafile.com/config/ccnet-conf/"/>
for supported values.
'';
};
seafileSettings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
fileserver = {
port = mkOption {
type = types.port;
default = 8082;
description = ''
The TCP port used by the seafile fileserver.
'';
};
host = mkOption {
type = types.str;
default = "127.0.0.1";
example = "0.0.0.0";
description = ''
The binding address used by the seafile fileserver.
'';
};
};
};
};
default = { };
description = ''
Configuration for seafile-server, see
<link xlink:href="https://manual.seafile.com/config/seafile-conf/"/>
for supported values.
'';
};
workers = mkOption {
type = types.int;
default = 4;
example = 10;
description = ''
The number of gunicorn worker processes for handling requests.
'';
};
adminEmail = mkOption {
example = "john@example.com";
type = types.str;
description = ''
Seafile Seahub Admin Account Email.
'';
};
initialAdminPassword = mkOption {
example = "someStrongPass";
type = types.str;
description = ''
Seafile Seahub Admin Account initial password.
It should be changed via the Seahub web front-end.
'';
};
seafilePackage = mkOption {
type = types.package;
description = "Which package to use for the seafile server.";
default = pkgs.seafile-server;
};
seahubExtraConf = mkOption {
default = "";
type = types.lines;
description = ''
Extra config to append to `seahub_settings.py` file.
Refer to <link xlink:href="https://manual.seafile.com/config/seahub_settings_py/" />
for all available options.
'';
};
};
###### Implementation
config = mkIf cfg.enable {
environment.etc."seafile/ccnet.conf".source = ccnetConf;
environment.etc."seafile/seafile.conf".source = seafileConf;
environment.etc."seafile/seahub_settings.py".source = seahubSettings;
systemd.targets.seafile = {
wantedBy = [ "multi-user.target" ];
description = "Seafile components";
};
systemd.services = let
securityOptions = {
ProtectHome = true;
PrivateUsers = true;
PrivateDevices = true;
ProtectClock = true;
ProtectHostname = true;
ProtectProc = "invisible";
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictNamespaces = true;
LockPersonality = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
MemoryDenyWriteExecute = true;
SystemCallArchitectures = "native";
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" ];
};
in {
seaf-server = {
description = "Seafile server";
partOf = [ "seafile.target" ];
after = [ "network.target" ];
wantedBy = [ "seafile.target" ];
restartTriggers = [ ccnetConf seafileConf ];
serviceConfig = securityOptions // {
User = "seafile";
Group = "seafile";
DynamicUser = true;
StateDirectory = "seafile";
RuntimeDirectory = "seafile";
LogsDirectory = "seafile";
ConfigurationDirectory = "seafile";
ExecStart = ''
${cfg.seafilePackage}/bin/seaf-server \
--foreground \
-F /etc/seafile \
-c ${ccnetDir} \
-d ${dataDir} \
-l /var/log/seafile/server.log \
-P /run/seafile/server.pid \
-p /run/seafile
'';
};
preStart = ''
if [ ! -f "${seafRoot}/server-setup" ]; then
mkdir -p ${dataDir}/library-template
mkdir -p ${ccnetDir}/{GroupMgr,misc,OrgMgr,PeerMgr}
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/GroupMgr/groupmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/groupmgr.sql"
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/misc/config.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/config.sql"
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/OrgMgr/orgmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/org.sql"
${pkgs.sqlite}/bin/sqlite3 ${ccnetDir}/PeerMgr/usermgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/user.sql"
${pkgs.sqlite}/bin/sqlite3 ${dataDir}/seafile.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/seafile.sql"
echo "${cfg.seafilePackage.version}-sqlite" > "${seafRoot}"/server-setup
fi
# checking for upgrades and handling them
# WARNING: needs to be extended to actually handle major version migrations
installedMajor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f1)
installedMinor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f2)
pkgMajor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f1)
pkgMinor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f2)
if [ $installedMajor != $pkgMajor ] || [ $installedMinor != $pkgMinor ]; then
echo "Unsupported upgrade" >&2
exit 1
fi
'';
};
seahub = let
penv = (pkgs.python3.withPackages (ps: with ps; [ gunicorn seahub ]));
in {
description = "Seafile Server Web Frontend";
wantedBy = [ "seafile.target" ];
partOf = [ "seafile.target" ];
after = [ "network.target" "seaf-server.service" ];
requires = [ "seaf-server.service" ];
restartTriggers = [ seahubSettings ];
environment = {
PYTHONPATH =
"${pkgs.python3Packages.seahub}/thirdpart:${pkgs.python3Packages.seahub}:${penv}/${python.sitePackages}";
DJANGO_SETTINGS_MODULE = "seahub.settings";
CCNET_CONF_DIR = ccnetDir;
SEAFILE_CONF_DIR = dataDir;
SEAFILE_CENTRAL_CONF_DIR = "/etc/seafile";
SEAFILE_RPC_PIPE_PATH = "/run/seafile";
SEAHUB_LOG_DIR = "/var/log/seafile";
};
serviceConfig = securityOptions // {
User = "seafile";
Group = "seafile";
DynamicUser = true;
RuntimeDirectory = "seahub";
StateDirectory = "seafile";
LogsDirectory = "seafile";
ConfigurationDirectory = "seafile";
ExecStart = ''
${penv}/bin/gunicorn seahub.wsgi:application \
--name seahub \
--workers ${toString cfg.workers} \
--log-level=info \
--preload \
--timeout=1200 \
--limit-request-line=8190 \
--bind unix:/run/seahub/gunicorn.sock
'';
};
preStart = ''
mkdir -p ${seahubDir}/media
# Link all media except avatars
for m in `find ${pkgs.python3Packages.seahub}/media/ -maxdepth 1 -not -name "avatars"`; do
ln -sf $m ${seahubDir}/media/
done
if [ ! -e "${seafRoot}/.seahubSecret" ]; then
${penv}/bin/python ${pkgs.python3Packages.seahub}/tools/secret_key_generator.py > ${seafRoot}/.seahubSecret
chmod 400 ${seafRoot}/.seahubSecret
fi
if [ ! -f "${seafRoot}/seahub-setup" ]; then
# avatars directory should be writable
install -D -t ${seahubDir}/media/avatars/ ${pkgs.python3Packages.seahub}/media/avatars/default.png
install -D -t ${seahubDir}/media/avatars/groups ${pkgs.python3Packages.seahub}/media/avatars/groups/default.png
# init database
${pkgs.python3Packages.seahub}/manage.py migrate
# create admin account
${pkgs.expect}/bin/expect -c 'spawn ${pkgs.python3Packages.seahub}/manage.py createsuperuser --email=${cfg.adminEmail}; expect "Password: "; send "${cfg.initialAdminPassword}\r"; expect "Password (again): "; send "${cfg.initialAdminPassword}\r"; expect "Superuser created successfully."'
echo "${pkgs.python3Packages.seahub.version}-sqlite" > "${seafRoot}/seahub-setup"
fi
if [ $(cat "${seafRoot}/seahub-setup" | cut -d"-" -f1) != "${pkgs.python3Packages.seahub.version}" ]; then
# update database
${pkgs.python3Packages.seahub}/manage.py migrate
echo "${pkgs.python3Packages.seahub.version}-sqlite" > "${seafRoot}/seahub-setup"
fi
'';
};
};
};
}

@ -131,10 +131,15 @@ in
};
imgUrl = mkOption {
type = types.str;
default = "http://${cfg.hostName}:${toString cfg.port}/cache";
defaultText = literalExpression ''"http://''${hostName}:''${toString port}/cache"'';
default = "cache";
defaultText = literalExpression ''"cache"'';
example = "https://somewhere.example.com/cache";
description = "Base url for images generated in the cgi.";
description = ''
Base URL for images generated by the CGI.
The default is a relative URL to ensure it also works when e.g. forwarding
the GUI port via SSH.
'';
};
linkStyle = mkOption {
type = types.enum ["original" "absolute" "relative"];
@ -167,6 +172,17 @@ in
defaultText = literalExpression "pkgs.smokeping";
description = "Specify a custom smokeping package";
};
host = mkOption {
type = types.nullOr types.str;
default = "localhost";
example = "192.0.2.1"; # rfc5737 example IP for documentation
description = ''
Host/IP to bind to for the web server.
Setting it to <literal>null</literal> skips passing the -h option to thttpd,
which makes it bind to all interfaces.
'';
};
port = mkOption {
type = types.int;
default = 8081;
@ -297,10 +313,11 @@ in
};
users.groups.${cfg.user} = {};
systemd.services.smokeping = {
wantedBy = [ "multi-user.target"];
requiredBy = [ "multi-user.target"];
serviceConfig = {
User = cfg.user;
Restart = "on-failure";
ExecStart = "${cfg.package}/bin/smokeping --config=${configPath} --nodaemon";
};
preStart = ''
mkdir -m 0755 -p ${smokepingHome}/cache ${smokepingHome}/data
@ -311,18 +328,29 @@ in
${cfg.package}/bin/smokeping --check --config=${configPath}
${cfg.package}/bin/smokeping --static --config=${configPath}
'';
script = "${cfg.package}/bin/smokeping --config=${configPath} --nodaemon";
};
systemd.services.thttpd = mkIf cfg.webService {
wantedBy = [ "multi-user.target"];
requiredBy = [ "multi-user.target"];
requires = [ "smokeping.service"];
partOf = [ "smokeping.service"];
path = with pkgs; [ bash rrdtool smokeping thttpd ];
script = ''thttpd -u ${cfg.user} -c "**.fcgi" -d ${smokepingHome} -p ${builtins.toString cfg.port} -D -nos'';
serviceConfig.Restart = "always";
serviceConfig = {
Restart = "always";
ExecStart = lib.concatStringsSep " " (lib.concatLists [
[ "${pkgs.thttpd}/bin/thttpd" ]
[ "-u ${cfg.user}" ]
[ ''-c "**.fcgi"'' ]
[ "-d ${smokepingHome}" ]
(lib.optional (cfg.host != null) "-h ${cfg.host}")
[ "-p ${builtins.toString cfg.port}" ]
[ "-D -nos" ]
]);
};
};
};
meta.maintainers = with lib.maintainers; [ erictapen ];
meta.maintainers = with lib.maintainers; [
erictapen
nh2
];
}

@ -9,25 +9,6 @@ let
${optionalString (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m"} \
-jar ${stateDir}/lib/ace.jar
'';
mountPoints = [
{
what = "${cfg.unifiPackage}/dl";
where = "${stateDir}/dl";
}
{
what = "${cfg.unifiPackage}/lib";
where = "${stateDir}/lib";
}
{
what = "${cfg.mongodbPackage}/bin";
where = "${stateDir}/bin";
}
{
what = "${cfg.dataDir}";
where = "${stateDir}/data";
}
];
systemdMountPoints = map (m: "${utils.escapeSystemdPath m.where}.mount") mountPoints;
in
{
@ -68,16 +49,6 @@ in
'';
};
services.unifi.dataDir = mkOption {
type = types.str;
default = "${stateDir}/data";
description = ''
Where to store the database and other data.
This directory will be bind-mounted to ${stateDir}/data as part of the service startup.
'';
};
services.unifi.openPorts = mkOption {
type = types.bool;
default = true;
@ -136,32 +107,11 @@ in
];
};
# We must create the binary directories as bind mounts instead of symlinks
# This is because the controller resolves all symlinks to absolute paths
# to be used as the working directory.
systemd.mounts = map ({ what, where }: {
bindsTo = [ "unifi.service" ];
partOf = [ "unifi.service" ];
unitConfig.RequiresMountsFor = stateDir;
options = "bind";
what = what;
where = where;
}) mountPoints;
systemd.tmpfiles.rules = [
"d '${stateDir}' 0700 unifi - - -"
"d '${stateDir}/data' 0700 unifi - - -"
"d '${stateDir}/webapps' 0700 unifi - - -"
"L+ '${stateDir}/webapps/ROOT' - - - - ${cfg.unifiPackage}/webapps/ROOT"
];
systemd.services.unifi = {
description = "UniFi controller daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ] ++ systemdMountPoints;
partOf = systemdMountPoints;
bindsTo = systemdMountPoints;
unitConfig.RequiresMountsFor = stateDir;
after = [ "network.target" ];
# This a HACK to fix missing dependencies of dynamic libs extracted from jars
environment.LD_LIBRARY_PATH = with pkgs.stdenv; "${cc.cc.lib}/lib";
# Make sure package upgrades trigger a service restart
@ -209,8 +159,27 @@ in
SystemCallErrorNumber = "EPERM";
SystemCallFilter = [ "@system-service" ];
# Required for ProtectSystem=strict
BindPaths = [ stateDir ];
StateDirectory = "unifi";
RuntimeDirectory = "unifi";
LogsDirectory = "unifi";
CacheDirectory= "unifi";
TemporaryFileSystem = [
# required as we want to create bind mounts below
"${stateDir}/webapps:rw"
];
# We must create the binary directories as bind mounts instead of symlinks
# This is because the controller resolves all symlinks to absolute paths
# to be used as the working directory.
BindPaths = [
"/var/log/unifi:${stateDir}/logs"
"/run/unifi:${stateDir}/run"
"${cfg.unifiPackage}/dl:${stateDir}/dl"
"${cfg.unifiPackage}/lib:${stateDir}/lib"
"${cfg.mongodbPackage}/bin:${stateDir}/bin"
"${cfg.unifiPackage}/webapps/ROOT:${stateDir}/webapps/ROOT"
];
# Needs network access
PrivateNetwork = false;
@ -220,6 +189,9 @@ in
};
};
imports = [
(mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data" )
];
meta.maintainers = with lib.maintainers; [ erictapen pennae ];
}

@ -119,7 +119,7 @@ in
];
# ProtectProc = "invisible"; # not supported by upstream yet
# ProcSubset = "pid"; # not supported by upstream upstream yet
# ProcSubset = "pid"; # not supported by upstream yet
# PrivateUsers = true; # doesn't work with privileged ports therefore not supported by upstream
DynamicUser = true;

@@ -13,7 +13,7 @@ in
{
###### interface
meta.maintainers = with maintainers; [ philandstuff rawkode ];
meta.maintainers = with maintainers; [ philandstuff rawkode jwoudenberg ];
options = {
@@ -49,6 +49,12 @@ in
# yubikey-agent package
systemd.user.services.yubikey-agent = mkIf (pinentryFlavor != null) {
path = [ pkgs.pinentry.${pinentryFlavor} ];
wantedBy = [
(if pinentryFlavor == "tty" || pinentryFlavor == "curses" then
"default.target"
else
"graphical-session.target")
];
};
environment.extraInit = ''

@@ -7,15 +7,20 @@ let
inherit (config.environment) etc;
apparmor = config.security.apparmor;
rootDir = "/run/transmission";
homeDir = "/var/lib/transmission";
settingsDir = ".config/transmission-daemon";
downloadsDir = "Downloads";
incompleteDir = ".incomplete";
watchDir = "watchdir";
# TODO: switch to configGen.json once RFC0042 is implemented
settingsFile = pkgs.writeText "settings.json" (builtins.toJSON cfg.settings);
settingsFormat = pkgs.formats.json {};
settingsFile = settingsFormat.generate "settings.json" cfg.settings;
in
{
imports = [
(mkRenamedOptionModule ["services" "transmission" "port"]
["services" "transmission" "settings" "rpc-port"])
(mkAliasOptionModule ["services" "transmission" "openFirewall"]
["services" "transmission" "openPeerPorts"])
];
options = {
services.transmission = {
enable = mkEnableOption ''the headless Transmission BitTorrent daemon.
@@ -24,48 +29,141 @@ in
transmission-remote, the WebUI (http://127.0.0.1:9091/ by default),
or other clients like stig or tremc.
Torrents are downloaded to ${homeDir}/${downloadsDir} by default and are
Torrents are downloaded to <xref linkend="opt-services.transmission.home"/>/${downloadsDir} by default and are
accessible to users in the "transmission" group'';
settings = mkOption rec {
# TODO: switch to types.config.json as prescribed by RFC0042 once it's implemented
type = types.attrs;
apply = recursiveUpdate default;
default =
{
download-dir = "${cfg.home}/${downloadsDir}";
incomplete-dir = "${cfg.home}/${incompleteDir}";
incomplete-dir-enabled = true;
watch-dir = "${cfg.home}/${watchDir}";
watch-dir-enabled = false;
message-level = 1;
peer-port = 51413;
peer-port-random-high = 65535;
peer-port-random-low = 49152;
peer-port-random-on-start = false;
rpc-bind-address = "127.0.0.1";
rpc-port = 9091;
script-torrent-done-enabled = false;
script-torrent-done-filename = "";
umask = 2; # 0o002 in decimal as expected by Transmission
utp-enabled = true;
};
example =
{
download-dir = "/srv/torrents/";
incomplete-dir = "/srv/torrents/.incomplete/";
incomplete-dir-enabled = true;
rpc-whitelist = "127.0.0.1,192.168.*.*";
};
settings = mkOption {
description = ''
Attribute set whose fields overwrites fields in
Settings whose options overwrite fields in
<literal>.config/transmission-daemon/settings.json</literal>
(each time the service starts). String values must be quoted, integer and
boolean values must not.
(each time the service starts).
See <link xlink:href="https://github.com/transmission/transmission/wiki/Editing-Configuration-Files">Transmission's Wiki</link>
for documentation.
for documentation of settings not explicitly covered by this module.
'';
default = {};
type = types.submodule {
freeformType = settingsFormat.type;
options.download-dir = mkOption {
type = types.path;
default = "${cfg.home}/${downloadsDir}";
description = "Directory where to download torrents.";
};
options.incomplete-dir = mkOption {
type = types.path;
default = "${cfg.home}/${incompleteDir}";
description = ''
When enabled with
<xref linkend="opt-services.transmission.settings.incomplete-dir-enabled"/>,
new torrents will download the files to this directory.
When complete, the files will be moved to
<xref linkend="opt-services.transmission.settings.download-dir"/>.
'';
};
options.incomplete-dir-enabled = mkOption {
type = types.bool;
default = true;
description = "";
};
options.message-level = mkOption {
type = types.ints.between 0 2;
default = 2;
description = "Set verbosity of transmission messages.";
};
options.peer-port = mkOption {
type = types.port;
default = 51413;
description = "The peer port to listen for incoming connections.";
};
options.peer-port-random-high = mkOption {
type = types.port;
default = 65535;
description = ''
The maximum peer port to listen on for incoming connections
when <xref linkend="opt-services.transmission.settings.peer-port-random-on-start"/> is enabled.
'';
};
options.peer-port-random-low = mkOption {
type = types.port;
default = 49152;
description = ''
The minimum peer port to listen on for incoming connections
when <xref linkend="opt-services.transmission.settings.peer-port-random-on-start"/> is enabled.
'';
};
options.peer-port-random-on-start = mkOption {
type = types.bool;
default = false;
description = "Randomize the peer port.";
};
options.rpc-bind-address = mkOption {
type = types.str;
default = "127.0.0.1";
example = "0.0.0.0";
description = ''
Where to listen for RPC connections.
Use \"0.0.0.0\" to listen on all interfaces.
'';
};
options.rpc-port = mkOption {
type = types.port;
default = 9091;
description = "The RPC port to listen to.";
};
options.script-torrent-done-enabled = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run
<xref linkend="opt-services.transmission.settings.script-torrent-done-filename"/>
at torrent completion.
'';
};
options.script-torrent-done-filename = mkOption {
type = types.nullOr types.path;
default = null;
description = "Executable to be run at torrent completion.";
};
options.umask = mkOption {
type = types.int;
default = 2;
description = ''
Sets transmission's file mode creation mask.
See the umask(2) manpage for more information.
Users who want their saved torrents to be world-writable
may want to set this value to 0.
Bear in mind that JSON only accepts numbers in base 10,
so the standard umask(2) octal notation "022" is written in settings.json as 18.
'';
};
options.utp-enabled = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable <link xlink:href="http://en.wikipedia.org/wiki/Micro_Transport_Protocol">Micro Transport Protocol (µTP)</link>.
'';
};
options.watch-dir = mkOption {
type = types.path;
default = "${cfg.home}/${watchDir}";
description = "Watch a directory for torrent files and add them to transmission.";
};
options.watch-dir-enabled = mkOption {
type = types.bool;
default = false;
description = ''Whether to enable the
<xref linkend="opt-services.transmission.settings.watch-dir"/>.
'';
};
options.trash-original-torrent-files = mkOption {
type = types.bool;
default = false;
description = ''Whether to delete torrents added from the
<xref linkend="opt-services.transmission.settings.watch-dir"/>.
'';
};
};
};
downloadDirPermissions = mkOption {
@@ -74,31 +172,22 @@ in
example = "775";
description = ''
The permissions set by <literal>systemd.activationScripts.transmission-daemon</literal>
on the directories <link linkend="opt-services.transmission.settings">settings.download-dir</link>
and <link linkend="opt-services.transmission.settings">settings.incomplete-dir</link>.
on the directories <xref linkend="opt-services.transmission.settings.download-dir"/>
and <xref linkend="opt-services.transmission.settings.incomplete-dir"/>.
Note that you may also want to change
<link linkend="opt-services.transmission.settings">settings.umask</link>.
'';
};
port = mkOption {
type = types.port;
description = ''
TCP port number to run the RPC/web interface.
If instead you want to change the peer port,
use <link linkend="opt-services.transmission.settings">settings.peer-port</link>
or <link linkend="opt-services.transmission.settings">settings.peer-port-random-on-start</link>.
<xref linkend="opt-services.transmission.settings.umask"/>.
'';
};
home = mkOption {
type = types.path;
default = homeDir;
default = "/var/lib/transmission";
description = ''
The directory where Transmission will create <literal>${settingsDir}</literal>.
as well as <literal>${downloadsDir}/</literal> unless <link linkend="opt-services.transmission.settings">settings.download-dir</link> is changed,
and <literal>${incompleteDir}/</literal> unless <link linkend="opt-services.transmission.settings">settings.incomplete-dir</link> is changed.
as well as <literal>${downloadsDir}/</literal> unless
<xref linkend="opt-services.transmission.settings.download-dir"/> is changed,
and <literal>${incompleteDir}/</literal> unless
<xref linkend="opt-services.transmission.settings.incomplete-dir"/> is changed.
'';
};
@@ -119,19 +208,30 @@ in
description = ''
Path to a JSON file to be merged with the settings.
Useful to merge a file which is better kept out of the Nix store
because it contains sensible data like <link linkend="opt-services.transmission.settings">settings.rpc-password</link>.
to set secret config parameters like <code>rpc-password</code>.
'';
default = "/dev/null";
example = "/var/lib/secrets/transmission/settings.json";
};
openFirewall = mkEnableOption "opening of the peer port(s) in the firewall";
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--log-debug" ];
description = ''
Extra flags passed to the transmission command in the service definition.
'';
};
openPeerPorts = mkEnableOption "opening of the peer port(s) in the firewall";
openRPCPort = mkEnableOption "opening of the RPC port in the firewall";
performanceNetParameters = mkEnableOption ''tweaking of kernel parameters
to open many more connections at the same time.
Note that you may also want to increase
<link linkend="opt-services.transmission.settings">settings.peer-limit-global</link>.
<code>peer-limit-global"</code>.
And be aware that these settings are quite aggressive
and might not suit your regular desktop use.
For instance, SSH sessions may time out more easily'';
@@ -156,34 +256,6 @@ in
install -d -m '${cfg.downloadDirPermissions}' -o '${cfg.user}' -g '${cfg.group}' '${cfg.settings.watch-dir}'
'';
assertions = [
{ assertion = builtins.match "^/.*" cfg.home != null;
message = "`services.transmission.home' must be an absolute path.";
}
{ assertion = types.path.check cfg.settings.download-dir;
message = "`services.transmission.settings.download-dir' must be an absolute path.";
}
{ assertion = types.path.check cfg.settings.incomplete-dir;
message = "`services.transmission.settings.incomplete-dir' must be an absolute path.";
}
{ assertion = types.path.check cfg.settings.watch-dir;
message = "`services.transmission.settings.watch-dir' must be an absolute path.";
}
{ assertion = cfg.settings.script-torrent-done-filename == "" || types.path.check cfg.settings.script-torrent-done-filename;
message = "`services.transmission.settings.script-torrent-done-filename' must be an absolute path.";
}
{ assertion = types.port.check cfg.settings.rpc-port;
message = "${toString cfg.settings.rpc-port} is not a valid port number for `services.transmission.settings.rpc-port`.";
}
# In case both port and settings.rpc-port are explicitely defined: they must be the same.
{ assertion = !options.services.transmission.port.isDefined || cfg.port == cfg.settings.rpc-port;
message = "`services.transmission.port' is not equal to `services.transmission.settings.rpc-port'";
}
];
services.transmission.settings =
optionalAttrs options.services.transmission.port.isDefined { rpc-port = cfg.port; };
systemd.services.transmission = {
description = "Transmission BitTorrent Service";
after = [ "network.target" ] ++ optional apparmor.enable "apparmor.service";
@@ -199,15 +271,13 @@ in
install -D -m 600 -o '${cfg.user}' -g '${cfg.group}' /dev/stdin \
'${cfg.home}/${settingsDir}/settings.json'
'')];
ExecStart="${pkgs.transmission}/bin/transmission-daemon -f -g ${cfg.home}/${settingsDir}";
ExecStart="${pkgs.transmission}/bin/transmission-daemon -f -g ${cfg.home}/${settingsDir} ${escapeShellArgs cfg.extraFlags}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
User = cfg.user;
Group = cfg.group;
# Create rootDir in the host's mount namespace.
RuntimeDirectory = [(baseNameOf rootDir)];
RuntimeDirectoryMode = "755";
# Avoid mounting rootDir in the own rootDir of ExecStart='s mount namespace.
InaccessiblePaths = ["-+${rootDir}"];
# This is for BindPaths= and BindReadOnlyPaths=
# to allow traversal of directories they create in RootDirectory=.
UMask = "0066";
@@ -228,11 +298,9 @@ in
cfg.settings.download-dir
] ++
optional cfg.settings.incomplete-dir-enabled
cfg.settings.incomplete-dir
++
optional cfg.settings.watch-dir-enabled
cfg.settings.watch-dir
;
cfg.settings.incomplete-dir ++
optional (cfg.settings.watch-dir-enabled && cfg.settings.trash-original-torrent-files)
cfg.settings.watch-dir;
BindReadOnlyPaths = [
# No confinement done of /nix/store here like in systemd-confinement.nix,
# an AppArmor profile is provided to get a confinement based upon paths and rights.
@@ -241,8 +309,10 @@ in
"/run"
] ++
optional (cfg.settings.script-torrent-done-enabled &&
cfg.settings.script-torrent-done-filename != "")
cfg.settings.script-torrent-done-filename;
cfg.settings.script-torrent-done-filename != null)
cfg.settings.script-torrent-done-filename ++
optional (cfg.settings.watch-dir-enabled && !cfg.settings.trash-original-torrent-files)
cfg.settings.watch-dir;
# The following options are only for optimizing:
# systemd-analyze security transmission
AmbientCapabilities = "";
@@ -287,7 +357,6 @@ in
"quotactl"
];
SystemCallArchitectures = "native";
SystemCallErrorNumber = "EPERM";
};
};
@@ -309,25 +378,28 @@ in
};
});
networking.firewall = mkIf cfg.openFirewall (
if cfg.settings.peer-port-random-on-start
then
{ allowedTCPPortRanges =
[ { from = cfg.settings.peer-port-random-low;
to = cfg.settings.peer-port-random-high;
}
];
allowedUDPPortRanges =
[ { from = cfg.settings.peer-port-random-low;
to = cfg.settings.peer-port-random-high;
}
];
}
else
{ allowedTCPPorts = [ cfg.settings.peer-port ];
allowedUDPPorts = [ cfg.settings.peer-port ];
}
);
networking.firewall = mkMerge [
(mkIf cfg.openPeerPorts (
if cfg.settings.peer-port-random-on-start
then
{ allowedTCPPortRanges =
[ { from = cfg.settings.peer-port-random-low;
to = cfg.settings.peer-port-random-high;
}
];
allowedUDPPortRanges =
[ { from = cfg.settings.peer-port-random-low;
to = cfg.settings.peer-port-random-high;
}
];
}
else
{ allowedTCPPorts = [ cfg.settings.peer-port ];
allowedUDPPorts = [ cfg.settings.peer-port ];
}
))
(mkIf cfg.openRPCPort { allowedTCPPorts = [ cfg.settings.rpc-port ]; })
];
boot.kernel.sysctl = mkMerge [
# Transmission uses a single UDP socket in order to implement multiple uTP sockets,
@@ -342,21 +414,21 @@ in
# Increase the number of available source (local) TCP and UDP ports to 49151.
# Usual default is 32768 60999, ie. 28231 ports.
# Find out your current usage with: ss -s
"net.ipv4.ip_local_port_range" = "16384 65535";
"net.ipv4.ip_local_port_range" = mkDefault "16384 65535";
# Timeout faster generic TCP states.
# Usual default is 600.
# Find out your current usage with: watch -n 1 netstat -nptuo
"net.netfilter.nf_conntrack_generic_timeout" = 60;
"net.netfilter.nf_conntrack_generic_timeout" = mkDefault 60;
# Timeout faster established but inactive connections.
# Usual default is 432000.
"net.netfilter.nf_conntrack_tcp_timeout_established" = 600;
"net.netfilter.nf_conntrack_tcp_timeout_established" = mkDefault 600;
# Clear immediately TCP states after timeout.
# Usual default is 120.
"net.netfilter.nf_conntrack_tcp_timeout_time_wait" = 1;
"net.netfilter.nf_conntrack_tcp_timeout_time_wait" = mkDefault 1;
# Increase the number of trackable connections.
# Usual default is 262144.
# Find out your current usage with: conntrack -C
"net.netfilter.nf_conntrack_max" = 1048576;
"net.netfilter.nf_conntrack_max" = mkDefault 1048576;
})
];
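
Because the sysctl values above are now wrapped in mkDefault, a plain definition in the user's own configuration takes priority without needing mkForce; a minimal sketch:

  {
    # Overrides the module's mkDefault 1048576 with an ordinary-priority value.
    boot.kernel.sysctl."net.netfilter.nf_conntrack_max" = 524288;
  }
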
@@ -372,7 +444,7 @@ in
rw ${cfg.settings.incomplete-dir}/**,
''}
${optionalString cfg.settings.watch-dir-enabled ''
rw ${cfg.settings.watch-dir}/**,
r${optionalString cfg.settings.trash-original-torrent-files "w"} ${cfg.settings.watch-dir}/**,
''}
profile dirs {
rw ${cfg.settings.download-dir}/**,
@@ -380,12 +452,12 @@ in
rw ${cfg.settings.incomplete-dir}/**,
''}
${optionalString cfg.settings.watch-dir-enabled ''
rw ${cfg.settings.watch-dir}/**,
r${optionalString cfg.settings.trash-original-torrent-files "w"} ${cfg.settings.watch-dir}/**,
''}
}
${optionalString (cfg.settings.script-torrent-done-enabled &&
cfg.settings.script-torrent-done-filename != "") ''
cfg.settings.script-torrent-done-filename != null) ''
# Stack transmission_directories profile on top of
# any existing profile for script-torrent-done-filename
# FIXME: to be tested as I'm not sure it works well with NoNewPrivileges=
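
Putting the reworked options together, a usage sketch (values illustrative; enable comes from the unchanged mkEnableOption above). Keys without a declared option, such as rpc-whitelist, pass through the freeform settingsFormat into settings.json:

  {
    services.transmission = {
      enable = true;
      openPeerPorts = true;            # replaces openFirewall, which is now an alias
      openRPCPort = false;             # keep the RPC interface local-only
      extraFlags = [ "--log-debug" ];
      settings = {
        download-dir = "/srv/torrents";
        rpc-bind-address = "0.0.0.0";
        rpc-whitelist = "127.0.0.1,192.168.*.*";  # freeform key, no declared option
      };
    };
  }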

@@ -33,7 +33,7 @@ let
fi
'';
streamingConfig = builtins.fromJSON (builtins.readFile ./streaming.json);
streamingConfig = lib.importJSON ./streaming.json;
logConfig = {
appenders.stdout.type = "stdout";
categories = {
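
lib.importJSON is simply the one-step spelling of the pattern it replaces; a sketch of the equivalence (assuming lib is in scope, as in the module arguments):

  let
    # Both bindings read and parse the same file; importJSON fuses the two steps.
    viaBuiltins = builtins.fromJSON (builtins.readFile ./streaming.json);
    viaLib = lib.importJSON ./streaming.json;
  in
    assert viaBuiltins == viaLib; viaLib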

@@ -221,7 +221,7 @@ in {
assertions = [
{ assertion = db.createLocally -> db.user == user;
message = "services.bookstack.database.user must be set to ${user} if services.mediawiki.database.createLocally is set true.";
message = "services.bookstack.database.user must be set to ${user} if services.bookstack.database.createLocally is set true.";
}
{ assertion = db.createLocally -> db.passwordFile == null;
message = "services.bookstack.database.passwordFile cannot be specified if services.bookstack.database.createLocally is set to true.";

@@ -0,0 +1,139 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.code-server;
defaultUser = "code-server";
defaultGroup = defaultUser;
in {
###### interface
options = {
services.code-server = {
enable = mkEnableOption "code-server";
package = mkOption {
default = pkgs.code-server;
defaultText = "pkgs.code-server";
description = "Which code-server derivation to use.";
type = types.package;
};
extraPackages = mkOption {
default = [ ];
description = "Packages that are available in the PATH of code-server.";
example = "[ pkgs.go ]";
type = types.listOf types.package;
};
extraEnvironment = mkOption {
type = types.attrsOf types.str;
description =
"Additional environment variables to passed to code-server.";
default = { };
example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; };
};
extraArguments = mkOption {
default = [ "--disable-telemetry" ];
description = "Additional arguments that passed to code-server";
example = ''[ "--verbose" ]'';
type = types.listOf types.str;
};
host = mkOption {
default = "127.0.0.1";
description = "The host-ip to bind to.";
type = types.str;
};
port = mkOption {
default = 4444;
description = "The port where code-server runs.";
type = types.port;
};
auth = mkOption {
default = "password";
description = "The type of authentication to use.";
type = types.enum [ "none" "password" ];
};
hashedPassword = mkOption {
default = "";
description =
"Create the password with: 'echo -n 'thisismypassword' | npx argon2-cli -e'.";
type = types.str;
};
user = mkOption {
default = defaultUser;
example = "yourUser";
description = ''
The user to run code-server as.
By default, a user named <literal>${defaultUser}</literal> will be created.
'';
type = types.str;
};
group = mkOption {
default = defaultGroup;
example = "yourGroup";
description = ''
The group to run code-server under.
By default, a group named <literal>${defaultGroup}</literal> will be created.
'';
type = types.str;
};
extraGroups = mkOption {
default = [ ];
description =
"An array of additional groups for the <literal>${defaultUser}</literal> user.";
example = [ "docker" ];
type = types.listOf types.str;
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.code-server = {
description = "VSCode server";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
path = cfg.extraPackages;
environment = {
HASHED_PASSWORD = cfg.hashedPassword;
} // cfg.extraEnvironment;
serviceConfig = {
ExecStart = "${cfg.package}/bin/code-server --bind-addr ${cfg.host}:${toString cfg.port} --auth ${cfg.auth} " + builtins.concatStringsSep " " cfg.extraArguments;
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
RuntimeDirectory = cfg.user;
User = cfg.user;
Group = cfg.group;
Restart = "on-failure";
};
};
users.users."${cfg.user}" = mkMerge [
(mkIf (cfg.user == defaultUser) {
isNormalUser = true;
description = "code-server user";
inherit (cfg) group;
})
{
packages = cfg.extraPackages;
inherit (cfg) extraGroups;
}
];
users.groups."${defaultGroup}" = mkIf (cfg.group == defaultGroup) { };
};
meta.maintainers = with maintainers; [ stackshadow ];
}
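
A usage sketch for the new module (the hash is a placeholder; generate a real one as the hashedPassword description suggests, and pkgs is assumed in scope via the module arguments):

  {
    services.code-server = {
      enable = true;
      host = "127.0.0.1";
      port = 4444;
      auth = "password";
      hashedPassword = "$argon2i$v=19$m=4096,t=3,p=1$...";  # placeholder hash
      extraPackages = [ pkgs.git ];
      extraGroups = [ "docker" ];
    };
  }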

@@ -66,6 +66,8 @@ let
siteOpts = { config, lib, name, ... }:
{
options = {
enable = mkEnableOption "DokuWiki web application.";
package = mkOption {
type = types.package;
default = pkgs.dokuwiki;
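
The new per-site enable flag would be set on a site's submodule; a sketch, assuming sites are declared as an attribute set of siteOpts submodules under services.dokuwiki (the exact attribute path is not shown in this hunk):

  {
    services.dokuwiki."wiki.example.org" = {
      enable = true;
    };
  }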

@@ -33,11 +33,14 @@ let
then "sqlite:////var/lib/ihatemoney/ihatemoney.sqlite"
else "postgresql:///${db}"}'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_DEFAULT_SENDER = ("${cfg.defaultSender.name}", "${cfg.defaultSender.email}")
MAIL_DEFAULT_SENDER = (r"${cfg.defaultSender.name}", r"${cfg.defaultSender.email}")
ACTIVATE_DEMO_PROJECT = ${toBool cfg.enableDemoProject}
ADMIN_PASSWORD = "${toString cfg.adminHashedPassword /*toString null == ""*/}"
ADMIN_PASSWORD = r"${toString cfg.adminHashedPassword /*toString null == ""*/}"
ALLOW_PUBLIC_PROJECT_CREATION = ${toBool cfg.enablePublicProjectCreation}
ACTIVATE_ADMIN_DASHBOARD = ${toBool cfg.enableAdminDashboard}
SESSION_COOKIE_SECURE = ${toBool cfg.secureCookie}
ENABLE_CAPTCHA = ${toBool cfg.enableCaptcha}
LEGAL_LINK = r"${toString cfg.legalLink}"
${cfg.extraConfig}
'';
@@ -79,9 +82,20 @@ in
description = "The email of the sender of ihatemoney emails";
};
};
secureCookie = mkOption {
type = types.bool;
default = true;
description = "Use secure cookies. Disable this when ihatemoney is served via http instead of https";
};
enableDemoProject = mkEnableOption "access to the demo project in ihatemoney";
enablePublicProjectCreation = mkEnableOption "permission to create projects in ihatemoney by anyone";
enableAdminDashboard = mkEnableOption "ihatemoney admin dashboard";
enableCaptcha = mkEnableOption "a simplistic captcha for some forms";
legalLink = mkOption {
type = types.nullOr types.str;
default = null;
description = "The URL to a page explaining legal statements about your service, eg. GDPR-related information.";
};
extraConfig = mkOption {
type = types.str;
default = "";

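A usage sketch for the new ihatemoney options (values illustrative; enable assumed from the wider module):

  {
    services.ihatemoney = {
      enable = true;
      secureCookie = false;   # served over plain HTTP, e.g. localhost only
      enableCaptcha = true;
      legalLink = "https://example.org/legal";
    };
  }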