Merge commit '102eb68ceecbbd32ab1906a53ef5a7269dc9794a'

Branch: wip/yesman
Author: Katharina Fey (3 years ago)
Commit: fdbac8e7d0
Signed by: kookie
GPG Key ID: 90734A9E619C8A6C
  infra/libkookie/nixpkgs/.github/CODEOWNERS | 2
  infra/libkookie/nixpkgs/.github/workflows/rebase-staging.yml | 60
  infra/libkookie/nixpkgs/.github/workflows/rebase.yml | 134
  infra/libkookie/nixpkgs/doc/builders/fetchers.chapter.md | 4
  infra/libkookie/nixpkgs/doc/builders/images.xml | 2
  infra/libkookie/nixpkgs/doc/builders/images/dockertools.section.md | 298
  infra/libkookie/nixpkgs/doc/builders/images/dockertools.xml | 499
  infra/libkookie/nixpkgs/doc/builders/packages/eclipse.section.md | 64
  infra/libkookie/nixpkgs/doc/builders/packages/eclipse.xml | 72
  infra/libkookie/nixpkgs/doc/builders/packages/index.xml | 2
  infra/libkookie/nixpkgs/doc/contributing/submitting-changes.chapter.md | 5
  infra/libkookie/nixpkgs/doc/languages-frameworks/python.section.md | 9
  infra/libkookie/nixpkgs/doc/languages-frameworks/qt.section.md | 112
  infra/libkookie/nixpkgs/doc/languages-frameworks/rust.section.md | 221
  infra/libkookie/nixpkgs/doc/manual.xml | 2
  infra/libkookie/nixpkgs/doc/stdenv/cross-compilation.chapter.md | 19
  infra/libkookie/nixpkgs/doc/stdenv/multiple-output.xml | 12
  infra/libkookie/nixpkgs/doc/stdenv/platform-notes.chapter.md | 62
  infra/libkookie/nixpkgs/doc/stdenv/platform-notes.xml | 83
  infra/libkookie/nixpkgs/doc/using/configuration.xml | 12
  infra/libkookie/nixpkgs/lib/licenses.nix | 4
  infra/libkookie/nixpkgs/maintainers/maintainer-list.nix | 166
  infra/libkookie/nixpkgs/maintainers/scripts/nixpkgs-lint.nix | 3
  infra/libkookie/nixpkgs/nixos/doc/manual/configuration/config-file.xml | 7
  infra/libkookie/nixpkgs/nixos/doc/manual/configuration/networking.xml | 1
  infra/libkookie/nixpkgs/nixos/doc/manual/configuration/profiles/clone-config.xml | 2
  infra/libkookie/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.xml | 67
  infra/libkookie/nixpkgs/nixos/doc/manual/development/writing-modules.xml | 5
  infra/libkookie/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.xml | 7
  infra/libkookie/nixpkgs/nixos/doc/manual/release-notes/rl-2105.xml | 60
  infra/libkookie/nixpkgs/nixos/lib/make-squashfs.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/config/console.nix | 13
  infra/libkookie/nixpkgs/nixos/modules/config/swap.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/config/users-groups.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/hardware/ksm.nix | 12
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64-new-kernel.nix | 17
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64.nix | 84
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix | 61
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix | 50
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi4.nix | 18
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/sd-image.nix | 247
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/system-tarball-fuloong2f.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/installer/cd-dvd/system-tarball-pc.nix | 4
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel.nix | 7
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64.nix | 75
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix | 52
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi-installer.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix | 41
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi4-installer.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi4.nix | 8
  infra/libkookie/nixpkgs/nixos/modules/installer/sd-card/sd-image.nix | 245
  infra/libkookie/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl | 18
  infra/libkookie/nixpkgs/nixos/modules/installer/tools/tools.nix | 7
  infra/libkookie/nixpkgs/nixos/modules/misc/crashdump.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/module-list.nix | 11
  infra/libkookie/nixpkgs/nixos/modules/profiles/hardened.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/programs/steam.nix | 10
  infra/libkookie/nixpkgs/nixos/modules/rename.nix | 5
  infra/libkookie/nixpkgs/nixos/modules/security/hidepid.nix | 31
  infra/libkookie/nixpkgs/nixos/modules/security/hidepid.xml | 28
  infra/libkookie/nixpkgs/nixos/modules/security/rngd.nix | 64
  infra/libkookie/nixpkgs/nixos/modules/services/backup/zrepl.nix | 54
  infra/libkookie/nixpkgs/nixos/modules/services/blockchain/ethereum/geth.nix | 178
  infra/libkookie/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix | 27
  infra/libkookie/nixpkgs/nixos/modules/services/databases/mysql.nix | 15
  infra/libkookie/nixpkgs/nixos/modules/services/desktops/pipewire.nix | 183
  infra/libkookie/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire-media-session.nix | 341
  infra/libkookie/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix | 265
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/acpid.nix | 31
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/bluetooth.nix | 152
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/spacenavd.nix | 26
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/trezord.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/udev.nix | 23
  infra/libkookie/nixpkgs/nixos/modules/services/hardware/xow.nix | 3
  infra/libkookie/nixpkgs/nixos/modules/services/mail/mlmmj.nix | 43
  infra/libkookie/nixpkgs/nixos/modules/services/misc/defaultUnicornConfig.rb | 69
  infra/libkookie/nixpkgs/nixos/modules/services/misc/etesync-dav.nix | 92
  infra/libkookie/nixpkgs/nixos/modules/services/misc/gitlab.nix | 324
  infra/libkookie/nixpkgs/nixos/modules/services/misc/gollum.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/misc/lifecycled.nix | 164
  infra/libkookie/nixpkgs/nixos/modules/services/misc/nix-gc.nix | 53
  infra/libkookie/nixpkgs/nixos/modules/services/misc/plikd.nix | 82
  infra/libkookie/nixpkgs/nixos/modules/services/monitoring/grafana.nix | 143
  infra/libkookie/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/network-filesystems/ceph.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/networking/dhcpcd.nix | 3
  infra/libkookie/nixpkgs/nixos/modules/services/networking/dnscrypt-proxy2.nix | 1
  infra/libkookie/nixpkgs/nixos/modules/services/networking/kresd.nix | 6
  infra/libkookie/nixpkgs/nixos/modules/services/printing/cupsd.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/security/clamav.nix | 67
  infra/libkookie/nixpkgs/nixos/modules/services/ttys/kmscon.nix | 7
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix | 4
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/mastodon.nix | 110
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/miniflux.nix | 2
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/moinmoin.nix | 4
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix | 56
  infra/libkookie/nixpkgs/nixos/modules/services/web-apps/nextcloud.xml | 2

  Some files were not shown because too many files have changed in this diff.

@@ -100,7 +100,7 @@
 # Rust
 /pkgs/development/compilers/rust @Mic92 @LnL7 @zowoq
-/pkgs/build-support/rust @andir @zowoq
+/pkgs/build-support/rust @andir @danieldk @zowoq
 # Darwin-related
 /pkgs/stdenv/darwin @NixOS/darwin-maintainers

@@ -1,60 +0,0 @@
on:
  issue_comment:
    types:
      - created

# This action allows people with write access to the repo to rebase a PR's base branch from
# master to staging by commenting `/rebase-staging` on the PR while avoiding CODEOWNER notifications.

jobs:
  rebase:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase-staging')
    steps:
      - uses: peter-evans/create-or-update-comment@v1
        with:
          comment-id: ${{ github.event.comment.id }}
          reactions: eyes
      - uses: scherermichael-oss/action-has-permission@1.0.6
        id: check-write-access
        with:
          required-permission: write
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: check base branch is master
        if: steps.check-write-access.outputs.has-permission
        run: |
          if [ "$(curl https://api.github.com/repos/NixOS/nixpkgs/pulls/${{ github.event.issue.number }} | jq -r '.base.ref')" != "master" ]; then
            echo "This action only works when the current base branch is master."
            exit 1
          fi
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
        if: steps.check-write-access.outputs.has-permission
      - name: rebase pull request
        if: steps.check-write-access.outputs.has-permission
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PULL_REQUEST: ${{ github.event.issue.number }}
        run: |
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git fetch origin
          gh pr checkout "$PULL_REQUEST"
          git rebase \
            --onto="$(git merge-base origin/master origin/staging)" \
            "HEAD~$(git rev-list --count HEAD ^master)"
          git push --force
          curl \
            -X POST \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Authorization: token $GITHUB_TOKEN" \
            -d '{ "base": "staging" }' \
            "https://api.github.com/repos/NixOS/nixpkgs/pulls/$PULL_REQUEST"
      - uses: peter-evans/create-or-update-comment@v1
        if: ${{ failure() }}
        with:
          issue-number: ${{ github.event.issue.number }}
          body: |
            [Failed to rebase on `staging`](https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }})

@@ -0,0 +1,134 @@
on:
  issue_comment:
    types:
      - created

# This action allows people with write access to the repo to rebase a PR's base branch
# by commenting `/rebase ${branch}` on the PR while avoiding CODEOWNER notifications.

jobs:
  rebase:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase')
    steps:
      - uses: peter-evans/create-or-update-comment@v1
        with:
          comment-id: ${{ github.event.comment.id }}
          reactions: eyes
      - uses: scherermichael-oss/action-has-permission@1.0.6
        id: check-write-access
        with:
          required-permission: write
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: check permissions
        run: |
          echo "Commenter doesn't have write access to the repo"
          exit 1
        if: "! steps.check-write-access.outputs.has-permission"
      - name: setup
        run: |
          curl "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}" 2>/dev/null >pr.json
          cat <<EOF >>"$GITHUB_ENV"
          CAN_MODIFY=$(jq -r '.maintainer_can_modify' pr.json)
          COMMITS=$(jq -r '.commits' pr.json)
          CURRENT_BASE=$(jq -r '.base.ref' pr.json)
          PR_BRANCH=$(jq -r '.head.ref' pr.json)
          COMMENT_BRANCH=$(echo ${{ github.event.comment.body }} | awk "/^\/rebase / {print \$2}")
          PULL_REQUEST=${{ github.event.issue.number }}
          EOF
          rm pr.json
      - name: check branch
        env:
          PERMANENT_BRANCHES: "haskell-updates|master|nixos|nixpkgs|python-unstable|release|staging"
          VALID_BRANCHES: "haskell-updates|master|python-unstable|release-20.09|staging|staging-20.09|staging-next"
        run: |
          message() {
            cat <<EOF
          Can't rebase $PR_BRANCH from $CURRENT_BASE onto $COMMENT_BRANCH (PR:$PULL_REQUEST COMMITS:$COMMITS)
          EOF
          }
          if ! [[ "$COMMENT_BRANCH" =~ ^($VALID_BRANCHES)$ ]]; then
            cat <<EOF
          Check that the branch from the comment is valid:

          $(message)

          This action can only rebase onto these branches:

          $VALID_BRANCHES

          \`/rebase \${branch}\` must be at the start of the line
          EOF
            exit 1
          fi
          if [[ "$COMMENT_BRANCH" == "$CURRENT_BASE" ]]; then
            cat <<EOF
          Check that the branch from the comment isn't the current base branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$COMMENT_BRANCH" == "$PR_BRANCH" ]]; then
            cat <<EOF
          Check that the branch from the comment isn't the current branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$PR_BRANCH" =~ ^($PERMANENT_BRANCHES) ]]; then
            cat <<EOF
          Check that the PR branch isn't a permanent branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$CAN_MODIFY" != "true" ]]; then
            cat <<EOF
          Check that maintainers can edit the PR branch:

          $(message)
          EOF
            exit 1
          fi
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: rebase pull request
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git fetch origin
          gh pr checkout "$PULL_REQUEST"
          git rebase \
            --onto="$(git merge-base origin/"$CURRENT_BASE" origin/"$COMMENT_BRANCH")" \
            "HEAD~$COMMITS"
          git push --force
          curl \
            -X POST \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Authorization: token $GITHUB_TOKEN" \
            -d "{ \"base\": \"$COMMENT_BRANCH\" }" \
            "https://api.github.com/repos/${{ github.repository }}/pulls/$PULL_REQUEST"
          curl \
            -X PATCH \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Authorization: token $GITHUB_TOKEN" \
            -d '{ "state": "closed" }' \
            "https://api.github.com/repos/${{ github.repository }}/pulls/$PULL_REQUEST"
      - uses: peter-evans/create-or-update-comment@v1
        with:
          issue-number: ${{ github.event.issue.number }}
          body: |
            Rebased, please reopen the pull request to restart CI
      - uses: peter-evans/create-or-update-comment@v1
        if: failure()
        with:
          issue-number: ${{ github.event.issue.number }}
          body: |
            [Failed to rebase](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})

@@ -31,6 +31,8 @@ Used with Subversion. Expects `url` to point to a Subversion directory, `rev`, and `sha25
Used with Git. Expects `url` to a Git repo, `rev`, and `sha256`. `rev` in this case can be the full Git commit id (SHA1 hash) or a tag name like `refs/tags/v1.0`.
Additionally, the following optional arguments can be given: `fetchSubmodules = true` makes `fetchgit` also fetch the submodules of a repository. If `deepClone` is set to true, the entire repository is cloned, as opposed to just creating a shallow clone. `deepClone = true` also implies `leaveDotGit = true`, which means that the `.git` directory of the clone won't be removed after checkout.
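
As a sketch of a `fetchgit` call (the URL, revision, and hash below are placeholders for illustration, not real values):

```nix
fetchgit {
  url = "https://example.org/some/repo.git";  # hypothetical repository URL
  rev = "refs/tags/v1.0";                     # tag or full commit id
  sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder hash
  fetchSubmodules = true;                     # also fetch submodules
}
```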
## `fetchfossil`
Used with Fossil. Expects `url` to a Fossil archive, `rev`, and `sha256`.
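
For illustration, a minimal hedged sketch (all values are placeholders):

```nix
fetchfossil {
  url = "https://example.org/project";  # hypothetical Fossil repository
  rev = "deadbeef";                     # hypothetical check-in id
  sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder hash
}
```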
@ -49,6 +51,8 @@ A number of fetcher functions wrap part of `fetchurl` and `fetchzip`. They are m
`fetchFromGitHub` expects four arguments. `owner` is a string corresponding to the GitHub user or organization that controls this repository. `repo` corresponds to the name of the software repository. These are located at the top of every GitHub HTML page as `owner`/`repo`. `rev` corresponds to the Git commit hash or tag (e.g. `v1.0`) that will be downloaded from Git. Finally, `sha256` corresponds to the hash of the extracted directory. Again, other hash algorithms are also available, but `sha256` is currently preferred.
`fetchFromGitHub` uses `fetchzip` to download the source archive generated by GitHub for the specified revision. If `leaveDotGit`, `deepClone` or `fetchSubmodules` are set to `true`, `fetchFromGitHub` will use `fetchgit` instead. Refer to its section for documentation of these options.
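
A minimal sketch of a call (the owner and repo are real nixpkgs names, but the `rev` and `sha256` below are placeholders to be replaced with actual values):

```nix
fetchFromGitHub {
  owner = "NixOS";
  repo = "nixpkgs";
  rev = "v21.05";  # hypothetical tag
  sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder hash
}
```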
## `fetchFromGitLab`
This is used with GitLab repositories. The arguments expected are very similar to `fetchFromGitHub` above.
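
A hedged sketch along the same lines (all values are placeholders):

```nix
fetchFromGitLab {
  owner = "some-owner";  # hypothetical GitLab namespace
  repo = "some-repo";    # hypothetical repository name
  rev = "v1.0";
  sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder hash
}
```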

@@ -6,7 +6,7 @@
   This chapter describes tools for creating various types of images.
  </para>
  <xi:include href="images/appimagetools.xml" />
- <xi:include href="images/dockertools.xml" />
+ <xi:include href="images/dockertools.section.xml" />
  <xi:include href="images/ocitools.xml" />
  <xi:include href="images/snaptools.xml" />
 </chapter>

@@ -0,0 +1,298 @@
# pkgs.dockerTools {#sec-pkgs-dockerTools}
`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120). Docker itself is not used to perform any of the operations done by these functions.
## buildImage {#ssec-pkgs-dockerTools-buildImage}
This function is analogous to the `docker build` command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with `docker load`.
The parameters of `buildImage` with relative example values are described below:
[]{#ex-dockerTools-buildImage}
[]{#ex-dockerTools-buildImage-runAsRoot}
```nix
buildImage {
  name = "redis";
  tag = "latest";

  fromImage = someBaseImage;
  fromImageName = null;
  fromImageTag = "latest";

  contents = pkgs.redis;

  runAsRoot = ''
    #!${pkgs.runtimeShell}
    mkdir -p /data
  '';

  config = {
    Cmd = [ "/bin/redis-server" ];
    WorkingDir = "/data";
    Volumes = { "/data" = { }; };
  };
}
```
The above example will build a Docker image `redis/latest` from the given base image. Loading and running this image in Docker results in `redis-server` being started automatically.
- `name` specifies the name of the resulting image. This is the only required argument for `buildImage`.
- `tag` specifies the tag of the resulting image. By default it's `null`, which indicates that the Nix output hash will be used as the tag.
- `fromImage` is the repository tarball containing the base image. It must be a valid Docker image, such as one exported by `docker save`. By default it's `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`.
- `fromImageName` can be used to further specify the base image within the repository, in case it contains multiple images. By default it's `null`, in which case `buildImage` will use the first image available in the repository.
- `fromImageTag` can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it's `null`, in which case `buildImage` will use the first tag available for the base image.
- `contents` is a derivation that will be copied into the new layer of the resulting image. This can be similarly seen as `ADD contents/ /` in a `Dockerfile`. By default it's `null`.
- `runAsRoot` is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied `contents` derivation. This can be similarly seen as `RUN ...` in a `Dockerfile`.
> **_NOTE:_** Using this parameter requires the `kvm` device to be available.
- `config` is used to specify the configuration of the containers that will be started off the built image in Docker. The available options are listed in the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions).
After the new layer has been created, its closure (to which `contents`, `config` and `runAsRoot` contribute) will be copied in the layer itself. Only new dependencies that are not already in the existing layers will be copied.
At the end of the process, only one new single layer will be produced and added to the resulting image.
The resulting repository will only list the single image `image/tag`. In the case of [the `buildImage` example](#ex-dockerTools-buildImage) it would be `redis/latest`.
It is possible to inspect the arguments with which an image was built using its `buildArgs` attribute.
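
For example, the arguments can be read back in `nix repl`; a sketch, where `redisImage` is a hypothetical binding for the image built above:

```nix
let
  # hypothetical binding for the buildImage result from the earlier example
  redisImage = pkgs.dockerTools.buildImage {
    name = "redis";
    tag = "latest";
  };
in
  redisImage.buildArgs.name  # evaluates to "redis"
```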
> **_NOTE:_** If you see errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)` you may need to add `pkgs.iana-etc` to `contents`.
> **_NOTE:_** If you see errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)` you may need to add `pkgs.cacert` to `contents`.
By default `buildImage` will use a static date of one second past the UNIX Epoch. This allows `buildImage` to produce binary reproducible images. When listing images with `docker images`, the newly created images will be listed like this:
```ShellSession
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest 08c791c7846e 48 years ago 25.2MB
```
You can break binary reproducibility but have a sorted, meaningful `CREATED` column by setting `created` to `now`.
```nix
pkgs.dockerTools.buildImage {
  name = "hello";
  tag = "latest";
  created = "now";
  contents = pkgs.hello;
  config.Cmd = [ "/bin/hello" ];
}
```
and now the Docker CLI will display a reasonable date and sort the images as expected:
```ShellSession
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest de2bf4786de6 About a minute ago 25.2MB
```
however, the produced images will not be binary reproducible.
## buildLayeredImage {#ssec-pkgs-dockerTools-buildLayeredImage}
Create a Docker image with many of the store paths being on their own layer to improve sharing between images. The image is realized into the Nix store as a gzipped tarball. Depending on the intended usage, many users might prefer to use `streamLayeredImage` instead, which this function uses internally.
`name`
: The name of the resulting image.
`tag` _optional_
: Tag of the generated image.
*Default:* the output path's hash
`contents` _optional_
: Top level paths in the container. Either a single derivation, or a list of derivations.
*Default:* `[]`
`config` _optional_
: Run-time configuration of the container. A full list of the options is available in the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions).
*Default:* `{}`
`created` _optional_
: Date and time the layers were created. Follows the same `now` exception supported by `buildImage`.
*Default:* `1970-01-01T00:00:01Z`
`maxLayers` _optional_
: Maximum number of layers to create.
*Default:* `100`
*Maximum:* `125`
`extraCommands` _optional_
: Shell commands to run while building the final layer, without access to most of the layer contents. Changes to this layer are "on top" of all the other layers, so they can create additional directories and files. (A combined example follows this list.)
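
As a sketch combining these arguments (the image name and parameter values are illustrative, not prescriptive):

```nix
pkgs.dockerTools.buildLayeredImage {
  name = "hello-layered";          # hypothetical image name
  tag = "latest";
  contents = [ pkgs.hello ];
  config.Cmd = [ "/bin/hello" ];
  created = "now";                 # breaks reproducibility, as with buildImage
  maxLayers = 120;
  extraCommands = "mkdir -p tmp";  # runs while building the final layer
}
```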
### Behavior of `contents` in the final image {#dockerTools-buildLayeredImage-arg-contents}
Each path directly listed in `contents` will have a symlink in the root of the image.
For example:
```nix
pkgs.dockerTools.buildLayeredImage {
  name = "hello";
  contents = [ pkgs.hello ];
}
```
will create symlinks for all the paths in the `hello` package:
```ShellSession
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo
```
### Automatic inclusion of `config` references {#dockerTools-buildLayeredImage-arg-config}
The closure of `config` is automatically included in the closure of the final image.
This allows you to make very simple Docker images with very little code. This container will start up and run `hello`:
```nix
pkgs.dockerTools.buildLayeredImage {
  name = "hello";
  config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
```
### Adjusting `maxLayers` {#dockerTools-buildLayeredImage-arg-maxLayers}
Increasing the `maxLayers` increases the number of layers which have a chance to be shared between different images.
Modern Docker installations support up to 128 layers; however, older versions support as few as 42.
If the produced image will not be extended by other Docker builds, it is safe to set `maxLayers` to `128`. However, it will then be impossible to extend the image further.
The first (`maxLayers-2`) most "popular" paths will have their own individual layers, then layer #`maxLayers-1` will contain all the remaining "unpopular" paths, and finally layer #`maxLayers` will contain the Image configuration.
Docker's layers are not inherently ordered; they are content-addressable and are not explicitly layered until they are composed into an Image.
## streamLayeredImage {#ssec-pkgs-dockerTools-streamLayeredImage}
Builds a script which, when run, will stream an uncompressed tarball of a Docker image to stdout. The arguments to this function are as for `buildLayeredImage`. This method of constructing an image does not realize the image into the Nix store, so it saves on IO and disk/cache space, particularly with large images.
The image produced by running the output script can be piped directly into `docker load`, to load it into the local docker daemon:
```ShellSession
$(nix-build) | docker load
```
Alternatively, the image can be piped via `gzip` into `skopeo`, e.g. to copy it into a registry:
```ShellSession
$(nix-build) | gzip --fast | skopeo copy docker-archive:/dev/stdin docker://some_docker_registry/myimage:tag
```
## pullImage {#ssec-pkgs-dockerTools-fetchFromRegistry}
This function is analogous to the `docker pull` command, in that it can be used to pull a Docker image from a Docker registry. By default [Docker Hub](https://hub.docker.com/) is used to pull images.
Its parameters are described in the example below:
```nix
pullImage {
  imageName = "nixos/nix";
  imageDigest =
    "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b";
  finalImageName = "nix";
  finalImageTag = "1.11";
  sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8";
  os = "linux";
  arch = "x86_64";
}
```
- `imageName` specifies the name of the image to be downloaded, which can also include the registry namespace (e.g. `nixos`). This argument is required.
- `imageDigest` specifies the digest of the image to be downloaded. This argument is required.
- `finalImageName`, if specified, is the name of the image to be created. Note that it is never used to fetch the image, since we prefer to rely on the immutable digest ID. By default it's equal to `imageName`.
- `finalImageTag`, if specified, is the tag of the image to be created. Note that it is never used to fetch the image, since we prefer to rely on the immutable digest ID. By default it's `latest`.
- `sha256` is the checksum of the whole fetched image. This argument is required.
- `os`, if specified, is the operating system of the fetched image. By default it's `linux`.
- `arch`, if specified, is the CPU architecture of the fetched image. By default it's `x86_64`.
The `nix-prefetch-docker` command can be used to get the required image parameters:
```ShellSession
$ nix run nixpkgs.nix-prefetch-docker -c nix-prefetch-docker --image-name mysql --image-tag 5
```
Since a given `imageName` may transparently refer to a manifest list of images which support multiple architectures and/or operating systems, you can supply the `--os` and `--arch` arguments to specify exactly which image you want. By default it will match the OS and architecture of the host the command is run on.
```ShellSession
$ nix-prefetch-docker --image-name mysql --image-tag 5 --arch x86_64 --os linux
```
The desired image name and tag can be set using the `--final-image-name` and `--final-image-tag` arguments:
```ShellSession
$ nix-prefetch-docker --image-name mysql --image-tag 5 --final-image-name eu.gcr.io/my-project/mysql --final-image-tag prod
```
## exportImage {#ssec-pkgs-dockerTools-exportImage}
This function is analogous to the `docker export` command, in that it can be used to flatten a Docker image that contains multiple layers. It is in fact the result of the merge of all the layers of the image. As such, the result is suitable for being imported in Docker with `docker import`.
> **_NOTE:_** Using this function requires the `kvm` device to be available.
The parameters of `exportImage` are the following:
```nix
exportImage {
  fromImage = someLayeredImage;
  fromImageName = null;
  fromImageTag = null;

  name = someLayeredImage.name;
}
```
The parameters relative to the base image have the same synopsis as described in [buildImage](#ssec-pkgs-dockerTools-buildImage), except that `fromImage` is the only required argument in this case.
The `name` argument is the name of the derivation output, which defaults to `fromImage.name`.
## shadowSetup {#ssec-pkgs-dockerTools-shadowSetup}
This constant string is a helper for setting up the base files for managing users and groups, only if such files don't exist already. It is suitable for use in a [`buildImage` `runAsRoot`](#ex-dockerTools-buildImage-runAsRoot) script, as in the example below:
```nix
buildImage {
  name = "shadow-basic";

  runAsRoot = ''
    #!${pkgs.runtimeShell}
    ${shadowSetup}
    groupadd -r redis
    useradd -r -g redis redis
    mkdir /data
    chown redis:redis /data
  '';
}
```
Creating base files like `/etc/passwd` or `/etc/login.defs` is necessary for shadow-utils to manipulate users and groups.

@@ -1,499 +0,0 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and manipulating Docker images according to the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120"> Docker Image Specification v1.2.0 </link>. Docker itself is not used to perform any of the operations done by these functions.
</para>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname> with relative example values are described below:
</para>
<example xml:id='ex-dockerTools-buildImage'>
<title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${pkgs.runtimeShell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
}
</programlisting>
</example>
<para>
The above example will build a Docker image <literal>redis/latest</literal> from the given base image. Loading and running this image in Docker results in <literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image. This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image. By default it's <literal>null</literal>, which indicates that the nix output hash will be used as tag.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image. It must be a valid Docker image, such as exported by <command>docker save</command>. By default it's <literal>null</literal>, which can be seen as equivalent to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify the base image within the repository, in case it contains multiple images. By default it's <literal>null</literal>, in which case <varname>buildImage</varname> will peek the first image available in the repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it's <literal>null</literal>, in which case <varname>buildImage</varname> will peek the first tag available for the base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as <command>ADD contents/ /</command> in a <filename>Dockerfile</filename>. By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied <varname>contents</varname> derivation. This can be similarly seen as <command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal> device to be available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the containers that will be started off the built image in Docker. The available options are listed in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions"> Docker Image Specification v1.2.0 </link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure (to which <varname>contents</varname>, <varname>config</varname> and <varname>runAsRoot</varname> contribute) will be copied in the layer itself. Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only one new single layer will be produced and added to the resulting image.
</para>
<para>
The resulting repository will only list the single image <varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/> it would be <varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built using its <varname>buildArgs</varname> attribute.
</para>
<note>
<para>
If you see errors similar to <literal>getProtocolByName: does not exist (no such protocol name: tcp)</literal> you may need to add <literal>pkgs.iana-etc</literal> to <varname>contents</varname>.
</para>
</note>
<note>
<para>
If you see errors similar to <literal>Error_Protocol ("certificate has unknown CA",True,UnknownCa)</literal> you may need to add <literal>pkgs.cacert</literal> to <varname>contents</varname>.
</para>
</note>
<example xml:id="example-pkgs-dockerTools-buildImage-creation-date">
<title>Impurely Defining a Docker Layer's Creation Date</title>
<para>
By default <function>buildImage</function> will use a static date of one second past the UNIX Epoch. This allows <function>buildImage</function> to produce binary reproducible images. When listing images with <command>docker images</command>, the newly created images will be listed like this:
</para>
<screen>
<prompt>$ </prompt>docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest 08c791c7846e 48 years ago 25.2MB
</screen>
<para>
You can break binary reproducibility but have a sorted, meaningful <literal>CREATED</literal> column by setting <literal>created</literal> to <literal>now</literal>.
</para>
<programlisting><![CDATA[
pkgs.dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
contents = pkgs.hello;
config.Cmd = [ "/bin/hello" ];
}
]]></programlisting>
<para>
and now the Docker CLI will display a reasonable date and sort the images as expected:
<screen>
<prompt>$ </prompt>docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest de2bf4786de6 About a minute ago 25.2MB
</screen>
however, the produced images will not be binary reproducible.
</para>
</example>
</section>
<section xml:id="ssec-pkgs-dockerTools-buildLayeredImage">
<title>buildLayeredImage</title>
<para>
Create a Docker image with many of the store paths being on their own layer to improve sharing between images. The image is realized into the Nix store as a gzipped tarball. Depending on the intended usage, many users might prefer to use <function>streamLayeredImage</function> instead, which this function uses internally.
</para>
<variablelist>
<varlistentry>
<term>
<varname>name</varname>
</term>
<listitem>
<para>
The name of the resulting image.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>tag</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Tag of the generated image.
</para>
<para>
<emphasis>Default:</emphasis> the output path's hash
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>contents</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Top level paths in the container. Either a single derivation, or a list of derivations.
</para>
<para>
<emphasis>Default:</emphasis> <literal>[]</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>config</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Run-time configuration of the container. A full list of the options is available in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions"> Docker Image Specification v1.2.0 </link>.
</para>
<para>
<emphasis>Default:</emphasis> <literal>{}</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>created</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Date and time the layers were created. Follows the same <literal>now</literal> exception supported by <literal>buildImage</literal>.
</para>
<para>
<emphasis>Default:</emphasis> <literal>1970-01-01T00:00:01Z</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>maxLayers</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Maximum number of layers to create.
</para>
<para>
<emphasis>Default:</emphasis> <literal>100</literal>
</para>
<para>
<emphasis>Maximum:</emphasis> <literal>125</literal>
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>extraCommands</varname> <emphasis>optional</emphasis>
</term>
<listitem>
<para>
Shell commands to run while building the final layer, without access to most of the layer contents. Changes to this layer are "on top" of all the other layers, so can create additional directories and files.
</para>
</listitem>
</varlistentry>
</variablelist>
<section xml:id="dockerTools-buildLayeredImage-arg-contents">
<title>Behavior of <varname>contents</varname> in the final image</title>
<para>
Each path directly listed in <varname>contents</varname> will have a symlink in the root of the image.
</para>
<para>
For example:
<programlisting><![CDATA[
pkgs.dockerTools.buildLayeredImage {
name = "hello";
contents = [ pkgs.hello ];
}
]]></programlisting>
will create symlinks for all the paths in the <literal>hello</literal> package:
<screen><![CDATA[
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo
]]></screen>
</para>
</section>
<section xml:id="dockerTools-buildLayeredImage-arg-config">
<title>Automatic inclusion of <varname>config</varname> references</title>
<para>
The closure of <varname>config</varname> is automatically included in the closure of the final image.
</para>
<para>
This allows you to make very simple Docker images with very little code. This container will start up and run <command>hello</command>:
<programlisting><![CDATA[
pkgs.dockerTools.buildLayeredImage {
name = "hello";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
]]></programlisting>
</para>
</section>
<section xml:id="dockerTools-buildLayeredImage-arg-maxLayers">
<title>Adjusting <varname>maxLayers</varname></title>
<para>
Increasing the <varname>maxLayers</varname> increases the number of layers which have a chance to be shared between different images.
</para>
<para>
Modern Docker installations support up to 128 layers; however, older versions support as few as 42.
</para>
<para>
If the produced image will not be extended by other Docker builds, it is safe to set <varname>maxLayers</varname> to <literal>128</literal>. However it will be impossible to extend the image further.
</para>
<para>
The first (<literal>maxLayers-2</literal>) most "popular" paths will have their own individual layers, then layer #<literal>maxLayers-1</literal> will contain all the remaining "unpopular" paths, and finally layer #<literal>maxLayers</literal> will contain the Image configuration.
</para>
<para>
Docker's layers are not inherently ordered; they are content-addressable and are not explicitly layered until they are composed into an Image.
</para>
</section>
</section>
<section xml:id="ssec-pkgs-dockerTools-streamLayeredImage">
<title>streamLayeredImage</title>
<para>
Builds a script which, when run, will stream an uncompressed tarball of a Docker image to stdout. The arguments to this function are as for <function>buildLayeredImage</function>. This method of constructing an image does not realize the image into the Nix store, so it saves on IO and disk/cache space, particularly with large images.
</para>
<para>
The image produced by running the output script can be piped directly into <command>docker load</command>, to load it into the local docker daemon:
<screen><![CDATA[
$(nix-build) | docker load
]]></screen>
</para>
<para>
Alternatively, the image can be piped via <command>gzip</command> into <command>skopeo</command>, e.g. to copy it into a registry:
<screen><![CDATA[
$(nix-build) | gzip --fast | skopeo copy docker-archive:/dev/stdin docker://some_docker_registry/myimage:tag
]]></screen>
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command, in that it can be used to pull a Docker image from a Docker registry. By default <link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'>
<title>Docker pull</title>
<programlisting>
pullImage {
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' />
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' />
finalImageName = "nix"; <co xml:id='ex-dockerTools-pullImage-3' />
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-4' />
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-5' />
os = "linux"; <co xml:id='ex-dockerTools-pullImage-6' />
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-7' />
}
</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded, which can also include the registry namespace (e.g. <literal>nixos</literal>). This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageDigest</varname> specifies the digest of the image to be downloaded. This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>finalImageName</varname>, if specified, is the name of the image to be created. Note that it is never used to fetch the image, since we prefer to rely on the immutable digest ID. By default it's equal to <varname>imageName</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>finalImageTag</varname>, if specified, is the tag of the image to be created. Note that it is never used to fetch the image, since we prefer to rely on the immutable digest ID. By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image. This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-6'>
<para>
<varname>os</varname>, if specified, is the operating system of the fetched image. By default it's <literal>linux</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-7'>
<para>
<varname>arch</varname>, if specified, is the cpu architecture of the fetched image. By default it's <literal>x86_64</literal>.
</para>
</callout>
</calloutlist>
<para>
The <literal>nix-prefetch-docker</literal> command can be used to get the required image parameters:
<screen>
<prompt>$ </prompt>nix run nixpkgs.nix-prefetch-docker -c nix-prefetch-docker --image-name mysql --image-tag 5
</screen>
Since a given <varname>imageName</varname> may transparently refer to a manifest list of images which support multiple architectures and/or operating systems, you can supply the <option>--os</option> and <option>--arch</option> arguments to specify exactly which image you want. By default it will match the OS and architecture of the host the command is run on.
<screen>
<prompt>$ </prompt>nix-prefetch-docker --image-name mysql --image-tag 5 --arch x86_64 --os linux
</screen>
Desired image name and tag can be set using <option>--final-image-name</option> and <option>--final-image-tag</option> arguments:
<screen>
<prompt>$ </prompt>nix-prefetch-docker --image-name mysql --image-tag 5 --final-image-name eu.gcr.io/my-project/mysql --final-image-tag prod
</screen>
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command, in that it can be used to flatten a Docker image that contains multiple layers. It is in fact the result of the merge of all the layers of the image. As such, the result is suitable for being imported in Docker with <command>docker import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal> device to be available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'>
<title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relative to the base image have the same synopsis as described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that <varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output, which defaults to <varname>fromImage.name</varname>.
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing users and groups, only if such files don't exist already. It is suitable for being used in a <varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'>
<title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${pkgs.runtimeShell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or <literal>/etc/login.defs</literal> is necessary for shadow-utils to manipulate users and groups.
</para>
</section>
</section>

@@ -0,0 +1,64 @@
# Eclipse {#sec-eclipse}
The Nix expressions related to the Eclipse platform and IDE are in [`pkgs/applications/editors/eclipse`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse).
Nixpkgs provides a number of packages that will install Eclipse in its various forms. These range from the bare-bones Eclipse Platform to the more fully featured Eclipse SDK or Scala-IDE packages, and multiple versions are often available. It is possible to list the available Eclipse packages by issuing the command:
```ShellSession
$ nix-env -f '<nixpkgs>' -qaP -A eclipses --description
```
Once an Eclipse variant is installed, it can be run using the `eclipse` command, as expected. From within Eclipse it is then possible to install plugins in the usual manner, by either manually specifying an Eclipse update site or by installing the Marketplace Client plugin and using it to discover and install other plugins. This installation method provides an Eclipse installation that closely resembles a manually installed Eclipse.
If you prefer to install plugins in a more declarative manner, then Nixpkgs also offers a number of Eclipse plugins that can be installed in an _Eclipse environment_. This type of environment is created using the function `eclipseWithPlugins` found inside the `nixpkgs.eclipses` attribute set. This function takes as argument `{ eclipse, plugins ? [], jvmArgs ? [] }` where `eclipse` is one of the Eclipse packages described above, `plugins` is a list of plugin derivations, and `jvmArgs` is a list of arguments given to the JVM running Eclipse. For example, say you wish to install the latest Eclipse Platform with the popular Eclipse Color Theme plugin and also allow Eclipse to use more RAM. You could then add
```nix
packageOverrides = pkgs: {
  myEclipse = with pkgs.eclipses; eclipseWithPlugins {
    eclipse = eclipse-platform;
    jvmArgs = [ "-Xmx2048m" ];
    plugins = [ plugins.color-theme ];
  };
}
```
to your Nixpkgs configuration (`~/.config/nixpkgs/config.nix`) and install it by running `nix-env -f '<nixpkgs>' -iA myEclipse` and afterward run Eclipse as usual. It is possible to find out which plugins are available for installation using `eclipseWithPlugins` by running
```ShellSession
$ nix-env -f '<nixpkgs>' -qaP -A eclipses.plugins --description
```
If there is a need to install plugins that are not available in Nixpkgs then it may be possible to define these plugins outside Nixpkgs using the `buildEclipseUpdateSite` and `buildEclipsePlugin` functions found in the `nixpkgs.eclipses.plugins` attribute set. Use the `buildEclipseUpdateSite` function to install a plugin distributed as an Eclipse update site. This function takes `{ name, src }` as argument where `src` indicates the Eclipse update site archive. All Eclipse features and plugins within the downloaded update site will be installed. When an update site archive is not available then the `buildEclipsePlugin` function can be used to install a plugin that consists of a pair of feature and plugin JARs. This function takes an argument `{ name, srcFeature, srcPlugin }` where `srcFeature` and `srcPlugin` are the feature and plugin JARs, respectively.
Expanding the previous example with two plugins using the above functions, we have
```nix
packageOverrides = pkgs: {
  myEclipse = with pkgs.eclipses; eclipseWithPlugins {
    eclipse = eclipse-platform;
    jvmArgs = [ "-Xmx2048m" ];
    plugins = [
      plugins.color-theme
      (plugins.buildEclipsePlugin {
        name = "myplugin1-1.0";
        srcFeature = fetchurl {
          url = "http://…/features/myplugin1.jar";
          sha256 = "123…";
        };
        srcPlugin = fetchurl {
          url = "http://…/plugins/myplugin1.jar";
          sha256 = "123…";
        };
      })
      (plugins.buildEclipseUpdateSite {
        name = "myplugin2-1.0";
        src = fetchurl {
          stripRoot = false;
          url = "http://…/myplugin2.zip";
          sha256 = "123…";
        };
      })
    ];
  };
}
```

@@ -1,72 +0,0 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-eclipse">
<title>Eclipse</title>
<para>
The Nix expressions related to the Eclipse platform and IDE are in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse"><filename>pkgs/applications/editors/eclipse</filename></link>.
</para>
<para>
Nixpkgs provides a number of packages that will install Eclipse in its various forms. These range from the bare-bones Eclipse Platform to the more fully featured Eclipse SDK or Scala-IDE packages, and multiple versions are often available. It is possible to list available Eclipse packages by issuing the command:
<screen>
<prompt>$ </prompt>nix-env -f '&lt;nixpkgs&gt;' -qaP -A eclipses --description
</screen>
Once an Eclipse variant is installed it can be run using the <command>eclipse</command> command, as expected. From within Eclipse it is then possible to install plugins in the usual manner by either manually specifying an Eclipse update site or by installing the Marketplace Client plugin and using it to discover and install other plugins. This installation method provides an Eclipse installation that closely resembles a manually installed Eclipse.
</para>
<para>
If you prefer to install plugins in a more declarative manner then Nixpkgs also offers a number of Eclipse plugins that can be installed in an <emphasis>Eclipse environment</emphasis>. This type of environment is created using the function <varname>eclipseWithPlugins</varname> found inside the <varname>nixpkgs.eclipses</varname> attribute set. This function takes as argument <literal>{ eclipse, plugins ? [], jvmArgs ? [] }</literal> where <varname>eclipse</varname> is one of the Eclipse packages described above, <varname>plugins</varname> is a list of plugin derivations, and <varname>jvmArgs</varname> is a list of arguments given to the JVM running Eclipse. For example, say you wish to install the latest Eclipse Platform with the popular Eclipse Color Theme plugin and also allow Eclipse to use more RAM. You could then add
<screen>
packageOverrides = pkgs: {
myEclipse = with pkgs.eclipses; eclipseWithPlugins {
eclipse = eclipse-platform;
jvmArgs = [ "-Xmx2048m" ];
plugins = [ plugins.color-theme ];
};
}
</screen>
to your Nixpkgs configuration (<filename>~/.config/nixpkgs/config.nix</filename>) and install it by running <command>nix-env -f '&lt;nixpkgs&gt;' -iA myEclipse</command> and afterward run Eclipse as usual. It is possible to find out which plugins are available for installation using <varname>eclipseWithPlugins</varname> by running
<screen>
<prompt>$ </prompt>nix-env -f '&lt;nixpkgs&gt;' -qaP -A eclipses.plugins --description
</screen>
</para>
<para>
If there is a need to install plugins that are not available in Nixpkgs then it may be possible to define these plugins outside Nixpkgs using the <varname>buildEclipseUpdateSite</varname> and <varname>buildEclipsePlugin</varname> functions found in the <varname>nixpkgs.eclipses.plugins</varname> attribute set. Use the <varname>buildEclipseUpdateSite</varname> function to install a plugin distributed as an Eclipse update site. This function takes <literal>{ name, src }</literal> as argument where <literal>src</literal> indicates the Eclipse update site archive. All Eclipse features and plugins within the downloaded update site will be installed. When an update site archive is not available then the <varname>buildEclipsePlugin</varname> function can be used to install a plugin that consists of a pair of feature and plugin JARs. This function takes an argument <literal>{ name, srcFeature, srcPlugin }</literal> where <literal>srcFeature</literal> and <literal>srcPlugin</literal> are the feature and plugin JARs, respectively.
</para>
<para>
Expanding the previous example with two plugins using the above functions we have
<screen>
packageOverrides = pkgs: {
  myEclipse = with pkgs.eclipses; eclipseWithPlugins {
    eclipse = eclipse-platform;
    jvmArgs = [ "-Xmx2048m" ];
    plugins = [
      plugins.color-theme
      (plugins.buildEclipsePlugin {
        name = "myplugin1-1.0";
        srcFeature = fetchurl {
          url = "http://…/features/myplugin1.jar";
          sha256 = "123…";
        };
        srcPlugin = fetchurl {
          url = "http://…/plugins/myplugin1.jar";
          sha256 = "123…";
        };
      })
      (plugins.buildEclipseUpdateSite {
        name = "myplugin2-1.0";
        src = fetchzip {
          stripRoot = false;
          url = "http://…/myplugin2.zip";
          sha256 = "123…";
        };
      })
    ];
  };
}
</screen>
</para>
</section>

@ -7,7 +7,7 @@
</para>
<xi:include href="citrix.xml" />
<xi:include href="dlib.xml" />
<xi:include href="eclipse.xml" />
<xi:include href="eclipse.section.xml" />
<xi:include href="elm.section.xml" />
<xi:include href="emacs.section.xml" />
<xi:include href="firefox.section.xml" />

@ -174,10 +174,13 @@ digraph {
"staging-next" -> master [color="#E85EB0"] [label="stabilization ends"] [fontcolor="#E85EB0"]
"staging" -> "staging-next" [color="#E85EB0"] [label="stabilization starts"] [fontcolor="#E85EB0"]
master -> "staging-next" -> staging [color="#5F5EE8"] [label="every six hours/any time"] [fontcolor="#5F5EE8"]
master -> "staging-next" -> staging [color="#5F5EE8"] [label="every six hours (GitHub Action)"] [fontcolor="#5F5EE8"]
}
```
[This GitHub Action](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/merge-staging.yml) brings changes from `master` to `staging-next` and from `staging-next` to `staging` every 6 hours.
### Master branch {#submitting-changes-master-branch}
The `master` branch is the main development branch. It should only see non-breaking commits that do not cause mass rebuilds.

@ -611,7 +611,7 @@ Using the example above, the analogous pytestCheckHook usage would be:
"update"
];
disabledTestFiles = [
disabledTestPaths = [
"tests/test_failing.py"
];
```
@ -1554,9 +1554,9 @@ Following rules are desired to be respected:
* Python libraries are called from `python-packages.nix` and packaged with
`buildPythonPackage`. The expression of a library should be in
`pkgs/development/python-modules/<name>/default.nix`. Libraries in
`pkgs/top-level/python-packages.nix` are sorted quasi-alphabetically to avoid
merge conflicts.
`pkgs/development/python-modules/<name>/default.nix`.
* Libraries in `pkgs/top-level/python-packages.nix` are sorted
alphanumerically to avoid merge conflicts and ease locating attributes.
* Python applications live outside of `python-packages.nix` and are packaged
with `buildPythonApplication`.
* Make sure libraries build for all Python interpreters.
@ -1570,3 +1570,4 @@ Following rules are desired to be respected:
[PEP 0503](https://www.python.org/dev/peps/pep-0503/#normalized-names). This
means that characters should be converted to lowercase and `.` and `_` should
be replaced by a single `-` (foo-bar-baz instead of Foo__Bar.baz )
* Attribute names in `python-packages.nix` should be sorted alphanumerically.

@ -1,77 +1,88 @@
# Qt {#sec-language-qt}
This section describes the differences between Nix expressions for Qt libraries and applications and Nix expressions for other C++ software. Some knowledge of the latter is assumed.
Writing Nix expressions for Qt libraries and applications is largely similar to writing them for other C++ software.
This section assumes some knowledge of the latter.
There are two problems that the Nixpkgs Qt infrastructure addresses,
which are not shared by other C++ software:
There are primarily two problems which the Qt infrastructure is designed to address: ensuring consistent versioning of all dependencies and finding dependencies at runtime.
1. There are usually multiple supported versions of Qt in Nixpkgs.
All of a package's dependencies must be built with the same version of Qt.
This is similar to the version constraints imposed on interpreted languages like Python.
2. Qt makes extensive use of runtime dependency detection.
Runtime dependencies are made into build dependencies through wrappers.
## Nix expression for a Qt package (default.nix) {#qt-default-nix}
```{=docbook}
<programlisting>
{ mkDerivation, qtbase }: <co xml:id='qt-default-nix-co-1' />
{ stdenv, lib, qtbase, wrapQtAppsHook }: <co xml:id='qt-default-nix-co-1' />
mkDerivation { <co xml:id='qt-default-nix-co-2' />
stdenv.mkDerivation {
pname = "myapp";
version = "1.0";
buildInputs = [ qtbase ]; <co xml:id='qt-default-nix-co-3' />
buildInputs = [ qtbase ];
nativeBuildInputs = [ wrapQtAppsHook ]; <co xml:id='qt-default-nix-co-2' />
}
</programlisting>
<calloutlist>
<callout arearefs='qt-default-nix-co-1'>
<para>
Import <literal>mkDerivation</literal> and Qt (such as <literal>qtbase</literal> modules directly. <emphasis>Do not</emphasis> import Qt package sets; the Qt versions of dependencies may not be coherent, causing build and runtime failures.
Import Qt modules directly, that is: <literal>qtbase</literal>, <literal>qtdeclarative</literal>, etc.
<emphasis>Do not</emphasis> import Qt package sets such as <literal>qt5</literal>
because the Qt versions of dependencies may not be coherent, causing build and runtime failures.
</para>
</callout>
<callout arearefs='qt-default-nix-co-2'>
<para>
Use <literal>mkDerivation</literal> instead of <literal>stdenv.mkDerivation</literal>. <literal>mkDerivation</literal> is a wrapper around <literal>stdenv.mkDerivation</literal> which applies some Qt-specific settings. This deriver accepts the same arguments as <literal>stdenv.mkDerivation</literal>; refer to <xref linkend='chap-stdenv' /> for details.
</para>
<para>
To use another deriver instead of <literal>stdenv.mkDerivation</literal>, use <literal>mkDerivationWith</literal>:
<programlisting>
mkDerivationWith myDeriver {
# ...
}
</programlisting>
If you cannot use <literal>mkDerivationWith</literal>, please refer to <xref linkend='qt-runtime-dependencies' />.
</para>
</callout>
<callout arearefs='qt-default-nix-co-3'>
<para>
<literal>mkDerivation</literal> accepts the same arguments as <literal>stdenv.mkDerivation</literal>, such as <literal>buildInputs</literal>.
</para>
<para>
All Qt packages must include <literal>wrapQtAppsHook</literal> in
<literal>nativeBuildInputs</literal>, or you must explicitly set
<literal>dontWrapQtApps</literal>.
</para>
</callout>
</calloutlist>
```
## Locating runtime dependencies {#qt-runtime-dependencies}
Qt applications need to be wrapped to find runtime dependencies. If you cannot use `mkDerivation` or `mkDerivationWith` above, include `wrapQtAppsHook` in `nativeBuildInputs`:
Qt applications must be wrapped to find runtime dependencies.
Include `wrapQtAppsHook` in `nativeBuildInputs`:
```nix
{ stdenv, wrapQtAppsHook }:
stdenv.mkDerivation {
  # ...
  nativeBuildInputs = [ wrapQtAppsHook ];
}
```
Entries added to `qtWrapperArgs` are used to modify the wrappers created by `wrapQtAppsHook`. The entries are passed as arguments to [wrapProgram executable makeWrapperArgs](#fun-wrapProgram).
Add entries to `qtWrapperArgs` to modify the wrappers created by
`wrapQtAppsHook`:
```nix
mkDerivation {
# ...
{ stdenv, wrapQtAppsHook }:
stdenv.mkDerivation {
# ...
nativeBuildInputs = [ wrapQtAppsHook ];
qtWrapperArgs = [ ''--prefix PATH : /path/to/bin'' ];
}
```
Set `dontWrapQtApps` to stop applications from being wrapped automatically. It is required to wrap applications manually with `wrapQtApp`, using the syntax of [wrapProgram executable makeWrapperArgs](#fun-wrapProgram):
The entries are passed as arguments to [wrapProgram](#fun-wrapProgram).
Set `dontWrapQtApps` to stop applications from being wrapped automatically.
Wrap programs manually with `wrapQtApp`, using the syntax of
[wrapProgram](#fun-wrapProgram):
```nix
mkDerivation {
# ...
{ stdenv, lib, wrapQtAppsHook }:
stdenv.mkDerivation {
# ...
nativeBuildInputs = [ wrapQtAppsHook ];
dontWrapQtApps = true;
preFixup = ''
wrapQtApp "$out/bin/myapp" --prefix PATH : /path/to/bin
@ -79,21 +90,16 @@ mkDerivation {
}
```
> Note: `wrapQtAppsHook` ignores files that are non-ELF executables. This means that scripts won't be automatically wrapped so you'll need to manually wrap them as previously mentioned. An example of when you'd always need to do this is with Python applications that use PyQT.
::: note
`wrapQtAppsHook` ignores files that are non-ELF executables.
This means that scripts won't be automatically wrapped, so you'll need to wrap them manually as previously mentioned.
An example of when you'd always need to do this is with Python applications that use PyQt.
:::
Libraries are built with every available version of Qt. Use the `meta.broken` attribute to disable the package for unsupported Qt versions:
```nix
mkDerivation {
# ...
# Disable this library with Qt < 5.9.0
meta.broken = builtins.compareVersions qtbase.version "5.9.0" < 0;
}
```
## Adding a library to Nixpkgs
Qt libraries are added to `qt5-packages.nix` and are made available for every Qt
version supported.
Add Qt libraries to `qt5-packages.nix` to make them available for every
supported Qt version.
### Example adding a Qt library {#qt-library-all-packages-nix}
The following represents the contents of `qt5-packages.nix`.
@ -106,9 +112,23 @@ The following represents the contents of `qt5-packages.nix`.
# ...
}
```
Libraries are built with every available version of Qt.
Use the `meta.broken` attribute to disable the package for unsupported Qt versions:
```nix
{ stdenv, lib, qtbase }:
stdenv.mkDerivation {
  # ...
  # Disable this library with Qt < 5.9.0
  meta.broken = lib.versionOlder qtbase.version "5.9.0";
}
```
## Adding an application to Nixpkgs
Applications that use Qt are also added to `qt5-packages.nix`. An alias is added
in the top-level `all-packages.nix` pointing to the package with the desired Qt5 version.
Add Qt applications to `qt5-packages.nix`. Add an alias to `all-packages.nix`
to select the Qt 5 version used for the application.
### Example adding a Qt application {#qt-application-all-packages-nix}
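A sketch of what this can look like, assuming a hypothetical `myapp` package (paths and names are illustrative). In `qt5-packages.nix`:
```nix
{
  # ...
  myapp = callPackage ../applications/misc/myapp { };
  # ...
}
```
And the corresponding alias in `all-packages.nix`, selecting the Qt 5 version:
```nix
{
  # ...
  myapp = libsForQt5.myapp;
  # ...
}
```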

@ -80,6 +80,33 @@ The fetcher will verify that the `Cargo.lock` file is in sync with the `src`
attribute, and fail the build if not. It will also compress the vendor
directory into a tar.gz archive.
The tarball with vendored dependencies contains a directory with the
package's `name`, which is normally composed of `pname` and
`version`. This means that the vendored dependencies hash
(`cargoSha256`/`cargoHash`) is dependent on the package name and
version. The `cargoDepsName` attribute can be used to choose another name
for the directory of vendored dependencies. For example, the hash can
be made invariant to the version by setting `cargoDepsName` to
`pname`:
```nix
rustPlatform.buildRustPackage rec {
pname = "broot";
version = "1.2.0";
src = fetchCrate {
inherit pname version;
sha256 = "1mqaynrqaas82f5957lx31x80v74zwmwmjxxlbywajb61vh00d38";
};
cargoHash = "sha256-JmBZcDVYJaK1cK05cxx5BrnGWp4t8ca6FLUbvIot67s=";
cargoDepsName = pname;
# ...
}
```
### Cross compilation
By default, Rust packages are compiled for the host platform, just like any
@ -196,7 +223,7 @@ sometimes it may be necessary to disable this so the tests run consecutively.
```nix
rustPlatform.buildRustPackage {
/* ... */
cargoParallelTestThreads = false;
dontUseCargoParallelTests = true;
}
```
@ -237,6 +264,198 @@ rustPlatform.buildRustPackage rec {
}
```
## Compiling non-Rust packages that include Rust code
Several non-Rust packages incorporate Rust code for performance- or
security-sensitive parts. `rustPlatform` exposes several functions and
hooks that can be used to integrate Cargo in non-Rust packages.
### Vendoring of dependencies
Since network access is not allowed in sandboxed builds, Rust crate
dependencies need to be retrieved using a fetcher. `rustPlatform`
provides the `fetchCargoTarball` fetcher, which vendors all
dependencies of a crate. For example, given a source path `src`
containing `Cargo.toml` and `Cargo.lock`, `fetchCargoTarball`
can be used as follows:
```nix
cargoDeps = rustPlatform.fetchCargoTarball {
  inherit src;
  hash = "sha256-BoHIN/519Top1NUBjpB/oEMqi86Omt3zTQcXFWqrek0=";
};
```
The `src` attribute is required, as well as a hash specified through
one of the `sha256` or `hash` attributes. The following optional
attributes can also be used (a combined sketch follows the list):
* `name`: the name that is used for the dependencies tarball. If
`name` is not specified, then the name `cargo-deps` will be used.
* `sourceRoot`: when the `Cargo.lock`/`Cargo.toml` are in a
subdirectory, `sourceRoot` specifies the relative path to these
files.
* `patches`: patches to apply before vendoring. This is useful when
the `Cargo.lock`/`Cargo.toml` files need to be patched before
vendoring.
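For instance, a sketch combining these attributes (the `myproject` name, subdirectory, and patch file are hypothetical; `lib.fakeSha256` stands in for the real hash):
```nix
cargoDeps = rustPlatform.fetchCargoTarball {
  inherit src;
  # Version-independent name for the dependencies tarball.
  name = "myproject-cargo-deps";
  # Cargo.toml/Cargo.lock live in the rust/ subdirectory of the unpacked source.
  sourceRoot = "myproject-1.0.0/rust";
  # Patch applied before vendoring (hypothetical patch file).
  patches = [ ./update-cargo-lock.patch ];
  sha256 = lib.fakeSha256; # replace with the real hash
};
```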
### Hooks
`rustPlatform` provides the following hooks to automate Cargo builds (a combined sketch follows the list):
* `cargoSetupHook`: configure Cargo to use dependencies vendored
through `fetchCargoTarball`. This hook uses the `cargoDeps`
environment variable to find the vendored dependencies. If a project
already vendors its dependencies, the variable `cargoVendorDir` can
be used instead. When the `Cargo.toml`/`Cargo.lock` files are not in
`sourceRoot`, then the optional `cargoRoot` is used to specify the
Cargo root directory relative to `sourceRoot`.
* `cargoBuildHook`: use Cargo to build a crate. If the crate to be
built is part of, e.g., a Cargo workspace, the relative path to the
crate to build can be set through the optional `buildAndTestSubdir`
environment variable. Additional Cargo build flags can be passed
through `cargoBuildFlags`.
* `maturinBuildHook`: use [Maturin](https://github.com/PyO3/maturin)
to build a Python wheel. Similar to `cargoBuildHook`, the optional
variable `buildAndTestSubdir` can be used to build a crate in a
Cargo workspace. Additional maturin flags can be passed through
`maturinBuildFlags`.
* `cargoCheckHook`: run tests using Cargo. Additional flags can be
passed to Cargo using `checkFlags` and `checkFlagsArray`. By
default, tests are run in parallel. This can be disabled by setting
`dontUseCargoParallelTests`.
* `cargoInstallHook`: install binaries and static/shared libraries
that were built using `cargoBuildHook`.
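Taken together, a minimal sketch of a non-Rust package driving Cargo through these hooks might look as follows (`myproject`, its repository, and the hashes are hypothetical placeholders):
```nix
{ lib, stdenv, fetchFromGitHub, rustPlatform }:

stdenv.mkDerivation rec {
  pname = "myproject"; # hypothetical package, for illustration only
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "example";
    repo = pname;
    rev = "v${version}";
    sha256 = lib.fakeSha256; # replace with the real hash
  };

  # Vendored dependencies; cargoSetupHook locates these through cargoDeps.
  cargoDeps = rustPlatform.fetchCargoTarball {
    inherit src;
    name = "${pname}-${version}";
    sha256 = lib.fakeSha256; # replace with the real hash
  };

  nativeBuildInputs = with rustPlatform; [
    cargoSetupHook   # configure Cargo to use the vendored dependencies
    cargoBuildHook   # build the crate with Cargo
    cargoCheckHook   # run the test suite with Cargo
    cargoInstallHook # install the resulting binaries/libraries
    rust.cargo
    rust.rustc
  ];
}
```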
### Examples
#### Python package using `setuptools-rust`
For Python packages using `setuptools-rust`, you can use
`fetchCargoTarball` and `cargoSetupHook` to retrieve and set up Cargo
dependencies. The build itself is then performed by
`buildPythonPackage`.
The following example outlines how the `tokenizers` Python package is
built. Since the Python package is in the `source/bindings/python`
directory of the *tokenizers* project's source archive, we use
`sourceRoot` to point the tooling to this directory:
```nix
{ fetchFromGitHub
, buildPythonPackage
, rustPlatform
, setuptools-rust
}:
buildPythonPackage rec {
pname = "tokenizers";
version = "0.10.0";
src = fetchFromGitHub {
owner = "huggingface";
repo = pname;
rev = "python-v${version}";
hash = "sha256-rQ2hRV52naEf6PvRsWVCTN7B1oXAQGmnpJw4iIdhamw=";
};
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src sourceRoot;
name = "${pname}-${version}";
hash = "sha256-BoHIN/519Top1NUBjpB/oEMqi86Omt3zTQcXFWqrek0=";
};
sourceRoot = "source/bindings/python";
nativeBuildInputs = [ setuptools-rust ] ++ (with rustPlatform; [
cargoSetupHook
rust.cargo
rust.rustc
]);
# ...
}
```
In some projects, the Rust crate is not in the main Python source
directory. In such cases, the `cargoRoot` attribute can be used to
specify the crate's directory relative to `sourceRoot`. In the
following example, the crate is in `src/rust`, as specified in the
`cargoRoot` attribute. Note that we also need to specify the correct
path for `fetchCargoTarball`.
```nix
{ buildPythonPackage
, fetchPypi
, rustPlatform
, setuptools-rust
, openssl
}:
buildPythonPackage rec {
pname = "cryptography";
version = "3.4.2"; # Also update the hash in vectors.nix
src = fetchPypi {
inherit pname version;
sha256 = "1i1mx5y9hkyfi9jrrkcw804hmkcglxi6rmf7vin7jfnbr2bf4q64";
};
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
sourceRoot = "${pname}-${version}/${cargoRoot}";
name = "${pname}-${version}";
hash = "sha256-PS562W4L1NimqDV2H0jl5vYhL08H9est/pbIxSdYVfo=";
};
cargoRoot = "src/rust";
# ...
}
```
#### Python package using `maturin`
Python packages that use [Maturin](https://github.com/PyO3/maturin)
can be built with `fetchCargoTarball`, `cargoSetupHook`, and
`maturinBuildHook`. For example, the following (partial) derivation
builds the `retworkx` Python package. `fetchCargoTarball` and
`cargoSetupHook` are used to fetch and set up the crate dependencies.
`maturinBuildHook` is used to perform the build.
```nix
{ lib
, buildPythonPackage
, rustPlatform
, fetchFromGitHub
}:
buildPythonPackage rec {
pname = "retworkx";
version = "0.6.0";
src = fetchFromGitHub {
owner = "Qiskit";
repo = "retworkx";
rev = version;
sha256 = "11n30ldg3y3y6qxg3hbj837pnbwjkqw3nxq6frds647mmmprrd20";
};
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
name = "${pname}-${version}";
hash = "sha256-heOBK8qi2nuc/Ib+I/vLzZ1fUUD/G/KTw9d7M4Hz5O0=";
};
format = "pyproject";
nativeBuildInputs = with rustPlatform; [ cargoSetupHook maturinBuildHook ];
# ...
}
```
## Compiling Rust crates using Nix instead of Cargo
### Simple operation

@ -19,7 +19,7 @@
<xi:include href="stdenv/meta.xml" />
<xi:include href="stdenv/multiple-output.xml" />
<xi:include href="stdenv/cross-compilation.chapter.xml" />
<xi:include href="stdenv/platform-notes.xml" />
<xi:include href="stdenv/platform-notes.chapter.xml" />
</part>
<part>
<title>Builders</title>

@ -16,7 +16,7 @@ Nixpkgs follows the [conventions of GNU autoconf](https://gcc.gnu.org/onlinedocs
In Nixpkgs, these three platforms are defined as attribute sets under the names `buildPlatform`, `hostPlatform`, and `targetPlatform`. They are always defined as attributes in the standard environment. That means one can access them like:
```nix
{ stdenv, fooDep, barDep, .. }: ...stdenv.buildPlatform...
{ stdenv, fooDep, barDep, ... }: ...stdenv.buildPlatform...
```
`buildPlatform`
@ -99,15 +99,26 @@ Some examples will make this table clearer. Suppose there's some package that is
Some frequently encountered problems when packaging for cross-compilation are answered here. Ideally, the information above is exhaustive, so this section cannot provide any new information, but it is ludicrous and cruel to expect everyone to spend effort working through the interaction of many features just to figure out the same answer to the same common problem. Feel free to add to this list!
#### My package fails to find a binutils command (`cc`/`ar`/`ld` etc.) {#cross-qa-fails-to-find-binutils}
Many packages assume that an unprefixed binutils (`cc`/`ar`/`ld` etc.) is available, but Nix doesn't provide one. It only provides a prefixed one, just as it only does for all the other binutils programs. It may be necessary to patch the package to fix the build system to use a prefix. For instance, instead of `cc`, use `${stdenv.cc.targetPrefix}cc`.
```nix
makeFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ];
```
#### How do I avoid compiling a GCC cross-compiler from source? {#cross-qa-avoid-compiling-gcc-cross-compiler}
On less powerful machines, it can be inconvenient to cross-compile a package only to find out that GCC has to be compiled from source, which could take up to several hours. Nixpkgs maintains a limited [cross-related jobset on Hydra](https://hydra.nixos.org/jobset/nixpkgs/cross-trunk), which tests cross-compilation to various platforms from build platforms "x86\_64-darwin", "x86\_64-linux", and "aarch64-linux". See `pkgs/top-level/release-cross.nix` for the full list of target platforms and packages. For instance, the following invocation fetches the pre-built cross-compiled GCC for `armv6l-unknown-linux-gnueabihf` and builds GNU Hello from source.
```ShellSession
$ nix-build '<nixpkgs>' -A pkgsCross.raspberryPi.hello
```
#### What if my package's build system needs to build a C program to be run under the build environment? {#cross-qa-build-c-program-in-build-environment}
Add the following to your `mkDerivation` invocation.
```nix
depsBuildBuild = [ buildPackages.stdenv.cc ];
```
#### My package fails to find `ar`. {#cross-qa-fails-to-find-ar}
Many packages assume that an unprefixed `ar` is available, but Nix doesn't provide one. It only provides a prefixed one, just as it only does for all the other binutils programs. It may be necessary to patch the package to fix the build system to use a prefixed `ar`.
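Analogous to the `cc` case above, a fix might look like:
```nix
makeFlags = [ "AR=${stdenv.cc.targetPrefix}ar" ];
```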
#### My package's testsuite needs to run host platform code. {#cross-testsuite-runs-host-code}
Add the following to your `mkDerivation` invocation.
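A sketch of the snippet in question, assuming the intent is simply to skip the tests whenever host platform code cannot run on the build platform:
```nix
doCheck = stdenv.hostPlatform == stdenv.buildPlatform;
```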

@ -154,7 +154,7 @@
</term>
<listitem>
<para>
is for development-only files. These include C(++) headers, pkg-config, cmake and aclocal files. They go to <varname>dev</varname> or <varname>out</varname> by default.
is for development-only files. These include C(++) headers (<filename>include/</filename>), pkg-config (<filename>lib/pkgconfig/</filename>), cmake (<filename>lib/cmake/</filename>) and aclocal files (<filename>share/aclocal/</filename>). They go to <varname>dev</varname> or <varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
@ -164,7 +164,7 @@
</term>
<listitem>
<para>
is meant for user-facing binaries, typically residing in bin/. They go to <varname>bin</varname> or <varname>out</varname> by default.
is meant for user-facing binaries, typically residing in <filename>bin/</filename>. They go to <varname>bin</varname> or <varname>out</varname> by default.
</para>
</listitem>
</varlistentry>
@ -194,7 +194,7 @@
</term>
<listitem>
<para>
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and devhelp books in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
is for <emphasis>developer</emphasis> documentation. Currently we count gtk-doc and devhelp books, typically residing in <filename>share/gtk-doc/</filename> and <filename>share/devhelp/</filename>, in there. It goes to <varname>devdoc</varname> or is removed (!) by default. This is because e.g. gtk-doc tends to be rather large and completely unused by nixpkgs users.
</para>
</listitem>
</varlistentry>
@ -204,7 +204,7 @@
</term>
<listitem>
<para>
is for man pages (except for section 3). They go to <varname>man</varname> or <varname>$outputBin</varname> by default.
is for man pages (except for section 3), typically residing in <filename>share/man/man[0-9]/</filename>. They go to <varname>man</varname> or <varname>$outputBin</varname> by default.
</para>
</listitem>
</varlistentry>
@ -214,7 +214,7 @@
</term>
<listitem>
<para>
is for section 3 man pages. They go to <varname>devman</varname> or <varname>$outputMan</varname> by default.
is for section 3 man pages, typically residing in <filename>share/man/man3/</filename>. They go to <varname>devman</varname> or <varname>$outputMan</varname> by default.
</para>
</listitem>
</varlistentry>
@ -224,7 +224,7 @@
</term>
<listitem>
<para>
is for info pages. They go to <varname>info</varname> or <varname>$outputBin</varname> by default.
is for info pages, typically residing in <filename>share/info/</filename>. They go to <varname>info</varname> or <varname>$outputBin</varname> by default.
</para>
</listitem>
</varlistentry>

@ -0,0 +1,62 @@
# Platform Notes {#chap-platform-notes}
## Darwin (macOS) {#sec-darwin}
Some common issues when packaging software for Darwin:
- The Darwin `stdenv` uses clang instead of gcc. When referring to the compiler, `$CC` or `cc` will work in both cases. Some builds hardcode gcc/g++ in their build scripts; that can usually be fixed by using something like `makeFlags = [ "CC=cc" ];` or by patching the build scripts.
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
buildPhase = ''
$CC -o hello hello.c
'';
}
```
- On Darwin, libraries are linked using absolute paths; they are resolved by their `install_name` at link time. Sometimes packages won’t set this correctly, causing the library lookups to fail at runtime. This can be fixed by adding extra linker flags or by running `install_name_tool -id` during the `fixupPhase`.
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
}
```
- Even if the libraries are linked using absolute paths and resolved via their `install_name` correctly, tests can sometimes fail to run binaries. This happens because the `checkPhase` runs before the libraries are installed.
This can usually be solved by running the tests after the `installPhase` or alternatively by using `DYLD_LIBRARY_PATH`. More information about this variable can be found in the *dyld(1)* manpage.
```
dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib
Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq
Reason: image not found
./tests/jqtest: line 5: 75779 Abort trap: 6
```
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
doInstallCheck = true;
installCheckTarget = "check";
}
```
- Some packages assume Xcode is available and use `xcrun` to resolve build tools like `clang`, etc. This causes errors like `xcode-select: error: no developer tools were found at '/Applications/Xcode.app'` even though the build doesn’t actually depend on Xcode.
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
prePatch = ''
substituteInPlace Makefile \
--replace '/usr/bin/xcrun clang' clang
'';
}
```
The package `xcbuild` can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues.

@ -1,83 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-platform-notes">
<title>Platform Notes</title>
<section xml:id="sec-darwin">
<title>Darwin (macOS)</title>
<para>
Some common issues when packaging software for Darwin:
</para>
<itemizedlist>
<listitem>
<para>
The Darwin <literal>stdenv</literal> uses clang instead of gcc. When referring to the compiler <varname>$CC</varname> or <command>cc</command> will work in both cases. Some builds hardcode gcc/g++ in their build scripts, that can usually be fixed with using something like <literal>makeFlags = [ "CC=cc" ];</literal> or by patching the build scripts.
</para>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
buildPhase = ''
$CC -o hello hello.c
'';
}
</programlisting>
</listitem>
<listitem>
<para>
On Darwin, libraries are linked using absolute paths, libraries are resolved by their <literal>install_name</literal> at link time. Sometimes packages won't set this correctly causing the library lookups to fail at runtime. This can be fixed by adding extra linker flags or by running <command>install_name_tool -id</command> during the <function>fixupPhase</function>.
</para>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib";
}
</programlisting>
</listitem>
<listitem>
<para>
Even if the libraries are linked using absolute paths and resolved via their <literal>install_name</literal> correctly, tests can sometimes fail to run binaries. This happens because the <varname>checkPhase</varname> runs before the libraries are installed.
</para>
<para>
This can usually be solved by running the tests after the <varname>installPhase</varname> or alternatively by using <varname>DYLD_LIBRARY_PATH</varname>. More information about this variable can be found in the <citerefentry>
<refentrytitle>dyld</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> manpage.
</para>
<programlisting>
dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib
Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq
Reason: image not found
./tests/jqtest: line 5: 75779 Abort trap: 6
</programlisting>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
doInstallCheck = true;
installCheckTarget = "check";
}
</programlisting>
</listitem>
<listitem>
<para>
Some packages assume xcode is available and use <command>xcrun</command> to resolve build tools like <command>clang</command>, etc. This causes errors like <code>xcode-select: error: no developer tools were found at '/Applications/Xcode.app'</code> while the build doesn't actually depend on xcode.
</para>
<programlisting>
stdenv.mkDerivation {
name = "libfoo-1.2.3";
# ...
prePatch = ''
substituteInPlace Makefile \
--replace '/usr/bin/xcrun clang' clang
'';
}
</programlisting>
<para>
The package <literal>xcbuild</literal> can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues.
</para>
</listitem>
</itemizedlist>
</section>
</chapter>

@ -151,26 +151,26 @@
</listitem>
<listitem>
<para>
It is also possible to whitelist and blacklist licenses that are specifically acceptable or not acceptable, using <literal>whitelistedLicenses</literal> and <literal>blacklistedLicenses</literal>, respectively.
It is also possible to allow and block licenses that are specifically acceptable or not acceptable, using <literal>allowlistedLicenses</literal> and <literal>blocklistedLicenses</literal>, respectively.
</para>
<para>
The following example configuration whitelists the licenses <literal>amd</literal> and <literal>wtfpl</literal>:
The following example configuration allowlists the licenses <literal>amd</literal> and <literal>wtfpl</literal>:
<programlisting>
{
whitelistedLicenses = with lib.licenses; [ amd wtfpl ];
allowlistedLicenses = with lib.licenses; [ amd wtfpl ];
}
</programlisting>
</para>
<para>
The following example configuration blacklists the <literal>gpl3Only</literal> and <literal>agpl3Only</literal> licenses:
The following example configuration blocklists the <literal>gpl3Only</literal> and <literal>agpl3Only</literal> licenses:
<programlisting>
{
blacklistedLicenses = with lib.licenses; [ agpl3Only gpl3Only ];
blocklistedLicenses = with lib.licenses; [ agpl3Only gpl3Only ];
}
</programlisting>
</para>
<para>
Note that <literal>whitelistedLicenses</literal> only applies to unfree licenses unless <literal>allowUnfree</literal> is enabled. It is not a generic whitelist for all types of licenses. <literal>blacklistedLicenses</literal> applies to all licenses.
Note that <literal>allowlistedLicenses</literal> only applies to unfree licenses unless <literal>allowUnfree</literal> is enabled. It is not a generic allowlist for all types of licenses. <literal>blocklistedLicenses</literal> applies to all licenses.
</para>
</listitem>
</itemizedlist>

@ -7,7 +7,7 @@ let
in
lib.mapAttrs (n: v: v // { shortName = n; }) {
lib.mapAttrs (n: v: v // { shortName = n; }) ({
/* License identifiers from spdx.org where possible.
* If you cannot find your license here, then look for a similar license or
* add it to this list. The URL mentioned above is a good source for inspiration.
@ -877,4 +877,4 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
fullName = "GNU Lesser General Public License v3.0";
deprecated = true;
};
}
})

@ -82,6 +82,12 @@
githubId = 882455;
name = "Elliot Cameron";
};
_414owen = {
email = "owen@owen.cafe";
github = "414owen";
githubId = 1714287;
name = "Owen Shepherd";
};
_6AA4FD = {
email = "f6442954@gmail.com";
github = "6AA4FD";
@ -267,7 +273,7 @@
name = "James Alexander Feldman-Crough";
};
aforemny = {
email = "alexanderforemny@googlemail.com";
email = "aforemny@posteo.de";
github = "aforemny";
githubId = 610962;
name = "Alexander Foremny";
@ -768,6 +774,12 @@
githubId = 3965744;
name = "Arthur Lee";
};
arthurteisseire = {
email = "arthurteisseire33@gmail.com";
github = "arthurteisseire";
githubId = 37193992;
name = "Arthur Teisseire";
};
arturcygan = {
email = "arczicygan@gmail.com";
github = "arcz";
@ -1591,12 +1603,6 @@
githubId = 89596;
name = "Florian Friesdorf";
};
charvp = {
email = "nixpkgs@cvpetegem.be";
github = "charvp";
githubId = 42220376;
name = "Charlotte Van Petegem";
};
chattered = {
email = "me@philscotted.com";
name = "Phil Scott";
@ -1705,6 +1711,18 @@
githubId = 2245737;
name = "Christopher Mark Poole";
};
chuahou = {
email = "human+github@chuahou.dev";
github = "chuahou";
githubId = 12386805;
name = "Chua Hou";
};
chvp = {
email = "nixpkgs@cvpetegem.be";
github = "chvp";
githubId = 42220376;
name = "Charlotte Van Petegem";
};
ciil = {
email = "simon@lackerbauer.com";
github = "ciil";
@ -2097,6 +2115,12 @@
email = "christoph.senjak@googlemail.com";
name = "Christoph-Simon Senjak";
};
davhau = {
email = "d.hauer.it@gmail.com";
name = "David Hauer";
github = "DavHau";
githubId = 42246742;
};
david-sawatzke = {
email = "d-nix@sawatzke.dev";
github = "david-sawatzke";
@ -2399,6 +2423,16 @@
githubId = 6806011;
name = "Robert Schütz";
};
dottedmag = {
email = "dottedmag@dottedmag.net";
github = "dottedmag";
githubId = 16120;
name = "Misha Gusarov";
keys = [{
longkeyid = "rsa4096/0x9D20F6503E338888";
fingerprint = "A8DF 1326 9E5D 9A38 E57C FAC2 9D20 F650 3E33 8888";
}];
};
doublec = {
email = "chris.double@double.co.nz";
github = "doublec";
@ -3043,6 +3077,12 @@
githubId = 1276854;
name = "Florian Peter";
};
fbrs = {
email = "yuuki@protonmail.com";
github = "cideM";
githubId = 4246921;
name = "Florian Beeres";
};
fdns = {
email = "fdns02@gmail.com";
github = "fdns";
@ -3279,6 +3319,12 @@
githubId = 10528737;
name = "Severin Fürbringer";
};
fufexan = {
email = "fufexan@protonmail.com";
github = "fufexan";
githubId = 36706276;
name = "Fufezan Mihai";
};
funfunctor = {
email = "eocallaghan@alterapraxis.com";
name = "Edward O'Callaghan";
@ -3499,6 +3545,12 @@
githubId = 6893840;
name = "Yacine Hmito";
};
graham33 = {
email = "graham@grahambennett.org";
github = "graham33";
githubId = 10908649;
name = "Graham Bennett";
};
grahamc = {
email = "graham@grahamc.com";
github = "grahamc";
@ -3581,6 +3633,12 @@
githubId = 443978;
name = "Gabriel Volpe";
};
gytis-ivaskevicius = {
name = "Gytis Ivaskevicius";
email = "me@gytis.io";
github = "gytis-ivaskevicius";
githubId = 23264966;
};
hakuch = {
email = "hakuch@gmail.com";
github = "hakuch";
@ -5848,6 +5906,12 @@
githubId = 35892750;
name = "Maxine Aubrey";
};
maxhbr = {
email = "nixos@maxhbr.dev";
github = "maxhbr";
githubId = 1187050;
name = "Maximilian Huber";
};
maxxk = {
email = "maxim.krivchikov@gmail.com";
github = "maxxk";
@ -5860,6 +5924,12 @@
githubId = 22836301;
name = "Mateusz Mazur";
};
mbaeten = {
email = "mbaeten@users.noreply.github.com";
github = "mbaeten";
githubId = 2649304;
name = "M. Baeten";
};
mbakke = {
email = "mbakke@fastmail.com";
github = "mbakke";
@ -6007,7 +6077,7 @@
name = "Celine Mercier";
};
metadark = {
email = "kira.bruneau@gmail.com";
email = "kira.bruneau@pm.me";
name = "Kira Bruneau";
github = "metadark";
githubId = 382041;
@ -7155,6 +7225,12 @@
githubId = 157610;
name = "Piotr Bogdan";
};
pborzenkov = {
email = "pavel@borzenkov.net";
github = "pborzenkov";
githubId = 434254;
name = "Pavel Borzenkov";
};
pblkt = {
email = "pebblekite@gmail.com";
github = "pblkt";
@ -7371,6 +7447,16 @@
githubId = 103822;
name = "Patrick Mahoney";
};
pmenke = {
email = "nixos@pmenke.de";
github = "pmenke-de";
githubId = 898922;
name = "Philipp Menke";
keys = [{
longkeyid = "rsa4096/0xEB7F2D4CCBE23B69";
fingerprint = "ED54 5EFD 64B6 B5AA EC61 8C16 EB7F 2D4C CBE2 3B69";
}];
};
pmeunier = {
email = "pierre-etienne.meunier@inria.fr";
github = "P-E-Meunier";
@ -7401,6 +7487,16 @@
githubId = 11365056;
name = "Kevin Liu";
};
pnotequalnp = {
email = "kevin@pnotequalnp.com";
github = "pnotequalnp";
githubId = 46154511;
name = "Kevin Mullins";
keys = [{
longkeyid = "rsa4096/361820A45DB41E9A";
fingerprint = "2CD2 B030 BD22 32EF DF5A 008A 3618 20A4 5DB4 1E9A";
}];
};
polyrod = {
email = "dc1mdp@gmail.com";
github = "polyrod";
@ -7985,6 +8081,12 @@
githubId = 3708689;
name = "Roberto Di Remigio";
};
robertoszek = {
email = "robertoszek@robertoszek.xyz";
github = "robertoszek";
githubId = 1080963;
name = "Roberto";
};
robgssp = {
email = "robgssp@gmail.com";
github = "robgssp";
@ -8289,6 +8391,12 @@
githubId = 2320433;
name = "Sam Boosalis";
};
sbruder = {
email = "nixos@sbruder.de";
github = "sbruder";
githubId = 15986681;
name = "Simon Bruder";
};
scalavision = {
email = "scalavision@gmail.com";
github = "scalavision";
@ -8848,7 +8956,7 @@
name = "Guillaume Loetscher";
};
sternenseemann = {
email = "post@lukasepple.de";
email = "sternenseemann@systemli.org";
github = "sternenseemann";
githubId = 3154475;
name = "Lukas Epple";
@ -9294,7 +9402,7 @@
name = "Jan Beinke";
};
thesola10 = {
email = "thesola10@bobile.fr";
email = "me@thesola.io";
github = "thesola10";
githubId = 7287268;
keys = [{
@ -9363,6 +9471,16 @@
githubId = 1391883;
name = "Tom Hall";
};
Thunderbottom = {
email = "chinmaydpai@gmail.com";
github = "Thunderbottom";
githubId = 11243138;
name = "Chinmay D. Pai";
keys = [{
longkeyid = "rsa4096/0x75507BE256F40CED";
fingerprint = "7F3E EEAA EE66 93CC 8782 042A 7550 7BE2 56F4 0CED";
}];
};
tiagolobocastro = {
email = "tiagolobocastro@gmail.com";
github = "tiagolobocastro";
@ -9948,6 +10066,12 @@
githubId = 7677567;
name = "Victor SENE";
};
vtuan10 = {
email = "mail@tuan-vo.de";
github = "vtuan10";
githubId = 16415673;
name = "Van Tuan Vo";
};
vyorkin = {
email = "vasiliy.yorkin@gmail.com";
github = "vyorkin";
@ -10686,6 +10810,16 @@
github = "pulsation";
githubId = 1838397;
};
zseri = {
name = "zseri";
email = "zseri.devel@ytrizja.de";
github = "zseri";
githubId = 1618343;
keys = [{
longkeyid = "rsa4096/0x229E63AE5644A96D";
fingerprint = "7AFB C595 0D3A 77BD B00F 947B 229E 63AE 5644 A96D";
}];
};
zupo = {
name = "Nejc Zupan";
email = "nejczupan+nix@gmail.com";
@ -10698,4 +10832,16 @@
github = "felixscheinost";
githubId = 31761492;
};
benneti = {
name = "Benedikt Tissot";
email = "benedikt.tissot@googlemail.com";
github = "benneti";
githubId = 11725645;
};
p3psi = {
name = "Elliot Boo";
email = "p3psi.boo@gmail.com";
github = "p3psi-boo";
githubId = 43925055;
};
}

@ -3,7 +3,8 @@
stdenv.mkDerivation {
name = "nixpkgs-lint-1";
buildInputs = [ makeWrapper perl perlPackages.XMLSimple ];
nativeBuildInputs = [ makeWrapper ];
buildInputs = [ perl perlPackages.XMLSimple ];
dontUnpack = true;
buildPhase = "true";

@ -16,9 +16,10 @@
The first line (<literal>{ config, pkgs, ... }:</literal>) denotes that this
is actually a function that takes at least the two arguments
<varname>config</varname> and <varname>pkgs</varname>. (These are explained
later.) The function returns a <emphasis>set</emphasis> of option definitions
(<literal>{ <replaceable>...</replaceable> }</literal>). These definitions
have the form <literal><replaceable>name</replaceable> =
later, in chapter <xref linkend="sec-writing-modules" />.) The function returns
a <emphasis>set</emphasis> of option definitions (<literal>{
<replaceable>...</replaceable> }</literal>). These definitions have the form
<literal><replaceable>name</replaceable> =
<replaceable>value</replaceable></literal>, where
<replaceable>name</replaceable> is the name of an option and
<replaceable>value</replaceable> is its value. For example,

@ -15,5 +15,6 @@
<xi:include href="firewall.xml" />
<xi:include href="wireless.xml" />
<xi:include href="ad-hoc-network-config.xml" />
<xi:include href="renaming-interfaces.xml" />
<!-- TODO: OpenVPN, NAT -->
</chapter>

@ -16,6 +16,6 @@
On images where the installation media also becomes an installation target,
copying over <literal>configuration.nix</literal> should be disabled by
setting <literal>installer.cloneConfig</literal> to <literal>false</literal>.
For example, this is done in <literal>sd-image-aarch64.nix</literal>.
For example, this is done in <literal>sd-image-aarch64-installer.nix</literal>.
</para>
</section>

@ -0,0 +1,67 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-rename-ifs">
<title>Renaming network interfaces</title>
<para>
NixOS uses the udev
<link xlink:href="https://systemd.io/PREDICTABLE_INTERFACE_NAMES/">predictable naming scheme</link>
to assign names to network interfaces. This means that by default
cards are not given the traditional names like
<literal>eth0</literal> or <literal>eth1</literal>, whose order can
change unpredictably across reboots. Instead, relying on physical
locations and firmware information, the scheme produces names like
<literal>ens1</literal>, <literal>enp2s0</literal>, etc.
</para>
<para>
These names are predictable but less memorable and not necessarily
stable: for example installing new hardware or changing firmware
settings can result in a
<link xlink:href="https://github.com/systemd/systemd/issues/3715#issue-165347602">name change</link>.
If this is undesirable, for example if you have a single ethernet
card, you can revert to the traditional scheme by setting
<xref linkend="opt-networking.usePredictableInterfaceNames"/> to
<literal>false</literal>.
</para>
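<para>
For example, a one-line configuration that restores the traditional names:
</para>
<programlisting>
<link linkend="opt-networking.usePredictableInterfaceNames">networking.usePredictableInterfaceNames</link> = false;
</programlisting>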
<section xml:id="sec-custom-ifnames">
<title>Assigning custom names</title>
<para>
In case there are multiple interfaces of the same type, it’s better to
assign custom names based on the device hardware address. For
example, we assign the name <literal>wan</literal> to the interface
with MAC address <literal>52:54:00:12:01:01</literal> using a
systemd link unit:
</para>
<programlisting>
<link linkend="opt-systemd.network.links">systemd.network.links."10-wan"</link> = {
  matchConfig.MACAddress = "52:54:00:12:01:01";
  linkConfig.Name = "wan";
};
</programlisting>
<para>
Note that links are directly read by udev, <emphasis>not networkd</emphasis>,
and will work even if networkd is disabled.
</para>
<para>
Alternatively, we can use a plain old udev rule:
</para>
<programlisting>
<link linkend="opt-services.udev.initrdRules">services.udev.initrdRules</link> = ''
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", \
ATTR{address}=="52:54:00:12:01:01", KERNEL=="eth*", NAME="wan"
'';
</programlisting>
<warning><para>
The rule must be installed in the initrd using
<literal>services.udev.initrdRules</literal>, not the usual
<literal>services.udev.extraRules</literal> option. This is to avoid race
conditions with other programs controlling the interface.
</para></warning>
</section>
</section>

@ -74,7 +74,10 @@ linkend="sec-configuration-syntax"/>, we saw the following structure
<callout arearefs='module-syntax-1'>
<para>
This line makes the current Nix expression a function. The variable
<varname>pkgs</varname> contains Nixpkgs, while <varname>config</varname>
<varname>pkgs</varname> contains Nixpkgs (by default, it takes the
<varname>nixpkgs</varname> entry of <envar>NIX_PATH</envar>, see the <link
xlink:href="https://nixos.org/manual/nix/stable/#sec-common-env">Nix
manual</link> for further details), while <varname>config</varname>
contains the full system configuration. This line can be omitted if there
is no reference to <varname>pkgs</varname> and <varname>config</varname>
inside the module.

@ -83,17 +83,12 @@
VirtualBox settings (Machine / Settings / Shared Folders, then click on the
"Add" icon). Add the following to the
<literal>/etc/nixos/configuration.nix</literal> to auto-mount them. If you do
not add <literal>"nofail"</literal>, the system will not boot properly. The
same goes for disabling <literal>rngd</literal> which is normally used to get
randomness but this does not work in virtual machines.
not add <literal>"nofail"</literal>, the system will not boot properly.
</para>
<programlisting>
{ config, pkgs, ... }:
{
security.rngd.enable = false; // otherwise vm will not boot
...
fileSystems."/virtualboxshare" = {
fsType = "vboxsf";
device = "nameofthesharedfolder";

@ -91,6 +91,27 @@
</para>
<itemizedlist>
<listitem>
<para>
If you are using <option>services.udev.extraRules</option> to assign
custom names to network interfaces, this may stop working due to a change
in the initialisation of dhcpcd and systemd networkd. To avoid this, either
move them to <option>services.udev.initrdRules</option> or see the new
<link linkend="sec-custom-ifnames">Assigning custom names</link> section
of the NixOS manual for an example using networkd links.
</para>
</listitem>
<listitem>
<para>
The <option>security.hideProcessInformation</option> module has been removed.
It was broken since the switch to cgroups-v2.
</para>
</listitem>
<listitem>
<para>
The <literal>systemConfig</literal> kernel parameter is no longer added to boot loader entries. It has been unused since September 2010, but if you do have a system generation from that era, you will now be unable to boot into it.
</para>
</listitem>
<listitem>
<para>
<literal>systemd-journal2gelf</literal> no longer parses JSON; it expects the receiving system to handle it. How to achieve this with Graylog is described in this <link xlink:href="https://github.com/parse-nl/SystemdJournal2Gelf/issues/10">GitHub issue</link>.
@ -494,6 +515,30 @@ self: super:
<varname>services.flashpolicyd</varname> module.
</para>
</listitem>
<listitem>
<para>
The <literal>security.rngd</literal> module has been removed.
It was disabled by default in 20.09 as it was functionally redundant
with krngd in the Linux kernel. It is not necessary for any device that the kernel recognises
as a hardware RNG, as it will automatically run the krngd task to periodically collect random
data from the device and mix it into the kernel's RNG.
</para>
</listitem>
<listitem>
<para>
The default SMTP port for GitLab has been changed to
<literal>25</literal> from its previous default of
<literal>465</literal>. If you depended on this default, you
should now set the <xref linkend="opt-services.gitlab.smtp.port" />
option.
</para>
</listitem>
<listitem>
<para>
The default version of ImageMagick has been updated from 6 to 7.
You can use <package>imagemagick6</package>,
<package>imagemagick6_light</package>, and
<package>imagemagick6Big</package> if you need the older version.
</para>
</listitem>
</itemizedlist>
</section>
@ -528,14 +573,16 @@ self: super:
</listitem>
<listitem>
<para>
The default-version of <literal>nextcloud</literal> is <package>nextcloud20</package>.
The default version of <literal>nextcloud</literal> is <package>nextcloud21</package>.
Please note that it's <emphasis>not</emphasis> possible to upgrade <literal>nextcloud</literal>
across multiple major versions! This means that it's e.g. not possible to upgrade
from <package>nextcloud18</package> to <package>nextcloud20</package> in a single deploy.
from <package>nextcloud18</package> to <package>nextcloud20</package> in a single deploy and
most <literal>20.09</literal> users will have to upgrade to <package>nextcloud20</package>
first.
</para>
<para>
The package can be manually upgraded by setting <xref linkend="opt-services.nextcloud.package" />
to <package>nextcloud20</package>.
to <package>nextcloud21</package>.
</para>
</listitem>
<listitem>
@ -700,6 +747,13 @@ self: super:
terminology has been deprecated and should be replaced with Far/Near in the configuration file.
</para>
</listitem>
<listitem>
<para>
The nix-gc service now accepts <literal>randomizedDelaySec</literal> (default: <literal>0</literal>) and <literal>persistent</literal> (default: <literal>true</literal>) parameters.
By default nix-gc will now run immediately if it would have been triggered at least
once during the time when the timer was inactive.
</para>
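<para>
For example, to delay collection by up to 45 minutes past the scheduled time (an illustrative value):
</para>
<programlisting>
nix.gc.randomizedDelaySec = "45min";
</programlisting>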
</listitem>
</itemizedlist>
</section>
</section>

@ -23,6 +23,6 @@ stdenv.mkDerivation {
# Generate the squashfs image.
mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out \
-keep-as-directory -all-root -b 1048576 -comp ${comp}
-no-hardlinks -keep-as-directory -all-root -b 1048576 -comp ${comp}
'';
}

@ -144,11 +144,16 @@ in
''}
'';
systemd.services.systemd-vconsole-setup =
{
before = optional config.services.xserver.enable "display-manager.service";
after = [ "systemd-udev-settle.service" ];
systemd.services.reload-systemd-vconsole-setup =
{ description = "Reset console on configuration changes";
wantedBy = [ "multi-user.target" ];
restartTriggers = [ vconsoleConf consoleEnv ];
reloadIfChanged = true;
serviceConfig =
{ RemainAfterExit = true;
ExecStart = "${pkgs.coreutils}/bin/true";
ExecReload = "/run/current-system/systemd/bin/systemctl restart systemd-vconsole-setup";
};
};
}

@ -185,8 +185,6 @@ in
{ description = "Initialisation of swap device ${sw.device}";
wantedBy = [ "${realDevice'}.swap" ];
before = [ "${realDevice'}.swap" ];
# If swap is encrypted, depending on rngd resolves a possible entropy starvation during boot
after = mkIf (config.security.rngd.enable && sw.randomEncryption.enable) [ "rngd.service" ];
path = [ pkgs.util-linux ] ++ optional sw.randomEncryption.enable pkgs.cryptsetup;
script =

@ -568,7 +568,7 @@ in {
# Install all the user shells
environment.systemPackages = systemShells;
environment.etc = (mapAttrs' (name: { packages, ... }: {
environment.etc = (mapAttrs' (_: { packages, name, ... }: {
name = "profiles/per-user/${name}";
value.source = pkgs.buildEnv {
name = "user-environment";

@ -26,13 +26,13 @@ in {
systemd.services.enable-ksm = {
description = "Enable Kernel Same-Page Merging";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
script = ''
if [ -e /sys/kernel/mm/ksm ]; then
script =
''
echo 1 > /sys/kernel/mm/ksm/run
${optionalString (cfg.sleep != null) ''echo ${toString cfg.sleep} > /sys/kernel/mm/ksm/sleep_millisecs''}
fi
'';
'' + optionalString (cfg.sleep != null)
''
echo ${toString cfg.sleep} > /sys/kernel/mm/ksm/sleep_millisecs
'';
};
};
}

@ -1,7 +1,14 @@
{ pkgs, ... }:
{ config, ... }:
{
imports = [ ./sd-image-aarch64.nix ];
boot.kernelPackages = pkgs.linuxPackages_latest;
imports = [
../sd-card/sd-image-aarch64-new-kernel-installer.nix
];
config = {
warnings = [
''
.../cd-dvd/sd-image-aarch64-new-kernel.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image-aarch64-new-kernel-installer.nix, instead.
''
];
};
}

@ -1,80 +1,14 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-aarch64.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{ config, ... }:
{
imports = [
../../profiles/base.nix
../../profiles/installation-device.nix
./sd-image.nix
../sd-card/sd-image-aarch64-installer.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
# The serial ports listed here are:
# - ttyS0: for Tegra (Jetson TX1)
# - ttyAMA0: for QEMU's -machine virt
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"];
boot.initrd.availableKernelModules = [
# Allows early (earlier) modesetting for the Raspberry Pi
"vc4" "bcm2835_dma" "i2c_bcm2835"
# Allows early (earlier) modesetting for Allwinner SoCs
"sun4i_drm" "sun8i_drm_hdmi" "sun8i_mixer"
];
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
[pi3]
kernel=u-boot-rpi3.bin
[pi4]
kernel=u-boot-rpi4.bin
enable_gic=1
armstub=armstub8-gic.bin
# Otherwise the resolution will be weird in most cases, compared to
# what the pi3 firmware does by default.
disable_overscan=1
[all]
# Boot in 64-bit mode.
arm_64bit=1
# U-Boot needs this to work, regardless of whether UART is actually used or not.
# Look in arch/arm/mach-bcm283x/Kconfig in the U-Boot tree to see if this is still
# a requirement in the future.
enable_uart=1
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
# Add the config
cp ${configTxt} firmware/config.txt
# Add pi3 specific files
cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/u-boot-rpi3.bin
# Add pi4 specific files
cp ${pkgs.ubootRaspberryPi4_64bit}/u-boot.bin firmware/u-boot-rpi4.bin
cp ${pkgs.raspberrypi-armstubs}/armstub8-gic.bin firmware/armstub8-gic.bin
cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb firmware/
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
config = {
warnings = [
''
.../cd-dvd/sd-image-aarch64.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image-aarch64-installer.nix, instead.
''
];
};
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -1,57 +1,14 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{ config, ... }:
{
imports = [
../../profiles/base.nix
../../profiles/installation-device.nix
./sd-image.nix
../sd-card/sd-image-armv7l-multiplatform-installer.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
boot.kernelPackages = pkgs.linuxPackages_latest;
# The serial ports listed here are:
# - ttyS0: for Tegra (Jetson TK1)
# - ttymxc0: for i.MX6 (Wandboard)
# - ttyAMA0: for Allwinner (pcDuino3 Nano) and QEMU's -machine virt
# - ttyO0: for OMAP (BeagleBone Black)
# - ttySAC2: for Exynos (ODROID-XU3)
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=ttySAC2,115200n8" "console=tty0"];
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
[pi2]
kernel=u-boot-rpi2.bin
[pi3]
kernel=u-boot-rpi3.bin
# U-Boot used to need this to work, regardless of whether UART is actually used or not.
# TODO: check when/if this can be removed.
enable_uart=1
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
cp ${pkgs.ubootRaspberryPi2}/u-boot.bin firmware/u-boot-rpi2.bin
cp ${pkgs.ubootRaspberryPi3_32bit}/u-boot.bin firmware/u-boot-rpi3.bin
cp ${configTxt} firmware/config.txt
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
config = {
warnings = [
''
.../cd-dvd/sd-image-armv7l-multiplatform.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image-armv7l-multiplatform-installer.nix, instead.
''
];
};
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -1,46 +1,14 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{ config, ... }:
{
imports = [
../../profiles/base.nix
../../profiles/installation-device.nix
./sd-image.nix
../sd-card/sd-image-raspberrypi-installer.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
boot.kernelPackages = pkgs.linuxPackages_rpi1;
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
[pi0]
kernel=u-boot-rpi0.bin
[pi1]
kernel=u-boot-rpi1.bin
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin firmware/u-boot-rpi0.bin
cp ${pkgs.ubootRaspberryPi}/u-boot.bin firmware/u-boot-rpi1.bin
cp ${configTxt} firmware/config.txt
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
config = {
warnings = [
''
.../cd-dvd/sd-image-raspberrypi.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image-raspberrypi-installer.nix, instead.
''
];
};
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -1,8 +1,14 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-raspberrypi4.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{ config, ... }:
{
imports = [ ./sd-image-aarch64.nix ];
boot.kernelPackages = pkgs.linuxPackages_rpi4;
imports = [
../sd-card/sd-image-raspberrypi4-installer.nix
];
config = {
warnings = [
''
.../cd-dvd/sd-image-raspberrypi4.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image-raspberrypi4-installer.nix, instead.
''
];
};
}

@ -1,245 +1,14 @@
# This module creates a bootable SD card image containing the given NixOS
# configuration. The generated image is MBR partitioned, with a FAT
# /boot/firmware partition, and ext4 root partition. The generated image
# is sized to fit its contents, and a boot script automatically resizes
# the root partition to fit the device on the first boot.
#
# The firmware partition is built with expectation to hold the Raspberry
# Pi firmware and bootloader, and be removed and replaced with a firmware
# build for the target SoC for other board families.
#
# The derivation for the SD image will be placed in
# config.system.build.sdImage
{ config, lib, pkgs, ... }:
with lib;
let
rootfsImage = pkgs.callPackage ../../../lib/make-ext4-fs.nix ({
inherit (config.sdImage) storePaths;
compressImage = true;
populateImageCommands = config.sdImage.populateRootCommands;
volumeLabel = "NIXOS_SD";
} // optionalAttrs (config.sdImage.rootPartitionUUID != null) {
uuid = config.sdImage.rootPartitionUUID;
});
in
{ config, ... }:
{
imports = [
(mkRemovedOptionModule [ "sdImage" "bootPartitionID" ] "The FAT partition for SD image now only holds the Raspberry Pi firmware files. Use firmwarePartitionID to configure that partition's ID.")
(mkRemovedOptionModule [ "sdImage" "bootSize" ] "The boot files for SD image have been moved to the main ext4 partition. The FAT partition now only holds the Raspberry Pi firmware files. Changing its size may not be required.")
../sd-card/sd-image.nix
];
options.sdImage = {
imageName = mkOption {
default = "${config.sdImage.imageBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img";
description = ''
Name of the generated image file.
'';
};
imageBaseName = mkOption {
default = "nixos-sd-image";
description = ''
Prefix of the name of the generated image file.
'';
};
storePaths = mkOption {
type = with types; listOf package;
example = literalExample "[ pkgs.stdenv ]";
description = ''
Derivations to be included in the Nix store in the generated SD image.
'';
};
firmwarePartitionID = mkOption {
type = types.str;
default = "0x2178694e";
description = ''
Volume ID for the /boot/firmware partition on the SD card. This value
must be a 32-bit hexadecimal number.
'';
};
firmwarePartitionName = mkOption {
type = types.str;
default = "FIRMWARE";
description = ''
Name of the filesystem which holds the boot firmware.
'';
};
rootPartitionUUID = mkOption {
type = types.nullOr types.str;
default = null;
example = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7";
description = ''
UUID for the filesystem on the main NixOS partition on the SD card.
'';
};
firmwareSize = mkOption {
type = types.int;
# As of 2019-08-18 the Raspberry Pi firmware + u-boot take ~18 MiB
default = 30;
description = ''
Size of the /boot/firmware partition, in megabytes.
'';
};
populateFirmwareCommands = mkOption {
example = literalExample "'' cp \${pkgs.myBootLoader}/u-boot.bin firmware/ ''";
description = ''
Shell commands to populate the ./firmware directory.
All files in that directory are copied to the
/boot/firmware partition on the SD image.
'';
};
populateRootCommands = mkOption {
example = literalExample "''\${config.boot.loader.generic-extlinux-compatible.populateCmd} -c \${config.system.build.toplevel} -d ./files/boot''";
description = ''
Shell commands to populate the ./files directory.
All files in that directory are copied to the
root (/) partition on the SD image. Use this to
populate the ./files/boot (/boot) directory.
'';
};
postBuildCommands = mkOption {
example = literalExample "'' dd if=\${pkgs.myBootLoader}/SPL of=$img bs=1024 seek=1 conv=notrunc ''";
default = "";
description = ''
Shell commands to run after the image is built.
Can be used for boards that require the u-boot SPL to be dd'ed in front of the actual partitions.
'';
};
compressImage = mkOption {
type = types.bool;
default = true;
description = ''
Whether the SD image should be compressed using
<command>zstd</command>.
'';
};
};
config = {
fileSystems = {
"/boot/firmware" = {
device = "/dev/disk/by-label/${config.sdImage.firmwarePartitionName}";
fsType = "vfat";
# Alternatively, this could be removed from the configuration.
# The filesystem is not needed at runtime; it could be treated
# as an opaque blob instead of a discrete FAT32 filesystem.
options = [ "nofail" "noauto" ];
};
"/" = {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
};
};
sdImage.storePaths = [ config.system.build.toplevel ];
system.build.sdImage = pkgs.callPackage ({ stdenv, dosfstools, e2fsprogs,
mtools, libfaketime, util-linux, zstd }: stdenv.mkDerivation {
name = config.sdImage.imageName;
nativeBuildInputs = [ dosfstools e2fsprogs mtools libfaketime util-linux zstd ];
inherit (config.sdImage) compressImage;
buildCommand = ''
mkdir -p $out/nix-support $out/sd-image
export img=$out/sd-image/${config.sdImage.imageName}
echo "${pkgs.stdenv.buildPlatform.system}" > $out/nix-support/system
if test -n "$compressImage"; then
echo "file sd-image $img.zst" >> $out/nix-support/hydra-build-products
else
echo "file sd-image $img" >> $out/nix-support/hydra-build-products
fi
echo "Decompressing rootfs image"
zstd -d --no-progress "${rootfsImage}" -o ./root-fs.img
# Gap in front of the first partition, in MiB
gap=8
# Create the image file sized to fit /boot/firmware and /, plus slack for the gap.
rootSizeBlocks=$(du -B 512 --apparent-size ./root-fs.img | awk '{ print $1 }')
firmwareSizeBlocks=$((${toString config.sdImage.firmwareSize} * 1024 * 1024 / 512))
imageSize=$((rootSizeBlocks * 512 + firmwareSizeBlocks * 512 + gap * 1024 * 1024))
truncate -s $imageSize $img
# type=b is 'W95 FAT32', type=83 is 'Linux'.
# The "bootable" partition is where u-boot will look file for the bootloader
# information (dtbs, extlinux.conf file).
sfdisk $img <<EOF
label: dos
label-id: ${config.sdImage.firmwarePartitionID}
start=''${gap}M, size=$firmwareSizeBlocks, type=b
start=$((gap + ${toString config.sdImage.firmwareSize}))M, type=83, bootable
EOF
# Copy the rootfs into the SD image
eval $(partx $img -o START,SECTORS --nr 2 --pairs)
dd conv=notrunc if=./root-fs.img of=$img seek=$START count=$SECTORS
# Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img
eval $(partx $img -o START,SECTORS --nr 1 --pairs)
truncate -s $((SECTORS * 512)) firmware_part.img
faketime "1970-01-01 00:00:00" mkfs.vfat -i ${config.sdImage.firmwarePartitionID} -n ${config.sdImage.firmwarePartitionName} firmware_part.img
# Populate the files intended for /boot/firmware
mkdir firmware
${config.sdImage.populateFirmwareCommands}
# Copy the populated /boot/firmware into the SD image
(cd firmware; mcopy -psvm -i ../firmware_part.img ./* ::)
# Verify the FAT partition before copying it.
fsck.vfat -vn firmware_part.img
dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS
${config.sdImage.postBuildCommands}
if test -n "$compressImage"; then
zstd -T$NIX_BUILD_CORES --rm $img
fi
'';
}) {};
boot.postBootCommands = ''
# On the first boot do some maintenance tasks
if [ -f /nix-path-registration ]; then
set -euo pipefail
set -x
# Figure out device names for the boot device and root filesystem.
rootPart=$(${pkgs.util-linux}/bin/findmnt -n -o SOURCE /)
bootDevice=$(lsblk -npo PKNAME $rootPart)
partNum=$(lsblk -npo MAJ:MIN $rootPart | ${pkgs.gawk}/bin/awk -F: '{print $2}')
# Resize the root partition and the filesystem to fit the disk
echo ",+," | sfdisk -N$partNum --no-reread $bootDevice
${pkgs.parted}/bin/partprobe
${pkgs.e2fsprogs}/bin/resize2fs $rootPart
# Register the contents of the initial Nix store
${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration
# nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag.
touch /etc/NIXOS
${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
# Prevents this from running on later boots.
rm -f /nix-path-registration
fi
'';
warnings = [
''
.../cd-dvd/sd-image.nix is deprecated and will eventually be removed.
Please switch to .../sd-card/sd-image.nix, instead.
''
];
};
}

@ -26,7 +26,7 @@ let
# A clue for the kernel loading
kernelParams = pkgs.writeText "kernel-params.txt" ''
Kernel Parameters:
init=/boot/init systemConfig=/boot/init ${toString config.boot.kernelParams}
init=/boot/init ${toString config.boot.kernelParams}
'';
# System wide nixpkgs config

@ -23,13 +23,13 @@ let
label nixos
MENU LABEL ^NixOS using nfsroot
KERNEL bzImage
append ip=dhcp nfsroot=/home/pcroot systemConfig=${config.system.build.toplevel} init=${config.system.build.toplevel}/init rw
append ip=dhcp nfsroot=/home/pcroot init=${config.system.build.toplevel}/init rw
# I don't know how to make this boot with nfsroot (using the initrd)
label nixos_initrd
MENU LABEL NixOS booting the poor ^initrd.
KERNEL bzImage
append initrd=initrd ip=dhcp nfsroot=/home/pcroot systemConfig=${config.system.build.toplevel} init=${config.system.build.toplevel}/init rw
append initrd=initrd ip=dhcp nfsroot=/home/pcroot init=${config.system.build.toplevel}/init rw
label memtest
MENU LABEL ^${pkgs.memtest86.name}

@ -0,0 +1,10 @@
{
imports = [
../../profiles/installation-device.nix
./sd-image-aarch64.nix
];
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -0,0 +1,10 @@
{
imports = [
../../profiles/installation-device.nix
./sd-image-aarch64-new-kernel.nix
];
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -0,0 +1,7 @@
{ pkgs, ... }:
{
imports = [ ./sd-image-aarch64.nix ];
boot.kernelPackages = pkgs.linuxPackages_latest;
}

@ -0,0 +1,75 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-aarch64.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{
imports = [
../../profiles/base.nix
./sd-image.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
# The serial ports listed here are:
# - ttyS0: for Tegra (Jetson TX1)
# - ttyAMA0: for QEMU's -machine virt
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"];
boot.initrd.availableKernelModules = [
# Allows early (earlier) modesetting for the Raspberry Pi
"vc4" "bcm2835_dma" "i2c_bcm2835"
# Allows early (earlier) modesetting for Allwinner SoCs
"sun4i_drm" "sun8i_drm_hdmi" "sun8i_mixer"
];
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
[pi3]
kernel=u-boot-rpi3.bin
[pi4]
kernel=u-boot-rpi4.bin
enable_gic=1
armstub=armstub8-gic.bin
# Otherwise the resolution will be weird in most cases, compared to
# what the pi3 firmware does by default.
disable_overscan=1
[all]
# Boot in 64-bit mode.
arm_64bit=1
# U-Boot needs this to work, regardless of whether UART is actually used or not.
# Look in arch/arm/mach-bcm283x/Kconfig in the U-Boot tree to see if this is still
# a requirement in the future.
enable_uart=1
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
# Add the config
cp ${configTxt} firmware/config.txt
# Add pi3 specific files
cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/u-boot-rpi3.bin
# Add pi4 specific files
cp ${pkgs.ubootRaspberryPi4_64bit}/u-boot.bin firmware/u-boot-rpi4.bin
cp ${pkgs.raspberrypi-armstubs}/armstub8-gic.bin firmware/armstub8-gic.bin
cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb firmware/
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
};
}
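
The comment at the top of this file shows the native build invocation. As a sketch of
an assumption not stated in the module itself: on an x86_64 host with
boot.binfmt.emulatedSystems = [ "aarch64-linux" ] configured, the same image can be
cross-built by passing the target system explicitly, since nixos/default.nix accepts
a system argument:

nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-aarch64.nix \
  -A config.system.build.sdImage --argstr system aarch64-linux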

@ -0,0 +1,10 @@
{
imports = [
../../profiles/installation-device.nix
./sd-image-armv7l-multiplatform.nix
];
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -0,0 +1,52 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{
imports = [
../../profiles/base.nix
./sd-image.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
boot.kernelPackages = pkgs.linuxPackages_latest;
# The serial ports listed here are:
# - ttyS0: for Tegra (Jetson TK1)
# - ttymxc0: for i.MX6 (Wandboard)
# - ttyAMA0: for Allwinner (pcDuino3 Nano) and QEMU's -machine virt
# - ttyO0: for OMAP (BeagleBone Black)
# - ttySAC2: for Exynos (ODROID-XU3)
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=ttySAC2,115200n8" "console=tty0"];
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
[pi2]
kernel=u-boot-rpi2.bin
[pi3]
kernel=u-boot-rpi3.bin
# U-Boot used to need this to work, regardless of whether UART is actually used or not.
# TODO: check when/if this can be removed.
enable_uart=1
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
cp ${pkgs.ubootRaspberryPi2}/u-boot.bin firmware/u-boot-rpi2.bin
cp ${pkgs.ubootRaspberryPi3_32bit}/u-boot.bin firmware/u-boot-rpi3.bin
cp ${configTxt} firmware/config.txt
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
};
}

@ -0,0 +1,10 @@
{
imports = [
../../profiles/installation-device.nix
./sd-image-raspberrypi.nix
];
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -0,0 +1,41 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-raspberrypi.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{
imports = [
../../profiles/base.nix
./sd-image.nix
];
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
boot.consoleLogLevel = lib.mkDefault 7;
boot.kernelPackages = pkgs.linuxPackages_rpi1;
sdImage = {
populateFirmwareCommands = let
configTxt = pkgs.writeText "config.txt" ''
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
# when attempting to show low-voltage or overtemperature warnings.
avoid_warnings=1
[pi0]
kernel=u-boot-rpi0.bin
[pi1]
kernel=u-boot-rpi1.bin
'';
in ''
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin firmware/u-boot-rpi0.bin
cp ${pkgs.ubootRaspberryPi}/u-boot.bin firmware/u-boot-rpi1.bin
cp ${configTxt} firmware/config.txt
'';
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
'';
};
}

@ -0,0 +1,10 @@
{
imports = [
../../profiles/installation-device.nix
./sd-image-raspberrypi4.nix
];
# the installation media is also the installation target,
# so we don't want to provide the installation configuration.nix.
installer.cloneConfig = false;
}

@ -0,0 +1,8 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-raspberrypi4.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{
imports = [ ./sd-image-aarch64.nix ];
boot.kernelPackages = pkgs.linuxPackages_rpi4;
}

@ -0,0 +1,245 @@
# This module creates a bootable SD card image containing the given NixOS
# configuration. The generated image is MBR partitioned, with a FAT
# /boot/firmware partition, and ext4 root partition. The generated image
# is sized to fit its contents, and a boot script automatically resizes
# the root partition to fit the device on the first boot.
#
# The firmware partition is built with expectation to hold the Raspberry
# Pi firmware and bootloader, and be removed and replaced with a firmware
# build for the target SoC for other board families.
#
# The derivation for the SD image will be placed in
# config.system.build.sdImage
{ config, lib, pkgs, ... }:
with lib;
let
rootfsImage = pkgs.callPackage ../../../lib/make-ext4-fs.nix ({
inherit (config.sdImage) storePaths;
compressImage = true;
populateImageCommands = config.sdImage.populateRootCommands;
volumeLabel = "NIXOS_SD";
} // optionalAttrs (config.sdImage.rootPartitionUUID != null) {
uuid = config.sdImage.rootPartitionUUID;
});
in
{
imports = [
(mkRemovedOptionModule [ "sdImage" "bootPartitionID" ] "The FAT partition for SD image now only holds the Raspberry Pi firmware files. Use firmwarePartitionID to configure that partition's ID.")
(mkRemovedOptionModule [ "sdImage" "bootSize" ] "The boot files for SD image have been moved to the main ext4 partition. The FAT partition now only holds the Raspberry Pi firmware files. Changing its size may not be required.")
];
options.sdImage = {
imageName = mkOption {
default = "${config.sdImage.imageBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img";
description = ''
Name of the generated image file.
'';
};
imageBaseName = mkOption {
default = "nixos-sd-image";
description = ''
Prefix of the name of the generated image file.
'';
};
storePaths = mkOption {
type = with types; listOf package;
example = literalExample "[ pkgs.stdenv ]";
description = ''
Derivations to be included in the Nix store in the generated SD image.
'';
};
firmwarePartitionID = mkOption {
type = types.str;
default = "0x2178694e";
description = ''
Volume ID for the /boot/firmware partition on the SD card. This value
must be a 32-bit hexadecimal number.
'';
};
firmwarePartitionName = mkOption {
type = types.str;
default = "FIRMWARE";
description = ''
Name of the filesystem which holds the boot firmware.
'';
};
rootPartitionUUID = mkOption {
type = types.nullOr types.str;
default = null;
example = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7";
description = ''
UUID for the filesystem on the main NixOS partition on the SD card.
'';
};
firmwareSize = mkOption {
type = types.int;
# As of 2019-08-18 the Raspberry Pi firmware + u-boot take ~18 MiB
default = 30;
description = ''
Size of the /boot/firmware partition, in megabytes.
'';
};
populateFirmwareCommands = mkOption {
example = literalExample "'' cp \${pkgs.myBootLoader}/u-boot.bin firmware/ ''";
description = ''
Shell commands to populate the ./firmware directory.
All files in that directory are copied to the
/boot/firmware partition on the SD image.
'';
};
populateRootCommands = mkOption {
example = literalExample "''\${config.boot.loader.generic-extlinux-compatible.populateCmd} -c \${config.system.build.toplevel} -d ./files/boot''";
description = ''
Shell commands to populate the ./files directory.
All files in that directory are copied to the
root (/) partition on the SD image. Use this to
populate the ./files/boot (/boot) directory.
'';
};
postBuildCommands = mkOption {
example = literalExample "'' dd if=\${pkgs.myBootLoader}/SPL of=$img bs=1024 seek=1 conv=notrunc ''";
default = "";
description = ''
Shell commands to run after the image is built.
Can be used for boards that require the u-boot SPL to be dd'ed in front of the actual partitions.
'';
};
compressImage = mkOption {
type = types.bool;
default = true;
description = ''
Whether the SD image should be compressed using
<command>zstd</command>.
'';
};
};
config = {
fileSystems = {
"/boot/firmware" = {
device = "/dev/disk/by-label/${config.sdImage.firmwarePartitionName}";
fsType = "vfat";
# Alternatively, this could be removed from the configuration.
# The filesystem is not needed at runtime; it could be treated
# as an opaque blob instead of a discrete FAT32 filesystem.
options = [ "nofail" "noauto" ];
};
"/" = {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
};
};
sdImage.storePaths = [ config.system.build.toplevel ];
system.build.sdImage = pkgs.callPackage ({ stdenv, dosfstools, e2fsprogs,
mtools, libfaketime, util-linux, zstd }: stdenv.mkDerivation {
name = config.sdImage.imageName;
nativeBuildInputs = [ dosfstools e2fsprogs mtools libfaketime util-linux zstd ];
inherit (config.sdImage) compressImage;
buildCommand = ''
mkdir -p $out/nix-support $out/sd-image
export img=$out/sd-image/${config.sdImage.imageName}
echo "${pkgs.stdenv.buildPlatform.system}" > $out/nix-support/system
if test -n "$compressImage"; then
echo "file sd-image $img.zst" >> $out/nix-support/hydra-build-products
else
echo "file sd-image $img" >> $out/nix-support/hydra-build-products
fi
echo "Decompressing rootfs image"
zstd -d --no-progress "${rootfsImage}" -o ./root-fs.img
# Gap in front of the first partition, in MiB
gap=8
# Create the image file sized to fit /boot/firmware and /, plus slack for the gap.
rootSizeBlocks=$(du -B 512 --apparent-size ./root-fs.img | awk '{ print $1 }')
firmwareSizeBlocks=$((${toString config.sdImage.firmwareSize} * 1024 * 1024 / 512))
imageSize=$((rootSizeBlocks * 512 + firmwareSizeBlocks * 512 + gap * 1024 * 1024))
truncate -s $imageSize $img
# type=b is 'W95 FAT32', type=83 is 'Linux'.
# The "bootable" partition is where u-boot will look file for the bootloader
# information (dtbs, extlinux.conf file).
sfdisk $img <<EOF
label: dos
label-id: ${config.sdImage.firmwarePartitionID}
start=''${gap}M, size=$firmwareSizeBlocks, type=b
start=$((gap + ${toString config.sdImage.firmwareSize}))M, type=83, bootable
EOF
# Copy the rootfs into the SD image
eval $(partx $img -o START,SECTORS --nr 2 --pairs)
dd conv=notrunc if=./root-fs.img of=$img seek=$START count=$SECTORS
# Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img
eval $(partx $img -o START,SECTORS --nr 1 --pairs)
truncate -s $((SECTORS * 512)) firmware_part.img
faketime "1970-01-01 00:00:00" mkfs.vfat -i ${config.sdImage.firmwarePartitionID} -n ${config.sdImage.firmwarePartitionName} firmware_part.img
# Populate the files intended for /boot/firmware
mkdir firmware
${config.sdImage.populateFirmwareCommands}
# Copy the populated /boot/firmware into the SD image
(cd firmware; mcopy -psvm -i ../firmware_part.img ./* ::)
# Verify the FAT partition before copying it.
fsck.vfat -vn firmware_part.img
dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS
${config.sdImage.postBuildCommands}
if test -n "$compressImage"; then
zstd -T$NIX_BUILD_CORES --rm $img
fi
'';
}) {};
boot.postBootCommands = ''
# On the first boot do some maintenance tasks
if [ -f /nix-path-registration ]; then
set -euo pipefail
set -x
# Figure out device names for the boot device and root filesystem.
rootPart=$(${pkgs.util-linux}/bin/findmnt -n -o SOURCE /)
bootDevice=$(lsblk -npo PKNAME $rootPart)
partNum=$(lsblk -npo MAJ:MIN $rootPart | ${pkgs.gawk}/bin/awk -F: '{print $2}')
# Resize the root partition and the filesystem to fit the disk
echo ",+," | sfdisk -N$partNum --no-reread $bootDevice
${pkgs.parted}/bin/partprobe
${pkgs.e2fsprogs}/bin/resize2fs $rootPart
# Register the contents of the initial Nix store
${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration
# nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag.
touch /etc/NIXOS
${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
# Prevents this from running on later boots.
rm -f /nix-path-registration
fi
'';
};
}
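
A minimal usage sketch for the options declared above, wired up the same way the
sd-image-* modules in this directory do it; the firmware file and the option values
here are illustrative assumptions, not defaults:

{ config, pkgs, ... }:
{
  imports = [ ./sd-image.nix ];
  sdImage = {
    firmwareSize = 64;       # MiB; the module default above is 30
    rootPartitionUUID = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7";
    compressImage = false;   # skip the final zstd pass for local testing
    populateFirmwareCommands = ''
      cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/
    '';
    populateRootCommands = ''
      mkdir -p ./files/boot
      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
    '';
  };
}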

@ -585,6 +585,22 @@ EOF
return $config;
}
sub generateXserverConfig {
my $xserverEnabled = "@xserverEnabled@";
my $config = "";
if ($xserverEnabled eq "1") {
$config = <<EOF;
# Enable the X11 windowing system.
services.xserver.enable = true;
EOF
} else {
$config = <<EOF;
# Enable the X11 windowing system.
# services.xserver.enable = true;
EOF
}
return $config;
}
if ($showHardwareConfig) {
print STDOUT $hwConfig;
@ -630,6 +646,8 @@ EOF
my $networkingDhcpConfig = generateNetworkingDhcpConfig();
my $xserverConfig = generateXserverConfig();
(my $desktopConfiguration = <<EOF)=~s/^/ /gm;
@desktopConfiguration@
EOF

@ -36,6 +36,7 @@ let
path = lib.optionals (lib.elem "btrfs" config.boot.supportedFilesystems) [ pkgs.btrfs-progs ];
perl = "${pkgs.perl}/bin/perl -I${pkgs.perlPackages.FileSlurp}/${pkgs.perl.libPrefix}";
inherit (config.system.nixos-generate-config) configuration desktopConfiguration;
xserverEnabled = config.services.xserver.enable;
};
nixos-option =
@ -87,8 +88,8 @@ in
desktopConfiguration = mkOption {
internal = true;
type = types.str;
default = "";
type = types.listOf types.lines;
default = [];
description = ''
Text to preseed the desktop configuration that <literal>nixos-generate-config</literal>
saves to <literal>/etc/nixos/configuration.nix</literal>.
@ -136,6 +137,8 @@ in
# keyMap = "us";
# };
$xserverConfig
$desktopConfiguration
# Configure keymap in X11
# services.xserver.layout = "us";
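
A hypothetical sketch of how a desktop module could preseed the generated
configuration through the desktopConfiguration option introduced above; the GNOME
option names are illustrative:

{
  system.nixos-generate-config.desktopConfiguration = [''
    # Enable the GNOME Desktop Environment.
    services.xserver.displayManager.gdm.enable = true;
    services.xserver.desktopManager.gnome3.enable = true;
  ''];
}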

@ -53,7 +53,7 @@ in
${pkgs.kexectools}/sbin/kexec -p /run/current-system/kernel \
--initrd=/run/current-system/initrd \
--reset-vga --console-vga \
--command-line="systemConfig=$(readlink -f /run/current-system) init=$(readlink -f /run/current-system/init) irqpoll maxcpus=1 reset_devices ${kernelParams}"
--command-line="init=$(readlink -f /run/current-system/init) irqpoll maxcpus=1 reset_devices ${kernelParams}"
'';
kernelParams = [
"crashkernel=${crashdump.reservedMemory}"

@ -80,6 +80,7 @@
./hardware/video/displaylink.nix
./hardware/video/hidpi.nix
./hardware/video/nvidia.nix
./hardware/video/switcheroo-control.nix
./hardware/video/uvcvideo/default.nix
./hardware/video/webcam/facetimehd.nix
./hardware/xpadneo.nix
@ -206,7 +207,6 @@
./security/dhparams.nix
./security/duosec.nix
./security/google_oslogin.nix
./security/hidepid.nix
./security/lock-kernel-modules.nix
./security/misc.nix
./security/oath.nix
@ -256,6 +256,8 @@
./services/backup/tsm.nix
./services/backup/zfs-replication.nix
./services/backup/znapzend.nix
./services/blockchain/ethereum/geth.nix
./services/backup/zrepl.nix
./services/cluster/hadoop/default.nix
./services/cluster/k3s/default.nix
./services/cluster/kubernetes/addons/dns.nix
@ -320,7 +322,8 @@
./services/desktops/gsignond.nix
./services/desktops/gvfs.nix
./services/desktops/malcontent.nix
./services/desktops/pipewire.nix
./services/desktops/pipewire/pipewire.nix
./services/desktops/pipewire/pipewire-media-session.nix
./services/desktops/gnome3/at-spi2-core.nix
./services/desktops/gnome3/chrome-gnome-shell.nix
./services/desktops/gnome3/evolution-data-server.nix
@ -379,6 +382,7 @@
./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix
./services/hardware/sane_extra_backends/dsseries.nix
./services/hardware/spacenavd.nix
./services/hardware/tcsd.nix
./services/hardware/tlp.nix
./services/hardware/thinkfan.nix
@ -459,6 +463,7 @@
./services/misc/errbot.nix
./services/misc/etcd.nix
./services/misc/etebase-server.nix
./services/misc/etesync-dav.nix
./services/misc/ethminer.nix
./services/misc/exhibitor.nix
./services/misc/felix.nix
@ -485,6 +490,7 @@
./services/misc/logkeys.nix
./services/misc/leaps.nix
./services/misc/lidarr.nix
./services/misc/lifecycled.nix
./services/misc/mame.nix
./services/misc/matrix-appservice-discord.nix
./services/misc/matrix-synapse.nix
@ -507,6 +513,7 @@
./services/misc/paperless.nix
./services/misc/parsoid.nix
./services/misc/plex.nix
./services/misc/plikd.nix
./services/misc/tautulli.nix
./services/misc/pinnwand.nix
./services/misc/pykms.nix

@ -22,8 +22,6 @@ with lib;
environment.memoryAllocator.provider = mkDefault "scudo";
environment.variables.SCUDO_OPTIONS = mkDefault "ZeroContents=1";
security.hideProcessInformation = mkDefault true;
security.lockKernelModules = mkDefault true;
security.protectKernelImage = mkDefault true;

@ -4,12 +4,20 @@ with lib;
let
cfg = config.programs.steam;
steam = pkgs.steam.override {
extraLibraries = pkgs: with config.hardware.opengl;
if pkgs.hostPlatform.is64bit
then [ package ] ++ extraPackages
else [ package32 ] ++ extraPackages32;
};
in {
options.programs.steam.enable = mkEnableOption "steam";
config = mkIf cfg.enable {
hardware.opengl = { # this fixes the "glXChooseVisual failed" bug, context: https://github.com/NixOS/nixpkgs/issues/47932
enable = true;
driSupport = true;
driSupport32Bit = true;
};
@ -18,7 +26,7 @@ in {
hardware.steam-hardware.enable = true;
environment.systemPackages = [ pkgs.steam ];
environment.systemPackages = [ steam steam.run ];
};
meta.maintainers = with maintainers; [ mkg20001 ];
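
With the override above in place, enabling Steam stays a one-line setting; the
matching 32-bit or 64-bit GL libraries are wired in automatically:

{
  programs.steam.enable = true;
}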

@ -73,6 +73,11 @@ with lib;
(mkRemovedOptionModule [ "services" "venus" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "flashpolicyd" ] "The flashpolicyd module has been removed. Adobe Flash Player is deprecated.")
(mkRemovedOptionModule [ "security" "hideProcessInformation" ] ''
The hidepid module was removed, since the underlying machinery
is broken when using cgroups-v2.
'')
# Do NOT add any option renames here, see top of the file
];
}

@ -1,31 +0,0 @@
{ config, lib, ... }:
with lib;
{
meta = {
maintainers = [ maintainers.joachifm ];
doc = ./hidepid.xml;
};
options = {
security.hideProcessInformation = mkOption {
type = types.bool;
default = false;
description = ''
Restrict process information to the owning user.
'';
};
};
config = mkIf config.security.hideProcessInformation {
users.groups.proc.gid = config.ids.gids.proc;
users.groups.proc.members = [ "polkituser" ];
boot.specialFileSystems."/proc".options = [ "hidepid=2" "gid=${toString config.ids.gids.proc}" ];
systemd.services.systemd-logind.serviceConfig.SupplementaryGroups = [ "proc" ];
# Disable cgroupsv2, which doesn't work with hidepid.
# https://github.com/NixOS/nixpkgs/pull/104094#issuecomment-729996203
systemd.enableUnifiedCgroupHierarchy = false;
};
}

@ -1,28 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-hidepid">
<title>Hiding process information</title>
<para>
Setting
<programlisting>
<xref linkend="opt-security.hideProcessInformation"/> = true;
</programlisting>
ensures that access to process information is restricted to the owning user.
This implies, among other things, that command-line arguments remain private.
Unless your deployment relies on unprivileged users being able to inspect the
process information of other users, this option should be safe to enable.
</para>
<para>
Members of the <literal>proc</literal> group are exempt from process
information hiding.
</para>
<para>
To allow a service <replaceable>foo</replaceable> to run without process
information hiding, set
<programlisting>
<link linkend="opt-systemd.services._name_.serviceConfig">systemd.services.<replaceable>foo</replaceable>.serviceConfig</link>.SupplementaryGroups = [ "proc" ];
</programlisting>
</para>
</chapter>

@ -1,56 +1,16 @@
{ config, lib, pkgs, ... }:
with lib;
{ lib, ... }:
let
cfg = config.security.rngd;
removed = k: lib.mkRemovedOptionModule [ "security" "rngd" k ];
in
{
options = {
security.rngd = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the rng daemon. Devices that the kernel recognises
as entropy sources are handled automatically by krngd.
'';
};
debug = mkOption {
type = types.bool;
default = false;
description = "Whether to enable debug output (-d).";
};
};
};
config = mkIf cfg.enable {
systemd.services.rngd = {
bindsTo = [ "dev-random.device" ];
after = [ "dev-random.device" ];
# Clean shutdown without DefaultDependencies
conflicts = [ "shutdown.target" ];
before = [
"sysinit.target"
"shutdown.target"
];
description = "Hardware RNG Entropy Gatherer Daemon";
# rngd may have to start early to avoid entropy starvation during boot with encrypted swap
unitConfig.DefaultDependencies = false;
serviceConfig = {
ExecStart = "${pkgs.rng-tools}/sbin/rngd -f"
+ optionalString cfg.debug " -d";
# PrivateTmp would introduce a circular dependency if /tmp is on tmpfs and swap is encrypted,
# thus depending on rngd before swap, while swap depends on rngd to avoid entropy starvation.
NoNewPrivileges = true;
PrivateNetwork = true;
ProtectSystem = "full";
ProtectHome = true;
};
};
};
imports = [
(removed "enable" ''
rngd is not necessary for any device that the kernel recognises
as a hardware RNG, as it will automatically run the krngd task
to periodically collect random data from the device and mix it
into the kernel's RNG.
'')
(removed "debug"
"The rngd module was removed, so its debug option does nothing.")
];
}

@ -0,0 +1,54 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.zrepl;
format = pkgs.formats.yaml { };
configFile = format.generate "zrepl.yml" cfg.settings;
in
{
meta.maintainers = with maintainers; [ cole-h ];
options = {
services.zrepl = {
enable = mkEnableOption "zrepl";
settings = mkOption {
default = { };
description = ''
Configuration for zrepl. See <link
xlink:href="https://zrepl.github.io/configuration.html"/>
for more information.
'';
type = types.submodule {
freeformType = format.type;
};
};
};
};
### Implementation ###
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.zrepl ];
# zrepl looks for its config in this location by default. This
# allows the use of e.g. `zrepl signal wakeup <job>` without having
# to specify the store path of the config.
environment.etc."zrepl/zrepl.yml".source = configFile;
systemd.packages = [ pkgs.zrepl ];
systemd.services.zrepl = {
requires = [ "local-fs.target" ];
wantedBy = [ "zfs.target" ];
after = [ "zfs.target" ];
path = [ config.boot.zfs.package ];
restartTriggers = [ configFile ];
serviceConfig = {
Restart = "on-failure";
};
};
};
}
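
A usage sketch for the freeform settings option; the job layout follows the upstream
zrepl documentation and is an illustrative assumption, not something this module
validates:

{
  services.zrepl = {
    enable = true;
    settings.jobs = [{
      name = "snapjob";
      type = "snap";
      filesystems."tank/data<" = true;
      snapshotting = { type = "periodic"; interval = "10m"; prefix = "zrepl_"; };
      pruning.keep = [{ type = "last_n"; count = 10; }];
    }];
  };
}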

@ -0,0 +1,178 @@
{ config, lib, pkgs, ... }:
with lib;
let
eachGeth = config.services.geth;
gethOpts = { config, lib, name, ...}: {
options = {
enable = lib.mkEnableOption "Go Ethereum Node";
port = mkOption {
type = types.port;
default = 30303;
description = "Port number Go Ethereum will be listening on, both TCP and UDP.";
};
http = {
enable = lib.mkEnableOption "Go Ethereum HTTP API";
address = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Listen address of Go Ethereum HTTP API.";
};
port = mkOption {
type = types.port;
default = 8545;
description = "Port number of Go Ethereum HTTP API.";
};
apis = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = "APIs to enable over WebSocket";
example = ["net" "eth"];
};
};
websocket = {
enable = lib.mkEnableOption "Go Ethereum WebSocket API";
address = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Listen address of Go Ethereum WebSocket API.";
};
port = mkOption {
type = types.port;
default = 8546;
description = "Port number of Go Ethereum WebSocket API.";
};
apis = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = "APIs to enable over WebSocket";
example = ["net" "eth"];
};
};
metrics = {
enable = lib.mkEnableOption "Go Ethereum prometheus metrics";
address = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Listen address of Go Ethereum metrics service.";
};
port = mkOption {
type = types.port;
default = 6060;
description = "Port number of Go Ethereum metrics service.";
};
};
network = mkOption {
type = types.nullOr (types.enum [ "goerli" "rinkeby" "yolov2" "ropsten" ]);
default = null;
description = "The network to connect to. Mainnet (null) is the default ethereum network.";
};
syncmode = mkOption {
type = types.enum [ "fast" "full" "light" ];
default = "fast";
description = "Blockchain sync mode.";
};
gcmode = mkOption {
type = types.enum [ "full" "archive" ];
default = "full";
description = "Blockchain garbage collection mode.";
};
maxpeers = mkOption {
type = types.int;
default = 50;
description = "Maximum peers to connect to.";
};
extraArgs = mkOption {
type = types.listOf types.str;
description = "Additional arguments passed to Go Ethereum.";
default = [];
};
package = mkOption {
default = pkgs.go-ethereum.geth;
type = types.package;
description = "Package to use as Go Ethereum node.";
};
};
};
in
{
###### interface
options = {
services.geth = mkOption {
type = types.attrsOf (types.submodule gethOpts);
default = {};
description = "Specification of one or more geth instances.";
};
};
###### implementation
config = mkIf (eachGeth != {}) {
environment.systemPackages = flatten (mapAttrsToList (gethName: cfg: [
cfg.package
]) eachGeth);
systemd.services = mapAttrs' (gethName: cfg: (
nameValuePair "geth-${gethName}" (mkIf cfg.enable {
description = "Go Ethereum node (${gethName})";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
DynamicUser = true;
Restart = "always";
StateDirectory = "goethereum/${gethName}/${if (cfg.network == null) then "mainnet" else cfg.network}";
# Hardening measures
PrivateTmp = "true";
ProtectSystem = "full";
NoNewPrivileges = "true";
PrivateDevices = "true";
MemoryDenyWriteExecute = "true";
};
script = ''
${cfg.package}/bin/geth \
--nousb \
--ipcdisable \
${optionalString (cfg.network != null) ''--${cfg.network}''} \
--syncmode ${cfg.syncmode} \
--gcmode ${cfg.gcmode} \
--port ${toString cfg.port} \
--maxpeers ${toString cfg.maxpeers} \
${if cfg.http.enable then ''--http --http.addr ${cfg.http.address} --http.port ${toString cfg.http.port}'' else ""} \
${optionalString (cfg.http.apis != null) ''--http.api ${lib.concatStringsSep "," cfg.http.apis}''} \
${if cfg.websocket.enable then ''--ws --ws.addr ${cfg.websocket.address} --ws.port ${toString cfg.websocket.port}'' else ""} \
${optionalString (cfg.websocket.apis != null) ''--ws.api ${lib.concatStringsSep "," cfg.websocket.apis}''} \
${optionalString cfg.metrics.enable ''--metrics --metrics.addr ${cfg.metrics.address} --metrics.port ${toString cfg.metrics.port}''} \
${lib.escapeShellArgs cfg.extraArgs} \
--datadir /var/lib/goethereum/${gethName}/${if (cfg.network == null) then "mainnet" else cfg.network}
'';
}))) eachGeth;
};
}
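
Because services.geth is an attrset of submodules, several instances can run side by
side. A minimal sketch using only options defined above:

{
  services.geth."goerli-light" = {
    enable = true;
    network = "goerli";
    syncmode = "light";
    http = {
      enable = true;
      apis = [ "net" "eth" ];
    };
  };
}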

@ -89,6 +89,11 @@ in
example = "dbi:Pg:dbname=hydra;host=postgres.example.org;user=foo;";
description = ''
The DBI string for Hydra database connection.
NOTE: Attempts to set `application_name` will be overridden by
`hydra-TYPE` (where TYPE is e.g. `evaluator`, `queue-runner`,
etc.) in all hydra services to more easily distinguish where
queries are coming from.
'';
};
@ -284,7 +289,9 @@ in
{ wantedBy = [ "multi-user.target" ];
requires = optional haveLocalDB "postgresql.service";
after = optional haveLocalDB "postgresql.service";
environment = env;
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
};
preStart = ''
mkdir -p ${baseDir}
chown hydra.hydra ${baseDir}
@ -339,7 +346,9 @@ in
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = serverEnv;
environment = serverEnv // {
HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
};
restartTriggers = [ hydraConf ];
serviceConfig =
{ ExecStart =
@ -361,6 +370,7 @@ in
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
IN_SYSTEMD = "1"; # to get log severity levels
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-queue-runner hydra-queue-runner -v";
@ -380,7 +390,9 @@ in
after = [ "hydra-init.service" "network.target" ];
path = with pkgs; [ hydra-package nettools jq ];
restartTriggers = [ hydraConf ];
environment = env;
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-evaluator hydra-evaluator";
User = "hydra";
@ -392,7 +404,9 @@ in
systemd.services.hydra-update-gc-roots =
{ requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
environment = env;
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
User = "hydra";
@ -403,7 +417,9 @@ in
systemd.services.hydra-send-stats =
{ wantedBy = [ "multi-user.target" ];
after = [ "hydra-init.service" ];
environment = env;
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-send-stats hydra-send-stats";
User = "hydra";
@ -417,6 +433,7 @@ in
restartTriggers = [ hydraConf ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner";
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
};
serviceConfig =
{ ExecStart = "@${hydra-package}/bin/hydra-notify hydra-notify";

@ -375,6 +375,18 @@ in
fi
'';
script = ''
# https://mariadb.com/kb/en/getting-started-with-mariadb-galera-cluster/#systemd-and-galera-recovery
if test -n "''${_WSREP_START_POSITION}"; then
if test -e "${cfg.package}/bin/galera_recovery"; then
VAR=$(cd ${cfg.package}/bin/..; ${cfg.package}/bin/galera_recovery); [[ $? -eq 0 ]] && export _WSREP_START_POSITION=$VAR || exit 1
fi
fi
# The last two environment variables are used for starting Galera clusters
exec ${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION
'';
postStart = let
# The super user account to use on *first* run of MySQL server
superUser = if isMariaDB then cfg.user else "root";
@ -481,8 +493,7 @@ in
Type = if hasNotify then "notify" else "simple";
Restart = "on-abort";
RestartSec = "5s";
# The last two environment variables are used for starting Galera clusters
ExecStart = "${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION";
# User and group
User = cfg.user;
Group = cfg.group;

@ -1,183 +0,0 @@
# pipewire service.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.pipewire;
enable32BitAlsaPlugins = cfg.alsa.support32Bit
&& pkgs.stdenv.isx86_64
&& pkgs.pkgsi686Linux.pipewire != null;
# The package doesn't output to $out/lib/pipewire directly so that the
# overlays can use the outputs to replace the originals in FHS environments.
#
# This doesn't work in general because of missing development information.
jack-libs = pkgs.runCommand "jack-libs" {} ''
mkdir -p "$out/lib"
ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire"
'';
in {
meta = {
maintainers = teams.freedesktop.members;
};
###### interface
options = {
services.pipewire = {
enable = mkEnableOption "pipewire service";
package = mkOption {
type = types.package;
default = pkgs.pipewire;
defaultText = "pkgs.pipewire";
example = literalExample "pkgs.pipewire";
description = ''
The pipewire derivation to use.
'';
};
socketActivation = mkOption {
default = true;
type = types.bool;
description = ''
Automatically run pipewire when connections are made to the pipewire socket.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Literal string to append to /etc/pipewire/pipewire.conf.
'';
};
sessionManager = mkOption {
type = types.nullOr types.string;
default = null;
example = literalExample ''"''${pipewire}/bin/pipewire-media-session"'';
description = ''
Path to the pipewire session manager executable.
'';
};
sessionManagerArguments = mkOption {
type = types.listOf types.string;
default = [];
example = literalExample ''[ "-p" "bluez5.msbc-support=true" ]'';
description = ''
Arguments passed to the pipewire session manager.
'';
};
alsa = {
enable = mkEnableOption "ALSA support";
support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
};
jack = {
enable = mkEnableOption "JACK audio emulation";
};
pulse = {
enable = mkEnableOption "PulseAudio server emulation";
};
};
};
###### implementation
config = mkIf cfg.enable {
assertions = [
{
assertion = cfg.pulse.enable -> !config.hardware.pulseaudio.enable;
message = "PipeWire based PulseAudio server emulation replaces PulseAudio. This option requires `hardware.pulseaudio.enable` to be set to false";
}
{
assertion = cfg.jack.enable -> !config.services.jack.jackd.enable;
message = "PipeWire based JACK emulation doesn't use the JACK service. This option requires `services.jack.jackd.enable` to be set to false";
}
];
services.pipewire.sessionManager = mkDefault "${cfg.package}/bin/pipewire-media-session";
environment.systemPackages = [ cfg.package ]
++ lib.optional cfg.jack.enable jack-libs;
systemd.packages = [ cfg.package ]
++ lib.optional cfg.pulse.enable cfg.package.pulse;
# PipeWire depends on DBUS but doesn't list it. Without this, booting
# into a terminal results in the service crashing with an error.
systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"];
systemd.user.services.pipewire.bindsTo = [ "dbus.service" ];
services.udev.packages = [ cfg.package ];
# If any paths are updated here they must also be updated in the package test.
environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
text = ''
pcm_type.pipewire {
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
${optionalString enable32BitAlsaPlugins
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
}
ctl_type.pipewire {
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
${optionalString enable32BitAlsaPlugins
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
}
'';
};
environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
};
environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
};
environment.sessionVariables.LD_LIBRARY_PATH =
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire";
environment.etc."pipewire/pipewire.conf" = {
# Adapted from src/daemon/pipewire.conf.in
text = ''
set-prop link.max-buffers 16 # version < 3 clients can't handle more
add-spa-lib audio.convert* audioconvert/libspa-audioconvert
add-spa-lib api.alsa.* alsa/libspa-alsa
add-spa-lib api.v4l2.* v4l2/libspa-v4l2
add-spa-lib api.libcamera.* libcamera/libspa-libcamera
add-spa-lib api.bluez5.* bluez5/libspa-bluez5
add-spa-lib api.vulkan.* vulkan/libspa-vulkan
add-spa-lib api.jack.* jack/libspa-jack
add-spa-lib support.* support/libspa-support
load-module libpipewire-module-rtkit # rt.prio=20 rt.time.soft=200000 rt.time.hard=200000
load-module libpipewire-module-protocol-native
load-module libpipewire-module-profiler
load-module libpipewire-module-metadata
load-module libpipewire-module-spa-device-factory
load-module libpipewire-module-spa-node-factory
load-module libpipewire-module-client-node
load-module libpipewire-module-client-device
load-module libpipewire-module-portal
load-module libpipewire-module-access
load-module libpipewire-module-adapter
load-module libpipewire-module-link-factory
load-module libpipewire-module-session-manager
create-object spa-node-factory factory.name=support.node.driver node.name=Dummy priority.driver=8000
exec ${cfg.sessionManager} ${lib.concatStringsSep " " cfg.sessionManagerArguments}
${cfg.extraConfig}
'';
};
environment.etc."pipewire/media-session.d/with-alsa" = mkIf cfg.alsa.enable { text = ""; };
environment.etc."pipewire/media-session.d/with-pulseaudio" = mkIf cfg.pulse.enable { text = ""; };
environment.etc."pipewire/media-session.d/with-jack" = mkIf cfg.jack.enable { text = ""; };
};
}

@ -0,0 +1,341 @@
# pipewire example session manager.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.pipewire.media-session;
enable32BitAlsaPlugins = cfg.alsa.support32Bit
&& pkgs.stdenv.isx86_64
&& pkgs.pkgsi686Linux.pipewire != null;
# Helpers for generating the pipewire JSON config file
mkSPAValueString = v:
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]"
else if lib.types.attrs.check v then
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}"
else lib.generators.mkValueStringDefault { } v;
mkSPAKeyValue = attrs: map (def: def.content) (
lib.sortProperties
(
lib.mapAttrsToList
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ] k} = ${mkSPAValueString (v._content or v)}")
attrs
)
);
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs);
in {
meta = {
maintainers = teams.freedesktop.members;
};
###### interface
options = {
services.pipewire.media-session = {
enable = mkOption {
type = types.bool;
default = config.services.pipewire.enable;
defaultText = "config.services.pipewire.enable";
description = "Example pipewire session manager";
};
package = mkOption {
type = types.package;
default = pkgs.pipewire.mediaSession;
example = literalExample "pkgs.pipewire.mediaSession";
description = ''
The pipewire-media-session derivation to use.
'';
};
config = mkOption {
type = types.attrs;
description = ''
Configuration for the media session core.
'';
default = {
# media-session config file
properties = {
# Properties to configure the session and some
# modules
#mem.mlock-all = false;
#context.profile.modules = "default,rtkit";
};
spa-libs = {
# Mapping from factory name to library.
"api.bluez5.*" = "bluez5/libspa-bluez5";
"api.alsa.*" = "alsa/libspa-alsa";
"api.v4l2.*" = "v4l2/libspa-v4l2";
"api.libcamera.*" = "libcamera/libspa-libcamera";
};
modules = {
# These are the modules that are enabled when a file with
# the key name is found in the media-session.d config directory.
# the default bundle is always enabled.
default = [
"flatpak" # manages flatpak access
"portal" # manage portal permissions
"v4l2" # video for linux udev detection
#"libcamera" # libcamera udev detection
"suspend-node" # suspend inactive nodes
"policy-node" # configure and link nodes
#"metadata" # export metadata API
#"default-nodes" # restore default nodes
#"default-profile" # restore default profiles
#"default-routes" # restore default route
#"streams-follow-default" # move streams when default changes
#"alsa-seq" # alsa seq midi support
#"alsa-monitor" # alsa udev detection
#"bluez5" # bluetooth support
#"restore-stream" # restore stream settings
];
"with-audio" = [
"metadata"
"default-nodes"
"default-profile"
"default-routes"
"alsa-seq"
"alsa-monitor"
];
"with-alsa" = [
"with-audio"
];
"with-jack" = [
"with-audio"
];
"with-pulseaudio" = [
"with-audio"
"bluez5"
"restore-stream"
"streams-follow-default"
];
};
};
};
alsaMonitorConfig = mkOption {
type = types.attrs;
description = ''
Configuration for the alsa monitor.
'';
default = {
# alsa-monitor config file
properties = {
#alsa.jack-device = true
};
rules = [
# an array of matches/actions to evaluate
{
# rules for matching a device or node. It is an array of
# properties that all need to match the regexp. If any of the
# matches work, the actions are executed for the object.
matches = [
{
# this matches all cards
device.name = "~alsa_card.*";
}
];
actions = {
# actions can update properties on the matched object.
update-props = {
api.alsa.use-acp = true;
#api.alsa.use-ucm = true;
#api.alsa.soft-mixer = false;
#api.alsa.ignore-dB = false;
#device.profile-set = "profileset-name";
#device.profile = "default profile name";
api.acp.auto-profile = false;
api.acp.auto-port = false;
#device.nick = "My Device";
};
};
}
{
matches = [
{
# matches all sinks
node.name = "~alsa_input.*";
}
{
# matches all sources
node.name = "~alsa_output.*";
}
];
actions = {
update-props = {
#node.nick = "My Node";
#node.nick = null;
#priority.driver = 100;
#priority.session = 100;
#node.pause-on-idle = false;
#resample.quality = 4;
#channelmix.normalize = false;
#channelmix.mix-lfe = false;
#audio.channels = 2;
#audio.format = "S16LE";
#audio.rate = 44100;
#audio.position = "FL,FR";
#api.alsa.period-size = 1024;
#api.alsa.headroom = 0;
#api.alsa.disable-mmap = false;
#api.alsa.disable-batch = false;
};
};
}
];
};
};
bluezMonitorConfig = mkOption {
type = types.attrs;
description = ''
Configuration for the bluez5 monitor.
'';
default = {
# bluez-monitor config file
properties = {
# msbc is not expected to work on all headset + adapter combinations.
#bluez5.msbc-support = true;
#bluez5.sbc-xq-support = true;
# Enabled headset roles (default: [ hsp_hs hfp_ag ]), this
# property only applies to native backend. Currently some headsets
# (Sony WH-1000XM3) are not working with both hsp_ag and hfp_ag
# enabled, disable either hsp_ag or hfp_ag to work around it.
#
# Supported headset roles: hsp_hs (HSP Headset),
# hsp_ag (HSP Audio Gateway),
# hfp_ag (HFP Audio Gateway)
#bluez5.headset-roles = [ "hsp_hs" "hsp_ag" "hfp_ag" ];
# Enabled A2DP codecs (default: all)
#bluez5.codecs = [ "sbc" "aac" "ldac" "aptx" "aptx_hd" ];
};
rules = [
# an array of matches/actions to evaluate
{
# rules for matching a device or node. It is an array of
# properties that all need to match the regexp. If any of the
# matches work, the actions are executed for the object.
matches = [
{
# this matches all cards
device.name = "~bluez_card.*";
}
];
actions = {
# actions can update properties on the matched object.
update-props = {
#device.nick = "My Device";
};
};
}
{
matches = [
{
# matches all sinks
node.name = "~bluez_input.*";
}
{
# matches all sources
node.name = "~bluez_output.*";
}
];
actions = {
update-props = {
#node.nick = "My Node"
#node.nick = null;
#priority.driver = 100;
#priority.session = 100;
#node.pause-on-idle = false;
#resample.quality = 4;
#channelmix.normalize = false;
#channelmix.mix-lfe = false;
};
};
}
];
};
};
v4l2MonitorConfig = mkOption {
type = types.attrs;
description = ''
Configuration for the V4L2 monitor.
'';
default = {
# v4l2-monitor config file
properties = {
};
rules = [
# an array of matches/actions to evaluate
{
# rules for matching a device or node. It is an array of
# properties that all need to match the regexp. If any of the
# matches work, the actions are executed for the object.
matches = [
{
# this matches all devices
device.name = "~v4l2_device.*";
}
];
actions = {
# actions can update properties on the matched object.
update-props = {
#device.nick = "My Device";
};
};
}
{
matches = [
{
# matches all sinks
node.name = "~v4l2_input.*";
}
{
# matches all sources
node.name = "~v4l2_output.*";
}
];
actions = {
update-props = {
#node.nick = "My Node";
#node.nick = null;
#priority.driver = 100;
#priority.session = 100;
#node.pause-on-idle = true;
};
};
}
];
};
};
};
};
###### implementation
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
services.pipewire.sessionManagerExecutable = "${cfg.package}/bin/pipewire-media-session";
environment.etc."pipewire/media-session.d/media-session.conf" = { text = toSPAJSON cfg.config; };
environment.etc."pipewire/media-session.d/v4l2-monitor.conf" = { text = toSPAJSON cfg.v4l2MonitorConfig; };
environment.etc."pipewire/media-session.d/with-alsa" = mkIf config.services.pipewire.alsa.enable { text = ""; };
environment.etc."pipewire/media-session.d/alsa-monitor.conf" = mkIf config.services.pipewire.alsa.enable { text = toSPAJSON cfg.alsaMonitorConfig; };
environment.etc."pipewire/media-session.d/with-pulseaudio" = mkIf config.services.pipewire.pulse.enable { text = ""; };
environment.etc."pipewire/media-session.d/bluez-monitor.conf" = mkIf config.services.pipewire.pulse.enable { text = toSPAJSON cfg.bluezMonitorConfig; };
environment.etc."pipewire/media-session.d/with-jack" = mkIf config.services.pipewire.jack.enable { text = ""; };
};
}
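
A configuration sketch exercising the modules above together; every option shown is
declared in one of the two files, and media-session.enable follows
services.pipewire.enable by default, as noted above:

{
  services.pipewire = {
    enable = true;
    alsa.enable = true;
    alsa.support32Bit = true;
    pulse.enable = true;
  };
}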

@ -0,0 +1,265 @@
# pipewire service.
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.pipewire;
enable32BitAlsaPlugins = cfg.alsa.support32Bit
&& pkgs.stdenv.isx86_64
&& pkgs.pkgsi686Linux.pipewire != null;
# The package doesn't output to $out/lib/pipewire directly so that the
# overlays can use the outputs to replace the originals in FHS environments.
#
# This doesn't work in general because of missing development information.
jack-libs = pkgs.runCommand "jack-libs" {} ''
mkdir -p "$out/lib"
ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire"
'';
# Helpers for generating the pipewire JSON config file
mkSPAValueString = v:
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]"
else if lib.types.attrs.check v then
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}"
else lib.generators.mkValueStringDefault { } v;
mkSPAKeyValue = attrs: map (def: def.content) (
lib.sortProperties
(
lib.mapAttrsToList
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ] k} = ${mkSPAValueString (v._content or v)}")
attrs
)
);
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs);
in {
meta = {
maintainers = teams.freedesktop.members;
};
###### interface
options = {
services.pipewire = {
enable = mkEnableOption "pipewire service";
package = mkOption {
type = types.package;
default = pkgs.pipewire;
defaultText = "pkgs.pipewire";
example = literalExample "pkgs.pipewire";
description = ''
The pipewire derivation to use.
'';
};
socketActivation = mkOption {
default = true;
type = types.bool;
description = ''
Automatically run pipewire when connections are made to the pipewire socket.
'';
};
config = mkOption {
type = types.attrs;
description = ''
Configuration for the pipewire daemon.
'';
default = {
properties = {
## set-prop is used to configure properties in the system
#
# "library.name.system" = "support/libspa-support";
# "context.data-loop.library.name.system" = "support/libspa-support";
"link.max-buffers" = 16; # version < 3 clients can't handle more than 16
#"mem.allow-mlock" = false;
#"mem.mlock-all" = true;
## https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/src/pipewire/pipewire.h#L93
#"log.level" = 2; # 5 is trace, which is verbose as hell, default is 2 which is warnings, 4 is debug output, 3 is info
## Properties for the DSP configuration
#
#"default.clock.rate" = 48000;
#"default.clock.quantum" = 1024;
#"default.clock.min-quantum" = 32;
#"default.clock.max-quantum" = 8192;
#"default.video.width" = 640;
#"default.video.height" = 480;
#"default.video.rate.num" = 25;
#"default.video.rate.denom" = 1;
};
spa-libs = {
## add-spa-lib <factory-name regex> <library-name>
#
# used to find SPA factory names. It maps a SPA factory name
# regular expression to a library name that should contain
# that factory.
#
"audio.convert*" = "audioconvert/libspa-audioconvert";
"api.alsa.*" = "alsa/libspa-alsa";
"api.v4l2.*" = "v4l2/libspa-v4l2";
"api.libcamera.*" = "libcamera/libspa-libcamera";
"api.bluez5.*" = "bluez5/libspa-bluez5";
"api.vulkan.*" = "vulkan/libspa-vulkan";
"api.jack.*" = "jack/libspa-jack";
"support.*" = "support/libspa-support";
# "videotestsrc" = "videotestsrc/libspa-videotestsrc";
# "audiotestsrc" = "audiotestsrc/libspa-audiotestsrc";
};
modules = {
## <module-name> = { [args = "<key>=<value> ..."]
#                    [flags = [ifexists]|[nofail]] }
#
# Loads a module with the given parameters.
# If ifexists is given, the module is ignored when it is not found.
# If nofail is given, module initialization failures are ignored.
#
libpipewire-module-rtkit = {
args = {
#rt.prio = 20;
#rt.time.soft = 200000;
#rt.time.hard = 200000;
#nice.level = -11;
};
flags = "ifexists|nofail";
};
libpipewire-module-protocol-native = { _priority = -100; _content = "null"; };
libpipewire-module-profiler = "null";
libpipewire-module-metadata = "null";
libpipewire-module-spa-device-factory = "null";
libpipewire-module-spa-node-factory = "null";
libpipewire-module-client-node = "null";
libpipewire-module-client-device = "null";
libpipewire-module-portal = "null";
libpipewire-module-access = {
args.access = {
allowed = ["${builtins.unsafeDiscardStringContext cfg.sessionManagerExecutable}"];
rejected = [];
restricted = [];
force = "flatpak";
};
};
libpipewire-module-adapter = "null";
libpipewire-module-link-factory = "null";
libpipewire-module-session-manager = "null";
};
objects = {
## create-object [-nofail] <factory-name> [<key>=<value> ...]
#
# Creates an object from a PipeWire factory with the given parameters.
# If -nofail is given, errors are ignored (and no object is created)
#
};
exec = {
## exec <program-name>
#
# Execute the given program. This is usually used to start the
# session manager. Run the session manager with -h for options.
#
"${builtins.unsafeDiscardStringContext cfg.sessionManagerExecutable}" = { args = "\"${lib.concatStringsSep " " cfg.sessionManagerArguments}\""; };
};
};
};
sessionManagerExecutable = mkOption {
type = types.str;
default = "";
example = literalExample ''${pkgs.pipewire.mediaSession}/bin/pipewire-media-session'';
description = ''
Path to the session manager executable.
'';
};
sessionManagerArguments = mkOption {
type = types.listOf types.str;
default = [];
example = literalExample ''["-p" "bluez5.msbc-support=true"]'';
description = ''
Arguments passed to the pipewire session manager.
'';
};
alsa = {
enable = mkEnableOption "ALSA support";
support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
};
jack = {
enable = mkEnableOption "JACK audio emulation";
};
pulse = {
enable = mkEnableOption "PulseAudio server emulation";
};
};
};
###### implementation
config = mkIf cfg.enable {
assertions = [
{
assertion = cfg.pulse.enable -> !config.hardware.pulseaudio.enable;
message = "PipeWire based PulseAudio server emulation replaces PulseAudio. This option requires `hardware.pulseaudio.enable` to be set to false";
}
{
assertion = cfg.jack.enable -> !config.services.jack.jackd.enable;
message = "PipeWire based JACK emulation doesn't use the JACK service. This option requires `services.jack.jackd.enable` to be set to false";
}
];
environment.systemPackages = [ cfg.package ]
++ lib.optional cfg.jack.enable jack-libs;
systemd.packages = [ cfg.package ]
++ lib.optional cfg.pulse.enable cfg.package.pulse;
# PipeWire depends on D-Bus but doesn't list it. Without this, booting
# into a terminal results in the service crashing with an error.
systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"];
systemd.user.services.pipewire.bindsTo = [ "dbus.service" ];
services.udev.packages = [ cfg.package ];
# If any paths are updated here they must also be updated in the package test.
environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
text = ''
pcm_type.pipewire {
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
${optionalString enable32BitAlsaPlugins
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
}
ctl_type.pipewire {
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
${optionalString enable32BitAlsaPlugins
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
}
'';
};
environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
};
environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
};
environment.sessionVariables.LD_LIBRARY_PATH =
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire";
# https://gitlab.freedesktop.org/pipewire/pipewire/-/issues/464#note_723554
systemd.user.services.pipewire.environment = {
"PIPEWIRE_LINK_PASSIVE" = "1";
"PIPEWIRE_CONFIG_FILE" = pkgs.writeText "pipewire.conf" (toSPAJSON cfg.config);
};
};
}
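
# Illustrative host-configuration sketch for the module above (values are
# examples, not defaults): PipeWire with ALSA, PulseAudio and JACK
# emulation enabled. The assertion above requires PulseAudio proper to be
# off when pulse.enable is set.
{ ... }:
{
  services.pipewire = {
    enable = true;
    alsa.enable = true;
    alsa.support32Bit = true; # only effective on x86_64 (see enable32BitAlsaPlugins)
    pulse.enable = true;
    jack.enable = true;
  };
  hardware.pulseaudio.enable = false;
}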

@ -3,21 +3,22 @@
with lib;
let
cfg = config.services.acpid;
canonicalHandlers = {
powerEvent = {
event = "button/power.*";
action = config.services.acpid.powerEventCommands;
action = cfg.powerEventCommands;
};
lidEvent = {
event = "button/lid.*";
action = config.services.acpid.lidEventCommands;
action = cfg.lidEventCommands;
};
acEvent = {
event = "ac_adapter.*";
action = config.services.acpid.acEventCommands;
action = cfg.acEventCommands;
};
};
@ -33,7 +34,7 @@ let
echo "event=${handler.event}" > $fn
echo "action=${pkgs.writeShellScriptBin "${name}.sh" handler.action }/bin/${name}.sh '%e'" >> $fn
'';
in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // config.services.acpid.handlers))
in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // cfg.handlers))
}
'';
@ -47,11 +48,7 @@ in
services.acpid = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the ACPI daemon.";
};
enable = mkEnableOption "the ACPI daemon";
logEvents = mkOption {
type = types.bool;
@ -129,26 +126,28 @@ in
###### implementation
config = mkIf config.services.acpid.enable {
config = mkIf cfg.enable {
systemd.services.acpid = {
description = "ACPI Daemon";
documentation = [ "man:acpid(8)" ];
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
path = [ pkgs.acpid ];
serviceConfig = {
Type = "forking";
ExecStart = escapeShellArgs
([ "${pkgs.acpid}/bin/acpid"
"--foreground"
"--netlink"
"--confdir" "${acpiConfDir}"
] ++ optional cfg.logEvents "--logevents"
);
};
unitConfig = {
ConditionVirtualization = "!systemd-nspawn";
ConditionPathExists = [ "/proc/acpi" ];
};
script = "acpid ${optionalString config.services.acpid.logEvents "--logevents"} --confdir ${acpiConfDir}";
};
};
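
# Illustrative sketch for the acpid module above: a custom handler merged
# into canonicalHandlers via cfg.handlers. The handler name and action are
# hypothetical; event/action mirror the shape of the canonical handlers.
{ ... }:
{
  services.acpid = {
    enable = true;
    logEvents = true;
    handlers.muteButton = {
      event = "button/mute.*";
      action = "echo mute >> /tmp/acpi-events";
    };
  };
}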

@ -1,12 +1,39 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.hardware.bluetooth;
bluez-bluetooth = cfg.package;
package = cfg.package;
inherit (lib)
mkDefault mkEnableOption mkIf mkOption
mkRenamedOptionModule mkRemovedOptionModule
concatStringsSep escapeShellArgs
optional optionals optionalAttrs recursiveUpdate types;
cfgFmt = pkgs.formats.ini { };
# bluez will complain if some of the sections are not found, so just make them
# empty (but present in the file) for now
defaults = {
General.ControllerMode = "dual";
Controller = { };
GATT = { };
Policy.AutoEnable = cfg.powerOnBoot;
};
hasDisabledPlugins = builtins.length cfg.disabledPlugins > 0;
in {
in
{
imports = [
(mkRenamedOptionModule [ "hardware" "bluetooth" "config" ] [ "hardware" "bluetooth" "settings" ])
(mkRemovedOptionModule [ "hardware" "bluetooth" "extraConfig" ] ''
Use hardware.bluetooth.settings instead.
This is part of the general move to use structured settings instead of raw
text for config as introduced by RFC0042:
https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md
'')
];
###### interface
@ -18,7 +45,7 @@ in {
hsphfpd.enable = mkEnableOption "support for hsphfpd[-prototype] implementation";
powerOnBoot = mkOption {
type = types.bool;
default = true;
description = "Whether to power up the default Bluetooth controller on boot.";
};
@ -38,8 +65,15 @@ in {
'';
};
config = mkOption {
type = with types; attrsOf (attrsOf (oneOf [ bool int str ]));
disabledPlugins = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Built-in plugins to disable";
};
settings = mkOption {
type = cfgFmt.type;
default = { };
example = {
General = {
ControllerMode = "bredr";
@ -47,79 +81,65 @@ in {
};
description = "Set configuration for system-wide bluetooth (/etc/bluetooth/main.conf).";
};
extraConfig = mkOption {
type = with types; nullOr lines;
default = null;
example = ''
[General]
ControllerMode = bredr
'';
description = ''
Set additional configuration for system-wide bluetooth (/etc/bluetooth/main.conf).
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
warnings = optional (cfg.extraConfig != null) "hardware.bluetooth.`extraConfig` is deprecated, please use hardware.bluetooth.`config`.";
environment.systemPackages = [ package ]
++ optional cfg.hsphfpd.enable pkgs.hsphfpd;
hardware.bluetooth.config = {
Policy = {
AutoEnable = mkDefault cfg.powerOnBoot;
};
};
environment.systemPackages = [ bluez-bluetooth ]
++ optionals cfg.hsphfpd.enable [ pkgs.hsphfpd ];
environment.etc."bluetooth/main.conf"= {
source = pkgs.writeText "main.conf"
(generators.toINI { } cfg.config + optionalString (cfg.extraConfig != null) cfg.extraConfig);
};
services.udev.packages = [ bluez-bluetooth ];
services.dbus.packages = [ bluez-bluetooth ]
++ optionals cfg.hsphfpd.enable [ pkgs.hsphfpd ];
systemd.packages = [ bluez-bluetooth ];
environment.etc."bluetooth/main.conf".source =
cfgFmt.generate "main.conf" (recursiveUpdate defaults cfg.settings);
services.udev.packages = [ package ];
services.dbus.packages = [ package ]
++ optional cfg.hsphfpd.enable pkgs.hsphfpd;
systemd.packages = [ package ];
systemd.services = {
bluetooth = {
bluetooth =
let
# `man bluetoothd` will refer to main.conf in the nix store but bluez
# will in fact load the configuration file at /etc/bluetooth/main.conf
# so force it here to avoid any ambiguity and things suddenly breaking
# if/when the bluez derivation is changed.
args = [ "-f" "/etc/bluetooth/main.conf" ]
++ optional hasDisabledPlugins
"--noplugin=${concatStringsSep "," cfg.disabledPlugins}";
in
{
wantedBy = [ "bluetooth.target" ];
aliases = [ "dbus-org.bluez.service" ];
serviceConfig.ExecStart = [
""
"${package}/libexec/bluetooth/bluetoothd ${escapeShellArgs args}"
];
# restarting can leave people without a mouse/keyboard
unitConfig.X-RestartIfChanged = false;
};
}
// (optionalAttrs cfg.hsphfpd.enable {
hsphfpd = {
after = [ "bluetooth.service" ];
requires = [ "bluetooth.service" ];
wantedBy = [ "bluetooth.target" ];
aliases = [ "dbus-org.bluez.service" ];
# restarting can leave people without a mouse/keyboard
unitConfig.X-RestartIfChanged = false;
description = "A prototype implementation used for connecting HSP/HFP Bluetooth devices";
serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/hsphfpd.pl";
};
}
// (optionalAttrs cfg.hsphfpd.enable {
hsphfpd = {
after = [ "bluetooth.service" ];
requires = [ "bluetooth.service" ];
wantedBy = [ "multi-user.target" ];
description = "A prototype implementation used for connecting HSP/HFP Bluetooth devices";
serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/hsphfpd.pl";
};
})
;
});
systemd.user.services = {
obex.aliases = [ "dbus-org.bluez.obex.service" ];
}
// (optionalAttrs cfg.hsphfpd.enable {
telephony_client = {
wantedBy = [ "default.target"];
description = "telephony_client for hsphfpd";
serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/telephony_client.pl";
};
})
;
// optionalAttrs cfg.hsphfpd.enable {
telephony_client = {
wantedBy = [ "default.target" ];
description = "telephony_client for hsphfpd";
serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/telephony_client.pl";
};
};
};
}
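
# Illustrative sketch for the bluetooth module above: structured settings
# are rendered to /etc/bluetooth/main.conf over the `defaults` attrset, and
# disabledPlugins becomes a --noplugin= argument to bluetoothd. The plugin
# name is an example.
{ ... }:
{
  hardware.bluetooth = {
    enable = true;
    powerOnBoot = false;             # feeds Policy.AutoEnable via `defaults`
    disabledPlugins = [ "sap" ];     # rendered as --noplugin=sap
    settings.General.ControllerMode = "bredr";
  };
}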

@ -0,0 +1,26 @@
{ config, lib, pkgs, ... }:
with lib;
let cfg = config.hardware.spacenavd;
in {
options = {
hardware.spacenavd = {
enable = mkEnableOption "spacenavd to support 3DConnexion devices";
};
};
config = mkIf cfg.enable {
systemd.user.services.spacenavd = {
description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion";
after = [ "syslog.target" ];
wantedBy = [ "graphical.target" ];
serviceConfig = {
ExecStart = "${pkgs.spacenavd}/bin/spacenavd -d -l syslog";
StandardError = "syslog";
};
};
};
}

@ -48,7 +48,7 @@ in {
systemd.services.trezord = {
description = "Trezor Bridge";
after = [ "systemd-udev-settle.service" "network.target" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [];
serviceConfig = {

@ -202,12 +202,26 @@ in
'';
};
extraRules = mkOption {
initrdRules = mkOption {
default = "";
example = ''
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:1D:60:B9:6D:4F", KERNEL=="eth*", NAME="my_fast_network_card"
'';
type = types.lines;
description = ''
<command>udev</command> rules to include in the initrd
<emphasis>only</emphasis>. They'll be written into file
<filename>99-local.rules</filename>. Thus they are read and applied
after the essential initrd rules.
'';
};
extraRules = mkOption {
default = "";
example = ''
ENV{ID_VENDOR_ID}=="046d", ENV{ID_MODEL_ID}=="0825", ENV{PULSE_IGNORE}="1"
'';
type = types.lines;
description = ''
Additional <command>udev</command> rules. They'll be written
into file <filename>99-local.rules</filename>. Thus they are
@ -284,6 +298,13 @@ in
boot.kernelParams = mkIf (!config.networking.usePredictableInterfaceNames) [ "net.ifnames=0" ];
boot.initrd.extraUdevRulesCommands = optionalString (cfg.initrdRules != "")
''
cat <<'EOF' > $out/99-local.rules
${cfg.initrdRules}
EOF
'';
environment.etc =
{
"udev/rules.d".source = udevRules;

@ -10,7 +10,10 @@ in {
config = lib.mkIf cfg.enable {
hardware.uinput.enable = true;
boot.extraModprobeConfig = lib.readFile "${pkgs.xow}/lib/modprobe.d/xow-blacklist.conf";
systemd.packages = [ pkgs.xow ];
systemd.services.xow.wantedBy = [ "multi-user.target" ];
services.udev.packages = [ pkgs.xow ];
};

@ -16,7 +16,14 @@ let
alias = domain: list: "${list}: \"|${pkgs.mlmmj}/bin/mlmmj-receive -L ${listDir domain list}/\"";
subjectPrefix = list: "[${list}]";
listAddress = domain: list: "${list}@${domain}";
customHeaders = domain: list: [ "List-Id: ${list}" "Reply-To: ${list}@${domain}" ];
customHeaders = domain: list: [
"List-Id: ${list}"
"Reply-To: ${list}@${domain}"
"List-Post: <mailto:${list}@${domain}>"
"List-Help: <mailto:${list}+help@${domain}>"
"List-Subscribe: <mailto:${list}+subscribe@${domain}>"
"List-Unsubscribe: <mailto:${list}+unsubscribe@${domain}>"
];
footer = domain: list: "To unsubscribe send a mail to ${list}+unsubscribe@${domain}";
createList = d: l:
let ctlDir = listCtl d l; in
@ -110,17 +117,29 @@ in
services.postfix = {
enable = true;
recipientDelimiter= "+";
extraMasterConf = ''
mlmmj unix - n n - - pipe flags=ORhu user=mlmmj argv=${pkgs.mlmmj}/bin/mlmmj-receive -F -L ${spoolDir}/$nexthop
'';
masterConfig.mlmmj = {
type = "unix";
private = true;
privileged = true;
chroot = false;
wakeup = 0;
command = "pipe";
args = [
"flags=ORhu"
"user=mlmmj"
"argv=${pkgs.mlmmj}/bin/mlmmj-receive"
"-F"
"-L"
"${spoolDir}/$nexthop"
];
};
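# For orientation (illustrative): the structured masterConfig.mlmmj entry
# above should be rendered by the postfix module into a master.cf service
# line roughly equivalent to the extraMasterConf text it replaces, with the
# pipe command and its flags/user/argv taken from `args`.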
extraAliases = concatMapLines (alias cfg.listDomain) cfg.mailLists;
extraConfig = ''
transport_maps = hash:${stateDir}/transports
virtual_alias_maps = hash:${stateDir}/virtuals
propagate_unmatched_extensions = virtual
'';
extraConfig = "propagate_unmatched_extensions = virtual";
virtual = concatMapLines (virtual cfg.listDomain) cfg.mailLists;
transport = concatMapLines (transport cfg.listDomain) cfg.mailLists;
};
environment.systemPackages = [ pkgs.mlmmj ];
@ -129,10 +148,8 @@ in
${pkgs.coreutils}/bin/mkdir -p ${stateDir} ${spoolDir}/${cfg.listDomain}
${pkgs.coreutils}/bin/chown -R ${cfg.user}:${cfg.group} ${spoolDir}
${concatMapLines (createList cfg.listDomain) cfg.mailLists}
echo "${concatMapLines (virtual cfg.listDomain) cfg.mailLists}" > ${stateDir}/virtuals
echo "${concatMapLines (transport cfg.listDomain) cfg.mailLists}" > ${stateDir}/transports
${pkgs.postfix}/bin/postmap ${stateDir}/virtuals
${pkgs.postfix}/bin/postmap ${stateDir}/transports
${pkgs.postfix}/bin/postmap /etc/postfix/virtual
${pkgs.postfix}/bin/postmap /etc/postfix/transport
'';
systemd.services.mlmmj-maintd = {

@ -1,69 +0,0 @@
worker_processes 3
listen ENV["UNICORN_PATH"] + "/tmp/sockets/gitlab.socket", :backlog => 1024
listen "/run/gitlab/gitlab.socket", :backlog => 1024
working_directory ENV["GITLAB_PATH"]
pid ENV["UNICORN_PATH"] + "/tmp/pids/unicorn.pid"
timeout 60
# combine Ruby 2.0.0dev or REE with "preload_app true" for memory savings
# http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
preload_app true
GC.respond_to?(:copy_on_write_friendly=) and
GC.copy_on_write_friendly = true
check_client_connection false
before_fork do |server, worker|
# the following is highly recommended for Rails + "preload_app true"
# as there's no need for the master process to hold a connection
defined?(ActiveRecord::Base) and
ActiveRecord::Base.connection.disconnect!
# The following is only recommended for memory/DB-constrained
# installations. It is not needed if your system can house
# twice as many worker_processes as you have configured.
#
# This allows a new master process to incrementally
# phase out the old master process with SIGTTOU to avoid a
# thundering herd (especially in the "preload_app false" case)
# when doing a transparent upgrade. The last worker spawned
# will then kill off the old master process with a SIGQUIT.
old_pid = "#{server.config[:pid]}.oldbin"
if old_pid != server.pid
begin
sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU
Process.kill(sig, File.read(old_pid).to_i)
rescue Errno::ENOENT, Errno::ESRCH
end
end
# Throttle the master from forking too quickly by sleeping. Due
# to the implementation of standard Unix signal handlers, this
# helps (but does not completely) prevent identical, repeated signals
# from being lost when the receiving process is busy.
# sleep 1
end
after_fork do |server, worker|
# per-process listener ports for debugging/admin/migrations
# addr = "127.0.0.1:#{9293 + worker.nr}"
# server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true)
# the following is *required* for Rails + "preload_app true",
defined?(ActiveRecord::Base) and
ActiveRecord::Base.establish_connection
# reset prometheus client, this will cause any opened metrics files to be closed
defined?(::Prometheus::Client.reinitialize_on_pid_change) &&
Prometheus::Client.reinitialize_on_pid_change
# if preload_app is true, then you may also want to check and
# restart any other shared sockets/descriptors such as Memcached,
# and Redis. TokyoCabinet file handles are safe to reuse
# between any number of forked children (assuming your kernel
# correctly implements pread()/pwrite() system calls)
end

@ -0,0 +1,92 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.etesync-dav;
in
{
options.services.etesync-dav = {
enable = mkEnableOption "etesync-dav";
host = mkOption {
type = types.str;
default = "localhost";
description = "The server host address.";
};
port = mkOption {
type = types.port;
default = 37358;
description = "The server host port.";
};
apiUrl = mkOption {
type = types.str;
default = "https://api.etesync.com/";
description = "The url to the etesync API.";
};
openFirewall = mkOption {
default = false;
type = types.bool;
description = "Whether to open the firewall for the specified port.";
};
sslCertificate = mkOption {
type = types.nullOr types.path;
default = null;
example = "/var/etesync.crt";
description = ''
Path to server SSL certificate. It will be copied into
etesync-dav's data directory.
'';
};
sslCertificateKey = mkOption {
type = types.nullOr types.path;
default = null;
example = "/var/etesync.key";
description = ''
Path to server SSL certificate key. It will be copied into
etesync-dav's data directory.
'';
};
};
config = mkIf cfg.enable {
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
systemd.services.etesync-dav = {
description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync";
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.etesync-dav ];
environment = {
ETESYNC_LISTEN_ADDRESS = cfg.host;
ETESYNC_LISTEN_PORT = toString cfg.port;
ETESYNC_URL = cfg.apiUrl;
ETESYNC_DATA_DIR = "/var/lib/etesync-dav";
};
serviceConfig = {
Type = "simple";
DynamicUser = true;
StateDirectory = "etesync-dav";
ExecStart = "${pkgs.etesync-dav}/bin/etesync-dav";
ExecStartPre = mkIf (cfg.sslCertificate != null || cfg.sslCertificateKey != null) (
pkgs.writers.writeBash "etesync-dav-copy-keys" ''
${optionalString (cfg.sslCertificate != null) ''
cp ${toString cfg.sslCertificate} $STATE_DIRECTORY/etesync.crt
''}
${optionalString (cfg.sslCertificateKey != null) ''
cp ${toString cfg.sslCertificateKey} $STATE_DIRECTORY/etesync.key
''}
''
);
Restart = "on-failure";
RestartSec = "30min 1s";
};
};
};
}
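
# Illustrative sketch for the etesync-dav module above; the certificate
# paths are placeholders and are copied into the service's state directory
# by the ExecStartPre script.
{ ... }:
{
  services.etesync-dav = {
    enable = true;
    openFirewall = true;
    sslCertificate = "/var/etesync.crt";
    sslCertificateKey = "/var/etesync.key";
  };
}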

@ -142,7 +142,7 @@ let
gitlabEnv = {
HOME = "${cfg.statePath}/home";
UNICORN_PATH = "${cfg.statePath}/";
PUMA_PATH = "${cfg.statePath}/";
GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
SCHEMA = "${cfg.statePath}/db/structure.sql";
GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
@ -424,7 +424,7 @@ in {
port = mkOption {
type = types.int;
default = 465;
default = 25;
description = "Port of the SMTP server for Gitlab.";
};
@ -454,7 +454,7 @@ in {
authentication = mkOption {
type = with types; nullOr str;
default = null;
description = "Authentitcation type to use, see http://api.rubyonrails.org/classes/ActionMailer/Base.html";
description = "Authentication type to use, see http://api.rubyonrails.org/classes/ActionMailer/Base.html";
};
enableStartTLSAuto = mkOption {
@ -641,6 +641,11 @@ in {
environment.systemPackages = [ pkgs.git gitlab-rake gitlab-rails cfg.packages.gitlab-shell ];
systemd.targets.gitlab = {
description = "Common target for all GitLab services.";
wantedBy = [ "multi-user.target" ];
};
# Redis is required for the sidekiq queue runner.
services.redis.enable = mkDefault true;
@ -655,36 +660,45 @@ in {
# here.
systemd.services.gitlab-postgresql = let pgsql = config.services.postgresql; in mkIf databaseActuallyCreateLocally {
after = [ "postgresql.service" ];
wantedBy = [ "multi-user.target" ];
path = [ pgsql.package ];
bindsTo = [ "postgresql.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = [
pgsql.package
pkgs.util-linux
];
script = ''
set -eu
PSQL="${pkgs.util-linux}/bin/runuser -u ${pgsql.superUser} -- psql --port=${toString pgsql.port}"
PSQL() {
psql --port=${toString pgsql.port} "$@"
}
$PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.databaseName}'" | grep -q 1 || $PSQL -tAc 'CREATE DATABASE "${cfg.databaseName}" OWNER "${cfg.databaseUsername}"'
current_owner=$($PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.databaseName}'")
PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.databaseName}'" | grep -q 1 || PSQL -tAc 'CREATE DATABASE "${cfg.databaseName}" OWNER "${cfg.databaseUsername}"'
current_owner=$(PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.databaseName}'")
if [[ "$current_owner" != "${cfg.databaseUsername}" ]]; then
$PSQL -tAc 'ALTER DATABASE "${cfg.databaseName}" OWNER TO "${cfg.databaseUsername}"'
PSQL -tAc 'ALTER DATABASE "${cfg.databaseName}" OWNER TO "${cfg.databaseUsername}"'
if [[ -e "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}" ]]; then
echo "Reassigning ownership of database ${cfg.databaseName} to user ${cfg.databaseUsername} failed on last boot. Failing..."
exit 1
fi
touch "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}"
$PSQL "${cfg.databaseName}" -tAc "REASSIGN OWNED BY \"$current_owner\" TO \"${cfg.databaseUsername}\""
PSQL "${cfg.databaseName}" -tAc "REASSIGN OWNED BY \"$current_owner\" TO \"${cfg.databaseUsername}\""
rm "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}"
fi
$PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS pg_trgm"
$PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS btree_gist;"
PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS pg_trgm"
PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS btree_gist;"
'';
serviceConfig = {
User = pgsql.superUser;
Type = "oneshot";
RemainAfterExit = true;
};
};
# Use postfix to send out mails.
services.postfix.enable = mkDefault true;
services.postfix.enable = mkDefault (cfg.smtp.enable && cfg.smtp.address == "localhost");
users.users.${cfg.user} =
{ group = cfg.group;
@ -703,7 +717,6 @@ in {
"d ${cfg.statePath} 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/config/initializers 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/db 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/log 0750 ${cfg.user} ${cfg.group} -"
"d ${cfg.statePath}/repositories 2770 ${cfg.user} ${cfg.group} -"
@ -726,13 +739,156 @@ in {
"L+ /run/gitlab/uploads - - - - ${cfg.statePath}/uploads"
"L+ /run/gitlab/shell-config.yml - - - - ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)}"
"L+ ${cfg.statePath}/config/unicorn.rb - - - - ${./defaultUnicornConfig.rb}"
];
systemd.services.gitlab-config = {
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = with pkgs; [
jq
openssl
replace
git
];
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
TimeoutSec = "infinity";
Restart = "on-failure";
WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
RemainAfterExit = true;
ExecStartPre = let
preStartFullPrivileges = ''
shopt -s dotglob nullglob
set -eu
chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/*
if [[ -n "$(ls -A '${cfg.statePath}'/config/)" ]]; then
chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/config/*
fi
'';
in "+${pkgs.writeShellScript "gitlab-pre-start-full-privileges" preStartFullPrivileges}";
ExecStart = pkgs.writeShellScript "gitlab-config" ''
set -eu
umask u=rwx,g=rx,o=
cp -f ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
rm -rf ${cfg.statePath}/db/*
rm -f ${cfg.statePath}/lib
find '${cfg.statePath}/config/' -maxdepth 1 -mindepth 1 -type d -execdir rm -rf {} \;
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
ln -sf ${extraGitlabRb} ${cfg.statePath}/config/initializers/extra-gitlab.rb
${cfg.packages.gitlab-shell}/bin/install
${optionalString cfg.smtp.enable ''
install -m u=rw ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
${optionalString (cfg.smtp.passwordFile != null) ''
smtp_password=$(<'${cfg.smtp.passwordFile}')
replace-literal -e '@smtpPassword@' "$smtp_password" '${cfg.statePath}/config/initializers/smtp_settings.rb'
''}
''}
(
umask u=rwx,g=,o=
openssl rand -hex 32 > ${cfg.statePath}/gitlab_shell_secret
rm -f '${cfg.statePath}/config/database.yml'
${if cfg.databasePasswordFile != null then ''
export db_password="$(<'${cfg.databasePasswordFile}')"
if [[ -z "$db_password" ]]; then
>&2 echo "Database password was an empty string!"
exit 1
fi
jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
'.production.password = $ENV.db_password' \
>'${cfg.statePath}/config/database.yml'
''
else ''
jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
>'${cfg.statePath}/config/database.yml'
''
}
${utils.genJqSecretsReplacementSnippet
gitlabConfig
"${cfg.statePath}/config/gitlab.yml"
}
rm -f '${cfg.statePath}/config/secrets.yml'
export secret="$(<'${cfg.secrets.secretFile}')"
export db="$(<'${cfg.secrets.dbFile}')"
export otp="$(<'${cfg.secrets.otpFile}')"
export jws="$(<'${cfg.secrets.jwsFile}')"
jq -n '{production: {secret_key_base: $ENV.secret,
otp_key_base: $ENV.otp,
db_key_base: $ENV.db,
openid_connect_signing_key: $ENV.jws}}' \
> '${cfg.statePath}/config/secrets.yml'
)
# We remove potentially broken links to old gitlab-shell versions
rm -Rf ${cfg.statePath}/repositories/**/*.git/hooks
git config --global core.autocrlf "input"
'';
};
};
systemd.services.gitlab-db-config = {
after = [ "gitlab-config.service" "gitlab-postgresql.service" "postgresql.service" ];
bindsTo = [
"gitlab-config.service"
] ++ optional (cfg.databaseHost == "") "postgresql.service"
++ optional databaseActuallyCreateLocally "gitlab-postgresql.service";
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
TimeoutSec = "infinity";
Restart = "on-failure";
WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
RemainAfterExit = true;
ExecStart = pkgs.writeShellScript "gitlab-db-config" ''
set -eu
umask u=rwx,g=rx,o=
initial_root_password="$(<'${cfg.initialRootPasswordFile}')"
${gitlab-rake}/bin/gitlab-rake gitlab:db:configure GITLAB_ROOT_PASSWORD="$initial_root_password" \
GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}' > /dev/null
'';
};
};
systemd.services.gitlab-sidekiq = {
after = [ "network.target" "redis.service" "gitlab.service" ];
wantedBy = [ "multi-user.target" ];
after = [
"network.target"
"redis.service"
"postgresql.service"
"gitlab-config.service"
"gitlab-db-config.service"
];
bindsTo = [
"redis.service"
"gitlab-config.service"
"gitlab-db-config.service"
] ++ optional (cfg.databaseHost == "") "postgresql.service";
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
environment = gitlabEnv;
path = with pkgs; [
postgresqlPackage
@ -758,9 +914,10 @@ in {
};
systemd.services.gitaly = {
after = [ "network.target" "gitlab.service" ];
bindsTo = [ "gitlab.service" ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "gitlab-config.service" ];
bindsTo = [ "gitlab-config.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = with pkgs; [
openssh
procps # See https://gitlab.com/gitlab-org/gitaly/issues/1562
@ -783,8 +940,10 @@ in {
systemd.services.gitlab-pages = mkIf (gitlabConfig.production.pages.enabled or false) {
description = "GitLab static pages daemon";
after = [ "network.target" "redis.service" "gitlab.service" ]; # gitlab.service creates configs
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "gitlab-config.service" ];
bindsTo = [ "gitlab-config.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = [ pkgs.unzip ];
@ -803,7 +962,8 @@ in {
systemd.services.gitlab-workhorse = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = with pkgs; [
exiftool
git
@ -832,8 +992,10 @@ in {
systemd.services.gitlab-mailroom = mkIf (gitlabConfig.production.incoming_email.enabled or false) {
description = "GitLab incoming mail daemon";
after = [ "network.target" "redis.service" "gitlab.service" ]; # gitlab.service creates configs
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "redis.service" "gitlab-config.service" ];
bindsTo = [ "gitlab-config.service" ];
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
environment = gitlabEnv;
serviceConfig = {
Type = "simple";
@ -842,15 +1004,26 @@ in {
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/bundle exec mail_room -c ${cfg.packages.gitlab}/share/gitlab/config.dist/mail_room.yml";
ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/bundle exec mail_room -c ${cfg.statePath}/config/mail_room.yml";
WorkingDirectory = gitlabEnv.HOME;
};
};
systemd.services.gitlab = {
after = [ "gitlab-workhorse.service" "network.target" "gitlab-postgresql.service" "redis.service" ];
requires = [ "gitlab-sidekiq.service" ];
wantedBy = [ "multi-user.target" ];
after = [
"gitlab-workhorse.service"
"network.target"
"redis.service"
"gitlab-config.service"
"gitlab-db-config.service"
];
bindsTo = [
"redis.service"
"gitlab-config.service"
"gitlab-db-config.service"
] ++ optional (cfg.databaseHost == "") "postgresql.service";
wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
environment = gitlabEnv;
path = with pkgs; [
postgresqlPackage
@ -868,96 +1041,7 @@ in {
TimeoutSec = "infinity";
Restart = "on-failure";
WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
ExecStartPre = let
preStartFullPrivileges = ''
shopt -s dotglob nullglob
set -eu
chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/*
chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/config/*
'';
preStart = ''
set -eu
cp -f ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
rm -rf ${cfg.statePath}/db/*
rm -rf ${cfg.statePath}/config/initializers/*
rm -f ${cfg.statePath}/lib
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
ln -sf ${extraGitlabRb} ${cfg.statePath}/config/initializers/extra-gitlab.rb
${cfg.packages.gitlab-shell}/bin/install
${optionalString cfg.smtp.enable ''
install -m u=rw ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
${optionalString (cfg.smtp.passwordFile != null) ''
smtp_password=$(<'${cfg.smtp.passwordFile}')
${pkgs.replace}/bin/replace-literal -e '@smtpPassword@' "$smtp_password" '${cfg.statePath}/config/initializers/smtp_settings.rb'
''}
''}
(
umask u=rwx,g=,o=
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/gitlab_shell_secret
if [[ -h '${cfg.statePath}/config/database.yml' ]]; then
rm '${cfg.statePath}/config/database.yml'
fi
${if cfg.databasePasswordFile != null then ''
export db_password="$(<'${cfg.databasePasswordFile}')"
if [[ -z "$db_password" ]]; then
>&2 echo "Database password was an empty string!"
exit 1
fi
${pkgs.jq}/bin/jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
'.production.password = $ENV.db_password' \
>'${cfg.statePath}/config/database.yml'
''
else ''
${pkgs.jq}/bin/jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
>'${cfg.statePath}/config/database.yml'
''
}
${utils.genJqSecretsReplacementSnippet
gitlabConfig
"${cfg.statePath}/config/gitlab.yml"
}
if [[ -h '${cfg.statePath}/config/secrets.yml' ]]; then
rm '${cfg.statePath}/config/secrets.yml'
fi
export secret="$(<'${cfg.secrets.secretFile}')"
export db="$(<'${cfg.secrets.dbFile}')"
export otp="$(<'${cfg.secrets.otpFile}')"
export jws="$(<'${cfg.secrets.jwsFile}')"
${pkgs.jq}/bin/jq -n '{production: {secret_key_base: $ENV.secret,
otp_key_base: $ENV.otp,
db_key_base: $ENV.db,
openid_connect_signing_key: $ENV.jws}}' \
> '${cfg.statePath}/config/secrets.yml'
)
initial_root_password="$(<'${cfg.initialRootPasswordFile}')"
${gitlab-rake}/bin/gitlab-rake gitlab:db:configure GITLAB_ROOT_PASSWORD="$initial_root_password" \
GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}' > /dev/null
# We remove potentially broken links to old gitlab-shell versions
rm -Rf ${cfg.statePath}/repositories/**/*.git/hooks
${pkgs.git}/bin/git config --global core.autocrlf "input"
'';
in [
"+${pkgs.writeShellScript "gitlab-pre-start-full-privileges" preStartFullPrivileges}"
"${pkgs.writeShellScript "gitlab-pre-start" preStart}"
];
ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/unicorn -c ${cfg.statePath}/config/unicorn.rb -E production";
ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/puma -C ${cfg.statePath}/config/puma.rb -e production";
};
};

@ -115,4 +115,6 @@ in
};
};
};
meta.maintainers = with lib.maintainers; [ erictapen ];
}

@ -0,0 +1,164 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.lifecycled;
# TODO: Add the ability to extend this with an rfc 42-like interface.
# In the meantime, one can modify the environment (as
# long as it's not overriding anything from here) with
# systemd.services.lifecycled.serviceConfig.Environment
configFile = pkgs.writeText "lifecycled" ''
LIFECYCLED_HANDLER=${cfg.handler}
${lib.optionalString (cfg.cloudwatchGroup != null) "LIFECYCLED_CLOUDWATCH_GROUP=${cfg.cloudwatchGroup}"}
${lib.optionalString (cfg.cloudwatchStream != null) "LIFECYCLED_CLOUDWATCH_STREAM=${cfg.cloudwatchStream}"}
${lib.optionalString cfg.debug "LIFECYCLED_DEBUG=${lib.boolToString cfg.debug}"}
${lib.optionalString (cfg.instanceId != null) "LIFECYCLED_INSTANCE_ID=${cfg.instanceId}"}
${lib.optionalString cfg.json "LIFECYCLED_JSON=${lib.boolToString cfg.json}"}
${lib.optionalString cfg.noSpot "LIFECYCLED_NO_SPOT=${lib.boolToString cfg.noSpot}"}
${lib.optionalString (cfg.snsTopic != null) "LIFECYCLED_SNS_TOPIC=${cfg.snsTopic}"}
${lib.optionalString (cfg.awsRegion != null) "AWS_REGION=${cfg.awsRegion}"}
'';
in
{
meta.maintainers = with maintainers; [ cole-h grahamc ];
options = {
services.lifecycled = {
enable = mkEnableOption "lifecycled";
queueCleaner = {
enable = mkEnableOption "lifecycled-queue-cleaner";
frequency = mkOption {
type = types.str;
default = "hourly";
description = ''
How often to trigger the queue cleaner.
NOTE: This string should be a valid value for a systemd
timer's <literal>OnCalendar</literal> configuration. See
<citerefentry><refentrytitle>systemd.timer</refentrytitle><manvolnum>5</manvolnum></citerefentry>
for more information.
'';
};
parallel = mkOption {
type = types.ints.unsigned;
default = 20;
description = ''
The number of parallel deletes to run.
'';
};
};
instanceId = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
The instance ID to listen for events for.
'';
};
snsTopic = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
The SNS topic that receives events.
'';
};
noSpot = mkOption {
type = types.bool;
default = false;
description = ''
Disable the spot termination listener.
'';
};
handler = mkOption {
type = types.path;
description = ''
The script to invoke to handle events.
'';
};
json = mkOption {
type = types.bool;
default = false;
description = ''
Enable JSON logging.
'';
};
cloudwatchGroup = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Write logs to a specific Cloudwatch Logs group.
'';
};
cloudwatchStream = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Write logs to a specific Cloudwatch Logs stream. Defaults to the instance ID.
'';
};
debug = mkOption {
type = types.bool;
default = false;
description = ''
Enable debugging information.
'';
};
# XXX: Can be removed if / when
# https://github.com/buildkite/lifecycled/pull/91 is merged.
awsRegion = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
The region used for accessing AWS services.
'';
};
};
};
### Implementation ###
config = mkMerge [
(mkIf cfg.enable {
environment.etc."lifecycled".source = configFile;
systemd.packages = [ pkgs.lifecycled ];
systemd.services.lifecycled = {
wantedBy = [ "network-online.target" ];
restartTriggers = [ configFile ];
};
})
(mkIf cfg.queueCleaner.enable {
systemd.services.lifecycled-queue-cleaner = {
description = "Lifecycle Daemon Queue Cleaner";
environment = optionalAttrs (cfg.awsRegion != null) { AWS_REGION = cfg.awsRegion; };
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.lifecycled}/bin/lifecycled-queue-cleaner -parallel ${toString cfg.queueCleaner.parallel}";
};
};
systemd.timers.lifecycled-queue-cleaner = {
description = "Lifecycle Daemon Queue Cleaner Timer";
wantedBy = [ "timers.target" ];
after = [ "network-online.target" ];
timerConfig = {
Unit = "lifecycled-queue-cleaner.service";
OnCalendar = "${cfg.queueCleaner.frequency}";
};
};
})
];
}
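
# Illustrative sketch for the lifecycled module above; the SNS topic ARN is
# a placeholder and the handler script is hypothetical (`handler` accepts
# any path, e.g. one produced by pkgs.writeShellScript).
{ pkgs, ... }:
{
  services.lifecycled = {
    enable = true;
    awsRegion = "us-east-1";
    snsTopic = "arn:aws:sns:us-east-1:000000000000:lifecycle-hooks";
    handler = pkgs.writeShellScript "lifecycle-handler" ''
      echo "received lifecycle event: $*"
    '';
    queueCleaner.enable = true;
  };
}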

@ -21,13 +21,45 @@ in
};
dates = mkOption {
type = types.str;
default = "03:15";
example = "weekly";
description = ''
How often or when garbage collection is performed. For most desktop and
server systems, running garbage collection once a week is sufficient.
The format is described in
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
'';
};
randomizedDelaySec = mkOption {
default = "0";
type = types.str;
example = "45min";
description = ''
Add a randomized delay before each garbage collection.
The delay will be chosen between zero and this value.
This value must be a time span in the format specified by
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
'';
};
persistent = mkOption {
default = true;
type = types.bool;
example = false;
description = ''
Takes a boolean argument. If true, the time when the service
unit was last triggered is stored on disk. When the timer is
activated, the service unit is triggered immediately if it
would have been triggered at least once during the time when
the timer was inactive. Such triggering is nonetheless
subject to the delay imposed by RandomizedDelaySec=. This is
useful to catch up on missed runs of the service when the
system was powered down.
'';
};
@ -50,11 +82,18 @@ in
config = {
systemd.services.nix-gc =
{ description = "Nix Garbage Collector";
script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
startAt = optional cfg.automatic cfg.dates;
systemd.services.nix-gc = {
description = "Nix Garbage Collector";
script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
startAt = optional cfg.automatic cfg.dates;
};
systemd.timers.nix-gc = lib.mkIf cfg.automatic {
timerConfig = {
RandomizedDelaySec = cfg.randomizedDelaySec;
Persistent = cfg.persistent;
};
};
};
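
# Illustrative sketch tying the options above together (the nix.gc prefix
# is assumed from the module's conventional location): weekly collection
# with a randomized start and catch-up after downtime.
{ ... }:
{
  nix.gc = {
    automatic = true;
    dates = "weekly";
    randomizedDelaySec = "45min";
    persistent = true;
  };
}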

@ -0,0 +1,82 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.plikd;
format = pkgs.formats.toml {};
plikdCfg = format.generate "plikd.cfg" cfg.settings;
in
{
options = {
services.plikd = {
enable = mkEnableOption "the plikd server";
openFirewall = mkOption {
type = types.bool;
default = false;
description = "Open ports in the firewall for the plikd.";
};
settings = mkOption {
type = format.type;
default = {};
description = ''
Configuration for plikd, see <link xlink:href="https://github.com/root-gg/plik/blob/master/server/plikd.cfg"/>
for supported values.
'';
};
};
};
config = mkIf cfg.enable {
services.plikd.settings = mapAttrs (name: mkDefault) {
ListenPort = 8080;
ListenAddress = "localhost";
DataBackend = "file";
DataBackendConfig = {
Directory = "/var/lib/plikd";
};
MetadataBackendConfig = {
Driver = "sqlite3";
ConnectionString = "/var/lib/plikd/plik.db";
};
};
systemd.services.plikd = {
description = "Plikd file sharing server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
ExecStart = "${pkgs.plikd}/bin/plikd --config ${plikdCfg}";
Restart = "on-failure";
StateDirectory = "plikd";
LogsDirectory = "plikd";
DynamicUser = true;
# Basic hardening
NoNewPrivileges = "yes";
PrivateTmp = "yes";
PrivateDevices = "yes";
DevicePolicy = "closed";
ProtectSystem = "strict";
ProtectHome = "read-only";
ProtectControlGroups = "yes";
ProtectKernelModules = "yes";
ProtectKernelTunables = "yes";
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
RestrictNamespaces = "yes";
RestrictRealtime = "yes";
RestrictSUIDSGID = "yes";
MemoryDenyWriteExecute = "yes";
LockPersonality = "yes";
};
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.settings.ListenPort ];
};
};
}
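
# Illustrative sketch for the plikd module above: because the module
# applies mkDefault to its top-level settings, individual keys can be
# overridden from a host configuration.
{ ... }:
{
  services.plikd = {
    enable = true;
    openFirewall = true;
    settings.ListenAddress = "0.0.0.0";
    settings.ListenPort = 9000;
  };
}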

@ -65,10 +65,18 @@ let
dashboardFile = pkgs.writeText "dashboard.yaml" (builtins.toJSON dashboardConfiguration);
notifierConfiguration = {
apiVersion = 1;
notifiers = cfg.provision.notifiers;
};
notifierFile = pkgs.writeText "notifier.yaml" (builtins.toJSON notifierConfiguration);
provisionConfDir = pkgs.runCommand "grafana-provisioning" { } ''
mkdir -p $out/{datasources,dashboards}
mkdir -p $out/{datasources,dashboards,notifiers}
ln -sf ${datasourceFile} $out/datasources/datasource.yaml
ln -sf ${dashboardFile} $out/dashboards/dashboard.yaml
ln -sf ${notifierFile} $out/notifiers/notifier.yaml
'';
# Get a submodule without any embedded metadata:
@ -79,80 +87,80 @@ let
options = {
name = mkOption {
type = types.str;
description = "Name of the datasource. Required";
description = "Name of the datasource. Required.";
};
type = mkOption {
type = types.enum ["graphite" "prometheus" "cloudwatch" "elasticsearch" "influxdb" "opentsdb" "mysql" "mssql" "postgres" "loki"];
description = "Datasource type. Required";
description = "Datasource type. Required.";
};
access = mkOption {
type = types.enum ["proxy" "direct"];
default = "proxy";
description = "Access mode. proxy or direct (Server or Browser in the UI). Required";
description = "Access mode. proxy or direct (Server or Browser in the UI). Required.";
};
orgId = mkOption {
type = types.int;
default = 1;
description = "Org id. will default to orgId 1 if not specified";
description = "Org id. will default to orgId 1 if not specified.";
};
url = mkOption {
type = types.str;
description = "Url of the datasource";
description = "Url of the datasource.";
};
password = mkOption {
type = types.nullOr types.str;
default = null;
description = "Database password, if used";
description = "Database password, if used.";
};
user = mkOption {
type = types.nullOr types.str;
default = null;
description = "Database user, if used";
description = "Database user, if used.";
};
database = mkOption {
type = types.nullOr types.str;
default = null;
description = "Database name, if used";
description = "Database name, if used.";
};
basicAuth = mkOption {
type = types.nullOr types.bool;
default = null;
description = "Enable/disable basic auth";
description = "Enable/disable basic auth.";
};
basicAuthUser = mkOption {
type = types.nullOr types.str;
default = null;
description = "Basic auth username";
description = "Basic auth username.";
};
basicAuthPassword = mkOption {
type = types.nullOr types.str;
default = null;
description = "Basic auth password";
description = "Basic auth password.";
};
withCredentials = mkOption {
type = types.bool;
default = false;
description = "Enable/disable with credentials headers";
description = "Enable/disable with credentials headers.";
};
isDefault = mkOption {
type = types.bool;
default = false;
description = "Mark as default datasource. Max one per org";
description = "Mark as default datasource. Max one per org.";
};
jsonData = mkOption {
type = types.nullOr types.attrs;
default = null;
description = "Datasource specific configuration";
description = "Datasource specific configuration.";
};
secureJsonData = mkOption {
type = types.nullOr types.attrs;
default = null;
description = "Datasource specific secure configuration";
description = "Datasource specific secure configuration.";
};
version = mkOption {
type = types.int;
default = 1;
description = "Version";
description = "Version.";
};
editable = mkOption {
type = types.bool;
@ -168,41 +176,99 @@ let
name = mkOption {
type = types.str;
default = "default";
description = "Provider name";
description = "Provider name.";
};
orgId = mkOption {
type = types.int;
default = 1;
description = "Organization ID";
description = "Organization ID.";
};
folder = mkOption {
type = types.str;
default = "";
description = "Add dashboards to the specified folder";
description = "Add dashboards to the specified folder.";
};
type = mkOption {
type = types.str;
default = "file";
description = "Dashboard provider type";
description = "Dashboard provider type.";
};
disableDeletion = mkOption {
type = types.bool;
default = false;
description = "Disable deletion when JSON file is removed";
description = "Disable deletion when JSON file is removed.";
};
updateIntervalSeconds = mkOption {
type = types.int;
default = 10;
description = "How often Grafana will scan for changed dashboards";
description = "How often Grafana will scan for changed dashboards.";
};
options = {
path = mkOption {
type = types.path;
description = "Path grafana will watch for dashboards";
description = "Path grafana will watch for dashboards.";
};
};
};
};
grafanaTypes.notifierConfig = types.submodule {
options = {
name = mkOption {
type = types.str;
default = "default";
description = "Notifier name.";
};
type = mkOption {
type = types.enum ["dingding" "discord" "email" "googlechat" "hipchat" "kafka" "line" "teams" "opsgenie" "pagerduty" "prometheus-alertmanager" "pushover" "sensu" "sensugo" "slack" "telegram" "threema" "victorops" "webhook"];
description = "Notifier type.";
};
uid = mkOption {
type = types.str;
description = "Unique notifier identifier.";
};
org_id = mkOption {
type = types.int;
default = 1;
description = "Organization ID.";
};
org_name = mkOption {
type = types.str;
default = "Main Org.";
description = "Organization name.";
};
is_default = mkOption {
type = types.bool;
description = "Is the default notifier.";
default = false;
};
send_reminder = mkOption {
type = types.bool;
default = true;
description = "Should the notifier be sent reminder notifications while alerts continue to fire.";
};
frequency = mkOption {
type = types.str;
default = "5m";
description = "How frequently should the notifier be sent reminders.";
};
disable_resolve_message = mkOption {
type = types.bool;
default = false;
description = "Turn off the message that sends when an alert returns to OK.";
};
settings = mkOption {
type = types.nullOr types.attrs;
default = null;
description = "Settings for the notifier type.";
};
secure_settings = mkOption {
type = types.nullOr types.attrs;
default = null;
description = "Secure settings for the notifier type.";
};
};
};
in {
options.services.grafana = {
enable = mkEnableOption "grafana";
@ -337,17 +403,23 @@ in {
provision = {
enable = mkEnableOption "provision";
datasources = mkOption {
description = "Grafana datasources configuration";
description = "Grafana datasources configuration.";
default = [];
type = types.listOf grafanaTypes.datasourceConfig;
apply = x: map _filter x;
};
dashboards = mkOption {
description = "Grafana dashboard configuration";
description = "Grafana dashboard configuration.";
default = [];
type = types.listOf grafanaTypes.dashboardConfig;
apply = x: map _filter x;
};
notifiers = mkOption {
description = "Grafana notifier configuration.";
default = [];
type = types.listOf grafanaTypes.notifierConfig;
apply = x: map _filter x;
};
};
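# Illustrative provisioning sketch matching the notifier submodule above
# (uid and address are placeholders; note the warning further below about
# secure_settings being stored as plaintext in the Nix store):
#
#   services.grafana.provision = {
#     enable = true;
#     notifiers = [
#       {
#         name = "ops-email";
#         type = "email";
#         uid = "ops-email-1";
#         settings.addresses = "ops@example.org";
#       }
#     ];
#   };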
security = {
@ -391,12 +463,12 @@ in {
smtp = {
enable = mkEnableOption "smtp";
host = mkOption {
description = "Host to connect to";
description = "Host to connect to.";
default = "localhost:25";
type = types.str;
};
user = mkOption {
description = "User used for authentication";
description = "User used for authentication.";
default = "";
type = types.str;
};
@ -417,7 +489,7 @@ in {
type = types.nullOr types.path;
};
fromAddress = mkOption {
description = "Email address used for sending";
description = "Email address used for sending.";
default = "admin@grafana.localhost";
type = types.str;
};
@ -425,7 +497,7 @@ in {
users = {
allowSignUp = mkOption {
description = "Disable user signup / registration";
description = "Disable user signup / registration.";
default = false;
type = types.bool;
};
@ -451,17 +523,17 @@ in {
auth.anonymous = {
enable = mkOption {
description = "Whether to allow anonymous access";
description = "Whether to allow anonymous access.";
default = false;
type = types.bool;
};
org_name = mkOption {
description = "Which organization to allow anonymous access to";
description = "Which organization to allow anonymous access to.";
default = "Main Org.";
type = types.str;
};
org_role = mkOption {
description = "Which role anonymous users have in the organization";
description = "Which role anonymous users have in the organization.";
default = "Viewer";
type = types.str;
};
@ -470,7 +542,7 @@ in {
analytics.reporting = {
enable = mkOption {
description = "Whether to allow anonymous usage reporting to stats.grafana.net";
description = "Whether to allow anonymous usage reporting to stats.grafana.net.";
default = true;
type = types.bool;
};
@ -496,6 +568,9 @@ in {
(optional (
any (x: x.password != null || x.basicAuthPassword != null || x.secureJsonData != null) cfg.provision.datasources
) "Datasource passwords will be stored as plaintext in the Nix store!")
(optional (
any (x: x.secure_settings != null) cfg.provision.notifiers
) "Notifier secure settings will be stored as plaintext in the Nix store!")
];
environment.systemPackages = [ cfg.package ];

@ -468,7 +468,7 @@ let
'';
};
value = mkOption {
values = mkOption {
type = types.listOf types.str;
default = [];
description = ''

@ -316,7 +316,7 @@ in
client = {
enable = mkEnableOption "Ceph client configuration";
extraConfig = mkOption {
type = with types; attrsOf str;
type = with types; attrsOf (attrsOf str);
default = {};
example = ''
{

@ -191,9 +191,8 @@ in
{ description = "DHCP Client";
wantedBy = [ "multi-user.target" ] ++ optional (!hasDefaultGatewaySet) "network-online.target";
wants = [ "network.target" "systemd-udev-settle.service" ];
wants = [ "network.target" ];
before = [ "network-online.target" ];
after = [ "systemd-udev-settle.service" ];
restartTriggers = [ exitHook ];

@ -113,7 +113,6 @@ in
"~@memlock"
"~@resources"
"~@setuid"
"~@sync"
"~@timer"
];
};

@ -8,9 +8,9 @@ let
# Convert systemd-style address specification to kresd config line(s).
# On Nix level we don't attempt to precisely validate the address specifications.
mkListen = kind: addr: let
al_v4 = builtins.match "([0-9.]\+):([0-9]\+)" addr;
al_v6 = builtins.match "\\[(.\+)]:([0-9]\+)" addr;
al_portOnly = builtins.match "()([0-9]\+)" addr;
al_v4 = builtins.match "([0-9.]+):([0-9]+)" addr;
al_v6 = builtins.match "\\[(.+)]:([0-9]+)" addr;
al_portOnly = builtins.match "()([0-9]+)" addr;
al = findFirst (a: a != null)
(throw "services.kresd.*: incorrect address specification '${addr}'")
[ al_v4 al_v6 al_portOnly ];
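# For illustration (assuming the regexes above): "127.0.0.1:53" matches
# al_v4 as [ "127.0.0.1" "53" ], "[::1]:53" matches al_v6 as
# [ "::1" "53" ], and a bare port "53" matches al_portOnly as [ "" "53" ],
# so `al` always ends up as [ <address-or-empty> <port> ].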

@ -270,7 +270,7 @@ in
drivers = mkOption {
type = types.listOf types.path;
default = [];
example = literalExample "with pkgs; [ gutenprint hplip splix cups-googlecloudprint ]";
example = literalExample "with pkgs; [ gutenprint hplip splix ]";
description = ''
CUPS drivers to use. Drivers provided by CUPS, cups-filters,
Ghostscript and Samba are added unconditionally. If this list contains

@ -8,30 +8,19 @@ let
cfg = config.services.clamav;
pkg = pkgs.clamav;
clamdConfigFile = pkgs.writeText "clamd.conf" ''
DatabaseDirectory ${stateDir}
LocalSocket ${runDir}/clamd.ctl
PidFile ${runDir}/clamd.pid
TemporaryDirectory /tmp
User clamav
Foreground yes
${cfg.daemon.extraConfig}
'';
freshclamConfigFile = pkgs.writeText "freshclam.conf" ''
DatabaseDirectory ${stateDir}
Foreground yes
Checks ${toString cfg.updater.frequency}
${cfg.updater.extraConfig}
DatabaseMirror database.clamav.net
'';
toKeyValue = generators.toKeyValue {
mkKeyValue = generators.mkKeyValueDefault {} " ";
listsAsDuplicateKeys = true;
};
clamdConfigFile = pkgs.writeText "clamd.conf" (toKeyValue cfg.daemon.settings);
freshclamConfigFile = pkgs.writeText "freshclam.conf" (toKeyValue cfg.updater.settings);
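# Worked example (illustrative): with listsAsDuplicateKeys, an attrset like
#   { Foreground = true; DatabaseMirror = [ "db.xx.clamav.net" "database.clamav.net" ]; }
# renders as
#   Foreground true
#   DatabaseMirror db.xx.clamav.net
#   DatabaseMirror database.clamav.net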
in
{
imports = [
(mkRenamedOptionModule [ "services" "clamav" "updater" "config" ] [ "services" "clamav" "updater" "extraConfig" ])
(mkRemovedOptionModule [ "services" "clamav" "updater" "config" ] "Use services.clamav.updater.settings instead.")
(mkRemovedOptionModule [ "services" "clamav" "updater" "extraConfig" ] "Use services.clamav.updater.settings instead.")
(mkRemovedOptionModule [ "services" "clamav" "daemon" "extraConfig" ] "Use services.clamav.daemon.settings instead.")
];
options = {
@@ -39,12 +28,12 @@ in
daemon = {
enable = mkEnableOption "ClamAV clamd daemon";
extraConfig = mkOption {
type = types.lines;
default = "";
settings = mkOption {
type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
default = {};
description = ''
Extra configuration for clamd. Contents will be added verbatim to the
configuration file.
ClamAV configuration. Refer to <link xlink:href="https://linux.die.net/man/5/clamd.conf"/>
for details on supported values.
'';
};
};
@@ -68,12 +57,12 @@ in
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
settings = mkOption {
type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
default = {};
description = ''
Extra configuration for freshclam. Contents will be added verbatim to the
configuration file.
freshclam configuration. Refer to <link xlink:href="https://linux.die.net/man/5/freshclam.conf"/>
for details on supported values.
'';
};
};
@@ -93,6 +82,22 @@ in
users.groups.${clamavGroup} =
{ gid = config.ids.gids.clamav; };
services.clamav.daemon.settings = {
DatabaseDirectory = stateDir;
LocalSocket = "${runDir}/clamd.ctl";
PidFile = "${runDir}/clamd.pid";
TemporaryDirectory = "/tmp";
User = "clamav";
Foreground = true;
};
services.clamav.updater.settings = {
DatabaseDirectory = stateDir;
Foreground = true;
Checks = cfg.updater.frequency;
DatabaseMirror = [ "database.clamav.net" ];
};
environment.etc."clamav/freshclam.conf".source = freshclamConfigFile;
environment.etc."clamav/clamd.conf".source = clamdConfigFile;

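A sketch of how the new settings attrsets render through the toKeyValue generator above (key and value joined by a single space; listsAsDuplicateKeys emits one line per list element; the keys shown are illustrative):

  services.clamav.updater.settings = {
    LogTime = true;                        # renders as "LogTime true"
    ExtraDatabase = [ "a.cvd" "b.cvd" ];   # renders as two "ExtraDatabase ..." lines
  };
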
@@ -82,11 +82,8 @@ in {
X-RestartIfChanged=false
'';
systemd.units."autovt@.service".unit = pkgs.runCommand "unit" { preferLocalBuild = true; }
''
mkdir -p $out
ln -s ${config.systemd.units."kmsconvt@.service".unit}/kmsconvt@.service $out/autovt@.service
'';
systemd.suppressedSystemUnits = [ "autovt@.service" ];
systemd.units."kmsconvt@.service".aliases = [ "autovt@.service" ];
systemd.services.systemd-vconsole-setup.enable = false;

@@ -329,7 +329,7 @@ in
extraConfig = "internal;";
};
locations."~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$" = {
locations."~ ^/lib.*\\.(js|css|gif|png|ico|jpg|jpeg)$" = {
extraConfig = "expires 365d;";
};
@@ -349,7 +349,7 @@ in
'';
};
locations."~ \.php$" = {
locations."~ \\.php$" = {
extraConfig = ''
try_files $uri $uri/ /doku.php;
include ${pkgs.nginx}/conf/fastcgi_params;

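The escaping fix matters because double-quoted Nix strings silently drop unknown escapes, so nginx previously received a regex with an unescaped dot; for example:

  # In Nix:  "\.php$"   evaluates to  .php$    (backslash silently dropped)
  #          "\\.php$"  evaluates to  \.php$   (what the nginx regex needs)
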
@@ -25,10 +25,28 @@ let
ES_ENABLED = if (cfg.elasticsearch.host != null) then "true" else "false";
ES_HOST = cfg.elasticsearch.host;
ES_PORT = toString(cfg.elasticsearch.port);
TRUSTED_PROXY_IP = cfg.trustedProxy;
}
// (if cfg.smtp.authenticate then { SMTP_LOGIN = cfg.smtp.user; } else {})
// cfg.extraConfig;
cfgService = {
# User and group
User = cfg.user;
Group = cfg.group;
# State directory and mode
StateDirectory = "mastodon";
StateDirectoryMode = "0750";
# Logs directory and mode
LogsDirectory = "mastodon";
LogsDirectoryMode = "0750";
# Access write directories
UMask = "0027";
# Sandboxing
PrivateTmp = true;
};
envFile = pkgs.writeText "mastodon.env" (lib.concatMapStrings (s: s + "\n") (
(lib.concatLists (lib.mapAttrsToList (name: value:
if value != null then [
@@ -93,7 +111,6 @@ in {
group = lib.mkOption {
description = ''
Group under which mastodon runs.
If it is set to "mastodon", a group will be created.
'';
type = lib.types.str;
default = "mastodon";
@@ -179,6 +196,26 @@ in {
type = lib.types.str;
};
trustedProxy = lib.mkOption {
description = ''
The IP address from which your reverse proxy sends requests to Mastodon's web process. If this is
set incorrectly, Mastodon records the reverse proxy's own IP as the client IP of every request,
which breaks the rate limits and security functions that rely on client IP addresses.
'';
type = lib.types.str;
default = "127.0.0.1";
};
enableUnixSocket = lib.mkOption {
description = ''
Instead of binding to an IP address like 127.0.0.1, you may bind to a Unix socket. This variable
is process-specific, i.e. every process needs its own value, and it works for both web (Puma)
processes and streaming API (Node.js) processes.
'';
type = lib.types.bool;
default = true;
};
redis = {
createLocally = lib.mkOption {
description = "Configure local Redis server for Mastodon.";
@@ -370,19 +407,16 @@ in {
environment = env;
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.package;
LogsDirectory = "mastodon";
StateDirectory = "mastodon";
};
} // cfgService;
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
};
systemd.services.mastodon-init-db = lib.mkIf cfg.automaticMigrations {
script = ''
if [ `psql mastodon -c \
if [ `psql ${cfg.database.name} -c \
"select count(*) from pg_class c \
join pg_namespace s on s.oid = c.relnamespace \
where s.nspname not in ('pg_catalog', 'pg_toast', 'information_schema') \
@@ -397,14 +431,9 @@ in {
environment = env;
serviceConfig = {
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
EnvironmentFile = "/var/lib/mastodon/.secrets_env";
PrivateTmp = true;
LogsDirectory = "mastodon";
StateDirectory = "mastodon";
WorkingDirectory = cfg.package;
};
} // cfgService;
after = [ "mastodon-init-dirs.service" "network.target" ] ++ (if databaseActuallyCreateLocally then [ "postgresql.service" ] else []);
wantedBy = [ "multi-user.target" ];
};
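
The `// cfgService` updates are shallow attribute-set merges: each unit keeps its own serviceConfig keys and gains the shared ones, with the right-hand side winning on duplicates. A sketch for the unit above:

  # { Type = "oneshot"; EnvironmentFile = ...; WorkingDirectory = ...; } // cfgService
  #   => adds User/Group, StateDirectory/LogsDirectory (+ modes), UMask and PrivateTmp
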
@@ -415,21 +444,20 @@ in {
++ (if cfg.automaticMigrations then [ "mastodon-init-db.service" ] else [ "mastodon-init-dirs.service" ]);
description = "Mastodon streaming";
wantedBy = [ "multi-user.target" ];
environment = env // {
PORT = toString(cfg.streamingPort);
};
environment = env // (if cfg.enableUnixSocket
then { SOCKET = "/run/mastodon-streaming/streaming.socket"; }
else { PORT = toString(cfg.streamingPort); }
);
serviceConfig = {
ExecStart = "${pkgs.nodejs-slim}/bin/node streaming";
ExecStart = "${cfg.package}/run-streaming.sh";
Restart = "always";
RestartSec = 20;
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.package;
EnvironmentFile = "/var/lib/mastodon/.secrets_env";
PrivateTmp = true;
LogsDirectory = "mastodon";
StateDirectory = "mastodon";
};
WorkingDirectory = cfg.package;
# Runtime directory and mode
RuntimeDirectory = "mastodon-streaming";
RuntimeDirectoryMode = "0750";
} // cfgService;
};
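# Note: RuntimeDirectory = "mastodon-streaming" makes systemd create
# /run/mastodon-streaming at unit start, which is where the SOCKET path
# above points when enableUnixSocket is enabled.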
systemd.services.mastodon-web = {
@@ -438,21 +466,20 @@ in {
++ (if cfg.automaticMigrations then [ "mastodon-init-db.service" ] else [ "mastodon-init-dirs.service" ]);
description = "Mastodon web";
wantedBy = [ "multi-user.target" ];
environment = env // {
PORT = toString(cfg.webPort);
};
environment = env // (if cfg.enableUnixSocket
then { SOCKET = "/run/mastodon-web/web.socket"; }
else { PORT = toString(cfg.webPort); }
);
serviceConfig = {
ExecStart = "${cfg.package}/bin/puma -C config/puma.rb";
Restart = "always";
RestartSec = 20;
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.package;
EnvironmentFile = "/var/lib/mastodon/.secrets_env";
PrivateTmp = true;
LogsDirectory = "mastodon";
StateDirectory = "mastodon";
};
WorkingDirectory = cfg.package;
# Runtime directory and mode
RuntimeDirectory = "mastodon-web";
RuntimeDirectoryMode = "0750";
} // cfgService;
path = with pkgs; [ file imagemagick ffmpeg ];
};
@@ -469,14 +496,9 @@ in {
ExecStart = "${cfg.package}/bin/sidekiq -c 25 -r ${cfg.package}";
Restart = "always";
RestartSec = 20;
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.package;
EnvironmentFile = "/var/lib/mastodon/.secrets_env";
PrivateTmp = true;
LogsDirectory = "mastodon";
StateDirectory = "mastodon";
};
WorkingDirectory = cfg.package;
} // cfgService;
path = with pkgs; [ file imagemagick ffmpeg ];
};
@@ -495,12 +517,12 @@ in {
};
locations."@proxy" = {
proxyPass = "http://127.0.0.1:${toString(cfg.webPort)}";
proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString(cfg.webPort)}");
proxyWebsockets = true;
};
locations."/api/v1/streaming/" = {
proxyPass = "http://127.0.0.1:${toString(cfg.streamingPort)}/";
proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-streaming/streaming.socket" else "http://127.0.0.1:${toString(cfg.streamingPort)}/");
proxyWebsockets = true;
};
};
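
With enableUnixSocket at its default of true, the generated directives point at the sockets in the runtime directories created by the units above, roughly:

  # location @proxy              -> proxy_pass http://unix:/run/mastodon-web/web.socket;
  # location /api/v1/streaming/  -> proxy_pass http://unix:/run/mastodon-streaming/streaming.socket;
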
@@ -534,7 +556,7 @@ in {
(lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package mastodonEnv ])
];
users.groups.mastodon = lib.mkIf (cfg.group == "mastodon") { };
users.groups.${cfg.group}.members = lib.optional cfg.configureNginx config.services.nginx.user;
};
meta.maintainers = with lib.maintainers; [ happy-river erictapen ];

@@ -44,7 +44,7 @@ in
'';
description = ''
Configuration for Miniflux; refer to
<link xlink:href="http://docs.miniflux.app/en/latest/configuration.html"/>
<link xlink:href="https://miniflux.app/docs/configuration.html"/>
for documentation on the supported values.
'';
};

@@ -211,7 +211,7 @@ in
environment = let
penv = python.buildEnv.override {
# setuptools: https://github.com/benoitc/gunicorn/issues/1716
extraLibs = [ python.pkgs.gevent python.pkgs.setuptools pkg ];
extraLibs = [ python.pkgs.eventlet python.pkgs.setuptools pkg ];
};
in {
PYTHONPATH = "${dataDir}/${wikiIdent}/config:${penv}/${python.sitePackages}";
@@ -233,7 +233,7 @@ in
ExecStart = ''${python.pkgs.gunicorn}/bin/gunicorn moin_wsgi \
--name gunicorn-${wikiIdent} \
--workers ${toString cfg.gunicorn.workers} \
--worker-class gevent \
--worker-class eventlet \
--bind unix:/run/moin/${wikiIdent}/gunicorn.sock
'';

@@ -28,7 +28,10 @@ let
upload_max_filesize = cfg.maxUploadSize;
post_max_size = cfg.maxUploadSize;
memory_limit = cfg.maxUploadSize;
} // cfg.phpOptions;
} // cfg.phpOptions
// optionalAttrs cfg.caching.apcu {
"apc.enable_cli" = "1";
};
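# With cfg.caching.apcu = true, optionalAttrs merges in "apc.enable_cli" = "1",
# so PHP invoked from the command line (e.g. the occ wrapper below) also uses APCu.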
occ = pkgs.writeScriptBin "nextcloud-occ" ''
#! ${pkgs.runtimeShell}
@@ -91,7 +94,7 @@ in {
package = mkOption {
type = types.package;
description = "Which package to use for the Nextcloud instance.";
relatedPackages = [ "nextcloud18" "nextcloud19" "nextcloud20" ];
relatedPackages = [ "nextcloud19" "nextcloud20" "nextcloud21" ];
};
maxUploadSize = mkOption {
@@ -285,6 +288,24 @@ in {
may be served via HTTPS.
'';
};
defaultPhoneRegion = mkOption {
default = null;
type = types.nullOr types.str;
example = "DE";
description = ''
<warning>
<para>This option was introduced in Nextcloud 21. Setting it while running an older
version throws an evaluation error.</para>
</warning>
<link xlink:href="https://www.iso.org/iso-3166-country-codes.html">ISO 3166-1</link>
country code used for automatic phone-number detection when no country code is provided.
With e.g. <literal>DE</literal> set, the <literal>+49</literal> prefix can be omitted from
phone numbers.
'';
};
};
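# Usage sketch (requires Nextcloud >= 21, see the assertion below):
#   services.nextcloud.config.defaultPhoneRegion = "DE";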
caching = {
@@ -350,10 +371,13 @@ in {
&& !(acfg.adminpass != null && acfg.adminpassFile != null));
message = "Please specify exactly one of adminpass or adminpassFile";
}
{ assertion = versionOlder cfg.package.version "21" -> cfg.config.defaultPhoneRegion == null;
message = "The `defaultPhoneRegion'-setting is only supported for Nextcloud >=21!";
}
];
warnings = let
latest = 20;
latest = 21;
upgradeWarning = major: nixos:
''
A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
@@ -371,9 +395,9 @@ in {
Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release.
Please migrate your configuration to config.services.nextcloud.poolSettings.
'')
++ (optional (versionOlder cfg.package.version "18") (upgradeWarning 17 "20.03"))
++ (optional (versionOlder cfg.package.version "19") (upgradeWarning 18 "20.09"))
++ (optional (versionOlder cfg.package.version "20") (upgradeWarning 19 "21.05"));
++ (optional (versionOlder cfg.package.version "20") (upgradeWarning 19 "21.05"))
++ (optional (versionOlder cfg.package.version "21") (upgradeWarning 20 "21.05"));
services.nextcloud.package = with pkgs;
mkDefault (
@@ -383,14 +407,13 @@ in {
nextcloud defined in an overlay, please set `services.nextcloud.package` to
`pkgs.nextcloud`.
''
else if versionOlder stateVersion "20.03" then nextcloud17
else if versionOlder stateVersion "20.09" then nextcloud18
# 21.03 will not be an official release - it was instead 21.05.
# This versionOlder statement remains set to 21.03 for backwards compatibility.
# See https://github.com/NixOS/nixpkgs/pull/108899 and
# https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
else if versionOlder stateVersion "21.03" then nextcloud19
else nextcloud20
else nextcloud21
);
}
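
A worked reading of the default selection above (hypothetical state versions):

  # stateVersion "20.03" -> nextcloud18   (not older than "20.03", older than "20.09")
  # stateVersion "20.09" -> nextcloud19   (older than "21.03")
  # stateVersion "21.05" -> nextcloud21   (falls through to the final branch)
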
@@ -448,6 +471,7 @@ in {
'dbtype' => '${c.dbtype}',
'trusted_domains' => ${writePhpArrary ([ cfg.hostName ] ++ c.extraTrustedDomains)},
'trusted_proxies' => ${writePhpArrary (c.trustedProxies)},
${optionalString (c.defaultPhoneRegion != null) "'default_phone_region' => '${c.defaultPhoneRegion}',"}
];
'';
occInstallCmd = let
@@ -596,6 +620,14 @@ in {
access_log off;
'';
};
"= /" = {
priority = 100;
extraConfig = ''
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
'';
};
"/" = {
priority = 900;
extraConfig = "rewrite ^ /index.php;";
@@ -607,11 +639,15 @@ in {
"^~ /.well-known" = {
priority = 210;
extraConfig = ''
absolute_redirect off;
location = /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
return 301 /remote.php/dav;
}
location = /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
return 301 /remote.php/dav;
}
location ~ ^/\.well-known/(?!acme-challenge|pki-validation) {
return 301 /index.php$request_uri;
}
try_files $uri $uri/ =404;
'';
@@ -619,7 +655,7 @@ in {
"~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/)".extraConfig = ''
return 404;
'';
"~ ^/(?:\\.|autotest|occ|issue|indie|db_|console)".extraConfig = ''
"~ ^/(?:\\.(?!well-known)|autotest|occ|issue|indie|db_|console)".extraConfig = ''
return 404;
'';
"~ ^\\/(?:index|remote|public|cron|core\\/ajax\\/update|status|ocs\\/v[12]|updater\\/.+|oc[ms]-provider\\/.+|.+\\/richdocumentscode\\/proxy)\\.php(?:$|\\/)" = {

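The added negative lookahead keeps this dotfile block from shadowing the /.well-known handlers above; illustratively:

  # /.git/config         -> 404 (leading dot not followed by "well-known")
  # /.well-known/caldav  -> handled by the "^~ /.well-known" location instead
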
@@ -11,7 +11,7 @@
desktop client is packaged at <literal>pkgs.nextcloud-client</literal>.
</para>
<para>
The current default by NixOS is <package>nextcloud20</package> which is also the latest
The current default in NixOS is <package>nextcloud21</package>, which is also the latest
major version available.
</para>
<section xml:id="module-services-nextcloud-basic-usage">
