commit
fdbac8e7d0
@ -1,60 +0,0 @@ |
||||
on: |
||||
issue_comment: |
||||
types: |
||||
- created |
||||
|
||||
# This action allows people with write access to the repo to rebase a PRs base branch from |
||||
# master to staging by commenting `/rebase-staging` on the PR while avoiding CODEOWNER notifications. |
||||
|
||||
jobs: |
||||
rebase: |
||||
runs-on: ubuntu-latest |
||||
if: github.repository_owner == 'NixOS' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase-staging') |
||||
steps: |
||||
- uses: peter-evans/create-or-update-comment@v1 |
||||
with: |
||||
comment-id: ${{ github.event.comment.id }} |
||||
reactions: eyes |
||||
- uses: scherermichael-oss/action-has-permission@1.0.6 |
||||
id: check-write-access |
||||
with: |
||||
required-permission: write |
||||
env: |
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
||||
- name: check base branch is master |
||||
if: steps.check-write-access.outputs.has-permission |
||||
run: | |
||||
if [ "$(curl https://api.github.com/repos/NixOS/nixpkgs/pulls/${{ github.event.issue.number }} | jq -r '.base.ref')" != "master" ]; then |
||||
echo "This action only works when the current base branch is master." |
||||
exit 1 |
||||
fi |
||||
- uses: actions/checkout@v2 |
||||
with: |
||||
fetch-depth: 0 |
||||
if: steps.check-write-access.outputs.has-permission |
||||
- name: rebase pull request |
||||
if: steps.check-write-access.outputs.has-permission |
||||
env: |
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
||||
PULL_REQUEST: ${{ github.event.issue.number }} |
||||
run: | |
||||
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" |
||||
git config --global user.name "github-actions[bot]" |
||||
git fetch origin |
||||
gh pr checkout "$PULL_REQUEST" |
||||
git rebase \ |
||||
--onto="$(git merge-base origin/master origin/staging)" \ |
||||
"HEAD~$(git rev-list --count HEAD ^master)" |
||||
git push --force |
||||
curl \ |
||||
-X POST \ |
||||
-H "Accept: application/vnd.github.v3+json" \ |
||||
-H "Authorization: token $GITHUB_TOKEN" \ |
||||
-d '{ "base": "staging" }' \ |
||||
"https://api.github.com/repos/NixOS/nixpkgs/pulls/$PULL_REQUEST" |
||||
- uses: peter-evans/create-or-update-comment@v1 |
||||
if: ${{ failure() }} |
||||
with: |
||||
issue-number: ${{ github.event.issue.number }} |
||||
body: | |
||||
[Failed to rebase on `staging`](https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}) |
@ -0,0 +1,134 @@ |
||||
on:
  issue_comment:
    types:
      - created

# This action allows people with write access to the repo to rebase a PR's base branch
# by commenting `/rebase ${branch}` on the PR while avoiding CODEOWNER notifications.

jobs:
  rebase:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS' && github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase')
    steps:
      # Acknowledge the command by reacting to the triggering comment.
      - uses: peter-evans/create-or-update-comment@v1
        with:
          comment-id: ${{ github.event.comment.id }}
          reactions: eyes
      - uses: scherermichael-oss/action-has-permission@1.0.6
        id: check-write-access
        with:
          required-permission: write
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      # Abort early unless the commenter has write access to the repository.
      - name: check permissions
        run: |
          echo "Commenter doesn't have write access to the repo"
          exit 1
        if: "! steps.check-write-access.outputs.has-permission"
      # Collect PR metadata and the requested target branch into $GITHUB_ENV
      # so the later steps can use plain shell variables.
      - name: setup
        env:
          # SECURITY: the comment body is attacker-controlled text. It must
          # reach the shell only through an environment variable — never via
          # ${{ }} interpolation inside `run:` — to prevent script injection.
          COMMENT_BODY: ${{ github.event.comment.body }}
        run: |
          curl "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}" 2>/dev/null >pr.json
          cat <<EOF >>"$GITHUB_ENV"
          CAN_MODIFY=$(jq -r '.maintainer_can_modify' pr.json)
          COMMITS=$(jq -r '.commits' pr.json)
          CURRENT_BASE=$(jq -r '.base.ref' pr.json)
          PR_BRANCH=$(jq -r '.head.ref' pr.json)
          COMMENT_BRANCH=$(printf '%s\n' "$COMMENT_BODY" | awk '/^\/rebase / {print $2}')
          PULL_REQUEST=${{ github.event.issue.number }}
          EOF
          rm pr.json
      # Validate the requested rebase before touching any branches.
      - name: check branch
        env:
          PERMANENT_BRANCHES: "haskell-updates|master|nixos|nixpkgs|python-unstable|release|staging"
          VALID_BRANCHES: "haskell-updates|master|python-unstable|release-20.09|staging|staging-20.09|staging-next"
        run: |
          message() {
            cat <<EOF
          Can't rebase $PR_BRANCH from $CURRENT_BASE onto $COMMENT_BRANCH (PR:$PULL_REQUEST COMMITS:$COMMITS)
          EOF
          }
          if ! [[ "$COMMENT_BRANCH" =~ ^($VALID_BRANCHES)$ ]]; then
            cat <<EOF
          Check that the branch from the comment is valid:

          $(message)

          This action can only rebase onto these branches:

          $VALID_BRANCHES

          \`/rebase \${branch}\` must be at the start of the line
          EOF
            exit 1
          fi
          if [[ "$COMMENT_BRANCH" == "$CURRENT_BASE" ]]; then
            cat <<EOF
          Check that the branch from the comment isn't the current base branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$COMMENT_BRANCH" == "$PR_BRANCH" ]]; then
            cat <<EOF
          Check that the branch from the comment isn't the current branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$PR_BRANCH" =~ ^($PERMANENT_BRANCHES) ]]; then
            cat <<EOF
          Check that the PR branch isn't a permanent branch:

          $(message)
          EOF
            exit 1
          fi
          if [[ "$CAN_MODIFY" != "true" ]]; then
            cat <<EOF
          Check that maintainers can edit the PR branch:

          $(message)
          EOF
            exit 1
          fi
      - uses: actions/checkout@v2
        with:
          # Full history is needed so merge-base/rebase below can work.
          fetch-depth: 0
      # Rebase the PR branch onto the requested branch, retarget the PR's
      # base via the API, then close it (the follow-up comment asks the
      # author to reopen, which restarts CI).
      - name: rebase pull request
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git fetch origin
          gh pr checkout "$PULL_REQUEST"
          git rebase \
            --onto="$(git merge-base origin/"$CURRENT_BASE" origin/"$COMMENT_BRANCH")" \
            "HEAD~$COMMITS"
          git push --force
          curl \
            -X POST \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Authorization: token $GITHUB_TOKEN" \
            -d "{ \"base\": \"$COMMENT_BRANCH\" }" \
            "https://api.github.com/repos/${{ github.repository }}/pulls/$PULL_REQUEST"
          curl \
            -X PATCH \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Authorization: token $GITHUB_TOKEN" \
            -d '{ "state": "closed" }' \
            "https://api.github.com/repos/${{ github.repository }}/pulls/$PULL_REQUEST"
      - uses: peter-evans/create-or-update-comment@v1
        with:
          issue-number: ${{ github.event.issue.number }}
          body: |
            Rebased, please reopen the pull request to restart CI
      # On any failure, link the run so the commenter can inspect the logs.
      - uses: peter-evans/create-or-update-comment@v1
        if: failure()
        with:
          issue-number: ${{ github.event.issue.number }}
          body: |
            [Failed to rebase](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
@ -0,0 +1,298 @@ |
||||
# pkgs.dockerTools {#sec-pkgs-dockerTools} |
||||
|
||||
`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [ Docker Image Specification v1.2.0 ](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120). Docker itself is not used to perform any of the operations done by these functions. |
||||
|
||||
## buildImage {#ssec-pkgs-dockerTools-buildImage} |
||||
|
||||
This function is analogous to the `docker build` command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with `docker load`. |
||||
|
||||
The parameters of `buildImage` with relative example values are described below: |
||||
|
||||
[]{#ex-dockerTools-buildImage} |
||||
[]{#ex-dockerTools-buildImage-runAsRoot} |
||||
|
||||
```nix |
||||
buildImage { |
||||
name = "redis"; |
||||
tag = "latest"; |
||||
|
||||
fromImage = someBaseImage; |
||||
fromImageName = null; |
||||
fromImageTag = "latest"; |
||||
|
||||
contents = pkgs.redis; |
||||
runAsRoot = '' |
||||
#!${pkgs.runtimeShell} |
||||
mkdir -p /data |
||||
''; |
||||
|
||||
config = { |
||||
Cmd = [ "/bin/redis-server" ]; |
||||
WorkingDir = "/data"; |
||||
Volumes = { "/data" = { }; }; |
||||
}; |
||||
} |
||||
``` |
||||
|
||||
The above example will build a Docker image `redis/latest` from the given base image. Loading and running this image in Docker results in `redis-server` being started automatically. |
||||
|
||||
- `name` specifies the name of the resulting image. This is the only required argument for `buildImage`. |
||||
|
||||
- `tag` specifies the tag of the resulting image. By default it\'s `null`, which indicates that the nix output hash will be used as tag. |
||||
|
||||
- `fromImage` is the repository tarball containing the base image. It must be a valid Docker image, such as exported by `docker save`. By default it\'s `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`. |
||||
|
||||
- `fromImageName` can be used to further specify the base image within the repository, in case it contains multiple images. By default it\'s `null`, in which case `buildImage` will peek the first image available in the repository. |
||||
|
||||
- `fromImageTag` can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it\'s `null`, in which case `buildImage` will peek the first tag available for the base image. |
||||
|
||||
- `contents` is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as `ADD contents/ /` in a `Dockerfile`. By default it\'s `null`. |
||||
|
||||
- `runAsRoot` is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied `contents` derivation. This can be similarly seen as `RUN ...` in a `Dockerfile`. |
||||
|
||||
> **_NOTE:_** Using this parameter requires the `kvm` device to be available. |
||||
|
||||
- `config` is used to specify the configuration of the containers that will be started off the built image in Docker. The available options are listed in the [ Docker Image Specification v1.2.0 ](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions). |
||||
|
||||
After the new layer has been created, its closure (to which `contents`, `config` and `runAsRoot` contribute) will be copied in the layer itself. Only new dependencies that are not already in the existing layers will be copied. |
||||
|
||||
At the end of the process, only one new single layer will be produced and added to the resulting image. |
||||
|
||||
The resulting repository will only list the single image `image/tag`. In the case of [the `buildImage` example](#ex-dockerTools-buildImage) it would be `redis/latest`. |
||||
|
||||
It is possible to inspect the arguments with which an image was built using its `buildArgs` attribute. |
||||
|
||||
> **_NOTE:_** If you see errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)` you may need to add `pkgs.iana-etc` to `contents`. |
||||
|
||||
> **_NOTE:_** If you see errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)` you may need to add `pkgs.cacert` to `contents`. |
||||
|
||||
By default `buildImage` will use a static date of one second past the UNIX Epoch. This allows `buildImage` to produce binary reproducible images. When listing images with `docker images`, the newly created images will be listed like this: |
||||
|
||||
```ShellSession |
||||
$ docker images |
||||
REPOSITORY TAG IMAGE ID CREATED SIZE |
||||
hello latest 08c791c7846e 48 years ago 25.2MB |
||||
``` |
||||
|
||||
You can break binary reproducibility but have a sorted, meaningful `CREATED` column by setting `created` to `now`. |
||||
|
||||
```nix |
||||
pkgs.dockerTools.buildImage { |
||||
name = "hello"; |
||||
tag = "latest"; |
||||
created = "now"; |
||||
contents = pkgs.hello; |
||||
|
||||
config.Cmd = [ "/bin/hello" ]; |
||||
} |
||||
``` |
||||
|
||||
and now the Docker CLI will display a reasonable date and sort the images as expected: |
||||
|
||||
```ShellSession |
||||
$ docker images |
||||
REPOSITORY TAG IMAGE ID CREATED SIZE |
||||
hello latest de2bf4786de6 About a minute ago 25.2MB |
||||
``` |
||||
|
||||
however, the produced images will not be binary reproducible. |
||||
|
||||
## buildLayeredImage {#ssec-pkgs-dockerTools-buildLayeredImage} |
||||
|
||||
Create a Docker image with many of the store paths being on their own layer to improve sharing between images. The image is realized into the Nix store as a gzipped tarball. Depending on the intended usage, many users might prefer to use `streamLayeredImage` instead, which this function uses internally. |
||||
|
||||
`name` |
||||
|
||||
: The name of the resulting image. |
||||
|
||||
`tag` _optional_ |
||||
|
||||
: Tag of the generated image. |
||||
|
||||
*Default:* the output path\'s hash |
||||
|
||||
`contents` _optional_ |
||||
|
||||
: Top level paths in the container. Either a single derivation, or a list of derivations. |
||||
|
||||
*Default:* `[]` |
||||
|
||||
`config` _optional_ |
||||
|
||||
: Run-time configuration of the container. A full list of the options is available in the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions). |
||||
|
||||
*Default:* `{}` |
||||
|
||||
`created` _optional_ |
||||
|
||||
: Date and time the layers were created. Follows the same `now` exception supported by `buildImage`. |
||||
|
||||
*Default:* `1970-01-01T00:00:01Z` |
||||
|
||||
`maxLayers` _optional_ |
||||
|
||||
: Maximum number of layers to create. |
||||
|
||||
*Default:* `100` |
||||
|
||||
*Maximum:* `125` |
||||
|
||||
`extraCommands` _optional_ |
||||
|
||||
: Shell commands to run while building the final layer, without access to most of the layer contents. Changes to this layer are \"on top\" of all the other layers, so can create additional directories and files. |
||||
|
||||
### Behavior of `contents` in the final image {#dockerTools-buildLayeredImage-arg-contents} |
||||
|
||||
Each path directly listed in `contents` will have a symlink in the root of the image. |
||||
|
||||
For example: |
||||
|
||||
```nix |
||||
pkgs.dockerTools.buildLayeredImage { |
||||
name = "hello"; |
||||
contents = [ pkgs.hello ]; |
||||
} |
||||
``` |
||||
|
||||
will create symlinks for all the paths in the `hello` package: |
||||
|
||||
```ShellSession |
||||
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello |
||||
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info |
||||
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo |
||||
``` |
||||
|
||||
### Automatic inclusion of `config` references {#dockerTools-buildLayeredImage-arg-config} |
||||
|
||||
The closure of `config` is automatically included in the closure of the final image. |
||||
|
||||
This allows you to make very simple Docker images with very little code. This container will start up and run `hello`: |
||||
|
||||
```nix |
||||
pkgs.dockerTools.buildLayeredImage { |
||||
name = "hello"; |
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ]; |
||||
} |
||||
``` |
||||
|
||||
### Adjusting `maxLayers` {#dockerTools-buildLayeredImage-arg-maxLayers} |
||||
|
||||
Increasing the `maxLayers` increases the number of layers which have a chance to be shared between different images. |
||||
|
||||
Modern Docker installations support up to 128 layers, however older versions support as few as 42. |
||||
|
||||
If the produced image will not be extended by other Docker builds, it is safe to set `maxLayers` to `128`. However it will be impossible to extend the image further. |
||||
|
||||
The first (`maxLayers-2`) most \"popular\" paths will have their own individual layers, then layer \#`maxLayers-1` will contain all the remaining \"unpopular\" paths, and finally layer \#`maxLayers` will contain the Image configuration. |
||||
|
||||
Docker\'s Layers are not inherently ordered, they are content-addressable and are not explicitly layered until they are composed in to an Image. |
||||
|
||||
## streamLayeredImage {#ssec-pkgs-dockerTools-streamLayeredImage} |
||||
|
||||
Builds a script which, when run, will stream an uncompressed tarball of a Docker image to stdout. The arguments to this function are as for `buildLayeredImage`. This method of constructing an image does not realize the image into the Nix store, so it saves on IO and disk/cache space, particularly with large images. |
||||
|
||||
The image produced by running the output script can be piped directly into `docker load`, to load it into the local docker daemon: |
||||
|
||||
```ShellSession |
||||
$(nix-build) | docker load |
||||
``` |
||||
|
||||
Alternatively, the image can be piped via `gzip` into `skopeo`, e.g. to copy it into a registry: |
||||
|
||||
```ShellSession |
||||
$(nix-build) | gzip --fast | skopeo copy docker-archive:/dev/stdin docker://some_docker_registry/myimage:tag |
||||
``` |
||||
|
||||
## pullImage {#ssec-pkgs-dockerTools-fetchFromRegistry} |
||||
|
||||
This function is analogous to the `docker pull` command, in that it can be used to pull a Docker image from a Docker registry. By default [Docker Hub](https://hub.docker.com/) is used to pull images. |
||||
|
||||
Its parameters are described in the example below: |
||||
|
||||
```nix |
||||
pullImage { |
||||
imageName = "nixos/nix"; |
||||
imageDigest = |
||||
"sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; |
||||
finalImageName = "nix"; |
||||
finalImageTag = "1.11"; |
||||
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; |
||||
os = "linux"; |
||||
arch = "x86_64"; |
||||
} |
||||
``` |
||||
|
||||
- `imageName` specifies the name of the image to be downloaded, which can also include the registry namespace (e.g. `nixos`). This argument is required. |
||||
|
||||
- `imageDigest` specifies the digest of the image to be downloaded. This argument is required. |
||||
|
||||
- `finalImageName`, if specified, this is the name of the image to be created. Note it is never used to fetch the image since we prefer to rely on the immutable digest ID. By default it\'s equal to `imageName`. |
||||
|
||||
- `finalImageTag`, if specified, this is the tag of the image to be created. Note it is never used to fetch the image since we prefer to rely on the immutable digest ID. By default it\'s `latest`. |
||||
|
||||
- `sha256` is the checksum of the whole fetched image. This argument is required. |
||||
|
||||
- `os`, if specified, is the operating system of the fetched image. By default it\'s `linux`. |
||||
|
||||
- `arch`, if specified, is the cpu architecture of the fetched image. By default it\'s `x86_64`. |
||||
|
||||
The `nix-prefetch-docker` command can be used to get the required image parameters: |
||||
|
||||
```ShellSession |
||||
$ nix run nixpkgs.nix-prefetch-docker -c nix-prefetch-docker --image-name mysql --image-tag 5 |
||||
``` |
||||
|
||||
Since a given `imageName` may transparently refer to a manifest list of images which support multiple architectures and/or operating systems, you can supply the `--os` and `--arch` arguments to specify exactly which image you want. By default it will match the OS and architecture of the host the command is run on. |
||||
|
||||
```ShellSession |
||||
$ nix-prefetch-docker --image-name mysql --image-tag 5 --arch x86_64 --os linux |
||||
``` |
||||
|
||||
Desired image name and tag can be set using `--final-image-name` and `--final-image-tag` arguments: |
||||
|
||||
```ShellSession |
||||
$ nix-prefetch-docker --image-name mysql --image-tag 5 --final-image-name eu.gcr.io/my-project/mysql --final-image-tag prod |
||||
``` |
||||
|
||||
## exportImage {#ssec-pkgs-dockerTools-exportImage} |
||||
|
||||
This function is analogous to the `docker export` command, in that it can be used to flatten a Docker image that contains multiple layers. It is in fact the result of the merge of all the layers of the image. As such, the result is suitable for being imported in Docker with `docker import`. |
||||
|
||||
> **_NOTE:_** Using this function requires the `kvm` device to be available. |
||||
|
||||
The parameters of `exportImage` are the following: |
||||
|
||||
```nix |
||||
exportImage { |
||||
fromImage = someLayeredImage; |
||||
fromImageName = null; |
||||
fromImageTag = null; |
||||
|
||||
name = someLayeredImage.name; |
||||
} |
||||
``` |
||||
|
||||
The parameters relative to the base image have the same synopsis as described in [buildImage](#ssec-pkgs-dockerTools-buildImage), except that `fromImage` is the only required argument in this case. |
||||
|
||||
The `name` argument is the name of the derivation output, which defaults to `fromImage.name`. |
||||
|
||||
## shadowSetup {#ssec-pkgs-dockerTools-shadowSetup} |
||||
|
||||
This constant string is a helper for setting up the base files for managing users and groups, only if such files don\'t exist already. It is suitable for being used in a [`buildImage` `runAsRoot`](#ex-dockerTools-buildImage-runAsRoot) script for cases like in the example below: |
||||
|
||||
```nix |
||||
buildImage { |
||||
name = "shadow-basic"; |
||||
|
||||
runAsRoot = '' |
||||
#!${pkgs.runtimeShell} |
||||
${shadowSetup} |
||||
groupadd -r redis |
||||
useradd -r -g redis redis |
||||
mkdir /data |
||||
chown redis:redis /data |
||||
''; |
||||
} |
||||
``` |
||||
|
||||
Creating base files like `/etc/passwd` or `/etc/login.defs` is necessary for shadow-utils to manipulate users and groups. |
@ -1,499 +0,0 @@ |
||||
<section xmlns="http://docbook.org/ns/docbook" |
||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
||||
xml:id="sec-pkgs-dockerTools"> |
||||
<title>pkgs.dockerTools</title> |
||||
|
||||
<para> |
||||
<varname>pkgs.dockerTools</varname> is a set of functions for creating and manipulating Docker images according to the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120"> Docker Image Specification v1.2.0 </link>. Docker itself is not used to perform any of the operations done by these functions. |
||||
</para> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-buildImage"> |
||||
<title>buildImage</title> |
||||
|
||||
<para> |
||||
This function is analogous to the <command>docker build</command> command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with <command>docker load</command>. |
||||
</para> |
||||
|
||||
<para> |
||||
The parameters of <varname>buildImage</varname> with relative example values are described below: |
||||
</para> |
||||
|
||||
<example xml:id='ex-dockerTools-buildImage'> |
||||
<title>Docker build</title> |
||||
<programlisting> |
||||
buildImage { |
||||
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' /> |
||||
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' /> |
||||
|
||||
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' /> |
||||
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' /> |
||||
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' /> |
||||
|
||||
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' /> |
||||
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' /> |
||||
#!${pkgs.runtimeShell} |
||||
mkdir -p /data |
||||
''; |
||||
|
||||
config = { <co xml:id='ex-dockerTools-buildImage-8' /> |
||||
Cmd = [ "/bin/redis-server" ]; |
||||
WorkingDir = "/data"; |
||||
Volumes = { |
||||
"/data" = {}; |
||||
}; |
||||
}; |
||||
} |
||||
</programlisting> |
||||
</example> |
||||
|
||||
<para> |
||||
The above example will build a Docker image <literal>redis/latest</literal> from the given base image. Loading and running this image in Docker results in <literal>redis-server</literal> being started automatically. |
||||
</para> |
||||
|
||||
<calloutlist> |
||||
<callout arearefs='ex-dockerTools-buildImage-1'> |
||||
<para> |
||||
<varname>name</varname> specifies the name of the resulting image. This is the only required argument for <varname>buildImage</varname>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-2'> |
||||
<para> |
||||
<varname>tag</varname> specifies the tag of the resulting image. By default it's <literal>null</literal>, which indicates that the nix output hash will be used as tag. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-3'> |
||||
<para> |
||||
<varname>fromImage</varname> is the repository tarball containing the base image. It must be a valid Docker image, such as exported by <command>docker save</command>. By default it's <literal>null</literal>, which can be seen as equivalent to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-4'> |
||||
<para> |
||||
<varname>fromImageName</varname> can be used to further specify the base image within the repository, in case it contains multiple images. By default it's <literal>null</literal>, in which case <varname>buildImage</varname> will peek the first image available in the repository. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-5'> |
||||
<para> |
||||
<varname>fromImageTag</varname> can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it's <literal>null</literal>, in which case <varname>buildImage</varname> will peek the first tag available for the base image. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-6'> |
||||
<para> |
||||
<varname>contents</varname> is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as <command>ADD contents/ /</command> in a <filename>Dockerfile</filename>. By default it's <literal>null</literal>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'> |
||||
<para> |
||||
<varname>runAsRoot</varname> is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied <varname>contents</varname> derivation. This can be similarly seen as <command>RUN ...</command> in a <filename>Dockerfile</filename>. |
||||
<note> |
||||
<para> |
||||
Using this parameter requires the <literal>kvm</literal> device to be available. |
||||
</para> |
||||
</note> |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-buildImage-8'> |
||||
<para> |
||||
<varname>config</varname> is used to specify the configuration of the containers that will be started off the built image in Docker. The available options are listed in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions"> Docker Image Specification v1.2.0 </link>. |
||||
</para> |
||||
</callout> |
||||
</calloutlist> |
||||
|
||||
<para> |
||||
After the new layer has been created, its closure (to which <varname>contents</varname>, <varname>config</varname> and <varname>runAsRoot</varname> contribute) will be copied in the layer itself. Only new dependencies that are not already in the existing layers will be copied. |
||||
</para> |
||||
|
||||
<para> |
||||
At the end of the process, only one new single layer will be produced and added to the resulting image. |
||||
</para> |
||||
|
||||
<para> |
||||
The resulting repository will only list the single image <varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/> it would be <varname>redis/latest</varname>. |
||||
</para> |
||||
|
||||
<para> |
||||
It is possible to inspect the arguments with which an image was built using its <varname>buildArgs</varname> attribute. |
||||
</para> |
||||
|
||||
<note> |
||||
<para> |
||||
If you see errors similar to <literal>getProtocolByName: does not exist (no such protocol name: tcp)</literal> you may need to add <literal>pkgs.iana-etc</literal> to <varname>contents</varname>. |
||||
</para> |
||||
</note> |
||||
|
||||
<note> |
||||
<para> |
||||
If you see errors similar to <literal>Error_Protocol ("certificate has unknown CA",True,UnknownCa)</literal> you may need to add <literal>pkgs.cacert</literal> to <varname>contents</varname>. |
||||
</para> |
||||
</note> |
||||
|
||||
<example xml:id="example-pkgs-dockerTools-buildImage-creation-date"> |
||||
<title>Impurely Defining a Docker Layer's Creation Date</title> |
||||
<para> |
||||
By default <function>buildImage</function> will use a static date of one second past the UNIX Epoch. This allows <function>buildImage</function> to produce binary reproducible images. When listing images with <command>docker images</command>, the newly created images will be listed like this: |
||||
</para> |
||||
<screen> |
||||
<prompt>$ </prompt>docker images |
||||
REPOSITORY TAG IMAGE ID CREATED SIZE |
||||
hello latest 08c791c7846e 48 years ago 25.2MB |
||||
</screen> |
||||
<para> |
||||
You can break binary reproducibility but have a sorted, meaningful <literal>CREATED</literal> column by setting <literal>created</literal> to <literal>now</literal>. |
||||
</para> |
||||
<programlisting><![CDATA[ |
||||
pkgs.dockerTools.buildImage { |
||||
name = "hello"; |
||||
tag = "latest"; |
||||
created = "now"; |
||||
contents = pkgs.hello; |
||||
|
||||
config.Cmd = [ "/bin/hello" ]; |
||||
} |
||||
]]></programlisting> |
||||
<para> |
||||
and now the Docker CLI will display a reasonable date and sort the images as expected: |
||||
<screen> |
||||
<prompt>$ </prompt>docker images |
||||
REPOSITORY TAG IMAGE ID CREATED SIZE |
||||
hello latest de2bf4786de6 About a minute ago 25.2MB |
||||
</screen> |
||||
however, the produced images will not be binary reproducible. |
||||
</para> |
||||
</example> |
||||
</section> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-buildLayeredImage"> |
||||
<title>buildLayeredImage</title> |
||||
|
||||
<para> |
||||
Create a Docker image with many of the store paths being on their own layer to improve sharing between images. The image is realized into the Nix store as a gzipped tarball. Depending on the intended usage, many users might prefer to use <function>streamLayeredImage</function> instead, which this function uses internally. |
||||
</para> |
||||
|
||||
<variablelist> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>name</varname> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
The name of the resulting image. |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>tag</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Tag of the generated image. |
||||
</para> |
||||
<para> |
||||
<emphasis>Default:</emphasis> the output path's hash |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>contents</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Top level paths in the container. Either a single derivation, or a list of derivations. |
||||
</para> |
||||
<para> |
||||
<emphasis>Default:</emphasis> <literal>[]</literal> |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>config</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Run-time configuration of the container. A full list of the options is available in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions"> Docker Image Specification v1.2.0 </link>. |
||||
</para> |
||||
<para> |
||||
<emphasis>Default:</emphasis> <literal>{}</literal> |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>created</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Date and time the layers were created. Follows the same <literal>now</literal> exception supported by <literal>buildImage</literal>. |
||||
</para> |
||||
<para> |
||||
<emphasis>Default:</emphasis> <literal>1970-01-01T00:00:01Z</literal> |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>maxLayers</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Maximum number of layers to create. |
||||
</para> |
||||
<para> |
||||
<emphasis>Default:</emphasis> <literal>100</literal> |
||||
</para> |
||||
<para> |
||||
<emphasis>Maximum:</emphasis> <literal>125</literal> |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
<varlistentry> |
||||
<term> |
||||
<varname>extraCommands</varname> <emphasis>optional</emphasis> |
||||
</term> |
||||
<listitem> |
||||
<para> |
||||
Shell commands to run while building the final layer, without access to most of the layer contents. Changes to this layer are "on top" of all the other layers, so can create additional directories and files. |
||||
</para> |
||||
</listitem> |
||||
</varlistentry> |
||||
</variablelist> |
||||
|
||||
<section xml:id="dockerTools-buildLayeredImage-arg-contents"> |
||||
<title>Behavior of <varname>contents</varname> in the final image</title> |
||||
|
||||
<para> |
||||
Each path directly listed in <varname>contents</varname> will have a symlink in the root of the image. |
||||
</para> |
||||
|
||||
<para> |
||||
For example: |
||||
<programlisting><![CDATA[ |
||||
pkgs.dockerTools.buildLayeredImage { |
||||
name = "hello"; |
||||
contents = [ pkgs.hello ]; |
||||
} |
||||
]]></programlisting> |
||||
will create symlinks for all the paths in the <literal>hello</literal> package: |
||||
<screen><![CDATA[ |
||||
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello |
||||
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info |
||||
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo |
||||
]]></screen> |
||||
</para> |
||||
</section> |
||||
|
||||
<section xml:id="dockerTools-buildLayeredImage-arg-config"> |
||||
<title>Automatic inclusion of <varname>config</varname> references</title> |
||||
|
||||
<para> |
||||
The closure of <varname>config</varname> is automatically included in the closure of the final image. |
||||
</para> |
||||
|
||||
<para> |
||||
This allows you to make very simple Docker images with very little code. This container will start up and run <command>hello</command>: |
||||
<programlisting><![CDATA[ |
||||
pkgs.dockerTools.buildLayeredImage { |
||||
name = "hello"; |
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ]; |
||||
} |
||||
]]></programlisting> |
||||
</para> |
||||
</section> |
||||
|
||||
<section xml:id="dockerTools-buildLayeredImage-arg-maxLayers"> |
||||
<title>Adjusting <varname>maxLayers</varname></title> |
||||
|
||||
<para> |
||||
Increasing the <varname>maxLayers</varname> increases the number of layers which have a chance to be shared between different images. |
||||
</para> |
||||
|
||||
<para> |
||||
Modern Docker installations support up to 128 layers, however older versions support as few as 42. |
||||
</para> |
||||
|
||||
<para> |
||||
If the produced image will not be extended by other Docker builds, it is safe to set <varname>maxLayers</varname> to <literal>128</literal>. However it will be impossible to extend the image further. |
||||
</para> |
||||
|
||||
<para> |
||||
The first (<literal>maxLayers-2</literal>) most "popular" paths will have their own individual layers, then layer #<literal>maxLayers-1</literal> will contain all the remaining "unpopular" paths, and finally layer #<literal>maxLayers</literal> will contain the Image configuration. |
||||
</para> |
||||
|
||||
<para> |
||||
Docker's Layers are not inherently ordered, they are content-addressable and are not explicitly layered until they are composed in to an Image. |
||||
</para> |
||||
</section> |
||||
</section> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-streamLayeredImage"> |
||||
<title>streamLayeredImage</title> |
||||
|
||||
<para> |
||||
Builds a script which, when run, will stream an uncompressed tarball of a Docker image to stdout. The arguments to this function are as for <function>buildLayeredImage</function>. This method of constructing an image does not realize the image into the Nix store, so it saves on IO and disk/cache space, particularly with large images. |
||||
</para> |
||||
|
||||
<para> |
||||
The image produced by running the output script can be piped directly into <command>docker load</command>, to load it into the local docker daemon: |
||||
<screen><![CDATA[ |
||||
$(nix-build) | docker load |
||||
]]></screen> |
||||
</para> |
||||
<para> |
||||
Alternatively, the image can be piped via <command>gzip</command> into <command>skopeo</command>, e.g. to copy it into a registry: |
||||
<screen><![CDATA[ |
||||
$(nix-build) | gzip --fast | skopeo copy docker-archive:/dev/stdin docker://some_docker_registry/myimage:tag |
||||
]]></screen> |
||||
</para> |
||||
</section> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry"> |
||||
<title>pullImage</title> |
||||
|
||||
<para> |
||||
This function is analogous to the <command>docker pull</command> command, in that it can be used to pull a Docker image from a Docker registry. By default <link xlink:href="https://hub.docker.com/">Docker Hub</link> is used to pull images. |
||||
</para> |
||||
|
||||
<para> |
||||
Its parameters are described in the example below: |
||||
</para> |
||||
|
||||
<example xml:id='ex-dockerTools-pullImage'> |
||||
<title>Docker pull</title> |
||||
<programlisting> |
||||
pullImage { |
||||
imageName = "nixos/nix"; <co xml:id='ex-dockerTools-pullImage-1' /> |
||||
imageDigest = "sha256:20d9485b25ecfd89204e843a962c1bd70e9cc6858d65d7f5fadc340246e2116b"; <co xml:id='ex-dockerTools-pullImage-2' /> |
||||
finalImageName = "nix"; <co xml:id='ex-dockerTools-pullImage-3' /> |
||||
finalImageTag = "1.11"; <co xml:id='ex-dockerTools-pullImage-4' /> |
||||
sha256 = "0mqjy3zq2v6rrhizgb9nvhczl87lcfphq9601wcprdika2jz7qh8"; <co xml:id='ex-dockerTools-pullImage-5' /> |
||||
os = "linux"; <co xml:id='ex-dockerTools-pullImage-6' /> |
||||
arch = "x86_64"; <co xml:id='ex-dockerTools-pullImage-7' /> |
||||
} |
||||
</programlisting> |
||||
</example> |
||||
|
||||
<calloutlist> |
||||
<callout arearefs='ex-dockerTools-pullImage-1'> |
||||
<para> |
||||
<varname>imageName</varname> specifies the name of the image to be downloaded, which can also include the registry namespace (e.g. <literal>nixos</literal>). This argument is required. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-2'> |
||||
<para> |
||||
<varname>imageDigest</varname> specifies the digest of the image to be downloaded. This argument is required. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-3'> |
||||
<para> |
||||
<varname>finalImageName</varname>, if specified, this is the name of the image to be created. Note it is never used to fetch the image since we prefer to rely on the immutable digest ID. By default it's equal to <varname>imageName</varname>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-4'> |
||||
<para> |
||||
<varname>finalImageTag</varname>, if specified, this is the tag of the image to be created. Note it is never used to fetch the image since we prefer to rely on the immutable digest ID. By default it's <literal>latest</literal>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-5'> |
||||
<para> |
||||
<varname>sha256</varname> is the checksum of the whole fetched image. This argument is required. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-6'> |
||||
<para> |
||||
<varname>os</varname>, if specified, is the operating system of the fetched image. By default it's <literal>linux</literal>. |
||||
</para> |
||||
</callout> |
||||
<callout arearefs='ex-dockerTools-pullImage-7'> |
||||
<para> |
||||
<varname>arch</varname>, if specified, is the cpu architecture of the fetched image. By default it's <literal>x86_64</literal>. |
||||
</para> |
||||
</callout> |
||||
</calloutlist> |
||||
|
||||
<para> |
||||
<literal>nix-prefetch-docker</literal> command can be used to get required image parameters: |
||||
<screen> |
||||
<prompt>$ </prompt>nix run nixpkgs.nix-prefetch-docker -c nix-prefetch-docker --image-name mysql --image-tag 5 |
||||
</screen> |
||||
Since a given <varname>imageName</varname> may transparently refer to a manifest list of images which support multiple architectures and/or operating systems, you can supply the <option>--os</option> and <option>--arch</option> arguments to specify exactly which image you want. By default it will match the OS and architecture of the host the command is run on. |
||||
<screen> |
||||
<prompt>$ </prompt>nix-prefetch-docker --image-name mysql --image-tag 5 --arch x86_64 --os linux |
||||
</screen> |
||||
Desired image name and tag can be set using <option>--final-image-name</option> and <option>--final-image-tag</option> arguments: |
||||
<screen> |
||||
<prompt>$ </prompt>nix-prefetch-docker --image-name mysql --image-tag 5 --final-image-name eu.gcr.io/my-project/mysql --final-image-tag prod |
||||
</screen> |
||||
</para> |
||||
</section> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-exportImage"> |
||||
<title>exportImage</title> |
||||
|
||||
<para> |
||||
This function is analogous to the <command>docker export</command> command, in that it can be used to flatten a Docker image that contains multiple layers. It is in fact the result of the merge of all the layers of the image. As such, the result is suitable for being imported in Docker with <command>docker import</command>. |
||||
</para> |
||||
|
||||
<note> |
||||
<para> |
||||
Using this function requires the <literal>kvm</literal> device to be available. |
||||
</para> |
||||
</note> |
||||
|
||||
<para> |
||||
The parameters of <varname>exportImage</varname> are the following: |
||||
</para> |
||||
|
||||
<example xml:id='ex-dockerTools-exportImage'> |
||||
<title>Docker export</title> |
||||
<programlisting> |
||||
exportImage { |
||||
fromImage = someLayeredImage; |
||||
fromImageName = null; |
||||
fromImageTag = null; |
||||
|
||||
name = someLayeredImage.name; |
||||
} |
||||
</programlisting> |
||||
</example> |
||||
|
||||
<para> |
||||
The parameters relative to the base image have the same synopsis as described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that <varname>fromImage</varname> is the only required argument in this case. |
||||
</para> |
||||
|
||||
<para> |
||||
The <varname>name</varname> argument is the name of the derivation output, which defaults to <varname>fromImage.name</varname>. |
||||
</para> |
||||
</section> |
||||
|
||||
<section xml:id="ssec-pkgs-dockerTools-shadowSetup"> |
||||
<title>shadowSetup</title> |
||||
|
||||
<para> |
||||
This constant string is a helper for setting up the base files for managing users and groups, only if such files don't exist already. It is suitable for being used in a <varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script for cases like in the example below: |
||||
</para> |
||||
|
||||
<example xml:id='ex-dockerTools-shadowSetup'> |
||||
<title>Shadow base files</title> |
||||
<programlisting> |
||||
buildImage { |
||||
name = "shadow-basic"; |
||||
|
||||
runAsRoot = '' |
||||
#!${pkgs.runtimeShell} |
||||
${shadowSetup} |
||||
groupadd -r redis |
||||
useradd -r -g redis redis |
||||
mkdir /data |
||||
chown redis:redis /data |
||||
''; |
||||
} |
||||
</programlisting> |
||||
</example> |
||||
|
||||
<para> |
||||
Creating base files like <literal>/etc/passwd</literal> or <literal>/etc/login.defs</literal> is necessary for shadow-utils to manipulate users and groups. |
||||
</para> |
||||
</section> |
||||
</section> |
@ -0,0 +1,64 @@ |
||||
# Eclipse {#sec-eclipse} |
||||
|
||||
The Nix expressions related to the Eclipse platform and IDE are in [`pkgs/applications/editors/eclipse`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse). |
||||
|
||||
Nixpkgs provides a number of packages that will install Eclipse in its various forms. These range from the bare-bones Eclipse Platform to the more fully featured Eclipse SDK or Scala-IDE packages and multiple versions are often available. It is possible to list available Eclipse packages by issuing the command: |
||||
|
||||
```ShellSession |
||||
$ nix-env -f '<nixpkgs>' -qaP -A eclipses --description |
||||
``` |
||||
|
||||
Once an Eclipse variant is installed it can be run using the `eclipse` command, as expected. From within Eclipse it is then possible to install plugins in the usual manner by either manually specifying an Eclipse update site or by installing the Marketplace Client plugin and using it to discover and install other plugins. This installation method provides an Eclipse installation that closely resembles a manually installed Eclipse. |
||||
|
||||
If you prefer to install plugins in a more declarative manner then Nixpkgs also offers a number of Eclipse plugins that can be installed in an _Eclipse environment_. This type of environment is created using the function `eclipseWithPlugins` found inside the `nixpkgs.eclipses` attribute set. This function takes as argument `{ eclipse, plugins ? [], jvmArgs ? [] }` where `eclipse` is one of the Eclipse packages described above, `plugins` is a list of plugin derivations, and `jvmArgs` is a list of arguments given to the JVM running the Eclipse. For example, say you wish to install the latest Eclipse Platform with the popular Eclipse Color Theme plugin and also allow Eclipse to use more RAM. You could then add |
||||
|
||||
```nix |
||||
packageOverrides = pkgs: { |
||||
myEclipse = with pkgs.eclipses; eclipseWithPlugins { |
||||
eclipse = eclipse-platform; |
||||
jvmArgs = [ "-Xmx2048m" ]; |
||||
plugins = [ plugins.color-theme ]; |
||||
}; |
||||
} |
||||
``` |
||||
|
||||
to your Nixpkgs configuration (`~/.config/nixpkgs/config.nix`) and install it by running `nix-env -f '<nixpkgs>' -iA myEclipse` and afterward run Eclipse as usual. It is possible to find out which plugins are available for installation using `eclipseWithPlugins` by running |
||||
|
||||
```ShellSession |
||||
$ nix-env -f '<nixpkgs>' -qaP -A eclipses.plugins --description |
||||
``` |
||||
|
||||
If there is a need to install plugins that are not available in Nixpkgs then it may be possible to define these plugins outside Nixpkgs using the `buildEclipseUpdateSite` and `buildEclipsePlugin` functions found in the `nixpkgs.eclipses.plugins` attribute set. Use the `buildEclipseUpdateSite` function to install a plugin distributed as an Eclipse update site. This function takes `{ name, src }` as argument where `src` indicates the Eclipse update site archive. All Eclipse features and plugins within the downloaded update site will be installed. When an update site archive is not available then the `buildEclipsePlugin` function can be used to install a plugin that consists of a pair of feature and plugin JARs. This function takes an argument `{ name, srcFeature, srcPlugin }` where `srcFeature` and `srcPlugin` are the feature and plugin JARs, respectively. |
||||
|
||||
Expanding the previous example with two plugins using the above functions we have |
||||
|
||||
```nix |
||||
packageOverrides = pkgs: { |
||||
myEclipse = with pkgs.eclipses; eclipseWithPlugins { |
||||
eclipse = eclipse-platform; |
||||
jvmArgs = [ "-Xmx2048m" ]; |
||||
plugins = [ |
||||
plugins.color-theme |
||||
(plugins.buildEclipsePlugin { |
||||
name = "myplugin1-1.0"; |
||||
srcFeature = fetchurl { |
||||
url = "http://…/features/myplugin1.jar"; |
||||
sha256 = "123…"; |
||||
}; |
||||
srcPlugin = fetchurl { |
||||
url = "http://…/plugins/myplugin1.jar"; |
||||
sha256 = "123…"; |
||||
}; |
||||
}); |
||||
(plugins.buildEclipseUpdateSite { |
||||
name = "myplugin2-1.0"; |
||||
src = fetchurl { |
||||
stripRoot = false; |
||||
url = "http://…/myplugin2.zip"; |
||||
sha256 = "123…"; |
||||
}; |
||||
}); |
||||
]; |
||||
}; |
||||
} |
||||
``` |
@ -1,72 +0,0 @@ |
||||
<section xmlns="http://docbook.org/ns/docbook" |
||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
||||
xml:id="sec-eclipse"> |
||||
<title>Eclipse</title> |
||||
|
||||
<para> |
||||
The Nix expressions related to the Eclipse platform and IDE are in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/eclipse"><filename>pkgs/applications/editors/eclipse</filename></link>. |
||||
</para> |
||||
|
||||
<para> |
||||
Nixpkgs provides a number of packages that will install Eclipse in its various forms. These range from the bare-bones Eclipse Platform to the more fully featured Eclipse SDK or Scala-IDE packages and multiple versions are often available. It is possible to list available Eclipse packages by issuing the command: |
||||
<screen> |
||||
<prompt>$ </prompt>nix-env -f '<nixpkgs>' -qaP -A eclipses --description |
||||
</screen> |
||||
Once an Eclipse variant is installed it can be run using the <command>eclipse</command> command, as expected. From within Eclipse it is then possible to install plugins in the usual manner by either manually specifying an Eclipse update site or by installing the Marketplace Client plugin and using it to discover and install other plugins. This installation method provides an Eclipse installation that closely resembles a manually installed Eclipse. |
||||
</para> |
||||
|
||||
<para> |
||||
If you prefer to install plugins in a more declarative manner then Nixpkgs also offers a number of Eclipse plugins that can be installed in an <emphasis>Eclipse environment</emphasis>. This type of environment is created using the function <varname>eclipseWithPlugins</varname> found inside the <varname>nixpkgs.eclipses</varname> attribute set. This function takes as argument <literal>{ eclipse, plugins ? [], jvmArgs ? [] }</literal> where <varname>eclipse</varname> is one of the Eclipse packages described above, <varname>plugins</varname> is a list of plugin derivations, and <varname>jvmArgs</varname> is a list of arguments given to the JVM running the Eclipse. For example, say you wish to install the latest Eclipse Platform with the popular Eclipse Color Theme plugin and also allow Eclipse to use more RAM. You could then add |
||||
<screen> |
||||
packageOverrides = pkgs: { |
||||
myEclipse = with pkgs.eclipses; eclipseWithPlugins { |
||||
eclipse = eclipse-platform; |
||||
jvmArgs = [ "-Xmx2048m" ]; |
||||
plugins = [ plugins.color-theme ]; |
||||
}; |
||||
} |
||||
</screen> |
||||
to your Nixpkgs configuration (<filename>~/.config/nixpkgs/config.nix</filename>) and install it by running <command>nix-env -f '<nixpkgs>' -iA myEclipse</command> and afterward run Eclipse as usual. It is possible to find out which plugins are available for installation using <varname>eclipseWithPlugins</varname> by running |
||||
<screen> |
||||
<prompt>$ </prompt>nix-env -f '<nixpkgs>' -qaP -A eclipses.plugins --description |
||||
</screen> |
||||
</para> |
||||
|
||||
<para> |
||||
If there is a need to install plugins that are not available in Nixpkgs then it may be possible to define these plugins outside Nixpkgs using the <varname>buildEclipseUpdateSite</varname> and <varname>buildEclipsePlugin</varname> functions found in the <varname>nixpkgs.eclipses.plugins</varname> attribute set. Use the <varname>buildEclipseUpdateSite</varname> function to install a plugin distributed as an Eclipse update site. This function takes <literal>{ name, src }</literal> as argument where <literal>src</literal> indicates the Eclipse update site archive. All Eclipse features and plugins within the downloaded update site will be installed. When an update site archive is not available then the <varname>buildEclipsePlugin</varname> function can be used to install a plugin that consists of a pair of feature and plugin JARs. This function takes an argument <literal>{ name, srcFeature, srcPlugin }</literal> where <literal>srcFeature</literal> and <literal>srcPlugin</literal> are the feature and plugin JARs, respectively. |
||||
</para> |
||||
|
||||
<para> |
||||
Expanding the previous example with two plugins using the above functions we have |
||||
<screen> |
||||
packageOverrides = pkgs: { |
||||
myEclipse = with pkgs.eclipses; eclipseWithPlugins { |
||||
eclipse = eclipse-platform; |
||||
jvmArgs = [ "-Xmx2048m" ]; |
||||
plugins = [ |
||||
plugins.color-theme |
||||
(plugins.buildEclipsePlugin { |
||||
name = "myplugin1-1.0"; |
||||
srcFeature = fetchurl { |
||||
url = "http://…/features/myplugin1.jar"; |
||||
sha256 = "123…"; |
||||
}; |
||||
srcPlugin = fetchurl { |
||||
url = "http://…/plugins/myplugin1.jar"; |
||||
sha256 = "123…"; |
||||
}; |
||||
}); |
||||
(plugins.buildEclipseUpdateSite { |
||||
name = "myplugin2-1.0"; |
||||
src = fetchurl { |
||||
stripRoot = false; |
||||
url = "http://…/myplugin2.zip"; |
||||
sha256 = "123…"; |
||||
}; |
||||
}); |
||||
]; |
||||
}; |
||||
} |
||||
</screen> |
||||
</para> |
||||
</section> |
@ -0,0 +1,62 @@ |
||||
# Platform Notes {#chap-platform-notes} |
||||
|
||||
## Darwin (macOS) {#sec-darwin} |
||||
|
||||
Some common issues when packaging software for Darwin: |
||||
|
||||
- The Darwin `stdenv` uses clang instead of gcc. When referring to the compiler `$CC` or `cc` will work in both cases. Some builds hardcode gcc/g++ in their build scripts, that can usually be fixed by using something like `makeFlags = [ "CC=cc" ];` or by patching the build scripts. |
||||
|
||||
```nix |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
buildPhase = '' |
||||
$CC -o hello hello.c |
||||
''; |
||||
} |
||||
``` |
||||
|
||||
- On Darwin, libraries are linked using absolute paths, libraries are resolved by their `install_name` at link time. Sometimes packages won’t set this correctly causing the library lookups to fail at runtime. This can be fixed by adding extra linker flags or by running `install_name_tool -id` during the `fixupPhase`. |
||||
|
||||
```nix |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib"; |
||||
} |
||||
``` |
||||
|
||||
- Even if the libraries are linked using absolute paths and resolved via their `install_name` correctly, tests can sometimes fail to run binaries. This happens because the `checkPhase` runs before the libraries are installed. |
||||
|
||||
This can usually be solved by running the tests after the `installPhase` or alternatively by using `DYLD_LIBRARY_PATH`. More information about this variable can be found in the *dyld(1)* manpage. |
||||
|
||||
``` |
||||
dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib |
||||
Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq |
||||
Reason: image not found |
||||
./tests/jqtest: line 5: 75779 Abort trap: 6 |
||||
``` |
||||
|
||||
```nix |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
doInstallCheck = true; |
||||
installCheckTarget = "check"; |
||||
} |
||||
``` |
||||
|
||||
- Some packages assume xcode is available and use `xcrun` to resolve build tools like `clang`, etc. This causes errors like `xcode-select: error: no developer tools were found at '/Applications/Xcode.app'` while the build doesn’t actually depend on xcode. |
||||
|
||||
```nix |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
prePatch = '' |
||||
substituteInPlace Makefile \ |
||||
--replace '/usr/bin/xcrun clang' clang |
||||
''; |
||||
} |
||||
``` |
||||
|
||||
The package `xcbuild` can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues. |
@ -1,83 +0,0 @@ |
||||
<chapter xmlns="http://docbook.org/ns/docbook" |
||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
||||
xml:id="chap-platform-notes"> |
||||
<title>Platform Notes</title> |
||||
<section xml:id="sec-darwin"> |
||||
<title>Darwin (macOS)</title> |
||||
|
||||
<para> |
||||
Some common issues when packaging software for Darwin: |
||||
</para> |
||||
|
||||
<itemizedlist> |
||||
<listitem> |
||||
<para> |
||||
The Darwin <literal>stdenv</literal> uses clang instead of gcc. When referring to the compiler <varname>$CC</varname> or <command>cc</command> will work in both cases. Some builds hardcode gcc/g++ in their build scripts, that can usually be fixed by using something like <literal>makeFlags = [ "CC=cc" ];</literal> or by patching the build scripts. |
||||
</para> |
||||
<programlisting> |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
buildPhase = '' |
||||
$CC -o hello hello.c |
||||
''; |
||||
} |
||||
</programlisting> |
||||
</listitem> |
||||
<listitem> |
||||
<para> |
||||
On Darwin, libraries are linked using absolute paths, libraries are resolved by their <literal>install_name</literal> at link time. Sometimes packages won't set this correctly causing the library lookups to fail at runtime. This can be fixed by adding extra linker flags or by running <command>install_name_tool -id</command> during the <function>fixupPhase</function>. |
||||
</para> |
||||
<programlisting> |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
makeFlags = lib.optional stdenv.isDarwin "LDFLAGS=-Wl,-install_name,$(out)/lib/libfoo.dylib"; |
||||
} |
||||
</programlisting> |
||||
</listitem> |
||||
<listitem> |
||||
<para> |
||||
Even if the libraries are linked using absolute paths and resolved via their <literal>install_name</literal> correctly, tests can sometimes fail to run binaries. This happens because the <varname>checkPhase</varname> runs before the libraries are installed. |
||||
</para> |
||||
<para> |
||||
This can usually be solved by running the tests after the <varname>installPhase</varname> or alternatively by using <varname>DYLD_LIBRARY_PATH</varname>. More information about this variable can be found in the <citerefentry> |
||||
<refentrytitle>dyld</refentrytitle> |
||||
<manvolnum>1</manvolnum></citerefentry> manpage. |
||||
</para> |
||||
<programlisting> |
||||
dyld: Library not loaded: /nix/store/7hnmbscpayxzxrixrgxvvlifzlxdsdir-jq-1.5-lib/lib/libjq.1.dylib |
||||
Referenced from: /private/tmp/nix-build-jq-1.5.drv-0/jq-1.5/tests/../jq |
||||
Reason: image not found |
||||
./tests/jqtest: line 5: 75779 Abort trap: 6 |
||||
</programlisting> |
||||
<programlisting> |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
doInstallCheck = true; |
||||
installCheckTarget = "check"; |
||||
} |
||||
</programlisting> |
||||
</listitem> |
||||
<listitem> |
||||
<para> |
||||
Some packages assume xcode is available and use <command>xcrun</command> to resolve build tools like <command>clang</command>, etc. This causes errors like <code>xcode-select: error: no developer tools were found at '/Applications/Xcode.app'</code> while the build doesn't actually depend on xcode. |
||||
</para> |
||||
<programlisting> |
||||
stdenv.mkDerivation { |
||||
name = "libfoo-1.2.3"; |
||||
# ... |
||||
prePatch = '' |
||||
substituteInPlace Makefile \ |
||||
--replace '/usr/bin/xcrun clang' clang |
||||
''; |
||||
} |
||||
</programlisting> |
||||
<para> |
||||
The package <literal>xcbuild</literal> can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues. |
||||
</para> |
||||
</listitem> |
||||
</itemizedlist> |
||||
</section> |
||||
</chapter> |
@ -0,0 +1,67 @@ |
||||
<section xmlns="http://docbook.org/ns/docbook" |
||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
||||
version="5.0" |
||||
xml:id="sec-rename-ifs"> |
||||
<title>Renaming network interfaces</title> |
||||
|
||||
<para> |
||||
NixOS uses the udev |
||||
<link xlink:href="https://systemd.io/PREDICTABLE_INTERFACE_NAMES/">predictable naming scheme</link> |
||||
to assign names to network interfaces. This means that by default |
||||
cards are not given the traditional names like |
||||
<literal>eth0</literal> or <literal>eth1</literal>, whose order can |
||||
change unpredictably across reboots. Instead, relying on physical |
||||
locations and firmware information, the scheme produces names like |
||||
<literal>ens1</literal>, <literal>enp2s0</literal>, etc. |
||||
</para> |
||||
|
||||
<para> |
||||
These names are predictable but less memorable and not necessarily |
||||
stable: for example installing new hardware or changing firmware |
||||
settings can result in a |
||||
<link xlink:href="https://github.com/systemd/systemd/issues/3715#issue-165347602">name change</link>. |
||||
If this is undesirable, for example if you have a single ethernet |
||||
card, you can revert to the traditional scheme by setting |
||||
<xref linkend="opt-networking.usePredictableInterfaceNames"/> to |
||||
<literal>false</literal>. |
||||
</para> |
||||
|
||||
<section xml:id="sec-custom-ifnames"> |
||||
<title>Assigning custom names</title> |
||||
<para> |
||||
In case there are multiple interfaces of the same type, it’s better to |
||||
assign custom names based on the device hardware address. For |
||||
example, we assign the name <literal>wan</literal> to the interface |
||||
with MAC address <literal>52:54:00:12:01:01</literal> using a |
||||
networkd link unit: |
||||
</para> |
||||
<programlisting> |
||||
<link linkend="opt-systemd.network.links">systemd.network.links."10-wan"</link> = { |
||||
matchConfig.MACAddress = "52:54:00:12:01:01"; |
||||
linkConfig.Name = "wan"; |
||||
}; |
||||
</programlisting> |
||||
<para> |
||||
Note that links are directly read by udev, <emphasis>not networkd</emphasis>, |
||||
and will work even if networkd is disabled. |
||||
</para> |
||||
<para> |
||||
Alternatively, we can use a plain old udev rule: |
||||
</para> |
||||
<programlisting> |
||||
<link linkend="opt-services.udev.initrdRules">services.udev.initrdRules</link> = '' |
||||
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", \ |
||||
ATTR{address}=="52:54:00:12:01:01", KERNEL=="eth*", NAME="wan" |
||||
''; |
||||
</programlisting> |
||||
|
||||
<warning><para> |
||||
The rule must be installed in the initrd using |
||||
<literal>services.udev.initrdRules</literal>, not the usual |
||||
<literal>services.udev.extraRules</literal> option. This is to avoid race |
||||
conditions with other programs controlling the interface. |
||||
</para></warning> |
||||
</section> |
||||
|
||||
</section> |
@ -1,7 +1,14 @@ |
||||
{ pkgs, ... }: |
||||
|
||||
{ config, ... }: |
||||
{ |
||||
imports = [ ./sd-image-aarch64.nix ]; |
||||
|
||||
boot.kernelPackages = pkgs.linuxPackages_latest; |
||||
imports = [ |
||||
../sd-card/sd-image-aarch64-new-kernel-installer.nix |
||||
]; |
||||
config = { |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image-aarch64-new-kernel.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image-aarch64-new-kernel-installer.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
} |
||||
|
@ -1,80 +1,14 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-aarch64.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ config, ... }: |
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
../../profiles/installation-device.nix |
||||
./sd-image.nix |
||||
../sd-card/sd-image-aarch64-installer.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
|
||||
# The serial ports listed here are: |
||||
# - ttyS0: for Tegra (Jetson TX1) |
||||
# - ttyAMA0: for QEMU's -machine virt |
||||
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"]; |
||||
|
||||
boot.initrd.availableKernelModules = [ |
||||
# Allows early (earlier) modesetting for the Raspberry Pi |
||||
"vc4" "bcm2835_dma" "i2c_bcm2835" |
||||
# Allows early (earlier) modesetting for Allwinner SoCs |
||||
"sun4i_drm" "sun8i_drm_hdmi" "sun8i_mixer" |
||||
]; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
[pi3] |
||||
kernel=u-boot-rpi3.bin |
||||
|
||||
[pi4] |
||||
kernel=u-boot-rpi4.bin |
||||
enable_gic=1 |
||||
armstub=armstub8-gic.bin |
||||
|
||||
# Otherwise the resolution will be weird in most cases, compared to |
||||
# what the pi3 firmware does by default. |
||||
disable_overscan=1 |
||||
|
||||
[all] |
||||
# Boot in 64-bit mode. |
||||
arm_64bit=1 |
||||
|
||||
# U-Boot needs this to work, regardless of whether UART is actually used or not. |
||||
# Look in arch/arm/mach-bcm283x/Kconfig in the U-Boot tree to see if this is still |
||||
# a requirement in the future. |
||||
enable_uart=1 |
||||
|
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
|
||||
# Add the config |
||||
cp ${configTxt} firmware/config.txt |
||||
|
||||
# Add pi3 specific files |
||||
cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/u-boot-rpi3.bin |
||||
|
||||
# Add pi4 specific files |
||||
cp ${pkgs.ubootRaspberryPi4_64bit}/u-boot.bin firmware/u-boot-rpi4.bin |
||||
cp ${pkgs.raspberrypi-armstubs}/armstub8-gic.bin firmware/armstub8-gic.bin |
||||
cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb firmware/ |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
config = { |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image-aarch64.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image-aarch64-installer.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
||||
|
@ -1,57 +1,14 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ config, ... }: |
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
../../profiles/installation-device.nix |
||||
./sd-image.nix |
||||
../sd-card/sd-image-armv7l-multiplatform-installer.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
boot.kernelPackages = pkgs.linuxPackages_latest; |
||||
# The serial ports listed here are: |
||||
# - ttyS0: for Tegra (Jetson TK1) |
||||
# - ttymxc0: for i.MX6 (Wandboard) |
||||
# - ttyAMA0: for Allwinner (pcDuino3 Nano) and QEMU's -machine virt |
||||
# - ttyO0: for OMAP (BeagleBone Black) |
||||
# - ttySAC2: for Exynos (ODROID-XU3) |
||||
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=ttySAC2,115200n8" "console=tty0"]; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
|
||||
[pi2] |
||||
kernel=u-boot-rpi2.bin |
||||
|
||||
[pi3] |
||||
kernel=u-boot-rpi3.bin |
||||
|
||||
# U-Boot used to need this to work, regardless of whether UART is actually used or not. |
||||
# TODO: check when/if this can be removed. |
||||
enable_uart=1 |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
cp ${pkgs.ubootRaspberryPi2}/u-boot.bin firmware/u-boot-rpi2.bin |
||||
cp ${pkgs.ubootRaspberryPi3_32bit}/u-boot.bin firmware/u-boot-rpi3.bin |
||||
cp ${configTxt} firmware/config.txt |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
config = { |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image-armv7l-multiplatform.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image-armv7l-multiplatform-installer.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
||||
|
@ -1,46 +1,14 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ config, ... }: |
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
../../profiles/installation-device.nix |
||||
./sd-image.nix |
||||
../sd-card/sd-image-raspberrypi-installer.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
boot.kernelPackages = pkgs.linuxPackages_rpi1; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
|
||||
[pi0] |
||||
kernel=u-boot-rpi0.bin |
||||
|
||||
[pi1] |
||||
kernel=u-boot-rpi1.bin |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin firmware/u-boot-rpi0.bin |
||||
cp ${pkgs.ubootRaspberryPi}/u-boot.bin firmware/u-boot-rpi1.bin |
||||
cp ${configTxt} firmware/config.txt |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
config = { |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image-raspberrypi.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image-raspberrypi-installer.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
||||
|
@ -1,8 +1,14 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/cd-dvd/sd-image-raspberrypi4.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ config, ... }: |
||||
{ |
||||
imports = [ ./sd-image-aarch64.nix ]; |
||||
boot.kernelPackages = pkgs.linuxPackages_rpi4; |
||||
imports = [ |
||||
../sd-card/sd-image-raspberrypi4-installer.nix |
||||
]; |
||||
config = { |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image-raspberrypi4.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image-raspberrypi4-installer.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
} |
||||
|
@ -1,245 +1,14 @@ |
||||
# This module creates a bootable SD card image containing the given NixOS |
||||
# configuration. The generated image is MBR partitioned, with a FAT |
||||
# /boot/firmware partition, and ext4 root partition. The generated image |
||||
# is sized to fit its contents, and a boot script automatically resizes |
||||
# the root partition to fit the device on the first boot. |
||||
# |
||||
# The firmware partition is built with expectation to hold the Raspberry |
||||
# Pi firmware and bootloader, and be removed and replaced with a firmware |
||||
# build for the target SoC for other board families. |
||||
# |
||||
# The derivation for the SD image will be placed in |
||||
# config.system.build.sdImage |
||||
|
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
rootfsImage = pkgs.callPackage ../../../lib/make-ext4-fs.nix ({ |
||||
inherit (config.sdImage) storePaths; |
||||
compressImage = true; |
||||
populateImageCommands = config.sdImage.populateRootCommands; |
||||
volumeLabel = "NIXOS_SD"; |
||||
} // optionalAttrs (config.sdImage.rootPartitionUUID != null) { |
||||
uuid = config.sdImage.rootPartitionUUID; |
||||
}); |
||||
in |
||||
{ config, ... }: |
||||
{ |
||||
imports = [ |
||||
(mkRemovedOptionModule [ "sdImage" "bootPartitionID" ] "The FAT partition for SD image now only holds the Raspberry Pi firmware files. Use firmwarePartitionID to configure that partition's ID.") |
||||
(mkRemovedOptionModule [ "sdImage" "bootSize" ] "The boot files for SD image have been moved to the main ext4 partition. The FAT partition now only holds the Raspberry Pi firmware files. Changing its size may not be required.") |
||||
../sd-card/sd-image.nix |
||||
]; |
||||
|
||||
options.sdImage = { |
||||
imageName = mkOption { |
||||
default = "${config.sdImage.imageBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img"; |
||||
description = '' |
||||
Name of the generated image file. |
||||
''; |
||||
}; |
||||
|
||||
imageBaseName = mkOption { |
||||
default = "nixos-sd-image"; |
||||
description = '' |
||||
Prefix of the name of the generated image file. |
||||
''; |
||||
}; |
||||
|
||||
storePaths = mkOption { |
||||
type = with types; listOf package; |
||||
example = literalExample "[ pkgs.stdenv ]"; |
||||
description = '' |
||||
Derivations to be included in the Nix store in the generated SD image. |
||||
''; |
||||
}; |
||||
|
||||
firmwarePartitionID = mkOption { |
||||
type = types.str; |
||||
default = "0x2178694e"; |
||||
description = '' |
||||
Volume ID for the /boot/firmware partition on the SD card. This value |
||||
must be a 32-bit hexadecimal number. |
||||
''; |
||||
}; |
||||
|
||||
firmwarePartitionName = mkOption { |
||||
type = types.str; |
||||
default = "FIRMWARE"; |
||||
description = '' |
||||
Name of the filesystem which holds the boot firmware. |
||||
''; |
||||
}; |
||||
|
||||
rootPartitionUUID = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
example = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7"; |
||||
description = '' |
||||
UUID for the filesystem on the main NixOS partition on the SD card. |
||||
''; |
||||
}; |
||||
|
||||
firmwareSize = mkOption { |
||||
type = types.int; |
||||
# As of 2019-08-18 the Raspberry pi firmware + u-boot takes ~18MiB |
||||
default = 30; |
||||
description = '' |
||||
Size of the /boot/firmware partition, in megabytes. |
||||
''; |
||||
}; |
||||
|
||||
populateFirmwareCommands = mkOption { |
||||
example = literalExample "'' cp \${pkgs.myBootLoader}/u-boot.bin firmware/ ''"; |
||||
description = '' |
||||
Shell commands to populate the ./firmware directory. |
||||
All files in that directory are copied to the |
||||
/boot/firmware partition on the SD image. |
||||
''; |
||||
}; |
||||
|
||||
populateRootCommands = mkOption { |
||||
example = literalExample "''\${config.boot.loader.generic-extlinux-compatible.populateCmd} -c \${config.system.build.toplevel} -d ./files/boot''"; |
||||
description = '' |
||||
Shell commands to populate the ./files directory. |
||||
All files in that directory are copied to the |
||||
root (/) partition on the SD image. Use this to |
||||
populate the ./files/boot (/boot) directory. |
||||
''; |
||||
}; |
||||
|
||||
postBuildCommands = mkOption { |
||||
example = literalExample "'' dd if=\${pkgs.myBootLoader}/SPL of=$img bs=1024 seek=1 conv=notrunc ''"; |
||||
default = ""; |
||||
description = '' |
||||
Shell commands to run after the image is built. |
||||
Can be used for boards requiring to dd u-boot SPL before actual partitions. |
||||
''; |
||||
}; |
||||
|
||||
compressImage = mkOption { |
||||
type = types.bool; |
||||
default = true; |
||||
description = '' |
||||
Whether the SD image should be compressed using |
||||
<command>zstd</command>. |
||||
''; |
||||
}; |
||||
|
||||
}; |
||||
|
||||
config = { |
||||
fileSystems = { |
||||
"/boot/firmware" = { |
||||
device = "/dev/disk/by-label/${config.sdImage.firmwarePartitionName}"; |
||||
fsType = "vfat"; |
||||
# Alternatively, this could be removed from the configuration. |
||||
# The filesystem is not needed at runtime, it could be treated |
||||
# as an opaque blob instead of a discrete FAT32 filesystem. |
||||
options = [ "nofail" "noauto" ]; |
||||
}; |
||||
"/" = { |
||||
device = "/dev/disk/by-label/NIXOS_SD"; |
||||
fsType = "ext4"; |
||||
}; |
||||
}; |
||||
|
||||
sdImage.storePaths = [ config.system.build.toplevel ]; |
||||
|
||||
system.build.sdImage = pkgs.callPackage ({ stdenv, dosfstools, e2fsprogs, |
||||
mtools, libfaketime, util-linux, zstd }: stdenv.mkDerivation { |
||||
name = config.sdImage.imageName; |
||||
|
||||
nativeBuildInputs = [ dosfstools e2fsprogs mtools libfaketime util-linux zstd ]; |
||||
|
||||
inherit (config.sdImage) compressImage; |
||||
|
||||
buildCommand = '' |
||||
mkdir -p $out/nix-support $out/sd-image |
||||
export img=$out/sd-image/${config.sdImage.imageName} |
||||
|
||||
echo "${pkgs.stdenv.buildPlatform.system}" > $out/nix-support/system |
||||
if test -n "$compressImage"; then |
||||
echo "file sd-image $img.zst" >> $out/nix-support/hydra-build-products |
||||
else |
||||
echo "file sd-image $img" >> $out/nix-support/hydra-build-products |
||||
fi |
||||
|
||||
echo "Decompressing rootfs image" |
||||
zstd -d --no-progress "${rootfsImage}" -o ./root-fs.img |
||||
|
||||
# Gap in front of the first partition, in MiB |
||||
gap=8 |
||||
|
||||
# Create the image file sized to fit /boot/firmware and /, plus slack for the gap. |
||||
rootSizeBlocks=$(du -B 512 --apparent-size ./root-fs.img | awk '{ print $1 }') |
||||
firmwareSizeBlocks=$((${toString config.sdImage.firmwareSize} * 1024 * 1024 / 512)) |
||||
imageSize=$((rootSizeBlocks * 512 + firmwareSizeBlocks * 512 + gap * 1024 * 1024)) |
||||
truncate -s $imageSize $img |
||||
|
||||
# type=b is 'W95 FAT32', type=83 is 'Linux'. |
||||
# The "bootable" partition is where u-boot will look file for the bootloader |
||||
# information (dtbs, extlinux.conf file). |
||||
sfdisk $img <<EOF |
||||
label: dos |
||||
label-id: ${config.sdImage.firmwarePartitionID} |
||||
|
||||
start=''${gap}M, size=$firmwareSizeBlocks, type=b |
||||
start=$((gap + ${toString config.sdImage.firmwareSize}))M, type=83, bootable |
||||
EOF |
||||
|
||||
# Copy the rootfs into the SD image |
||||
eval $(partx $img -o START,SECTORS --nr 2 --pairs) |
||||
dd conv=notrunc if=./root-fs.img of=$img seek=$START count=$SECTORS |
||||
|
||||
# Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img |
||||
eval $(partx $img -o START,SECTORS --nr 1 --pairs) |
||||
truncate -s $((SECTORS * 512)) firmware_part.img |
||||
faketime "1970-01-01 00:00:00" mkfs.vfat -i ${config.sdImage.firmwarePartitionID} -n ${config.sdImage.firmwarePartitionName} firmware_part.img |
||||
|
||||
# Populate the files intended for /boot/firmware |
||||
mkdir firmware |
||||
${config.sdImage.populateFirmwareCommands} |
||||
|
||||
# Copy the populated /boot/firmware into the SD image |
||||
(cd firmware; mcopy -psvm -i ../firmware_part.img ./* ::) |
||||
# Verify the FAT partition before copying it. |
||||
fsck.vfat -vn firmware_part.img |
||||
dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS |
||||
|
||||
${config.sdImage.postBuildCommands} |
||||
|
||||
if test -n "$compressImage"; then |
||||
zstd -T$NIX_BUILD_CORES --rm $img |
||||
fi |
||||
''; |
||||
}) {}; |
||||
|
||||
boot.postBootCommands = '' |
||||
# On the first boot do some maintenance tasks |
||||
if [ -f /nix-path-registration ]; then |
||||
set -euo pipefail |
||||
set -x |
||||
# Figure out device names for the boot device and root filesystem. |
||||
rootPart=$(${pkgs.util-linux}/bin/findmnt -n -o SOURCE /) |
||||
bootDevice=$(lsblk -npo PKNAME $rootPart) |
||||
partNum=$(lsblk -npo MAJ:MIN $rootPart | ${pkgs.gawk}/bin/awk -F: '{print $2}') |
||||
|
||||
# Resize the root partition and the filesystem to fit the disk |
||||
echo ",+," | sfdisk -N$partNum --no-reread $bootDevice |
||||
${pkgs.parted}/bin/partprobe |
||||
${pkgs.e2fsprogs}/bin/resize2fs $rootPart |
||||
|
||||
# Register the contents of the initial Nix store |
||||
${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration |
||||
|
||||
# nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag. |
||||
touch /etc/NIXOS |
||||
${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system |
||||
|
||||
# Prevents this from running on later boots. |
||||
rm -f /nix-path-registration |
||||
fi |
||||
''; |
||||
warnings = [ |
||||
'' |
||||
.../cd-dvd/sd-image.nix is deprecated and will eventually be removed. |
||||
Please switch to .../sd-card/sd-image.nix, instead. |
||||
'' |
||||
]; |
||||
}; |
||||
} |
||||
|
@ -0,0 +1,10 @@ |
||||
{ |
||||
imports = [ |
||||
../../profiles/installation-device.nix |
||||
./sd-image-aarch64.nix |
||||
]; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
@ -0,0 +1,10 @@ |
||||
{ |
||||
imports = [ |
||||
../../profiles/installation-device.nix |
||||
./sd-image-aarch64-new-kernel.nix |
||||
]; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
@ -0,0 +1,7 @@ |
||||
{ pkgs, ... }: |
||||
|
||||
{ |
||||
imports = [ ./sd-image-aarch64.nix ]; |
||||
|
||||
boot.kernelPackages = pkgs.linuxPackages_latest; |
||||
} |
@ -0,0 +1,75 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-aarch64.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
./sd-image.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
|
||||
# The serial ports listed here are: |
||||
# - ttyS0: for Tegra (Jetson TX1) |
||||
# - ttyAMA0: for QEMU's -machine virt |
||||
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"]; |
||||
|
||||
boot.initrd.availableKernelModules = [ |
||||
# Allows early (earlier) modesetting for the Raspberry Pi |
||||
"vc4" "bcm2835_dma" "i2c_bcm2835" |
||||
# Allows early (earlier) modesetting for Allwinner SoCs |
||||
"sun4i_drm" "sun8i_drm_hdmi" "sun8i_mixer" |
||||
]; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
[pi3] |
||||
kernel=u-boot-rpi3.bin |
||||
|
||||
[pi4] |
||||
kernel=u-boot-rpi4.bin |
||||
enable_gic=1 |
||||
armstub=armstub8-gic.bin |
||||
|
||||
# Otherwise the resolution will be weird in most cases, compared to |
||||
# what the pi3 firmware does by default. |
||||
disable_overscan=1 |
||||
|
||||
[all] |
||||
# Boot in 64-bit mode. |
||||
arm_64bit=1 |
||||
|
||||
# U-Boot needs this to work, regardless of whether UART is actually used or not. |
||||
# Look in arch/arm/mach-bcm283x/Kconfig in the U-Boot tree to see if this is still |
||||
# a requirement in the future. |
||||
enable_uart=1 |
||||
|
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
|
||||
# Add the config |
||||
cp ${configTxt} firmware/config.txt |
||||
|
||||
# Add pi3 specific files |
||||
cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/u-boot-rpi3.bin |
||||
|
||||
# Add pi4 specific files |
||||
cp ${pkgs.ubootRaspberryPi4_64bit}/u-boot.bin firmware/u-boot-rpi4.bin |
||||
cp ${pkgs.raspberrypi-armstubs}/armstub8-gic.bin firmware/armstub8-gic.bin |
||||
cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb firmware/ |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
}; |
||||
} |
@ -0,0 +1,10 @@ |
||||
{ |
||||
imports = [ |
||||
../../profiles/installation-device.nix |
||||
./sd-image-armv7l-multiplatform.nix |
||||
]; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
@ -0,0 +1,52 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
./sd-image.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
boot.kernelPackages = pkgs.linuxPackages_latest; |
||||
# The serial ports listed here are: |
||||
# - ttyS0: for Tegra (Jetson TK1) |
||||
# - ttymxc0: for i.MX6 (Wandboard) |
||||
# - ttyAMA0: for Allwinner (pcDuino3 Nano) and QEMU's -machine virt |
||||
# - ttyO0: for OMAP (BeagleBone Black) |
||||
# - ttySAC2: for Exynos (ODROID-XU3) |
||||
boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=ttySAC2,115200n8" "console=tty0"]; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
|
||||
[pi2] |
||||
kernel=u-boot-rpi2.bin |
||||
|
||||
[pi3] |
||||
kernel=u-boot-rpi3.bin |
||||
|
||||
# U-Boot used to need this to work, regardless of whether UART is actually used or not. |
||||
# TODO: check when/if this can be removed. |
||||
enable_uart=1 |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
cp ${pkgs.ubootRaspberryPi2}/u-boot.bin firmware/u-boot-rpi2.bin |
||||
cp ${pkgs.ubootRaspberryPi3_32bit}/u-boot.bin firmware/u-boot-rpi3.bin |
||||
cp ${configTxt} firmware/config.txt |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
}; |
||||
} |
@ -0,0 +1,10 @@ |
||||
{ |
||||
imports = [ |
||||
../../profiles/installation-device.nix |
||||
./sd-image-raspberrypi.nix |
||||
]; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
@ -0,0 +1,41 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-raspberrypi.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ |
||||
imports = [ |
||||
../../profiles/base.nix |
||||
./sd-image.nix |
||||
]; |
||||
|
||||
boot.loader.grub.enable = false; |
||||
boot.loader.generic-extlinux-compatible.enable = true; |
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7; |
||||
boot.kernelPackages = pkgs.linuxPackages_rpi1; |
||||
|
||||
sdImage = { |
||||
populateFirmwareCommands = let |
||||
configTxt = pkgs.writeText "config.txt" '' |
||||
# Prevent the firmware from smashing the framebuffer setup done by the mainline kernel |
||||
# when attempting to show low-voltage or overtemperature warnings. |
||||
avoid_warnings=1 |
||||
|
||||
[pi0] |
||||
kernel=u-boot-rpi0.bin |
||||
|
||||
[pi1] |
||||
kernel=u-boot-rpi1.bin |
||||
''; |
||||
in '' |
||||
(cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/) |
||||
cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin firmware/u-boot-rpi0.bin |
||||
cp ${pkgs.ubootRaspberryPi}/u-boot.bin firmware/u-boot-rpi1.bin |
||||
cp ${configTxt} firmware/config.txt |
||||
''; |
||||
populateRootCommands = '' |
||||
mkdir -p ./files/boot |
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot |
||||
''; |
||||
}; |
||||
} |
@ -0,0 +1,10 @@ |
||||
{ |
||||
imports = [ |
||||
../../profiles/installation-device.nix |
||||
./sd-image-raspberrypi4.nix |
||||
]; |
||||
|
||||
# the installation media is also the installation target, |
||||
# so we don't want to provide the installation configuration.nix. |
||||
installer.cloneConfig = false; |
||||
} |
@ -0,0 +1,8 @@ |
||||
# To build, use: |
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-raspberrypi4.nix -A config.system.build.sdImage |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
{ |
||||
imports = [ ./sd-image-aarch64.nix ]; |
||||
boot.kernelPackages = pkgs.linuxPackages_rpi4; |
||||
} |
@ -0,0 +1,245 @@ |
||||
# This module creates a bootable SD card image containing the given NixOS |
||||
# configuration. The generated image is MBR partitioned, with a FAT |
||||
# /boot/firmware partition, and ext4 root partition. The generated image |
||||
# is sized to fit its contents, and a boot script automatically resizes |
||||
# the root partition to fit the device on the first boot. |
||||
# |
||||
# The firmware partition is built with expectation to hold the Raspberry |
||||
# Pi firmware and bootloader, and be removed and replaced with a firmware |
||||
# build for the target SoC for other board families. |
||||
# |
||||
# The derivation for the SD image will be placed in |
||||
# config.system.build.sdImage |
||||
|
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
rootfsImage = pkgs.callPackage ../../../lib/make-ext4-fs.nix ({ |
||||
inherit (config.sdImage) storePaths; |
||||
compressImage = true; |
||||
populateImageCommands = config.sdImage.populateRootCommands; |
||||
volumeLabel = "NIXOS_SD"; |
||||
} // optionalAttrs (config.sdImage.rootPartitionUUID != null) { |
||||
uuid = config.sdImage.rootPartitionUUID; |
||||
}); |
||||
in |
||||
{ |
||||
imports = [ |
||||
(mkRemovedOptionModule [ "sdImage" "bootPartitionID" ] "The FAT partition for SD image now only holds the Raspberry Pi firmware files. Use firmwarePartitionID to configure that partition's ID.") |
||||
(mkRemovedOptionModule [ "sdImage" "bootSize" ] "The boot files for SD image have been moved to the main ext4 partition. The FAT partition now only holds the Raspberry Pi firmware files. Changing its size may not be required.") |
||||
]; |
||||
|
||||
options.sdImage = { |
||||
imageName = mkOption { |
||||
default = "${config.sdImage.imageBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img"; |
||||
description = '' |
||||
Name of the generated image file. |
||||
''; |
||||
}; |
||||
|
||||
imageBaseName = mkOption { |
||||
default = "nixos-sd-image"; |
||||
description = '' |
||||
Prefix of the name of the generated image file. |
||||
''; |
||||
}; |
||||
|
||||
storePaths = mkOption { |
||||
type = with types; listOf package; |
||||
example = literalExample "[ pkgs.stdenv ]"; |
||||
description = '' |
||||
Derivations to be included in the Nix store in the generated SD image. |
||||
''; |
||||
}; |
||||
|
||||
firmwarePartitionID = mkOption { |
||||
type = types.str; |
||||
default = "0x2178694e"; |
||||
description = '' |
||||
Volume ID for the /boot/firmware partition on the SD card. This value |
||||
must be a 32-bit hexadecimal number. |
||||
''; |
||||
}; |
||||
|
||||
firmwarePartitionName = mkOption { |
||||
type = types.str; |
||||
default = "FIRMWARE"; |
||||
description = '' |
||||
Name of the filesystem which holds the boot firmware. |
||||
''; |
||||
}; |
||||
|
||||
rootPartitionUUID = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
example = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7"; |
||||
description = '' |
||||
UUID for the filesystem on the main NixOS partition on the SD card. |
||||
''; |
||||
}; |
||||
|
||||
firmwareSize = mkOption { |
||||
type = types.int; |
||||
# As of 2019-08-18 the Raspberry pi firmware + u-boot takes ~18MiB |
||||
default = 30; |
||||
description = '' |
||||
Size of the /boot/firmware partition, in megabytes. |
||||
''; |
||||
}; |
||||
|
||||
populateFirmwareCommands = mkOption { |
||||
example = literalExample "'' cp \${pkgs.myBootLoader}/u-boot.bin firmware/ ''"; |
||||
description = '' |
||||
Shell commands to populate the ./firmware directory. |
||||
All files in that directory are copied to the |
||||
/boot/firmware partition on the SD image. |
||||
''; |
||||
}; |
||||
|
||||
populateRootCommands = mkOption { |
||||
example = literalExample "''\${config.boot.loader.generic-extlinux-compatible.populateCmd} -c \${config.system.build.toplevel} -d ./files/boot''"; |
||||
description = '' |
||||
Shell commands to populate the ./files directory. |
||||
All files in that directory are copied to the |
||||
root (/) partition on the SD image. Use this to |
||||
populate the ./files/boot (/boot) directory. |
||||
''; |
||||
}; |
||||
|
||||
postBuildCommands = mkOption { |
||||
example = literalExample "'' dd if=\${pkgs.myBootLoader}/SPL of=$img bs=1024 seek=1 conv=notrunc ''"; |
||||
default = ""; |
||||
description = '' |
||||
Shell commands to run after the image is built. |
||||
Can be used for boards requiring to dd u-boot SPL before actual partitions. |
||||
''; |
||||
}; |
||||
|
||||
compressImage = mkOption { |
||||
type = types.bool; |
||||
default = true; |
||||
description = '' |
||||
Whether the SD image should be compressed using |
||||
<command>zstd</command>. |
||||
''; |
||||
}; |
||||
|
||||
}; |
||||
|
||||
config = { |
||||
fileSystems = { |
||||
"/boot/firmware" = { |
||||
device = "/dev/disk/by-label/${config.sdImage.firmwarePartitionName}"; |
||||
fsType = "vfat"; |
||||
# Alternatively, this could be removed from the configuration. |
||||
# The filesystem is not needed at runtime, it could be treated |
||||
# as an opaque blob instead of a discrete FAT32 filesystem. |
||||
options = [ "nofail" "noauto" ]; |
||||
}; |
||||
"/" = { |
||||
device = "/dev/disk/by-label/NIXOS_SD"; |
||||
fsType = "ext4"; |
||||
}; |
||||
}; |
||||
|
||||
sdImage.storePaths = [ config.system.build.toplevel ]; |
||||
|
||||
system.build.sdImage = pkgs.callPackage ({ stdenv, dosfstools, e2fsprogs, |
||||
mtools, libfaketime, util-linux, zstd }: stdenv.mkDerivation { |
||||
name = config.sdImage.imageName; |
||||
|
||||
nativeBuildInputs = [ dosfstools e2fsprogs mtools libfaketime util-linux zstd ]; |
||||
|
||||
inherit (config.sdImage) compressImage; |
||||
|
||||
buildCommand = '' |
||||
mkdir -p $out/nix-support $out/sd-image |
||||
export img=$out/sd-image/${config.sdImage.imageName} |
||||
|
||||
echo "${pkgs.stdenv.buildPlatform.system}" > $out/nix-support/system |
||||
if test -n "$compressImage"; then |
||||
echo "file sd-image $img.zst" >> $out/nix-support/hydra-build-products |
||||
else |
||||
echo "file sd-image $img" >> $out/nix-support/hydra-build-products |
||||
fi |
||||
|
||||
echo "Decompressing rootfs image" |
||||
zstd -d --no-progress "${rootfsImage}" -o ./root-fs.img |
||||
|
||||
# Gap in front of the first partition, in MiB |
||||
gap=8 |
||||
|
||||
# Create the image file sized to fit /boot/firmware and /, plus slack for the gap. |
||||
rootSizeBlocks=$(du -B 512 --apparent-size ./root-fs.img | awk '{ print $1 }') |
||||
firmwareSizeBlocks=$((${toString config.sdImage.firmwareSize} * 1024 * 1024 / 512)) |
||||
imageSize=$((rootSizeBlocks * 512 + firmwareSizeBlocks * 512 + gap * 1024 * 1024)) |
||||
truncate -s $imageSize $img |
||||
|
||||
# type=b is 'W95 FAT32', type=83 is 'Linux'. |
||||
# The "bootable" partition is where u-boot will look file for the bootloader |
||||
# information (dtbs, extlinux.conf file). |
||||
sfdisk $img <<EOF |
||||
label: dos |
||||
label-id: ${config.sdImage.firmwarePartitionID} |
||||
|
||||
start=''${gap}M, size=$firmwareSizeBlocks, type=b |
||||
start=$((gap + ${toString config.sdImage.firmwareSize}))M, type=83, bootable |
||||
EOF |
||||
|
||||
# Copy the rootfs into the SD image |
||||
eval $(partx $img -o START,SECTORS --nr 2 --pairs) |
||||
dd conv=notrunc if=./root-fs.img of=$img seek=$START count=$SECTORS |
||||
|
||||
# Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img |
||||
eval $(partx $img -o START,SECTORS --nr 1 --pairs) |
||||
truncate -s $((SECTORS * 512)) firmware_part.img |
||||
faketime "1970-01-01 00:00:00" mkfs.vfat -i ${config.sdImage.firmwarePartitionID} -n ${config.sdImage.firmwarePartitionName} firmware_part.img |
||||
|
||||
# Populate the files intended for /boot/firmware |
||||
mkdir firmware |
||||
${config.sdImage.populateFirmwareCommands} |
||||
|
||||
# Copy the populated /boot/firmware into the SD image |
||||
(cd firmware; mcopy -psvm -i ../firmware_part.img ./* ::) |
||||
# Verify the FAT partition before copying it. |
||||
fsck.vfat -vn firmware_part.img |
||||
dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS |
||||
|
||||
${config.sdImage.postBuildCommands} |
||||
|
||||
if test -n "$compressImage"; then |
||||
zstd -T$NIX_BUILD_CORES --rm $img |
||||
fi |
||||
''; |
||||
}) {}; |
||||
|
||||
boot.postBootCommands = '' |
||||
# On the first boot do some maintenance tasks |
||||
if [ -f /nix-path-registration ]; then |
||||
set -euo pipefail |
||||
set -x |
||||
# Figure out device names for the boot device and root filesystem. |
||||
rootPart=$(${pkgs.util-linux}/bin/findmnt -n -o SOURCE /) |
||||
bootDevice=$(lsblk -npo PKNAME $rootPart) |
||||
partNum=$(lsblk -npo MAJ:MIN $rootPart | ${pkgs.gawk}/bin/awk -F: '{print $2}') |
||||
|
||||
# Resize the root partition and the filesystem to fit the disk |
||||
echo ",+," | sfdisk -N$partNum --no-reread $bootDevice |
||||
${pkgs.parted}/bin/partprobe |
||||
${pkgs.e2fsprogs}/bin/resize2fs $rootPart |
||||
|
||||
# Register the contents of the initial Nix store |
||||
${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration |
||||
|
||||
# nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag. |
||||
touch /etc/NIXOS |
||||
${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system |
||||
|
||||
# Prevents this from running on later boots. |
||||
rm -f /nix-path-registration |
||||
fi |
||||
''; |
||||
}; |
||||
} |
@ -1,31 +0,0 @@ |
||||
{ config, lib, ... }: |
||||
with lib; |
||||
|
||||
{ |
||||
meta = { |
||||
maintainers = [ maintainers.joachifm ]; |
||||
doc = ./hidepid.xml; |
||||
}; |
||||
|
||||
options = { |
||||
security.hideProcessInformation = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = '' |
||||
Restrict process information to the owning user. |
||||
''; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf config.security.hideProcessInformation { |
||||
users.groups.proc.gid = config.ids.gids.proc; |
||||
users.groups.proc.members = [ "polkituser" ]; |
||||
|
||||
boot.specialFileSystems."/proc".options = [ "hidepid=2" "gid=${toString config.ids.gids.proc}" ]; |
||||
systemd.services.systemd-logind.serviceConfig.SupplementaryGroups = [ "proc" ]; |
||||
|
||||
# Disable cgroupsv2, which doesn't work with hidepid. |
||||
# https://github.com/NixOS/nixpkgs/pull/104094#issuecomment-729996203 |
||||
systemd.enableUnifiedCgroupHierarchy = false; |
||||
}; |
||||
} |
@ -1,28 +0,0 @@ |
||||
<chapter xmlns="http://docbook.org/ns/docbook" |
||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
||||
version="5.0" |
||||
xml:id="sec-hidepid"> |
||||
<title>Hiding process information</title> |
||||
<para> |
||||
Setting |
||||
<programlisting> |
||||
<xref linkend="opt-security.hideProcessInformation"/> = true; |
||||
</programlisting> |
||||
ensures that access to process information is restricted to the owning user. |
||||
This implies, among other things, that command-line arguments remain private. |
||||
Unless your deployment relies on unprivileged users being able to inspect the |
||||
process information of other users, this option should be safe to enable. |
||||
</para> |
||||
<para> |
||||
Members of the <literal>proc</literal> group are exempt from process |
||||
information hiding. |
||||
</para> |
||||
<para> |
||||
To allow a service <replaceable>foo</replaceable> to run without process |
||||
information hiding, set |
||||
<programlisting> |
||||
<link linkend="opt-systemd.services._name_.serviceConfig">systemd.services.<replaceable>foo</replaceable>.serviceConfig</link>.SupplementaryGroups = [ "proc" ]; |
||||
</programlisting> |
||||
</para> |
||||
</chapter> |
@ -1,56 +1,16 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
{ lib, ... }: |
||||
let |
||||
cfg = config.security.rngd; |
||||
removed = k: lib.mkRemovedOptionModule [ "security" "rngd" k ]; |
||||
in |
||||
{ |
||||
options = { |
||||
security.rngd = { |
||||
enable = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = '' |
||||
Whether to enable the rng daemon. Devices that the kernel recognises |
||||
as entropy sources are handled automatically by krngd. |
||||
''; |
||||
}; |
||||
debug = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = "Whether to enable debug output (-d)."; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
systemd.services.rngd = { |
||||
bindsTo = [ "dev-random.device" ]; |
||||
|
||||
after = [ "dev-random.device" ]; |
||||
|
||||
# Clean shutdown without DefaultDependencies |
||||
conflicts = [ "shutdown.target" ]; |
||||
before = [ |
||||
"sysinit.target" |
||||
"shutdown.target" |
||||
]; |
||||
|
||||
description = "Hardware RNG Entropy Gatherer Daemon"; |
||||
|
||||
# rngd may have to start early to avoid entropy starvation during boot with encrypted swap |
||||
unitConfig.DefaultDependencies = false; |
||||
serviceConfig = { |
||||
ExecStart = "${pkgs.rng-tools}/sbin/rngd -f" |
||||
+ optionalString cfg.debug " -d"; |
||||
# PrivateTmp would introduce a circular dependency if /tmp is on tmpfs and swap is encrypted, |
||||
# thus depending on rngd before swap, while swap depends on rngd to avoid entropy starvation. |
||||
NoNewPrivileges = true; |
||||
PrivateNetwork = true; |
||||
ProtectSystem = "full"; |
||||
ProtectHome = true; |
||||
}; |
||||
}; |
||||
}; |
||||
imports = [ |
||||
(removed "enable" '' |
||||
rngd is not necessary for any device that the kernel recognises |
||||
as an hardware RNG, as it will automatically run the krngd task |
||||
to periodically collect random data from the device and mix it |
||||
into the kernel's RNG. |
||||
'') |
||||
(removed "debug" |
||||
"The rngd module was removed, so its debug option does nothing.") |
||||
]; |
||||
} |
||||
|
@ -0,0 +1,54 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
let |
||||
cfg = config.services.zrepl; |
||||
format = pkgs.formats.yaml { }; |
||||
configFile = format.generate "zrepl.yml" cfg.settings; |
||||
in |
||||
{ |
||||
meta.maintainers = with maintainers; [ cole-h ]; |
||||
|
||||
options = { |
||||
services.zrepl = { |
||||
enable = mkEnableOption "zrepl"; |
||||
|
||||
settings = mkOption { |
||||
default = { }; |
||||
description = '' |
||||
Configuration for zrepl. See <link |
||||
xlink:href="https://zrepl.github.io/configuration.html"/> |
||||
for more information. |
||||
''; |
||||
type = types.submodule { |
||||
freeformType = format.type; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
### Implementation ### |
||||
|
||||
config = mkIf cfg.enable { |
||||
environment.systemPackages = [ pkgs.zrepl ]; |
||||
|
||||
# zrepl looks for its config in this location by default. This |
||||
# allows the use of e.g. `zrepl signal wakeup <job>` without having |
||||
# to specify the storepath of the config. |
||||
environment.etc."zrepl/zrepl.yml".source = configFile; |
||||
|
||||
systemd.packages = [ pkgs.zrepl ]; |
||||
systemd.services.zrepl = { |
||||
requires = [ "local-fs.target" ]; |
||||
wantedBy = [ "zfs.target" ]; |
||||
after = [ "zfs.target" ]; |
||||
|
||||
path = [ config.boot.zfs.package ]; |
||||
restartTriggers = [ configFile ]; |
||||
|
||||
serviceConfig = { |
||||
Restart = "on-failure"; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
@ -0,0 +1,178 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
eachGeth = config.services.geth; |
||||
|
||||
gethOpts = { config, lib, name, ...}: { |
||||
|
||||
options = { |
||||
|
||||
enable = lib.mkEnableOption "Go Ethereum Node"; |
||||
|
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 30303; |
||||
description = "Port number Go Ethereum will be listening on, both TCP and UDP."; |
||||
}; |
||||
|
||||
http = { |
||||
enable = lib.mkEnableOption "Go Ethereum HTTP API"; |
||||
address = mkOption { |
||||
type = types.str; |
||||
default = "127.0.0.1"; |
||||
description = "Listen address of Go Ethereum HTTP API."; |
||||
}; |
||||
|
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 8545; |
||||
description = "Port number of Go Ethereum HTTP API."; |
||||
}; |
||||
|
||||
apis = mkOption { |
||||
type = types.nullOr (types.listOf types.str); |
||||
default = null; |
||||
description = "APIs to enable over WebSocket"; |
||||
example = ["net" "eth"]; |
||||
}; |
||||
}; |
||||
|
||||
websocket = { |
||||
enable = lib.mkEnableOption "Go Ethereum WebSocket API"; |
||||
address = mkOption { |
||||
type = types.str; |
||||
default = "127.0.0.1"; |
||||
description = "Listen address of Go Ethereum WebSocket API."; |
||||
}; |
||||
|
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 8546; |
||||
description = "Port number of Go Ethereum WebSocket API."; |
||||
}; |
||||
|
||||
apis = mkOption { |
||||
type = types.nullOr (types.listOf types.str); |
||||
default = null; |
||||
description = "APIs to enable over WebSocket"; |
||||
example = ["net" "eth"]; |
||||
}; |
||||
}; |
||||
|
||||
metrics = { |
||||
enable = lib.mkEnableOption "Go Ethereum prometheus metrics"; |
||||
address = mkOption { |
||||
type = types.str; |
||||
default = "127.0.0.1"; |
||||
description = "Listen address of Go Ethereum metrics service."; |
||||
}; |
||||
|
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 6060; |
||||
description = "Port number of Go Ethereum metrics service."; |
||||
}; |
||||
}; |
||||
|
||||
network = mkOption { |
||||
type = types.nullOr (types.enum [ "goerli" "rinkeby" "yolov2" "ropsten" ]); |
||||
default = null; |
||||
description = "The network to connect to. Mainnet (null) is the default ethereum network."; |
||||
}; |
||||
|
||||
syncmode = mkOption { |
||||
type = types.enum [ "fast" "full" "light" ]; |
||||
default = "fast"; |
||||
description = "Blockchain sync mode."; |
||||
}; |
||||
|
||||
gcmode = mkOption { |
||||
type = types.enum [ "full" "archive" ]; |
||||
default = "full"; |
||||
description = "Blockchain garbage collection mode."; |
||||
}; |
||||
|
||||
maxpeers = mkOption { |
||||
type = types.int; |
||||
default = 50; |
||||
description = "Maximum peers to connect to."; |
||||
}; |
||||
|
||||
extraArgs = mkOption { |
||||
type = types.listOf types.str; |
||||
description = "Additional arguments passed to Go Ethereum."; |
||||
default = []; |
||||
}; |
||||
|
||||
package = mkOption { |
||||
default = pkgs.go-ethereum.geth; |
||||
type = types.package; |
||||
description = "Package to use as Go Ethereum node."; |
||||
}; |
||||
}; |
||||
}; |
||||
in |
||||
|
||||
{ |
||||
|
||||
###### interface |
||||
|
||||
options = { |
||||
services.geth = mkOption { |
||||
type = types.attrsOf (types.submodule gethOpts); |
||||
default = {}; |
||||
description = "Specification of one or more geth instances."; |
||||
}; |
||||
}; |
||||
|
||||
###### implementation |
||||
|
||||
config = mkIf (eachGeth != {}) { |
||||
|
||||
environment.systemPackages = flatten (mapAttrsToList (gethName: cfg: [ |
||||
cfg.package |
||||
]) eachGeth); |
||||
|
||||
systemd.services = mapAttrs' (gethName: cfg: ( |
||||
nameValuePair "geth-${gethName}" (mkIf cfg.enable { |
||||
description = "Go Ethereum node (${gethName})"; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
after = [ "network.target" ]; |
||||
|
||||
serviceConfig = { |
||||
DynamicUser = true; |
||||
Restart = "always"; |
||||
StateDirectory = "goethereum/${gethName}/${if (cfg.network == null) then "mainnet" else cfg.network}"; |
||||
|
||||
# Hardening measures |
||||
PrivateTmp = "true"; |
||||
ProtectSystem = "full"; |
||||
NoNewPrivileges = "true"; |
||||
PrivateDevices = "true"; |
||||
MemoryDenyWriteExecute = "true"; |
||||
}; |
||||
|
||||
script = '' |
||||
${cfg.package}/bin/geth \ |
||||
--nousb \ |
||||
--ipcdisable \ |
||||
${optionalString (cfg.network != null) ''--${cfg.network}''} \ |
||||
--syncmode ${cfg.syncmode} \ |
||||
--gcmode ${cfg.gcmode} \ |
||||
--port ${toString cfg.port} \ |
||||
--maxpeers ${toString cfg.maxpeers} \ |
||||
${if cfg.http.enable then ''--http --http.addr ${cfg.http.address} --http.port ${toString cfg.http.port}'' else ""} \ |
||||
${optionalString (cfg.http.apis != null) ''--http.api ${lib.concatStringsSep "," cfg.http.apis}''} \ |
||||
${if cfg.websocket.enable then ''--ws --ws.addr ${cfg.websocket.address} --ws.port ${toString cfg.websocket.port}'' else ""} \ |
||||
${optionalString (cfg.websocket.apis != null) ''--ws.api ${lib.concatStringsSep "," cfg.websocket.apis}''} \ |
||||
${optionalString cfg.metrics.enable ''--metrics --metrics.addr ${cfg.metrics.address} --metrics.port ${toString cfg.metrics.port}''} \ |
||||
${lib.escapeShellArgs cfg.extraArgs} \ |
||||
--datadir /var/lib/goethereum/${gethName}/${if (cfg.network == null) then "mainnet" else cfg.network} |
||||
''; |
||||
}))) eachGeth; |
||||
|
||||
}; |
||||
|
||||
} |
@ -1,183 +0,0 @@ |
||||
# pipewire service. |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.pipewire; |
||||
enable32BitAlsaPlugins = cfg.alsa.support32Bit |
||||
&& pkgs.stdenv.isx86_64 |
||||
&& pkgs.pkgsi686Linux.pipewire != null; |
||||
|
||||
# The package doesn't output to $out/lib/pipewire directly so that the |
||||
# overlays can use the outputs to replace the originals in FHS environments. |
||||
# |
||||
# This doesn't work in general because of missing development information. |
||||
jack-libs = pkgs.runCommand "jack-libs" {} '' |
||||
mkdir -p "$out/lib" |
||||
ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire" |
||||
''; |
||||
in { |
||||
|
||||
meta = { |
||||
maintainers = teams.freedesktop.members; |
||||
}; |
||||
|
||||
###### interface |
||||
options = { |
||||
services.pipewire = { |
||||
enable = mkEnableOption "pipewire service"; |
||||
|
||||
package = mkOption { |
||||
type = types.package; |
||||
default = pkgs.pipewire; |
||||
defaultText = "pkgs.pipewire"; |
||||
example = literalExample "pkgs.pipewire"; |
||||
description = '' |
||||
The pipewire derivation to use. |
||||
''; |
||||
}; |
||||
|
||||
socketActivation = mkOption { |
||||
default = true; |
||||
type = types.bool; |
||||
description = '' |
||||
Automatically run pipewire when connections are made to the pipewire socket. |
||||
''; |
||||
}; |
||||
|
||||
extraConfig = mkOption { |
||||
type = types.lines; |
||||
default = ""; |
||||
description = '' |
||||
Literal string to append to /etc/pipewire/pipewire.conf. |
||||
''; |
||||
}; |
||||
|
||||
sessionManager = mkOption { |
||||
type = types.nullOr types.string; |
||||
default = null; |
||||
example = literalExample ''"''${pipewire}/bin/pipewire-media-session"''; |
||||
description = '' |
||||
Path to the pipewire session manager executable. |
||||
''; |
||||
}; |
||||
|
||||
sessionManagerArguments = mkOption { |
||||
type = types.listOf types.string; |
||||
default = []; |
||||
example = literalExample ''[ "-p" "bluez5.msbc-support=true" ]''; |
||||
description = '' |
||||
Arguments passed to the pipewire session manager. |
||||
''; |
||||
}; |
||||
|
||||
alsa = { |
||||
enable = mkEnableOption "ALSA support"; |
||||
support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems"; |
||||
}; |
||||
|
||||
jack = { |
||||
enable = mkEnableOption "JACK audio emulation"; |
||||
}; |
||||
|
||||
pulse = { |
||||
enable = mkEnableOption "PulseAudio server emulation"; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
|
||||
###### implementation |
||||
config = mkIf cfg.enable { |
||||
assertions = [ |
||||
{ |
||||
assertion = cfg.pulse.enable -> !config.hardware.pulseaudio.enable; |
||||
message = "PipeWire based PulseAudio server emulation replaces PulseAudio. This option requires `hardware.pulseaudio.enable` to be set to false"; |
||||
} |
||||
{ |
||||
assertion = cfg.jack.enable -> !config.services.jack.jackd.enable; |
||||
message = "PipeWire based JACK emulation doesn't use the JACK service. This option requires `services.jack.jackd.enable` to be set to false"; |
||||
} |
||||
]; |
||||
|
||||
services.pipewire.sessionManager = mkDefault "${cfg.package}/bin/pipewire-media-session"; |
||||
|
||||
environment.systemPackages = [ cfg.package ] |
||||
++ lib.optional cfg.jack.enable jack-libs; |
||||
|
||||
systemd.packages = [ cfg.package ] |
||||
++ lib.optional cfg.pulse.enable cfg.package.pulse; |
||||
|
||||
# PipeWire depends on DBUS but doesn't list it. Without this booting |
||||
# into a terminal results in the service crashing with an error. |
||||
systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ]; |
||||
systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"]; |
||||
systemd.user.services.pipewire.bindsTo = [ "dbus.service" ]; |
||||
services.udev.packages = [ cfg.package ]; |
||||
|
||||
# If any paths are updated here they must also be updated in the package test. |
||||
environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable { |
||||
text = '' |
||||
pcm_type.pipewire { |
||||
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ; |
||||
${optionalString enable32BitAlsaPlugins |
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"} |
||||
} |
||||
ctl_type.pipewire { |
||||
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ; |
||||
${optionalString enable32BitAlsaPlugins |
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"} |
||||
} |
||||
''; |
||||
}; |
||||
environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable { |
||||
source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf"; |
||||
}; |
||||
environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable { |
||||
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf"; |
||||
}; |
||||
environment.sessionVariables.LD_LIBRARY_PATH = |
||||
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire"; |
||||
|
||||
environment.etc."pipewire/pipewire.conf" = { |
||||
# Adapted from src/daemon/pipewire.conf.in |
||||
text = '' |
||||
set-prop link.max-buffers 16 # version < 3 clients can't handle more |
||||
|
||||
add-spa-lib audio.convert* audioconvert/libspa-audioconvert |
||||
add-spa-lib api.alsa.* alsa/libspa-alsa |
||||
add-spa-lib api.v4l2.* v4l2/libspa-v4l2 |
||||
add-spa-lib api.libcamera.* libcamera/libspa-libcamera |
||||
add-spa-lib api.bluez5.* bluez5/libspa-bluez5 |
||||
add-spa-lib api.vulkan.* vulkan/libspa-vulkan |
||||
add-spa-lib api.jack.* jack/libspa-jack |
||||
add-spa-lib support.* support/libspa-support |
||||
|
||||
load-module libpipewire-module-rtkit # rt.prio=20 rt.time.soft=200000 rt.time.hard=200000 |
||||
load-module libpipewire-module-protocol-native |
||||
load-module libpipewire-module-profiler |
||||
load-module libpipewire-module-metadata |
||||
load-module libpipewire-module-spa-device-factory |
||||
load-module libpipewire-module-spa-node-factory |
||||
load-module libpipewire-module-client-node |
||||
load-module libpipewire-module-client-device |
||||
load-module libpipewire-module-portal |
||||
load-module libpipewire-module-access |
||||
load-module libpipewire-module-adapter |
||||
load-module libpipewire-module-link-factory |
||||
load-module libpipewire-module-session-manager |
||||
|
||||
create-object spa-node-factory factory.name=support.node.driver node.name=Dummy priority.driver=8000 |
||||
|
||||
exec ${cfg.sessionManager} ${lib.concatStringsSep " " cfg.sessionManagerArguments} |
||||
|
||||
${cfg.extraConfig} |
||||
''; |
||||
}; |
||||
|
||||
environment.etc."pipewire/media-session.d/with-alsa" = mkIf cfg.alsa.enable { text = ""; }; |
||||
environment.etc."pipewire/media-session.d/with-pulseaudio" = mkIf cfg.pulse.enable { text = ""; }; |
||||
environment.etc."pipewire/media-session.d/with-jack" = mkIf cfg.jack.enable { text = ""; }; |
||||
}; |
||||
} |
@ -0,0 +1,341 @@ |
||||
# pipewire example session manager. |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.pipewire.media-session; |
||||
enable32BitAlsaPlugins = cfg.alsa.support32Bit |
||||
&& pkgs.stdenv.isx86_64 |
||||
&& pkgs.pkgsi686Linux.pipewire != null; |
||||
|
||||
# Helpers for generating the pipewire JSON config file |
||||
mkSPAValueString = v: |
||||
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]" |
||||
else if lib.types.attrs.check v then |
||||
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}" |
||||
else lib.generators.mkValueStringDefault { } v; |
||||
|
||||
mkSPAKeyValue = attrs: map (def: def.content) ( |
||||
lib.sortProperties |
||||
( |
||||
lib.mapAttrsToList |
||||
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ] k} = ${mkSPAValueString (v._content or v)}") |
||||
attrs |
||||
) |
||||
); |
||||
|
||||
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs); |
||||
in { |
||||
|
||||
meta = { |
||||
maintainers = teams.freedesktop.members; |
||||
}; |
||||
|
||||
###### interface |
||||
options = { |
||||
services.pipewire.media-session = { |
||||
enable = mkOption { |
||||
type = types.bool; |
||||
default = config.services.pipewire.enable; |
||||
defaultText = "config.services.pipewire.enable"; |
||||
description = "Example pipewire session manager"; |
||||
}; |
||||
|
||||
package = mkOption { |
||||
type = types.package; |
||||
default = pkgs.pipewire.mediaSession; |
||||
example = literalExample "pkgs.pipewire.mediaSession"; |
||||
description = '' |
||||
The pipewire-media-session derivation to use. |
||||
''; |
||||
}; |
||||
|
||||
config = mkOption { |
||||
type = types.attrs; |
||||
description = '' |
||||
Configuration for the media session core. |
||||
''; |
||||
default = { |
||||
# media-session config file |
||||
properties = { |
||||
# Properties to configure the session and some |
||||
# modules |
||||
#mem.mlock-all = false; |
||||
#context.profile.modules = "default,rtkit"; |
||||
}; |
||||
|
||||
spa-libs = { |
||||
# Mapping from factory name to library. |
||||
"api.bluez5.*" = "bluez5/libspa-bluez5"; |
||||
"api.alsa.*" = "alsa/libspa-alsa"; |
||||
"api.v4l2.*" = "v4l2/libspa-v4l2"; |
||||
"api.libcamera.*" = "libcamera/libspa-libcamera"; |
||||
}; |
||||
|
||||
modules = { |
||||
# These are the modules that are enabled when a file with |
||||
# the key name is found in the media-session.d config directory. |
||||
# the default bundle is always enabled. |
||||
|
||||
default = [ |
||||
"flatpak" # manages flatpak access |
||||
"portal" # manage portal permissions |
||||
"v4l2" # video for linux udev detection |
||||
#"libcamera" # libcamera udev detection |
||||
"suspend-node" # suspend inactive nodes |
||||
"policy-node" # configure and link nodes |
||||
#"metadata" # export metadata API |
||||
#"default-nodes" # restore default nodes |
||||
#"default-profile" # restore default profiles |
||||
#"default-routes" # restore default route |
||||
#"streams-follow-default" # move streams when default changes |
||||
#"alsa-seq" # alsa seq midi support |
||||
#"alsa-monitor" # alsa udev detection |
||||
#"bluez5" # bluetooth support |
||||
#"restore-stream" # restore stream settings |
||||
]; |
||||
"with-audio" = [ |
||||
"metadata" |
||||
"default-nodes" |
||||
"default-profile" |
||||
"default-routes" |
||||
"alsa-seq" |
||||
"alsa-monitor" |
||||
]; |
||||
"with-alsa" = [ |
||||
"with-audio" |
||||
]; |
||||
"with-jack" = [ |
||||
"with-audio" |
||||
]; |
||||
"with-pulseaudio" = [ |
||||
"with-audio" |
||||
"bluez5" |
||||
"restore-stream" |
||||
"streams-follow-default" |
||||
]; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
alsaMonitorConfig = mkOption { |
||||
type = types.attrs; |
||||
description = '' |
||||
Configuration for the alsa monitor. |
||||
''; |
||||
default = { |
||||
# alsa-monitor config file |
||||
properties = { |
||||
#alsa.jack-device = true |
||||
}; |
||||
|
||||
rules = [ |
||||
# an array of matches/actions to evaluate |
||||
{ |
||||
# rules for matching a device or node. It is an array of |
||||
# properties that all need to match the regexp. If any of the |
||||
# matches work, the actions are executed for the object. |
||||
matches = [ |
||||
{ |
||||
# this matches all cards |
||||
device.name = "~alsa_card.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
# actions can update properties on the matched object. |
||||
update-props = { |
||||
api.alsa.use-acp = true; |
||||
#api.alsa.use-ucm = true; |
||||
#api.alsa.soft-mixer = false; |
||||
#api.alsa.ignore-dB = false; |
||||
#device.profile-set = "profileset-name"; |
||||
#device.profile = "default profile name"; |
||||
api.acp.auto-profile = false; |
||||
api.acp.auto-port = false; |
||||
#device.nick = "My Device"; |
||||
}; |
||||
}; |
||||
} |
||||
{ |
||||
matches = [ |
||||
{ |
||||
# matches all sinks |
||||
node.name = "~alsa_input.*"; |
||||
} |
||||
{ |
||||
# matches all sources |
||||
node.name = "~alsa_output.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
update-props = { |
||||
#node.nick = "My Node"; |
||||
#node.nick = null; |
||||
#priority.driver = 100; |
||||
#priority.session = 100; |
||||
#node.pause-on-idle = false; |
||||
#resample.quality = 4; |
||||
#channelmix.normalize = false; |
||||
#channelmix.mix-lfe = false; |
||||
#audio.channels = 2; |
||||
#audio.format = "S16LE"; |
||||
#audio.rate = 44100; |
||||
#audio.position = "FL,FR"; |
||||
#api.alsa.period-size = 1024; |
||||
#api.alsa.headroom = 0; |
||||
#api.alsa.disable-mmap = false; |
||||
#api.alsa.disable-batch = false; |
||||
}; |
||||
}; |
||||
} |
||||
]; |
||||
}; |
||||
}; |
||||
|
||||
bluezMonitorConfig = mkOption { |
||||
type = types.attrs; |
||||
description = '' |
||||
Configuration for the bluez5 monitor. |
||||
''; |
||||
default = { |
||||
# bluez-monitor config file |
||||
properties = { |
||||
# msbc is not expected to work on all headset + adapter combinations. |
||||
#bluez5.msbc-support = true; |
||||
#bluez5.sbc-xq-support = true; |
||||
|
||||
# Enabled headset roles (default: [ hsp_hs hfp_ag ]), this |
||||
# property only applies to native backend. Currently some headsets |
||||
# (Sony WH-1000XM3) are not working with both hsp_ag and hfp_ag |
||||
# enabled, disable either hsp_ag or hfp_ag to work around it. |
||||
# |
||||
# Supported headset roles: hsp_hs (HSP Headset), |
||||
# hsp_ag (HSP Audio Gateway), |
||||
# hfp_ag (HFP Audio Gateway) |
||||
#bluez5.headset-roles = [ "hsp_hs" "hsp_ag" "hfp_ag" ]; |
||||
|
||||
# Enabled A2DP codecs (default: all) |
||||
#bluez5.codecs = [ "sbc" "aac" "ldac" "aptx" "aptx_hd" ]; |
||||
}; |
||||
|
||||
rules = [ |
||||
# an array of matches/actions to evaluate |
||||
{ |
||||
# rules for matching a device or node. It is an array of |
||||
# properties that all need to match the regexp. If any of the |
||||
# matches work, the actions are executed for the object. |
||||
matches = [ |
||||
{ |
||||
# this matches all cards |
||||
device.name = "~bluez_card.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
# actions can update properties on the matched object. |
||||
update-props = { |
||||
#device.nick = "My Device"; |
||||
}; |
||||
}; |
||||
} |
||||
{ |
||||
matches = [ |
||||
{ |
||||
# matches all sinks |
||||
node.name = "~bluez_input.*"; |
||||
} |
||||
{ |
||||
# matches all sources |
||||
node.name = "~bluez_output.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
update-props = { |
||||
#node.nick = "My Node" |
||||
#node.nick = null; |
||||
#priority.driver = 100; |
||||
#priority.session = 100; |
||||
#node.pause-on-idle = false; |
||||
#resample.quality = 4; |
||||
#channelmix.normalize = false; |
||||
#channelmix.mix-lfe = false; |
||||
}; |
||||
}; |
||||
} |
||||
]; |
||||
}; |
||||
}; |
||||
|
||||
v4l2MonitorConfig = mkOption { |
||||
type = types.attrs; |
||||
description = '' |
||||
Configuration for the V4L2 monitor. |
||||
''; |
||||
default = { |
||||
# v4l2-monitor config file |
||||
properties = { |
||||
}; |
||||
|
||||
rules = [ |
||||
# an array of matches/actions to evaluate |
||||
{ |
||||
# rules for matching a device or node. It is an array of |
||||
# properties that all need to match the regexp. If any of the |
||||
# matches work, the actions are executed for the object. |
||||
matches = [ |
||||
{ |
||||
# this matches all devices |
||||
device.name = "~v4l2_device.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
# actions can update properties on the matched object. |
||||
update-props = { |
||||
#device.nick = "My Device"; |
||||
}; |
||||
}; |
||||
} |
||||
{ |
||||
matches = [ |
||||
{ |
||||
# matches all sinks |
||||
node.name = "~v4l2_input.*"; |
||||
} |
||||
{ |
||||
# matches all sources |
||||
node.name = "~v4l2_output.*"; |
||||
} |
||||
]; |
||||
actions = { |
||||
update-props = { |
||||
#node.nick = "My Node"; |
||||
#node.nick = null; |
||||
#priority.driver = 100; |
||||
#priority.session = 100; |
||||
#node.pause-on-idle = true; |
||||
}; |
||||
}; |
||||
} |
||||
]; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
###### implementation |
||||
config = mkIf cfg.enable { |
||||
environment.systemPackages = [ cfg.package ]; |
||||
services.pipewire.sessionManagerExecutable = "${cfg.package}/bin/pipewire-media-session"; |
||||
|
||||
environment.etc."pipewire/media-session.d/media-session.conf" = { text = toSPAJSON cfg.config; }; |
||||
environment.etc."pipewire/media-session.d/v4l2-monitor.conf" = { text = toSPAJSON cfg.v4l2MonitorConfig; }; |
||||
|
||||
environment.etc."pipewire/media-session.d/with-alsa" = mkIf config.services.pipewire.alsa.enable { text = ""; }; |
||||
environment.etc."pipewire/media-session.d/alsa-monitor.conf" = mkIf config.services.pipewire.alsa.enable { text = toSPAJSON cfg.alsaMonitorConfig; }; |
||||
|
||||
environment.etc."pipewire/media-session.d/with-pulseaudio" = mkIf config.services.pipewire.pulse.enable { text = ""; }; |
||||
environment.etc."pipewire/media-session.d/bluez-monitor.conf" = mkIf config.services.pipewire.pulse.enable { text = toSPAJSON cfg.bluezMonitorConfig; }; |
||||
|
||||
environment.etc."pipewire/media-session.d/with-jack" = mkIf config.services.pipewire.jack.enable { text = ""; }; |
||||
}; |
||||
} |
@ -0,0 +1,265 @@ |
||||
# pipewire service. |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.pipewire; |
||||
enable32BitAlsaPlugins = cfg.alsa.support32Bit |
||||
&& pkgs.stdenv.isx86_64 |
||||
&& pkgs.pkgsi686Linux.pipewire != null; |
||||
|
||||
# The package doesn't output to $out/lib/pipewire directly so that the |
||||
# overlays can use the outputs to replace the originals in FHS environments. |
||||
# |
||||
# This doesn't work in general because of missing development information. |
||||
jack-libs = pkgs.runCommand "jack-libs" {} '' |
||||
mkdir -p "$out/lib" |
||||
ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire" |
||||
''; |
||||
|
||||
# Helpers for generating the pipewire JSON config file |
||||
mkSPAValueString = v: |
||||
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]" |
||||
else if lib.types.attrs.check v then |
||||
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}" |
||||
else lib.generators.mkValueStringDefault { } v; |
||||
|
||||
mkSPAKeyValue = attrs: map (def: def.content) ( |
||||
lib.sortProperties |
||||
( |
||||
lib.mapAttrsToList |
||||
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ] k} = ${mkSPAValueString (v._content or v)}") |
||||
attrs |
||||
) |
||||
); |
||||
|
||||
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs); |
||||
in { |
||||
|
||||
meta = { |
||||
maintainers = teams.freedesktop.members; |
||||
}; |
||||
|
||||
###### interface |
||||
options = { |
||||
services.pipewire = { |
||||
enable = mkEnableOption "pipewire service"; |
||||
|
||||
package = mkOption { |
||||
type = types.package; |
||||
default = pkgs.pipewire; |
||||
defaultText = "pkgs.pipewire"; |
||||
example = literalExample "pkgs.pipewire"; |
||||
description = '' |
||||
The pipewire derivation to use. |
||||
''; |
||||
}; |
||||
|
||||
socketActivation = mkOption { |
||||
default = true; |
||||
type = types.bool; |
||||
description = '' |
||||
Automatically run pipewire when connections are made to the pipewire socket. |
||||
''; |
||||
}; |
||||
|
||||
config = mkOption { |
||||
type = types.attrs; |
||||
description = '' |
||||
Configuration for the pipewire daemon. |
||||
''; |
||||
default = { |
||||
properties = { |
||||
## set-prop is used to configure properties in the system |
||||
# |
||||
# "library.name.system" = "support/libspa-support"; |
||||
# "context.data-loop.library.name.system" = "support/libspa-support"; |
||||
"link.max-buffers" = 16; # version < 3 clients can't handle more than 16 |
||||
#"mem.allow-mlock" = false; |
||||
#"mem.mlock-all" = true; |
||||
## https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/src/pipewire/pipewire.h#L93 |
||||
#"log.level" = 2; # 5 is trace, which is verbose as hell, default is 2 which is warnings, 4 is debug output, 3 is info |
||||
|
||||
## Properties for the DSP configuration |
||||
# |
||||
#"default.clock.rate" = 48000; |
||||
#"default.clock.quantum" = 1024; |
||||
#"default.clock.min-quantum" = 32; |
||||
#"default.clock.max-quantum" = 8192; |
||||
#"default.video.width" = 640; |
||||
#"default.video.height" = 480; |
||||
#"default.video.rate.num" = 25; |
||||
#"default.video.rate.denom" = 1; |
||||
}; |
||||
|
||||
spa-libs = { |
||||
## add-spa-lib <factory-name regex> <library-name> |
||||
# |
||||
# used to find spa factory names. It maps an spa factory name |
||||
# regular expression to a library name that should contain |
||||
# that factory. |
||||
# |
||||
"audio.convert*" = "audioconvert/libspa-audioconvert"; |
||||
"api.alsa.*" = "alsa/libspa-alsa"; |
||||
"api.v4l2.*" = "v4l2/libspa-v4l2"; |
||||
"api.libcamera.*" = "libcamera/libspa-libcamera"; |
||||
"api.bluez5.*" = "bluez5/libspa-bluez5"; |
||||
"api.vulkan.*" = "vulkan/libspa-vulkan"; |
||||
"api.jack.*" = "jack/libspa-jack"; |
||||
"support.*" = "support/libspa-support"; |
||||
# "videotestsrc" = "videotestsrc/libspa-videotestsrc"; |
||||
# "audiotestsrc" = "audiotestsrc/libspa-audiotestsrc"; |
||||
}; |
||||
|
||||
modules = { |
||||
## <module-name> = { [args = "<key>=<value> ..."] |
||||
# [flags = ifexists] } |
||||
# [flags = [ifexists]|[nofail]} |
||||
# |
||||
# Loads a module with the given parameters. |
||||
# If ifexists is given, the module is ignoed when it is not found. |
||||
# If nofail is given, module initialization failures are ignored. |
||||
# |
||||
libpipewire-module-rtkit = { |
||||
args = { |
||||
#rt.prio = 20; |
||||
#rt.time.soft = 200000; |
||||
#rt.time.hard = 200000; |
||||
#nice.level = -11; |
||||
}; |
||||
flags = "ifexists|nofail"; |
||||
}; |
||||
libpipewire-module-protocol-native = { _priority = -100; _content = "null"; }; |
||||
libpipewire-module-profiler = "null"; |
||||
libpipewire-module-metadata = "null"; |
||||
libpipewire-module-spa-device-factory = "null"; |
||||
libpipewire-module-spa-node-factory = "null"; |
||||
libpipewire-module-client-node = "null"; |
||||
libpipewire-module-client-device = "null"; |
||||
libpipewire-module-portal = "null"; |
||||
libpipewire-module-access = { |
||||
args.access = { |
||||
allowed = ["${builtins.unsafeDiscardStringContext cfg.sessionManagerExecutable}"]; |
||||
rejected = []; |
||||
restricted = []; |
||||
force = "flatpak"; |
||||
}; |
||||
}; |
||||
libpipewire-module-adapter = "null"; |
||||
libpipewire-module-link-factory = "null"; |
||||
libpipewire-module-session-manager = "null"; |
||||
}; |
||||
|
||||
objects = { |
||||
## create-object [-nofail] <factory-name> [<key>=<value> ...] |
||||
# |
||||
# Creates an object from a PipeWire factory with the given parameters. |
||||
# If -nofail is given, errors are ignored (and no object is created) |
||||
# |
||||
}; |
||||
|
||||
|
||||
exec = { |
||||
## exec <program-name> |
||||
# |
||||
# Execute the given program. This is usually used to start the |
||||
# session manager. run the session manager with -h for options |
||||
# |
||||
"${builtins.unsafeDiscardStringContext cfg.sessionManagerExecutable}" = { args = "\"${lib.concatStringsSep " " cfg.sessionManagerArguments}\""; }; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
sessionManagerExecutable = mkOption { |
||||
type = types.str; |
||||
default = ""; |
||||
example = literalExample ''${pkgs.pipewire.mediaSession}/bin/pipewire-media-session''; |
||||
description = '' |
||||
Path to the session manager executable. |
||||
''; |
||||
}; |
||||
|
||||
sessionManagerArguments = mkOption { |
||||
type = types.listOf types.str; |
||||
default = []; |
||||
example = literalExample ''["-p" "bluez5.msbc-support=true"]''; |
||||
description = '' |
||||
Arguments passed to the pipewire session manager. |
||||
''; |
||||
}; |
||||
|
||||
alsa = { |
||||
enable = mkEnableOption "ALSA support"; |
||||
support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems"; |
||||
}; |
||||
|
||||
jack = { |
||||
enable = mkEnableOption "JACK audio emulation"; |
||||
}; |
||||
|
||||
pulse = { |
||||
enable = mkEnableOption "PulseAudio server emulation"; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
|
||||
###### implementation |
||||
config = mkIf cfg.enable { |
||||
assertions = [ |
||||
{ |
||||
assertion = cfg.pulse.enable -> !config.hardware.pulseaudio.enable; |
||||
message = "PipeWire based PulseAudio server emulation replaces PulseAudio. This option requires `hardware.pulseaudio.enable` to be set to false"; |
||||
} |
||||
{ |
||||
assertion = cfg.jack.enable -> !config.services.jack.jackd.enable; |
||||
message = "PipeWire based JACK emulation doesn't use the JACK service. This option requires `services.jack.jackd.enable` to be set to false"; |
||||
} |
||||
]; |
||||
|
||||
environment.systemPackages = [ cfg.package ] |
||||
++ lib.optional cfg.jack.enable jack-libs; |
||||
|
||||
systemd.packages = [ cfg.package ] |
||||
++ lib.optional cfg.pulse.enable cfg.package.pulse; |
||||
|
||||
# PipeWire depends on DBUS but doesn't list it. Without this booting |
||||
# into a terminal results in the service crashing with an error. |
||||
systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ]; |
||||
systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"]; |
||||
systemd.user.services.pipewire.bindsTo = [ "dbus.service" ]; |
||||
services.udev.packages = [ cfg.package ]; |
||||
|
||||
# If any paths are updated here they must also be updated in the package test. |
||||
environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable { |
||||
text = '' |
||||
pcm_type.pipewire { |
||||
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ; |
||||
${optionalString enable32BitAlsaPlugins |
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"} |
||||
} |
||||
ctl_type.pipewire { |
||||
libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ; |
||||
${optionalString enable32BitAlsaPlugins |
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"} |
||||
} |
||||
''; |
||||
}; |
||||
environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable { |
||||
source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf"; |
||||
}; |
||||
environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable { |
||||
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf"; |
||||
}; |
||||
|
||||
environment.sessionVariables.LD_LIBRARY_PATH = |
||||
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire"; |
||||
|
||||
# https://gitlab.freedesktop.org/pipewire/pipewire/-/issues/464#note_723554 |
||||
systemd.user.services.pipewire.environment = { |
||||
"PIPEWIRE_LINK_PASSIVE" = "1"; |
||||
"PIPEWIRE_CONFIG_FILE" = pkgs.writeText "pipewire.conf" (toSPAJSON cfg.config); |
||||
}; |
||||
}; |
||||
} |
@ -0,0 +1,26 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let cfg = config.hardware.spacenavd; |
||||
|
||||
in { |
||||
|
||||
options = { |
||||
hardware.spacenavd = { |
||||
enable = mkEnableOption "spacenavd to support 3DConnexion devices"; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
systemd.user.services.spacenavd = { |
||||
description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion"; |
||||
after = [ "syslog.target" ]; |
||||
wantedBy = [ "graphical.target" ]; |
||||
serviceConfig = { |
||||
ExecStart = "${pkgs.spacenavd}/bin/spacenavd -d -l syslog"; |
||||
StandardError = "syslog"; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
@ -1,69 +0,0 @@ |
||||
worker_processes 3 |
||||
|
||||
listen ENV["UNICORN_PATH"] + "/tmp/sockets/gitlab.socket", :backlog => 1024 |
||||
listen "/run/gitlab/gitlab.socket", :backlog => 1024 |
||||
|
||||
working_directory ENV["GITLAB_PATH"] |
||||
|
||||
pid ENV["UNICORN_PATH"] + "/tmp/pids/unicorn.pid" |
||||
|
||||
timeout 60 |
||||
|
||||
# combine Ruby 2.0.0dev or REE with "preload_app true" for memory savings |
||||
# http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow |
||||
preload_app true |
||||
GC.respond_to?(:copy_on_write_friendly=) and |
||||
GC.copy_on_write_friendly = true |
||||
|
||||
check_client_connection false |
||||
|
||||
before_fork do |server, worker| |
||||
# the following is highly recommended for Rails + "preload_app true" |
||||
# as there's no need for the master process to hold a connection |
||||
defined?(ActiveRecord::Base) and |
||||
ActiveRecord::Base.connection.disconnect! |
||||
|
||||
# The following is only recommended for memory/DB-constrained |
||||
# installations. It is not needed if your system can house |
||||
# twice as many worker_processes as you have configured. |
||||
# |
||||
# This allows a new master process to incrementally |
||||
# phase out the old master process with SIGTTOU to avoid a |
||||
# thundering herd (especially in the "preload_app false" case) |
||||
# when doing a transparent upgrade. The last worker spawned |
||||
# will then kill off the old master process with a SIGQUIT. |
||||
old_pid = "#{server.config[:pid]}.oldbin" |
||||
if old_pid != server.pid |
||||
begin |
||||
sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU |
||||
Process.kill(sig, File.read(old_pid).to_i) |
||||
rescue Errno::ENOENT, Errno::ESRCH |
||||
end |
||||
end |
||||
|
||||
# Throttle the master from forking too quickly by sleeping. Due |
||||
# to the implementation of standard Unix signal handlers, this |
||||
# helps (but does not completely) prevent identical, repeated signals |
||||
# from being lost when the receiving process is busy. |
||||
# sleep 1 |
||||
end |
||||
|
||||
after_fork do |server, worker| |
||||
# per-process listener ports for debugging/admin/migrations |
||||
# addr = "127.0.0.1:#{9293 + worker.nr}" |
||||
# server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true) |
||||
|
||||
# the following is *required* for Rails + "preload_app true", |
||||
defined?(ActiveRecord::Base) and |
||||
ActiveRecord::Base.establish_connection |
||||
|
||||
# reset prometheus client, this will cause any opened metrics files to be closed |
||||
defined?(::Prometheus::Client.reinitialize_on_pid_change) && |
||||
Prometheus::Client.reinitialize_on_pid_change |
||||
|
||||
# if preload_app is true, then you may also want to check and |
||||
# restart any other shared sockets/descriptors such as Memcached, |
||||
# and Redis. TokyoCabinet file handles are safe to reuse |
||||
# between any number of forked children (assuming your kernel |
||||
# correctly implements pread()/pwrite() system calls) |
||||
end |
@ -0,0 +1,92 @@ |
||||
{ config, lib, pkgs, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.etesync-dav; |
||||
in |
||||
{ |
||||
options.services.etesync-dav = { |
||||
enable = mkEnableOption "etesync-dav"; |
||||
|
||||
host = mkOption { |
||||
type = types.str; |
||||
default = "localhost"; |
||||
description = "The server host address."; |
||||
}; |
||||
|
||||
port = mkOption { |
||||
type = types.port; |
||||
default = 37358; |
||||
description = "The server host port."; |
||||
}; |
||||
|
||||
apiUrl = mkOption { |
||||
type = types.str; |
||||
default = "https://api.etesync.com/"; |
||||
description = "The url to the etesync API."; |
||||
}; |
||||
|
||||
openFirewall = mkOption { |
||||
default = false; |
||||
type = types.bool; |
||||
description = "Whether to open the firewall for the specified port."; |
||||
}; |
||||
|
||||
sslCertificate = mkOption { |
||||
type = types.nullOr types.path; |
||||
default = null; |
||||
example = "/var/etesync.crt"; |
||||
description = '' |
||||
Path to server SSL certificate. It will be copied into |
||||
etesync-dav's data directory. |
||||
''; |
||||
}; |
||||
|
||||
sslCertificateKey = mkOption { |
||||
type = types.nullOr types.path; |
||||
default = null; |
||||
example = "/var/etesync.key"; |
||||
description = '' |
||||
Path to server SSL certificate key. It will be copied into |
||||
etesync-dav's data directory. |
||||
''; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ]; |
||||
|
||||
systemd.services.etesync-dav = { |
||||
description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync"; |
||||
after = [ "network-online.target" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
path = [ pkgs.etesync-dav ]; |
||||
environment = { |
||||
ETESYNC_LISTEN_ADDRESS = cfg.host; |
||||
ETESYNC_LISTEN_PORT = toString cfg.port; |
||||
ETESYNC_URL = cfg.apiUrl; |
||||
ETESYNC_DATA_DIR = "/var/lib/etesync-dav"; |
||||
}; |
||||
|
||||
serviceConfig = { |
||||
Type = "simple"; |
||||
DynamicUser = true; |
||||
StateDirectory = "etesync-dav"; |
||||
ExecStart = "${pkgs.etesync-dav}/bin/etesync-dav"; |
||||
ExecStartPre = mkIf (cfg.sslCertificate != null || cfg.sslCertificateKey != null) ( |
||||
pkgs.writers.writeBash "etesync-dav-copy-keys" '' |
||||
${optionalString (cfg.sslCertificate != null) '' |
||||
cp ${toString cfg.sslCertificate} $STATE_DIRECTORY/etesync.crt |
||||
''} |
||||
${optionalString (cfg.sslCertificateKey != null) '' |
||||
cp ${toString cfg.sslCertificateKey} $STATE_DIRECTORY/etesync.key |
||||
''} |
||||
'' |
||||
); |
||||
Restart = "on-failure"; |
||||
RestartSec = "30min 1s"; |
||||
}; |
||||
}; |
||||
}; |
||||
} |
@ -0,0 +1,164 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
let |
||||
cfg = config.services.lifecycled; |
||||
|
||||
# TODO: Add the ability to extend this with an rfc 42-like interface. |
||||
# In the meantime, one can modify the environment (as |
||||
# long as it's not overriding anything from here) with |
||||
# systemd.services.lifecycled.serviceConfig.Environment |
||||
configFile = pkgs.writeText "lifecycled" '' |
||||
LIFECYCLED_HANDLER=${cfg.handler} |
||||
${lib.optionalString (cfg.cloudwatchGroup != null) "LIFECYCLED_CLOUDWATCH_GROUP=${cfg.cloudwatchGroup}"} |
||||
${lib.optionalString (cfg.cloudwatchStream != null) "LIFECYCLED_CLOUDWATCH_STREAM=${cfg.cloudwatchStream}"} |
||||
${lib.optionalString cfg.debug "LIFECYCLED_DEBUG=${lib.boolToString cfg.debug}"} |
||||
${lib.optionalString (cfg.instanceId != null) "LIFECYCLED_INSTANCE_ID=${cfg.instanceId}"} |
||||
${lib.optionalString cfg.json "LIFECYCLED_JSON=${lib.boolToString cfg.json}"} |
||||
${lib.optionalString cfg.noSpot "LIFECYCLED_NO_SPOT=${lib.boolToString cfg.noSpot}"} |
||||
${lib.optionalString (cfg.snsTopic != null) "LIFECYCLED_SNS_TOPIC=${cfg.snsTopic}"} |
||||
${lib.optionalString (cfg.awsRegion != null) "AWS_REGION=${cfg.awsRegion}"} |
||||
''; |
||||
in |
||||
{ |
||||
meta.maintainers = with maintainers; [ cole-h grahamc ]; |
||||
|
||||
options = { |
||||
services.lifecycled = { |
||||
enable = mkEnableOption "lifecycled"; |
||||
|
||||
queueCleaner = { |
||||
enable = mkEnableOption "lifecycled-queue-cleaner"; |
||||
|
||||
frequency = mkOption { |
||||
type = types.str; |
||||
default = "hourly"; |
||||
description = '' |
||||
How often to trigger the queue cleaner. |
||||
|
||||
NOTE: This string should be a valid value for a systemd |
||||
timer's <literal>OnCalendar</literal> configuration. See |
||||
<citerefentry><refentrytitle>systemd.timer</refentrytitle><manvolnum>5</manvolnum></citerefentry> |
||||
for more information. |
||||
''; |
||||
}; |
||||
|
||||
parallel = mkOption { |
||||
type = types.ints.unsigned; |
||||
default = 20; |
||||
description = '' |
||||
The number of parallel deletes to run. |
||||
''; |
||||
}; |
||||
}; |
||||
|
||||
instanceId = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
description = '' |
||||
The instance ID to listen for events for. |
||||
''; |
||||
}; |
||||
|
||||
snsTopic = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
description = '' |
||||
The SNS topic that receives events. |
||||
''; |
||||
}; |
||||
|
||||
noSpot = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = '' |
||||
Disable the spot termination listener. |
||||
''; |
||||
}; |
||||
|
||||
handler = mkOption { |
||||
type = types.path; |
||||
description = '' |
||||
The script to invoke to handle events. |
||||
''; |
||||
}; |
||||
|
||||
json = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = '' |
||||
Enable JSON logging. |
||||
''; |
||||
}; |
||||
|
||||
cloudwatchGroup = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
description = '' |
||||
Write logs to a specific Cloudwatch Logs group. |
||||
''; |
||||
}; |
||||
|
||||
cloudwatchStream = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
description = '' |
||||
Write logs to a specific Cloudwatch Logs stream. Defaults to the instance ID. |
||||
''; |
||||
}; |
||||
|
||||
debug = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = '' |
||||
Enable debugging information. |
||||
''; |
||||
}; |
||||
|
||||
# XXX: Can be removed if / when |
||||
# https://github.com/buildkite/lifecycled/pull/91 is merged. |
||||
awsRegion = mkOption { |
||||
type = types.nullOr types.str; |
||||
default = null; |
||||
description = '' |
||||
The region used for accessing AWS services. |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
### Implementation ### |
||||
|
||||
config = mkMerge [ |
||||
(mkIf cfg.enable { |
||||
environment.etc."lifecycled".source = configFile; |
||||
|
||||
systemd.packages = [ pkgs.lifecycled ]; |
||||
systemd.services.lifecycled = { |
||||
wantedBy = [ "network-online.target" ]; |
||||
restartTriggers = [ configFile ]; |
||||
}; |
||||
}) |
||||
|
||||
(mkIf cfg.queueCleaner.enable { |
||||
systemd.services.lifecycled-queue-cleaner = { |
||||
description = "Lifecycle Daemon Queue Cleaner"; |
||||
environment = optionalAttrs (cfg.awsRegion != null) { AWS_REGION = cfg.awsRegion; }; |
||||
serviceConfig = { |
||||
Type = "oneshot"; |
||||
ExecStart = "${pkgs.lifecycled}/bin/lifecycled-queue-cleaner -parallel ${toString cfg.queueCleaner.parallel}"; |
||||
}; |
||||
}; |
||||
|
||||
systemd.timers.lifecycled-queue-cleaner = { |
||||
description = "Lifecycle Daemon Queue Cleaner Timer"; |
||||
wantedBy = [ "timers.target" ]; |
||||
after = [ "network-online.target" ]; |
||||
timerConfig = { |
||||
Unit = "lifecycled-queue-cleaner.service"; |
||||
OnCalendar = "${cfg.queueCleaner.frequency}"; |
||||
}; |
||||
}; |
||||
}) |
||||
]; |
||||
} |
@ -0,0 +1,82 @@ |
||||
{ config, pkgs, lib, ... }: |
||||
|
||||
with lib; |
||||
|
||||
let |
||||
cfg = config.services.plikd; |
||||
|
||||
format = pkgs.formats.toml {}; |
||||
plikdCfg = format.generate "plikd.cfg" cfg.settings; |
||||
in |
||||
{ |
||||
options = { |
||||
services.plikd = { |
||||
enable = mkEnableOption "the plikd server"; |
||||
|
||||
openFirewall = mkOption { |
||||
type = types.bool; |
||||
default = false; |
||||
description = "Open ports in the firewall for the plikd."; |
||||
}; |
||||
|
||||
settings = mkOption { |
||||
type = format.type; |
||||
default = {}; |
||||
description = '' |
||||
Configuration for plikd, see <link xlink:href="https://github.com/root-gg/plik/blob/master/server/plikd.cfg"/> |
||||
for supported values. |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
config = mkIf cfg.enable { |
||||
services.plikd.settings = mapAttrs (name: mkDefault) { |
||||
ListenPort = 8080; |
||||
ListenAddress = "localhost"; |
||||
DataBackend = "file"; |
||||
DataBackendConfig = { |
||||
Directory = "/var/lib/plikd"; |
||||
}; |
||||
MetadataBackendConfig = { |
||||
Driver = "sqlite3"; |
||||
ConnectionString = "/var/lib/plikd/plik.db"; |
||||
}; |
||||
}; |
||||
|
||||
systemd.services.plikd = { |
||||
description = "Plikd file sharing server"; |
||||
after = [ "network.target" ]; |
||||
wantedBy = [ "multi-user.target" ]; |
||||
serviceConfig = { |
||||
Type = "simple"; |
||||
ExecStart = "${pkgs.plikd}/bin/plikd --config ${plikdCfg}"; |
||||
Restart = "on-failure"; |
||||
StateDirectory = "plikd"; |
||||
LogsDirectory = "plikd"; |
||||
DynamicUser = true; |
||||
|
||||
# Basic hardening |
||||
NoNewPrivileges = "yes"; |
||||
PrivateTmp = "yes"; |
||||
PrivateDevices = "yes"; |
||||
DevicePolicy = "closed"; |
||||
ProtectSystem = "strict"; |
||||
ProtectHome = "read-only"; |
||||
ProtectControlGroups = "yes"; |
||||
ProtectKernelModules = "yes"; |
||||
ProtectKernelTunables = "yes"; |
||||
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK"; |
||||
RestrictNamespaces = "yes"; |
||||
RestrictRealtime = "yes"; |
||||
RestrictSUIDSGID = "yes"; |
||||
MemoryDenyWriteExecute = "yes"; |
||||
LockPersonality = "yes"; |
||||
}; |
||||
}; |
||||
|
||||
networking.firewall = mkIf cfg.openFirewall { |
||||
allowedTCPPorts = [ cfg.settings.ListenPort ]; |
||||
}; |
||||
}; |
||||
} |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue