Merge branch 'staging' into closure-size

wip/yesman
Vladimír Čunát 8 years ago
commit 716aac2519
  1. README.md (4)
  2. doc/erlang-users-guide.xml (288)
  3. doc/functions.xml (336)
  4. doc/manual.xml (1)
  5. doc/meta.xml (5)
  6. doc/package-notes.xml (2)
  7. lib/maintainers.nix (6)
  8. lib/strings.nix (10)
  9. nixos/doc/manual/configuration/wireless.xml (16)
  10. nixos/doc/manual/man-nixos-rebuild.xml (52)
  11. nixos/doc/manual/release-notes/rl-unstable.xml (30)
  12. nixos/lib/test-driver/Machine.pm (5)
  13. nixos/modules/config/ldap.nix (1)
  14. nixos/modules/config/pulseaudio.nix (1)
  15. nixos/modules/config/shells-environment.nix (1)
  16. nixos/modules/config/swap.nix (5)
  17. nixos/modules/config/sysctl.nix (7)
  18. nixos/modules/config/unix-odbc-drivers.nix (5)
  19. nixos/modules/config/users-groups.nix (2)
  20. nixos/modules/hardware/video/encoder/wis-go7007.nix (15)
  21. nixos/modules/installer/cd-dvd/channel.nix (4)
  22. nixos/modules/installer/cd-dvd/installation-cd-base.nix (2)
  23. nixos/modules/installer/cd-dvd/iso-image.nix (2)
  24. nixos/modules/installer/cd-dvd/system-tarball-fuloong2f.nix (5)
  25. nixos/modules/installer/cd-dvd/system-tarball-pc.nix (2)
  26. nixos/modules/installer/cd-dvd/system-tarball-sheevaplug.nix (4)
  27. nixos/modules/installer/tools/tools.nix (2)
  28. nixos/modules/misc/crashdump.nix (1)
  29. nixos/modules/misc/ids.nix (22)
  30. nixos/modules/misc/nixos.nix (4)
  31. nixos/modules/misc/version.nix (72)
  32. nixos/modules/module-list.nix (17)
  33. nixos/modules/profiles/graphical.nix (1)
  34. nixos/modules/profiles/installation-device.nix (2)
  35. nixos/modules/programs/cdemu.nix (2)
  36. nixos/modules/programs/command-not-found/command-not-found.nix (6)
  37. nixos/modules/programs/command-not-found/command-not-found.pl (3)
  38. nixos/modules/programs/fish.nix (114)
  39. nixos/modules/programs/ssh.nix (24)
  40. nixos/modules/programs/zsh/zsh.nix (12)
  41. nixos/modules/rename.nix (14)
  42. nixos/modules/security/acme.nix (12)
  43. nixos/modules/security/audit.nix (109)
  44. nixos/modules/services/audio/fuppes.nix (115)
  45. nixos/modules/services/audio/fuppes/vfolder.cfg (155)
  46. nixos/modules/services/audio/liquidsoap.nix (2)
  47. nixos/modules/services/audio/mpd.nix (4)
  48. nixos/modules/services/backup/bacula.nix (4)
  49. nixos/modules/services/backup/rsnapshot.nix (1)
  50. nixos/modules/services/backup/tarsnap.nix (66)
  51. nixos/modules/services/continuous-integration/jenkins/default.nix (1)
  52. nixos/modules/services/continuous-integration/jenkins/job-builder.nix (2)
  53. nixos/modules/services/databases/4store-endpoint.nix (8)
  54. nixos/modules/services/databases/4store.nix (9)
  55. nixos/modules/services/databases/couchdb.nix (1)
  56. nixos/modules/services/databases/firebird.nix (1)
  57. nixos/modules/services/databases/hbase.nix (1)
  58. nixos/modules/services/databases/influxdb.nix (1)
  59. nixos/modules/services/databases/mongodb.nix (1)
  60. nixos/modules/services/databases/neo4j.nix (5)
  61. nixos/modules/services/databases/openldap.nix (30)
  62. nixos/modules/services/databases/opentsdb.nix (1)
  63. nixos/modules/services/databases/postgresql.nix (4)
  64. nixos/modules/services/databases/redis.nix (1)
  65. nixos/modules/services/databases/virtuoso.nix (25)
  66. nixos/modules/services/games/ghost-one.nix (4)
  67. nixos/modules/services/hardware/acpid.nix (117)
  68. nixos/modules/services/hardware/freefall.nix (1)
  69. nixos/modules/services/hardware/irqbalance.nix (30)
  70. nixos/modules/services/hardware/pommed.nix (15)
  71. nixos/modules/services/hardware/sane.nix (4)
  72. nixos/modules/services/hardware/thinkfan.nix (24)
  73. nixos/modules/services/hardware/upower.nix (1)
  74. nixos/modules/services/logging/klogd.nix (25)
  75. nixos/modules/services/logging/logrotate.nix (1)
  76. nixos/modules/services/logging/logstash.nix (3)
  77. nixos/modules/services/logging/syslog-ng.nix (1)
  78. nixos/modules/services/mail/dovecot.nix (3)
  79. nixos/modules/services/mail/dspam.nix (147)
  80. nixos/modules/services/mail/freepops.nix (17)
  81. nixos/modules/services/mail/opendkim.nix (109)
  82. nixos/modules/services/mail/postfix.nix (233)
  83. nixos/modules/services/mail/postsrsd.nix (107)
  84. nixos/modules/services/mail/rmilter.nix (189)
  85. nixos/modules/services/mail/rspamd.nix (90)
  86. nixos/modules/services/mail/spamassassin.nix (6)
  87. nixos/modules/services/misc/apache-kafka.nix (3)
  88. nixos/modules/services/misc/autofs.nix (3)
  89. nixos/modules/services/misc/cgminer.nix (1)
  90. nixos/modules/services/misc/confd.nix (1)
  91. nixos/modules/services/misc/dictd.nix (15)
  92. nixos/modules/services/misc/disnix.nix (111)
  93. nixos/modules/services/misc/docker-registry.nix (6)
  94. nixos/modules/services/misc/etcd.nix (10)
  95. nixos/modules/services/misc/felix.nix (97)
  96. nixos/modules/services/misc/folding-at-home.nix (32)
  97. nixos/modules/services/misc/gitit.nix (1)
  98. nixos/modules/services/misc/ihaskell.nix (1)
  99. nixos/modules/services/misc/mathics.nix (54)
  100. nixos/modules/services/misc/matrix-synapse-log_config.yaml (25)
  Some files were not shown because too many files have changed in this diff.

@ -1,8 +1,8 @@
[<img src="http://nixos.org/logo/nixos-hires.png" width="500px" alt="logo" />](https://nixos.org/nixos)
[![Build Status](https://travis-ci.org/NixOS/nixpkgs.svg?branch=master)](https://travis-ci.org/NixOS/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/pr)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/issue)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/pr?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
[![Issue Stats](http://www.issuestats.com/github/nixos/nixpkgs/badge/issue?style=flat)](http://www.issuestats.com/github/nixos/nixpkgs)
Nixpkgs is a collection of packages for the [Nix](https://nixos.org/nix/) package
manager. It is periodically built and tested by the [hydra](http://hydra.nixos.org/)

@ -0,0 +1,288 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="users-guide-to-the-erlang-infrastructure">
<title>User's Guide to the Erlang Infrastructure</title>
<section xml:id="how-to-install-erlang-packages">
<title>How to install Erlang packages</title>
<para>
Erlang packages are not registered in the top level simply because
they are not relevant to the vast majority of Nix users. They are
installable using the <literal>erlangPackages</literal> attribute set.
You can list the available packages in
<literal>erlangPackages</literal> with the following command:
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A erlangPackages
erlangPackages.esqlite esqlite-0.2.1
erlangPackages.goldrush goldrush-0.1.7
erlangPackages.ibrowse ibrowse-4.2.2
erlangPackages.jiffy jiffy-0.14.5
erlangPackages.lager lager-3.0.2
erlangPackages.meck meck-0.8.3
erlangPackages.rebar3-pc pc-1.1.0
</programlisting>
<para>
To install any of those packages into your profile, refer to them by
their attribute path (first column):
</para>
<programlisting>
$ nix-env -f &quot;&lt;nixpkgs&gt;&quot; -iA erlangPackages.ibrowse
</programlisting>
<para>
The attribute path of any Erlang package corresponds to the name
of that particular package in Hex, or to its OTP application/release name.
</para>
</section>
<section xml:id="packaging-erlang-applications">
<title>Packaging Erlang Applications</title>
<section xml:id="rebar3-packages">
<title>Rebar3 Packages</title>
<para>
There is a Nix function called
<literal>buildRebar3</literal>. We use this function to make a
derivation that understands how to build a Rebar3 project. For
example, the expression we use to build the <link
xlink:href="https://github.com/erlang-nix/hex2nix">hex2nix</link>
project follows.
</para>
<programlisting>
{stdenv, fetchFromGitHub, buildRebar3, ibrowse, jsx, erlware_commons }:
buildRebar3 rec {
name = "hex2nix";
version = "0.0.1";
src = fetchFromGitHub {
owner = "ericbmerritt";
repo = "hex2nix";
rev = "${version}";
sha256 = "1w7xjidz1l5yjmhlplfx7kphmnpvqm67w99hd2m7kdixwdxq0zqg";
};
erlangDeps = [ ibrowse jsx erlware_commons ];
}
</programlisting>
<para>
The only visible difference between this derivation and
something like <literal>stdenv.mkDerivation</literal> is that we
have added <literal>erlangDeps</literal> to the derivation. If
you add your Erlang dependencies here, they will be correctly
handled by the system.
</para>
<para>
If your package needs to compile native code via Rebar's port
compilation mechanism, you should add <literal>compilePort =
true;</literal> to the derivation.
</para>
</section>
<section xml:id="hex-packages">
<title>Hex Packages</title>
<para>
Hex packages are based on Rebar packages. In fact, at the moment
we can only compile Hex packages that are buildable with
Rebar3. Packages that use Mix and other build systems are not
supported. That being said, we know a lot more about Hex and can
do more for you.
</para>
<programlisting>
{ buildHex }:
buildHex {
name = "esqlite";
version = "0.2.1";
sha256 = "1296fn1lz4lz4zqzn4dwc3flgkh0i6n4sydg501faabfbv8d3wkr";
compilePort = true;
}
</programlisting>
<para>
For Hex packages you need to provide the name, the version, and
the SHA-256 digest of the package, and use
<literal>buildHex</literal> to build it. Obviously, the package
needs to have already been published to Hex.
</para>
</section>
</section>
<section xml:id="how-to-develop">
<title>How to develop</title>
<section xml:id="accessing-an-environment">
<title>Accessing an Environment</title>
<para>
Often, all you want to do is be able to access a valid
environment that contains a specific package and its
dependencies. We can do that with the <literal>env</literal>
part of a derivation. For example, let's say we want to access an
Erlang REPL with ibrowse loaded. We could do the following.
</para>
<programlisting>
~/w/nixpkgs ❯❯❯ nix-shell -A erlangPackages.ibrowse.env --run "erl"
Erlang/OTP 18 [erts-7.0] [source] [64-bit] [smp:4:4] [async-threads:10] [hipe] [kernel-poll:false]
Eshell V7.0 (abort with ^G)
1> m(ibrowse).
Module: ibrowse
MD5: 3b3e0137d0cbb28070146978a3392945
Compiled: January 10 2016, 23:34
Object file: /nix/store/g1rlf65rdgjs4abbyj4grp37ry7ywivj-ibrowse-4.2.2/lib/erlang/lib/ibrowse-4.2.2/ebin/ibrowse.beam
Compiler options: [{outdir,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/ebin"},
debug_info,debug_info,nowarn_shadow_vars,
warn_unused_import,warn_unused_vars,warnings_as_errors,
{i,"/tmp/nix-build-ibrowse-4.2.2.drv-0/hex-source-ibrowse-4.2.2/_build/default/lib/ibrowse/include"}]
Exports:
add_config/1 send_req_direct/7
all_trace_off/0 set_dest/3
code_change/3 set_max_attempts/3
get_config_value/1 set_max_pipeline_size/3
get_config_value/2 set_max_sessions/3
get_metrics/0 show_dest_status/0
get_metrics/2 show_dest_status/1
handle_call/3 show_dest_status/2
handle_cast/2 spawn_link_worker_process/1
handle_info/2 spawn_link_worker_process/2
init/1 spawn_worker_process/1
module_info/0 spawn_worker_process/2
module_info/1 start/0
rescan_config/0 start_link/0
rescan_config/1 stop/0
send_req/3 stop_worker_process/1
send_req/4 stream_close/1
send_req/5 stream_next/1
send_req/6 terminate/2
send_req_direct/4 trace_off/0
send_req_direct/5 trace_off/2
send_req_direct/6 trace_on/0
trace_on/2
ok
2>
</programlisting>
<para>
Notice the <literal>-A erlangPackages.ibrowse.env</literal>. That
is the key to this functionality.
</para>
</section>
<section xml:id="creating-a-shell">
<title>Creating a Shell</title>
<para>
Getting access to an environment often isn't enough to do real
development. Many times we need to create a
<literal>shell.nix</literal> file and do our development inside
of the environment specified by that file. This file looks a lot
like the packaging described above. The main difference is that
<literal>src</literal> points to the project root and we call the
package directly.
</para>
<programlisting>
{ pkgs ? import &lt;nixpkgs&gt; {} }:
with pkgs;
let
f = { buildHex, ibrowse, jsx, erlware_commons }:
buildHex {
name = "hex2nix";
version = "0.1.0";
src = ./.;
erlangDeps = [ ibrowse jsx erlware_commons ];
};
drv = erlangPackages.callPackage f {};
in
drv
</programlisting>
<section xml:id="building-in-a-shell">
<title>Building in a shell</title>
<para>
Unfortunately for us users of Nix, Rebar isn't very cooperative
when it comes to building a hermetic
environment. When building the Rebar3 support we had to do some
sneaky things to keep it from going out and pulling packages on its
own. Also unfortunately, you have to do some of the same things
when building a project inside a Nix shell:
<orderedlist numeration="arabic">
<listitem>
<para>Run <literal>rebar3-nix-bootstrap</literal> every time
dependencies change</para>
</listitem>
<listitem>
<para>Set <literal>HOME</literal> to the current directory.</para>
</listitem>
</orderedlist>
If you do these two things then Rebar will be happy with you. I
codify these into a Makefile. Fortunately, rebar3-nix-bootstrap
is idempotent and fairly quick, so you can run it as often as
you like.
</para>
<programlisting>
# =============================================================================
# Rules
# =============================================================================
.PHONY: all test clean repl shell build analyze bootstrap
all: test
clean:
	rm -rf _build
	rm -rf .cache
repl:
	nix-shell --run "erl"
shell:
	nix-shell --run "bash"
bootstrap:
	nix-shell --pure --run "rebar3-nix-bootstrap"
build: bootstrap
	nix-shell --pure --run "HOME=$(CURDIR) rebar3 compile"
analyze: bootstrap
	nix-shell --pure --run "HOME=$(CURDIR) rebar3 do compile,dialyzer"
test: bootstrap
	nix-shell --pure --run "HOME=$(CURDIR) rebar3 do compile,dialyzer,eunit"
</programlisting>
<para>
If you add the <literal>shell.nix</literal> as described and
use Rebar via the Makefile above, things should simply work.
</para>
</section>
</section>
</section>
<section xml:id="generating-packages-from-hex-with-hex2nix">
<title>Generating Packages from Hex with Hex2Nix</title>
<para>
Updating the Hex packages requires the use of the
<literal>hex2nix</literal> tool. Given the path to the Erlang
modules (usually
<literal>pkgs/development/erlang-modules</literal>), it will
dump a file called
<literal>hex-packages.nix</literal>. That file will contain all
the packages that use a recognized build system in Hex. However,
it can't know whether or not all those packages are buildable.
</para>
<para>
To make life easier for our users, it makes good sense to go
ahead and attempt to build all those packages and remove the
ones that don't build. To do that, simply run the following command in
the root of your <literal>nixpkgs</literal> repository:
</para>
<programlisting>
$ nix-build -A erlangPackages
</programlisting>
<para>
That will build every package in
<literal>erlangPackages</literal>. Then you can go through and
manually remove the ones that fail. Hopefully, someone will
improve <literal>hex2nix</literal> in the future to automate
that.
</para>
</section>
</chapter>

@ -291,4 +291,340 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
</section>
<section xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#docker-image-specification-v100">
Docker Image Specification v1.0.0
</link>. Docker itself is not used to perform any of the operations done by these
functions.
</para>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that it can be used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname>, with example values, are
described below:
</para>
<example xml:id='ex-dockerTools-buildImage'><title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
}
</programlisting>
</example>
<para>The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> of a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will pick the first image available
in the repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will pick the first tag available for the base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied into the new
layer of the resulting image. This is similar to
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This is similar to
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal>
device to be available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started off the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#container-runconfig-field-descriptions">
Docker Image Specification v1.0.0
</link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied into the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only a single new layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
</para>
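<para>
As a minimal sketch of using the result: after building the expression with
<command>nix-build</command>, the resulting tarball can be loaded and run in Docker.
The file name <filename>redis-image.nix</filename> below is hypothetical; use whatever
file holds your <varname>buildImage</varname> call:
</para>
<programlisting>
$ nix-build redis-image.nix
$ docker load &lt; result
$ docker run -t redis:latest
</programlisting>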
<para>
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
</para>
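<para>
A minimal sketch, assuming <varname>buildArgs</varname> simply echoes back the
attribute set that was passed to <varname>buildImage</varname>:
</para>
<programlisting>
let
  image = pkgs.dockerTools.buildImage { name = "redis"; tag = "latest"; };
in
  image.buildArgs.name    # evaluates to "redis"
</programlisting>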
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command,
in that it can be used to fetch a Docker image from a Docker registry.
Currently only registry <literal>v1</literal> is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'><title>Docker pull</title>
<programlisting>
pullImage {
imageName = "debian"; <co xml:id='ex-dockerTools-pullImage-1' />
imageTag = "jessie"; <co xml:id='ex-dockerTools-pullImage-2' />
imageId = null; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2"; <co xml:id='ex-dockerTools-pullImage-4' />
indexUrl = "https://index.docker.io"; <co xml:id='ex-dockerTools-pullImage-5' />
registryUrl = "https://registry-1.docker.io";
registryVersion = "v1";
}
</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>imageId</varname>, if specified, selects the exact image to be fetched, instead
of <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
</para>
<note>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
</note>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
In the above example the default values are shown for the variables <varname>indexUrl</varname>,
<varname>registryUrl</varname> and <varname>registryVersion</varname>.
Hence by default the Docker.io registry is used to pull the images.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
in that it can be used to flatten a Docker image that contains multiple layers.
The result is the merge of all the layers of the image.
As such, the result is suitable for being imported in Docker
with <command>docker import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal>
device to be available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'><title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relating to the base image have the same synopsis as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper for setting up the base files for managing
users and groups, only if such files don't exist already.
It is suitable for being used in a
<varname>runAsRoot</varname> <xref linkend='ex-dockerTools-buildImage-runAsRoot'/> script, for cases like
the one in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'><title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> is necessary for shadow-utils to
manipulate users and groups.
</para>
</section>
</section>
</chapter>

@ -20,6 +20,7 @@
<xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" />
<xi:include href="haskell-users-guide.xml" />
<xi:include href="erlang-users-guide.xml" />
<xi:include href="contributing.xml" />
</book>

@ -112,11 +112,6 @@ meta-attributes</title>
package.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>version</varname></term>
<listitem><para>Package version.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>branch</varname></term>
<listitem><para>Release branch. Used to specify that a package is not

@ -125,7 +125,7 @@ $ make menuconfig ARCH=<replaceable>arch</replaceable></screen>
<listitem>
<para>It may be that the new kernel requires updating the external
kernel modules and kernel-dependent packages listed in the
<varname>kernelPackagesFor</varname> function in
<varname>linuxPackagesFor</varname> function in
<filename>all-packages.nix</filename> (such as the NVIDIA drivers,
AUFS, etc.). If the updated packages aren’t backwards compatible
with older kernels, you may need to keep the older versions

@ -38,6 +38,7 @@
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
badi = "Badi' Abdul-Wahid <abdulwahidc@gmail.com>";
balajisivaraman = "Balaji Sivaraman<sivaraman.balaji@gmail.com>";
Baughn = "Svein Ove Aas <sveina@gmail.com>";
bbenoist = "Baptist BENOIST <return_0@live.com>";
bcarrell = "Brandon Carrell <brandoncarrell@gmail.com>";
bcdarwin = "Ben Darwin <bcdarwin@gmail.com>";
@ -231,7 +232,7 @@
palo = "Ingolf Wanger <palipalo9@googlemail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
pesterhazy = "Paulus Esterhazy <pesterhazy@gmail.com>";
phausmann = "Philipp Hausmann <nix@314.ch>";
phile314 = "Philipp Hausmann <nix@314.ch>";
philandstuff = "Philip Potter <philip.g.potter@gmail.com>";
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
phunehehe = "Hoang Xuan Phu <phunehehe@gmail.com>";
@ -264,7 +265,6 @@
robbinch = "Robbin C. <robbinch33@gmail.com>";
robgssp = "Rob Glossop <robgssp@gmail.com>";
roconnor = "Russell O'Connor <roconnor@theorem.ca>";
roelof = "Roelof Wobben <rwobben@hotmail.com>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
rushmorem = "Rushmore Mushambi <rushmore@webenchanter.com>";
@ -294,6 +294,7 @@
steveej = "Stefan Junker <mail@stefanjunker.de>";
szczyp = "Szczyp <qb@szczyp.com>";
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
taeer = "Taeer Bar-Yam <taeer@necsi.edu>";
tailhook = "Paul Colomiets <paul@colomiets.name>";
taktoa = "Remy Goldschmidt <taktoa@gmail.com>";
telotortium = "Robert Irelan <rirelan@gmail.com>";
@ -334,6 +335,7 @@
wyvie = "Elijah Rum <elijahrum@gmail.com>";
yarr = "Dmitry V. <savraz@gmail.com>";
z77z = "Marco Maggesi <maggesi@math.unifi.it>";
zagy = "Christian Zagrodnick <cz@flyingcircus.io>";
zef = "Zef Hemel <zef@zef.me>";
zimbatm = "zimbatm <zimbatm@zimbatm.com>";
zoomulator = "Kim Simmons <zoomulator@gmail.com>";

@ -191,9 +191,13 @@ rec {
versionAtLeast = v1: v2: !versionOlder v1 v2;
# Get the version of the specified derivation, as specified in its
# ‘name’ attribute.
getVersion = drv: (builtins.parseDrvName drv.name).version;
# This function takes an argument that's either a derivation or a
# derivation's "name" attribute and extracts the version part from that
# argument. For example:
#
# lib.getVersion "youtube-dl-2016.01.01" ==> "2016.01.01"
# lib.getVersion pkgs.youtube-dl ==> "2016.01.01"
getVersion = x: (builtins.parseDrvName (x.name or x)).version;
# Extract name with version from URL. Ask for separator which is

@ -18,8 +18,20 @@ NixOS will start wpa_supplicant for you if you enable this setting:
networking.wireless.enable = true;
</programlisting>
NixOS currently does not generate wpa_supplicant's
configuration file, <literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
NixOS lets you specify networks for wpa_supplicant declaratively:
<programlisting>
networking.wireless.networks = {
echelon = {
psk = "abcdefgh";
};
"free.wifi" = {};
};
</programlisting>
Be aware that keys will be written to the nix store in plaintext!
When no networks are set, it will default to using a configuration file at
<literal>/etc/wpa_supplicant.conf</literal>. You should edit this file
yourself to define wireless networks, WPA keys and so on (see
wpa_supplicant.conf(5)).
</para>

@ -281,6 +281,51 @@ $ nixos-rebuild switch -p test -I nixos-config=./test.nix
</listitem>
</varlistentry>
<varlistentry>
<term><option>--build-host</option></term>
<listitem>
<para>Instead of building the new configuration locally, use the
specified host to perform the build. The host needs to be accessible
with ssh, and must be able to perform Nix builds. If the option
<option>--target-host</option> is not set, the build will be copied back
to the local machine when done.</para>
<para>Note that, if <option>--no-build-nix</option> is not specified,
Nix will be built both locally and remotely. This is because the
configuration will always be evaluated locally even though the building
might be performed remotely.</para>
<para>You can include a remote user name in
the host name (<replaceable>user@host</replaceable>). You can also set
ssh options by defining the <envar>NIX_SSHOPTS</envar> environment
variable.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--target-host</option></term>
<listitem>
<para>Specifies the NixOS target host. By setting this to something other
than <replaceable>localhost</replaceable>, the system activation will
happen on the remote host instead of the local machine. The remote host
needs to be accessible over ssh, and for the commands
<option>switch</option>, <option>boot</option> and <option>test</option>
you need root access.</para>
<para>If <option>--build-host</option> is not explicitly
specified, <option>--build-host</option> will implicitly be set to the
same value as <option>--target-host</option>. So, if you only specify
<option>--target-host</option> both building and activation will take
place remotely (and no build artifacts will be copied to the local
machine).</para>
<para>You can include a remote user name in
the host name (<replaceable>user@host</replaceable>). You can also set
ssh options by defining the <envar>NIX_SSHOPTS</envar> environment
variable.</para>
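<para>For example, the following command (with hypothetical host names)
builds the configuration on one machine and activates it on another;
extra <command>ssh</command> options could be passed by prefixing the
command with an <envar>NIX_SSHOPTS</envar> assignment:</para>
<screen>
$ nixos-rebuild switch --build-host builder@build.example.org --target-host root@server.example.org
</screen>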
</listitem>
</varlistentry>
</variablelist>
<para>In addition, <command>nixos-rebuild</command> accepts various
@ -305,6 +350,13 @@ the Nix manual for details.</para>
</listitem>
</varlistentry>
<varlistentry><term><envar>NIX_SSHOPTS</envar></term>
<listitem><para>Additional options to be passed to
<command>ssh</command> on the command line.</para></listitem>
</varlistentry>
</variablelist>
</refsection>

@ -24,6 +24,17 @@ nixos.path = ./nixpkgs-unstable-2015-12-06/nixos;
<xref linkend="module-misc-nixos" /></para>
</listitem>
<listitem>
<para>Firefox and similar browsers are now <emphasis>wrapped by default</emphasis>.
The package and attribute names are plain <literal>firefox</literal>
or <literal>midori</literal>, etc. Backward-compatibility attributes were set up,
but note that <command>nix-env -u</command> will <emphasis>not</emphasis> update
your current <literal>firefox-with-plugins</literal>;
you have to uninstall it and install <literal>firefox</literal> instead.
More discussion is <link xlink:href="https://github.com/NixOS/nixpkgs/pull/12299">
on the PR</link>. </para>
</listitem>
</itemizedlist>
<para>The following new services were added since the last release:
@ -47,6 +58,12 @@ following incompatible changes:</para>
</para>
</listitem>
<listitem>
<para>The <literal>jobs</literal> NixOS option has been removed. It served as a
compatibility layer between Upstart jobs and systemd services. All services
have been rewritten to use <literal>systemd.services</literal>.</para>
</listitem>
<listitem>
<para><command>wmiimenu</command> is removed, as it has been
removed by the developers upstream. Use <command>wimenu</command>
@ -130,4 +147,17 @@ nginx.override {
</listitem>
</itemizedlist>
<para>Other notable improvements:
<itemizedlist>
<listitem>
<para>The <command>command-not-found</command> hook was extended.
Apart from the <literal>$NIX_AUTO_INSTALL</literal> variable,
it now also checks for <literal>$NIX_AUTO_RUN</literal>,
which causes it to run the missing commands directly via
<command>nix-shell</command> (without installing anything).</para>
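<para>A hypothetical session, assuming the missing command is provided by a
package of the same name:</para>
<screen>
$ export NIX_AUTO_RUN=1
$ hello
Hello, world!
</screen>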
</listitem>
</itemizedlist></para>
</section>

@ -381,6 +381,11 @@ sub waitForUnit {
my $info = $self->getUnitInfo($unit);
my $state = $info->{ActiveState};
die "unit ‘$unit’ reached state ‘$state’\n" if $state eq "failed";
if ($state eq "inactive") {
my ($status, $jobs) = $self->execute("systemctl list-jobs --full 2>&1");
die "unit ‘$unit’ is inactive and there are no pending jobs\n"
if $jobs =~ /No jobs/; # FIXME: fragile
}
return 1 if $state eq "active";
};
});

@ -57,6 +57,7 @@ in
users.ldap = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable authentication against an LDAP server.";
};

@ -99,6 +99,7 @@ in {
package = mkOption {
type = types.package;
default = pulseaudioLight;
defaultText = "pkgs.pulseaudioLight";
example = literalExample "pkgs.pulseaudioFull";
description = ''
The PulseAudio derivation to use. This can be used to enable

@ -119,6 +119,7 @@ in
environment.binsh = mkOption {
default = "${config.system.build.binsh}/bin/sh";
defaultText = "\${config.system.build.binsh}/bin/sh";
example = literalExample ''
"''${pkgs.dash}/bin/dash"
'';

@ -128,6 +128,7 @@ in
wantedBy = [ "${realDevice'}.swap" ];
before = [ "${realDevice'}.swap" ];
path = [ pkgs.utillinux ] ++ optional sw.randomEncryption pkgs.cryptsetup;
script =
''
${optionalString (sw.size != null) ''
@ -145,11 +146,13 @@ in
mkswap ${sw.realDevice}
''}
'';
unitConfig.RequiresMountsFor = [ "${dirOf sw.device}" ];
unitConfig.DefaultDependencies = false; # needed to prevent a cycle
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = sw.randomEncryption;
serviceConfig.ExecStop = optionalString sw.randomEncryption "cryptsetup luksClose ${sw.deviceName}";
serviceConfig.ExecStop = optionalString sw.randomEncryption "${pkgs.cryptsetup}/bin/cryptsetup luksClose ${sw.deviceName}";
restartIfChanged = false;
};
in listToAttrs (map createSwapDevice (filter (sw: sw.size != null || sw.randomEncryption) config.swapDevices));

@ -22,10 +22,9 @@ in
boot.kernel.sysctl = mkOption {
default = {};
example = {
"net.ipv4.tcp_syncookies" = false;
"vm.swappiness" = 60;
};
example = literalExample ''
{ "net.ipv4.tcp_syncookies" = false; "vm.swappiness" = 60; }
'';
type = types.attrsOf sysctlOption;
description = ''
Runtime parameters of the Linux kernel, as set by

@ -10,8 +10,9 @@ with lib;
options = {
environment.unixODBCDrivers = mkOption {
type = types.listOf types.package;
default = [];
example = literalExample "map (x : x.ini) (with pkgs.unixODBCDrivers; [ mysql psql psqlng ] )";
example = literalExample "with pkgs.unixODBCDrivers; [ mysql psql psqlng ]";
description = ''
Specifies Unix ODBC drivers to be registered in
<filename>/etc/odbcinst.ini</filename>. You may also want to
@ -26,7 +27,7 @@ with lib;
config = mkIf (config.environment.unixODBCDrivers != []) {
environment.etc."odbcinst.ini".text =
let inis = config.environment.unixODBCDrivers;
let inis = map (x : x.ini) config.environment.unixODBCDrivers;
in lib.concatStringsSep "\n" inis;
};

@ -26,7 +26,7 @@ let
'';
hashedPasswordDescription = ''
To generate hashed password install <literal>mkpassword</literal>
To generate hashed password install <literal>mkpasswd</literal>
package and run <literal>mkpasswd -m sha-512</literal>.
'';

@ -1,15 +0,0 @@
{pkgs, config, ...}:
let
wis_go7007 = config.boot.kernelPackages.wis_go7007;
in
{
boot.extraModulePackages = [ wis_go7007 ];
environment.systemPackages = [ wis_go7007 ];
hardware.firmware = [ wis_go7007 ];
services.udev.packages = [ wis_go7007 ];
}

@ -17,7 +17,9 @@ let
mkdir -p $out
cp -prd ${pkgs.path} $out/nixos
chmod -R u+w $out/nixos
ln -s . $out/nixos/nixpkgs
if [ ! -e $out/nixos/nixpkgs ]; then
ln -s . $out/nixos/nixpkgs
fi
rm -rf $out/nixos/.git
echo -n ${config.system.nixosVersionSuffix} > $out/nixos/.version-suffix
'';

@ -16,7 +16,7 @@ with lib;
];
# ISO naming.
isoImage.isoName = "${config.isoImage.isoBaseName}-${config.system.nixosVersion}-${pkgs.stdenv.system}.iso";
isoImage.isoName = "${config.isoImage.isoBaseName}-${config.system.nixosLabel}-${pkgs.stdenv.system}.iso";
isoImage.volumeID = substring 0 11 "NIXOS_ISO";

@ -39,7 +39,7 @@ let
DEFAULT boot
LABEL boot
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel}
MENU LABEL NixOS ${config.system.nixosLabel}${config.isoImage.appendToMenuLabel}
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
INITRD /boot/initrd

@ -74,7 +74,7 @@ in
# Tools to create / manipulate filesystems.
pkgs.ntfsprogs # for resizing NTFS partitions
pkgs.btrfsProgs
pkgs.btrfs-progs
pkgs.jfsutils
# Some compression/archiver tools.
@ -149,8 +149,7 @@ in
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
jobs.openssh.startOn = lib.mkOverride 50 "";
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
boot.loader.grub.enable = false;
boot.loader.generationsDir.enable = false;

@ -109,7 +109,7 @@ in
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
jobs.openssh.startOn = lib.mkOverride 50 "";
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
# To be able to use the systemTarball to catch troubles.
boot.crashDump = {

@ -67,7 +67,7 @@ in
pkgs.dmraid
# Tools to create / manipulate filesystems.
pkgs.btrfsProgs
pkgs.btrfs-progs
# Some compression/archiver tools.
pkgs.unzip
@ -164,7 +164,7 @@ in
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
jobs.openssh.startOn = lib.mkOverride 50 "";
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
# cpufrequtils fails to build on non-pc
powerManagement.enable = false;

@ -38,7 +38,7 @@ let
nixos-generate-config = makeProg {
name = "nixos-generate-config";
src = ./nixos-generate-config.pl;
path = [ pkgs.btrfsProgs ];
path = [ pkgs.btrfs-progs ];
perl = "${pkgs.perl}/bin/perl -I${pkgs.perlPackages.FileSlurp}/lib/perl5/site_perl";
inherit (config.system) nixosRelease;
};

@ -24,6 +24,7 @@ in
'';
};
kernelPackages = mkOption {
type = types.package;
default = pkgs.linuxPackages;
# We don't want to evaluate all of linuxPackages for the manual
# - some of it might not even evaluate correctly.

@ -136,7 +136,7 @@
kippo = 108;
jenkins = 109;
systemd-journal-gateway = 110;
notbit = 111;
#notbit = 111; # unused
ngircd = 112;
btsync = 113;
minecraft = 114;
@ -239,6 +239,15 @@
bepasty = 215;
pumpio = 216;
nm-openvpn = 217;
mathics = 218;
ejabberd = 219;
postsrsd = 220;
opendkim = 221;
dspam = 222;
gale = 223;
matrix-synapse = 224;
rspamd = 225;
rmilter = 226;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -355,7 +364,7 @@
kippo = 108;
jenkins = 109;
systemd-journal-gateway = 110;
notbit = 111;
#notbit = 111; # unused
#ngircd = 112; # unused
btsync = 113;
#minecraft = 114; # unused
@ -455,6 +464,15 @@
bepasty = 215;
pumpio = 216;
nm-openvpn = 217;
mathics = 218;
ejabberd = 219;
postsrsd = 220;
opendkim = 221;
dspam = 222;
gale = 223;
matrix-synapse = 224;
rspamd = 225;
rmilter = 226;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

@ -37,8 +37,8 @@ with lib;
nixos.extraModules = mkOption {
default = [];
example = literalExample "mkIf config.services.openssh.enable [ ./sshd-config.nix ]";
type = types.listOf types.unspecified;
example = literalExample "[ ./sshd-config.nix ]";
type = types.listOf (types.either (types.submodule ({...}:{options={};})) types.path);
description = ''
Define additional modules which would be loaded to evaluate the
configuration.

@ -2,13 +2,21 @@
with lib;
let
cfg = config.system;
releaseFile = "${toString pkgs.path}/.version";
suffixFile = "${toString pkgs.path}/.version-suffix";
revisionFile = "${toString pkgs.path}/.git-revision";
in
{
options = {
options.system = {
system.stateVersion = mkOption {
stateVersion = mkOption {
type = types.str;
default = config.system.nixosRelease;
default = cfg.nixosRelease;
description = ''
Every once in a while, a new NixOS release may change
configuration defaults in a way incompatible with stateful
@ -22,38 +30,63 @@ with lib;
'';
};
system.nixosVersion = mkOption {
nixosLabel = mkOption {
type = types.str;
description = ''
NixOS version name to be used in the names of generated
outputs and boot labels.
If you ever wanted to influence the labels in your GRUB menu,
this option is for you.
It can be set directly or with the <envar>NIXOS_LABEL</envar>
environment variable for <command>nixos-rebuild</command>,
e.g.:
<screen>
#!/bin/sh
today=`date +%Y%m%d`
branch=`(cd nixpkgs ; git branch 2>/dev/null | sed -n '/^\* / { s|^\* ||; p; }')`
revision=`(cd nixpkgs ; git rev-parse HEAD)`
export NIXOS_LABEL="$today.$branch-''${revision:0:7}"
nixos-rebuild switch</screen>
'';
};
nixosVersion = mkOption {
internal = true;
type = types.str;
description = "NixOS version.";
};
system.nixosRelease = mkOption {
nixosRelease = mkOption {
readOnly = true;
type = types.str;
default = readFile "${toString pkgs.path}/.version";
default = readFile releaseFile;
description = "NixOS release.";
};
system.nixosVersionSuffix = mkOption {
nixosVersionSuffix = mkOption {
internal = true;
type = types.str;
default = if pathExists suffixFile then readFile suffixFile else "pre-git";
description = "NixOS version suffix.";
};
system.nixosRevision = mkOption {
nixosRevision = mkOption {
internal = true;
type = types.str;
default = if pathExists revisionFile then readFile revisionFile else "master";
description = "NixOS Git revision hash.";
};
system.nixosCodeName = mkOption {
nixosCodeName = mkOption {
readOnly = true;
type = types.str;
description = "NixOS release code name.";
};
system.defaultChannel = mkOption {
defaultChannel = mkOption {
internal = true;
type = types.str;
default = https://nixos.org/channels/nixos-unstable;
@ -64,18 +97,15 @@ with lib;
config = {
system.nixosVersion = mkDefault (config.system.nixosRelease + config.system.nixosVersionSuffix);
system.nixosVersionSuffix =
let suffixFile = "${toString pkgs.path}/.version-suffix"; in
mkDefault (if pathExists suffixFile then readFile suffixFile else "pre-git");
system = {
# These defaults are set here rather than up there so that
# changing them would not rebuild the manual
nixosLabel = mkDefault (maybeEnv "NIXOS_LABEL" cfg.nixosVersion);
nixosVersion = mkDefault (maybeEnv "NIXOS_VERSION" (cfg.nixosRelease + cfg.nixosVersionSuffix));
system.nixosRevision =
let fn = "${toString pkgs.path}/.git-revision"; in
mkDefault (if pathExists fn then readFile fn else "master");
# Note: code names must only increase in alphabetical order.
system.nixosCodeName = "Emu";
# Note: code names must only increase in alphabetical order.
nixosCodeName = "Emu";
};
# Generate /etc/os-release. See
# http://0pointer.de/public/systemd-man/os-release.html for the

@ -64,6 +64,7 @@
./programs/dconf.nix
./programs/environment.nix
./programs/freetds.nix
./programs/fish.nix
./programs/ibus.nix
./programs/kbdlight.nix
./programs/light.nix
@ -83,6 +84,7 @@
./security/acme.nix
./security/apparmor.nix
./security/apparmor-suid.nix
./security/audit.nix
./security/ca.nix
./security/duosec.nix
./security/grsecurity.nix
@ -98,8 +100,6 @@
./services/amqp/activemq/default.nix
./services/amqp/rabbitmq.nix
./services/audio/alsa.nix
# Disabled as fuppes no longer builds.
# ./services/audio/fuppes.nix
./services/audio/icecast.nix
./services/audio/liquidsoap.nix
./services/audio/mpd.nix
@ -162,6 +162,7 @@
./services/hardware/bluetooth.nix
./services/hardware/brltty.nix
./services/hardware/freefall.nix
./services/hardware/irqbalance.nix
./services/hardware/nvidia-optimus.nix
./services/hardware/pcscd.nix
./services/hardware/pommed.nix
@ -182,13 +183,18 @@
./services/logging/syslogd.nix
./services/logging/syslog-ng.nix
./services/mail/dovecot.nix
./services/mail/dspam.nix
./services/mail/exim.nix
./services/mail/freepops.nix
./services/mail/mail.nix
./services/mail/mlmmj.nix
./services/mail/opendkim.nix
./services/mail/opensmtpd.nix
./services/mail/postfix.nix
./services/mail/postsrsd.nix
./services/mail/spamassassin.nix
./services/mail/rspamd.nix
./services/mail/rmilter.nix
./services/misc/apache-kafka.nix
./services/misc/autofs.nix
./services/misc/bepasty.nix
@ -209,6 +215,8 @@
./services/misc/gitolite.nix
./services/misc/gpsd.nix
./services/misc/ihaskell.nix
./services/misc/mathics.nix
./services/misc/matrix-synapse.nix
./services/misc/mbpfan.nix
./services/misc/mediatomb.nix
./services/misc/mesos-master.nix
@ -296,6 +304,7 @@
./services/networking/firewall.nix
./services/networking/flashpolicyd.nix
./services/networking/freenet.nix
./services/networking/gale.nix
./services/networking/gateone.nix
./services/networking/git-daemon.nix
./services/networking/gnunet.nix
@ -321,7 +330,6 @@
./services/networking/networkmanager.nix
./services/networking/ngircd.nix
./services/networking/nix-serve.nix
./services/networking/notbit.nix
./services/networking/nsd.nix
./services/networking/ntopng.nix
./services/networking/ntpd.nix
@ -330,6 +338,7 @@
./services/networking/openfire.nix
./services/networking/openntpd.nix
./services/networking/openvpn.nix
./services/networking/ostinato.nix
./services/networking/polipo.nix
./services/networking/prayer.nix
./services/networking/privoxy.nix
@ -441,6 +450,7 @@
./services/x11/window-managers/metacity.nix
./services/x11/window-managers/none.nix
./services/x11/window-managers/twm.nix
./services/x11/window-managers/windowlab.nix
./services/x11/window-managers/wmii.nix
./services/x11/window-managers/xmonad.nix
./services/x11/xfs.nix
@ -473,7 +483,6 @@
./system/boot/timesyncd.nix
./system/boot/tmp.nix
./system/etc/etc.nix
./system/upstart/upstart.nix
./tasks/bcache.nix
./tasks/cpu-freq.nix
./tasks/encrypted-devices.nix

@ -8,6 +8,7 @@
enable = true;
displayManager.kdm.enable = true;
desktopManager.kde4.enable = true;
synaptics.enable = true; # for touchpad support on many laptops
};
environment.systemPackages = [ pkgs.glxinfo ];

@ -51,7 +51,7 @@ with lib;
# Enable wpa_supplicant, but don't start it by default.
networking.wireless.enable = mkDefault true;
jobs.wpa_supplicant.startOn = mkOverride 50 "";
systemd.services.wpa_supplicant.wantedBy = mkOverride 50 [];
# Tell the Nix evaluator to garbage collect more aggressively.
# This is desirable in memory-constrained environments that don't

@ -38,7 +38,7 @@ in {
config = mkIf cfg.enable {
boot = {
extraModulePackages = [ pkgs.linuxPackages.vhba ];
extraModulePackages = [ config.boot.kernelPackages.vhba ];
kernelModules = [ "vhba" ];
};

@ -16,7 +16,7 @@ let
isExecutable = true;
inherit (pkgs) perl;
perlFlags = concatStrings (map (path: "-I ${path}/lib/perl5/site_perl ")
[ pkgs.perlPackages.DBI pkgs.perlPackages.DBDSQLite ]);
[ pkgs.perlPackages.DBI pkgs.perlPackages.DBDSQLite pkgs.perlPackages.StringShellQuote ]);
};
in
@ -30,7 +30,7 @@ in
local p=/run/current-system/sw/bin/command-not-found
if [ -x $p -a -f /nix/var/nix/profiles/per-user/root/channels/nixos/programs.sqlite ]; then
# Run the helper program.
$p "$1"
$p "$@"
# Retry the command if we just installed it.
if [ $? = 126 ]; then
"$@"
@ -51,7 +51,7 @@ in
local p=/run/current-system/sw/bin/command-not-found
if [ -x $p -a -f /nix/var/nix/profiles/per-user/root/channels/nixos/programs.sqlite ]; then
# Run the helper program.
$p "$1"
$p "$@"
# Retry the command if we just installed it.
if [ $? = 126 ]; then

@ -3,6 +3,7 @@
use strict;
use DBI;
use DBD::SQLite;
use String::ShellQuote;
use Config;
my $program = $ARGV[0];
@ -31,6 +32,8 @@ the package ‘$package’, which I will now install for you.
EOF
;
exit 126 if system("nix-env", "-iA", "nixos.$package") == 0;
} elsif ($ENV{"NIX_AUTO_RUN"} // "") {
exec("nix-shell", "-p", $package, "--run", shell_quote("exec", @ARGV));
} else {
print STDERR <<EOF;
The program $program is currently not installed. You can install it by typing:

@ -0,0 +1,114 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfge = config.environment;
cfg = config.programs.fish;
fishAliases = concatStringsSep "\n" (
mapAttrsFlatten (k: v: "alias ${k} '${v}'") cfg.shellAliases
);
in
{
options = {
programs.fish = {
enable = mkOption {
default = false;
description = ''
Whether to configure fish as an interactive shell.
'';
type = types.bool;
};
shellAliases = mkOption {
default = config.environment.shellAliases;
description = ''
Set of aliases for fish shell. See <option>environment.shellAliases</option>
for an option format description.
'';
type = types.attrs;
};
shellInit = mkOption {
default = "";
description = ''
Shell script code called during fish shell initialisation.
'';
type = types.lines;
};
loginShellInit = mkOption {
default = "";
description = ''
Shell script code called during fish login shell initialisation.
'';
type = types.lines;
};
interactiveShellInit = mkOption {
default = "";
description = ''
Shell script code called during interactive fish shell initialisation.
'';
type = types.lines;
};
promptInit = mkOption {
default = "";
description = ''
Shell script code used to initialise fish prompt.
'';
type = types.lines;
};
};
};
config = mkIf cfg.enable {
environment.etc."fish/foreign-env/shellInit".text = cfge.shellInit;
environment.etc."fish/foreign-env/loginShellInit".text = cfge.loginShellInit;
environment.etc."fish/foreign-env/interactiveShellInit".text = cfge.interactiveShellInit;
environment.etc."fish/config.fish".text = ''
# /etc/fish/config.fish: DO NOT EDIT -- this file has been generated automatically.
set fish_function_path $fish_function_path ${pkgs.fish-foreign-env}/share/fish-foreign-env/functions
fenv source ${config.system.build.setEnvironment} 1> /dev/null
fenv source /etc/fish/foreign-env/shellInit 1> /dev/null
${cfg.shellInit}
if builtin status --is-login
fenv source /etc/fish/foreign-env/loginShellInit 1> /dev/null
${cfg.loginShellInit}
end
if builtin status --is-interactive
${fishAliases}
fenv source /etc/fish/foreign-env/interactiveShellInit 1> /dev/null
${cfg.interactiveShellInit}
end
'';
environment.systemPackages = [ pkgs.fish ];
environment.shells = [
"/run/current-system/sw/bin/fish"
"/var/run/current-system/sw/bin/fish"
"${pkgs.fish}/bin/fish"
];
};
}

@ -93,7 +93,9 @@ in
};
package = mkOption {
type = types.package;
default = pkgs.openssh;
defaultText = "pkgs.openssh";
description = ''
The package used for the openssh client and daemon.
'';
@ -142,16 +144,18 @@ in
description = ''
The set of system-wide known SSH hosts.
'';
example = [
{
hostNames = [ "myhost" "myhost.mydomain.com" "10.10.1.4" ];
publicKeyFile = literalExample "./pubkeys/myhost_ssh_host_dsa_key.pub";
}
{
hostNames = [ "myhost2" ];
publicKeyFile = literalExample "./pubkeys/myhost2_ssh_host_dsa_key.pub";
}
];
example = literalExample ''
[
{
hostNames = [ "myhost" "myhost.mydomain.com" "10.10.1.4" ];
publicKeyFile = "./pubkeys/myhost_ssh_host_dsa_key.pub";
}
{
hostNames = [ "myhost2" ];
publicKeyFile = "./pubkeys/myhost2_ssh_host_dsa_key.pub";
}
]
'';
};
};

@ -98,18 +98,18 @@ in
loginShellInit = cfge.loginShellInit;
interactiveShellInit = ''
${cfge.interactiveShellInit}
${cfg.promptInit}
${zshAliases}
# Some sane history defaults
# history defaults
export SAVEHIST=2000
export HISTSIZE=2000
export HISTFILE=$HOME/.zsh_history
setopt HIST_IGNORE_DUPS SHARE_HISTORY HIST_FCNTL_LOCK
${cfge.interactiveShellInit}
${cfg.promptInit}
${zshAliases}
# Tell zsh how to find installed completions
for p in ''${(z)NIX_PROFILES}; do
fpath+=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions)

@ -14,6 +14,20 @@ with lib;
(mkRenamedOptionModule [ "networking" "enableWLAN" ] [ "networking" "wireless" "enable" ])
(mkRenamedOptionModule [ "networking" "enableRT73Firmware" ] [ "networking" "enableRalinkFirmware" ])
(mkRenamedOptionModule [ "services" "cadvisor" "host" ] [ "services" "cadvisor" "listenAddress" ])
(mkRenamedOptionModule [ "services" "dockerRegistry" "host" ] [ "services" "dockerRegistry" "listenAddress" ])
(mkRenamedOptionModule [ "services" "elasticsearch" "host" ] [ "services" "elasticsearch" "listenAddress" ])
(mkRenamedOptionModule [ "services" "graphite" "api" "host" ] [ "services" "graphite" "api" "listenAddress" ])
(mkRenamedOptionModule [ "services" "graphite" "web" "host" ] [ "services" "graphite" "web" "listenAddress" ])
(mkRenamedOptionModule [ "services" "kibana" "host" ] [ "services" "kibana" "listenAddress" ])
(mkRenamedOptionModule [ "services" "mpd" "network" "host" ] [ "services" "mpd" "network" "listenAddress" ])
(mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "listenAddress" ])
(mkRenamedOptionModule [ "services" "shout" "host" ] [ "services" "shout" "listenAddress" ])
(mkRenamedOptionModule [ "services" "sslh" "host" ] [ "services" "sslh" "listenAddress" ])
(mkRenamedOptionModule [ "services" "statsd" "host" ] [ "services" "statsd" "listenAddress" ])
(mkRenamedOptionModule [ "services" "subsonic" "host" ] [ "services" "subsonic" "listenAddress" ])
(mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
# Old Grub-related options.
(mkRenamedOptionModule [ "boot" "initrd" "extraKernelModules" ] [ "boot" "initrd" "kernelModules" ])
(mkRenamedOptionModule [ "boot" "extraKernelParams" ] [ "boot" "kernelParams" ])

@ -37,6 +37,12 @@ let
description = "Group running the ACME client.";
};
allowKeysForGroup = mkOption {
type = types.bool;
default = false;
description = "Give read permissions to the specified group to read SSL private certificates.";
};
postRun = mkOption {
type = types.lines;
default = "";
@ -137,6 +143,7 @@ in
systemd.services = flip mapAttrs' cfg.certs (cert: data:
let
cpath = "${cfg.directory}/${cert}";
rights = if data.allowKeysForGroup then "750" else "700";
cmdline = [ "-v" "-d" cert "--default_root" data.webroot "--valid_min" cfg.validMin ]
++ optionals (data.email != null) [ "--email" data.email ]
++ concatMap (p: [ "-f" p ]) data.plugins
@ -159,9 +166,10 @@ in
preStart = ''
mkdir -p '${cfg.directory}'
if [ ! -d '${cpath}' ]; then
mkdir -m 700 '${cpath}'
chown '${data.user}:${data.group}' '${cpath}'
mkdir '${cpath}'
fi
chmod ${rights} '${cpath}'
chown -R '${data.user}:${data.group}' '${cpath}'
'';
script = ''
cd '${cpath}'

@ -0,0 +1,109 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.security.audit;
failureModes = {
silent = 0;
printk = 1;
panic = 2;
};
# TODO: it seems like people like their rules to be somewhat secret, yet they will not be if
# put in the store like this. At the same time, it doesn't feel like a huge deal and working
# around that is a pain so I'm leaving it like this for now.
startScript = pkgs.writeScript "audit-start" ''
#!${pkgs.stdenv.shell} -eu
# Clear out any rules we may start with
auditctl -D
# Put the rules in a temporary file owned and only readable by root
rulesfile="$(mktemp)"
${concatMapStrings (x: "echo '${x}' >> $rulesfile\n") cfg.rules}
# Apply the requested rules
auditctl -R "$rulesfile"
# Enable and configure auditing
auditctl \
-e ${if cfg.enable == "lock" then "2" else "1"} \
-b ${toString cfg.backlogLimit} \
-f ${toString failureModes.${cfg.failureMode}} \
-r ${toString cfg.rateLimit}
'';
stopScript = pkgs.writeScript "audit-stop" ''
#!${pkgs.stdenv.shell} -eu
# Clear the rules
auditctl -D
# Disable auditing
auditctl -e 0
'';
in {
options = {
security.audit = {
enable = mkOption {
type = types.enum [ false true "lock" ];
default = true; # The kernel seems to enable it by default with no rules anyway
description = ''
Whether to enable the Linux audit system. The special `lock' value can be used to
enable auditing and prevent disabling it until a restart. Be careful about locking
this, as it will prevent you from changing your audit configuration until you
restart. If possible, test your configuration using build-vm beforehand.
'';
};
failureMode = mkOption {
type = types.enum [ "silent" "printk" "panic" ];
default = "printk";
description = "How to handle critical errors in the auditing system";
};
backlogLimit = mkOption {
type = types.int;
default = 64; # Apparently the kernel default
description = ''
The maximum number of outstanding audit buffers allowed; exceeding this is
considered a failure and handled in a manner specified by failureMode.
'';
};
rateLimit = mkOption {
type = types.int;
default = 0;
description = ''
The maximum number of messages per second permitted before triggering a failure as
specified by failureMode. Setting it to zero disables the limit.
'';
};
rules = mkOption {
type = types.listOf types.str; # (types.either types.str (types.submodule rule));
default = [];
example = [ "-a exit,always -F arch=b64 -S execve" ];
description = ''
The ordered audit rules, with each string appearing as one line of the audit.rules file.
'';
};
};
};
config = mkIf (cfg.enable == "lock" || cfg.enable) {
systemd.services.audit = {
description = "pseudo-service representing the kernel audit state";
wantedBy = [ "basic.target" ];
path = [ pkgs.audit ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "@${startScript} audit-start";
ExecStop = "@${stopScript} audit-stop";
};
};
};
}
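A hedged example of enabling the new security.audit module from configuration.nix; the rule is the one from the option's own example and the other values are illustrative:

  security.audit = {
    enable = true;            # or "lock" to forbid changes until the next reboot
    failureMode = "printk";
    backlogLimit = 64;
    rules = [
      "-a exit,always -F arch=b64 -S execve"
    ];
  };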

@ -1,115 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.fuppesd;
in
with lib;
{
options = {
services.fuppesd = {
enable = mkOption {
default = false;
type = with types; bool;
description = ''
Enables Fuppes (UPnP A/V Media Server). Can be used to watch
photos, video and listen to music from a phone/tv connected to the
local network.
'';
};
name = mkOption {
example = "Media Center";
type = types.str;
description = ''
Enables Fuppes (UPnP A/V Media Server). Can be used to watch
photos, video and listen to music from a phone/tv connected to the
local network.
'';
};
log = {
level = mkOption {
default = 0;
example = 3;
type = with types; uniq int;
description = ''
Logging level of fuppes, an integer between 0 and 3.
'';
};
file = mkOption {
default = "/var/log/fuppes.log";
type = types.str;
description = ''
File which will contain the log produced by the daemon.
'';
};
};
config = mkOption {
example = "/etc/fuppes/fuppes.cfg";
type = types.str;
description = ''
Mutable configuration file which can be edited with the web
interface. Due to possible modification, double quote the full
path of the filename stored in your filesystem to avoid attempts
to modify the content of the nix store.
'';
};
vfolder = mkOption {
example = literalExample "/etc/fuppes/vfolder.cfg";
description = ''
XML file describing the layout of virtual folder visible by the
client.
'';
};
database = mkOption {
default = "/var/lib/fuppes/fuppes.db";
type = types.str;
description = ''
Database file which indexes all shared files.
'';
};
## At the moment, no plugins are packaged.
/*
plugins = mkOption {
type = with types; listOf package;
description = ''
List of Fuppes plugins.
'';
};
*/
user = mkOption {
default = "root"; # The default is not secure.
example = "fuppes";
type = types.str;
description = ''
Name of the user who owns the configuration files and under which
the fuppes daemon will be executed.
'';
};
};
};
config = mkIf cfg.enable {
jobs.fuppesd = {
description = "UPnP A/V Media Server. (${cfg.name})";
startOn = "ip-up";
daemonType = "fork";
exec = ''/var/setuid-wrappers/sudo -u ${cfg.user} -- ${pkgs.fuppes}/bin/fuppesd --friendly-name ${cfg.name} --log-level ${toString cfg.log.level} --log-file ${cfg.log.file} --config-file ${cfg.config} --vfolder-config-file ${cfg.vfolder} --database-file ${cfg.database}'';
};
services.fuppesd.name = mkDefault config.networking.hostName;
services.fuppesd.vfolder = mkDefault ./fuppes/vfolder.cfg;
security.sudo.enable = true;
};
}

@ -1,155 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<fuppes_vfolder_config version="0.2">
<vfolder_layout device="default" enabled="false">
<vfolder name="Genre">
<vfolders property="genre">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="Genre/Artists">
<vfolders property="genre">
<vfolders property="artist">
<items type="audioItem" />
</vfolders>
</vfolders>
</vfolder>
<vfolder name="Artists/Albums">
<vfolders property="artist">
<vfolders property="album">
<items type="audioItem" />
</vfolders>
</vfolders>
</vfolder>
<vfolder name="ABC/Artists/Albums">
<vfolders split="ABC">
<vfolders property="artist">
<vfolders property="album">
<items type="audioItem" />
</vfolders>
</vfolders>
</vfolders>
</vfolder>
<vfolder name="Photos">
<vfolder name="All">
<items type="imageItem" />
</vfolder>
<vfolder name="Folders">
<folders filter="contains(imageItem)" />
</vfolder>
</vfolder>
<vfolder name="Videos">
<vfolder name="All">
<items type="videoItem" />
</vfolder>
<vfolder name="Folders">
<folders filter="contains(videoItem)" />
</vfolder>
</vfolder>
<vfolder name="shared dirs">
<shared_dirs full_extend="true" />
</vfolder>
</vfolder_layout>
<vfolder_layout device="Xbox 360" enabled="false">
<vfolder name="Music" id="1">
<vfolder name="Album" id="7">
<vfolders property="album">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="All Music" id="4">
<items type="audioItem" />
</vfolder>
<vfolder name="Artist" id="6">
<vfolders property="artist">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="Folders" id="20">
<folders filter="contains(audioItem)" />
</vfolder>
<vfolder name="Genre" id="5">
<vfolders property="genre">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="Playlist" id="15" />
</vfolder>
<vfolder name="Pictures" id="3">
<vfolder name="Album" id="13" />
<vfolder name="All Pictures" id="11">
<items type="imageItem" />
</vfolder>
<vfolder name="Date Taken" id="12" />
<vfolder name="Folders" id="22">
<folders filter="contains(imageItem)" />
</vfolder>
</vfolder>
<vfolder name="Playlists" id="18">
<vfolder name="All Playlists" id="19" />
<vfolder name="Folders" id="23" />
</vfolder>
<vfolder name="Video" id="2">
<vfolder name="Actor" id="10" />
<vfolder name="Album" id="14" />
<vfolder name="All Video" id="8">
<items type="videoItem" />
</vfolder>
<vfolder name="Folders" id="21">
<folders filter="contains(videoItem)" />
</vfolder>
<vfolder name="Genre" id="9" />
</vfolder>
</vfolder_layout>
<vfolder_layout device="Yamaha" enabled="false" create_references="true" >
<vfolder name="Playlists" />
<vfolder name="Artists">
<vfolders property="artist">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="Albums">
<vfolders property="album">
<items type="audioItem" />
</vfolders>
</vfolder>
<vfolder name="Songs">
<items type="audioItem" />
</vfolder>
<vfolder name="Genres">
<vfolders property="genre">
<items type="audioItem" />
</vfolders>
</vfolder>
</vfolder_layout>
</fuppes_vfolder_config>

@ -46,7 +46,7 @@ in
example = {
myStream1 = literalExample "\"/etc/liquidsoap/myStream1.liq\"";
myStream2 = literalExample "./myStream2.liq";
myStream3 = literalExample "\"out(playlist(\"/srv/music/\"))\"";
myStream3 = literalExample "\"out(playlist(\\\"/srv/music/\\\"))\"";
};
type = types.attrsOf (types.either types.path types.str);

@ -18,7 +18,7 @@ let
user "${cfg.user}"
group "${cfg.group}"
${optionalString (cfg.network.host != "any") ''bind_to_address "${cfg.network.host}"''}
${optionalString (cfg.network.listenAddress != "any") ''bind_to_address "${cfg.network.listenAddress}"''}
${optionalString (cfg.network.port != 6600) ''port "${toString cfg.network.port}"''}
${cfg.extraConfig}
@ -75,7 +75,7 @@ in {
network = {
host = mkOption {
listenAddress = mkOption {
default = "any";
description = ''
This setting sets the address for the daemon to listen on. Careful attention

@ -207,7 +207,7 @@ in {
description = ''
Extra configuration to be passed in Client directive.
'';
example = literalExample ''
example = ''
Maximum Concurrent Jobs = 20;
Heartbeat Interval = 30;
'';
@ -218,7 +218,7 @@ in {
description = ''
Extra configuration to be passed in Messages directive.
'';
example = literalExample ''
example = ''
console = all
'';
};

@ -43,6 +43,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.rsnapshot;
defaultText = "pkgs.rsnapshot";
example = literalExample "pkgs.rsnapshotGit";
description = ''
RSnapshot package to use.

@ -5,9 +5,9 @@ with lib;
let
cfg = config.services.tarsnap;
configFile = cfg: ''
cachedir ${config.services.tarsnap.cachedir}
keyfile ${config.services.tarsnap.keyfile}
configFile = name: cfg: ''
cachedir ${config.services.tarsnap.cachedir}/${name}
keyfile ${cfg.keyfile}
${optionalString cfg.nodump "nodump"}
${optionalString cfg.printStats "print-stats"}
${optionalString cfg.printStats "humanize-numbers"}
@ -41,6 +41,20 @@ in
account.
Create the keyfile with <command>tarsnap-keygen</command>.
Note that each individual archive (specified below) may also have its
own individual keyfile specified. Tarsnap does not allow multiple
concurrent backups with the same cache directory and key (starting a
new backup will cause another one to fail). If you have multiple
archives specified, you should either spread out your backups to be
far apart, or specify a separate key for each archive. By default,
every archive uses
<literal>"/root/tarsnap.key"</literal>.
It's recommended for backups that you generate a key for every archive
using <literal>tarsnap-keygen(1)</literal>, and then generate a
write-only tarsnap key using <literal>tarsnap-keymgmt(1)</literal>,
and keep your master key(s) for a particular machine off-site.
The keyfile name should be given as a string and not a path, to
avoid the key being copied into the Nix store.
'';
@ -57,6 +71,12 @@ in
will refuse to run until you manually rebuild the cache with
<command>tarsnap --fsck</command>.
Note that each individual archive (specified below) has its own cache
directory specified under <literal>cachedir</literal>; this is because
tarsnap locks the cache during backups, meaning multiple
archives cannot be backed up concurrently or overlap with a shared
cache.
Set to <literal>null</literal> to disable caching.
'';
};
@ -65,6 +85,28 @@ in
type = types.attrsOf (types.submodule (
{
options = {
keyfile = mkOption {
type = types.str;
default = config.services.tarsnap.keyfile;
description = ''
Set a specific keyfile for this archive. This defaults to
<literal>"/root/tarsnap.key"</literal> if left unspecified.
Use this option if you want to run multiple backups
concurrently - each archive must have a unique key. You can
generate a write-only key derived from your master key (which
is recommended) using <literal>tarsnap-keymgmt(1)</literal>.
Note: every archive must have an individual master key. You
must generate multiple keys with
<literal>tarsnap-keygen(1)</literal>, and then generate write-only
keys from those.
The keyfile name should be given as a string and not a path, to
avoid the key being copied into the Nix store.
'';
};
nodump = mkOption {
type = types.bool;
default = true;
@ -242,15 +284,23 @@ in
systemd.services."tarsnap@" = {
description = "Tarsnap archive '%i'";
requires = [ "network.target" ];
requires = [ "network-online.target" ];
after = [ "network-online.target" ];
path = [ pkgs.iputils pkgs.tarsnap pkgs.coreutils ];
path = [ pkgs.tarsnap pkgs.coreutils ];
# In order for the persistent tarsnap timer to work reliably, we have to
# make sure that the tarsnap server is reachable after systemd starts up
# the service - therefore we sleep in a loop until we can ping the
# endpoint.
preStart = "while ! ping -q -c 1 betatest-server.tarsnap.com &> /dev/null; do sleep 3; done";
scriptArgs = "%i";
script = ''
mkdir -p -m 0755 ${dirOf cfg.cachedir}
mkdir -p -m 0700 ${cfg.cachedir}
chown root:root ${cfg.cachedir}
chmod 0700 ${cfg.cachedir}
mkdir -p -m 0700 ${cfg.cachedir}/$1
DIRS=`cat /etc/tarsnap/$1.dirs`
exec tarsnap --configfile /etc/tarsnap/$1.conf -c -f $1-$(date +"%Y%m%d%H%M%S") $DIRS
'';
@ -259,17 +309,21 @@ in
IOSchedulingClass = "idle";
NoNewPrivileges = "true";
CapabilityBoundingSet = "CAP_DAC_READ_SEARCH";
PermissionsStartOnly = "true";
};
};
# Note: the timer must be Persistent=true, so that systemd will start it even
# if e.g. your laptop was asleep while the latest interval occurred.
systemd.timers = mapAttrs' (name: cfg: nameValuePair "tarsnap@${name}"
{ timerConfig.OnCalendar = cfg.period;
timerConfig.Persistent = "true";
wantedBy = [ "timers.target" ];
}) cfg.archives;
environment.etc =
(mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.conf"
{ text = configFile cfg;
{ text = configFile name cfg;
}) cfg.archives) //
(mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.dirs"
{ text = concatStringsSep " " cfg.directories;
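A sketch of how the per-archive keyfiles and cache directories introduced above let two archives be backed up independently; the paths and directory lists are hypothetical, each key would be produced with tarsnap-keygen(1)/tarsnap-keymgmt(1) as the option text recommends, and the module's usual services.tarsnap.enable switch is assumed:

  services.tarsnap = {
    enable = true;
    archives = {
      documents = {
        directories = [ "/home/alice/documents" ];
        keyfile = "/root/tarsnap-documents.key";   # hypothetical write-only key
        period = "daily";
      };
      photos = {
        directories = [ "/home/alice/photos" ];
        keyfile = "/root/tarsnap-photos.key";
        period = "weekly";
      };
    };
  };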

@ -80,6 +80,7 @@ in {
packages = mkOption {
default = [ pkgs.stdenv pkgs.git pkgs.jdk config.programs.ssh.package pkgs.nix ];
defaultText = "[ pkgs.stdenv pkgs.git pkgs.jdk config.programs.ssh.package pkgs.nix ]";
type = types.listOf types.package;
description = ''
Packages to add to PATH for the jenkins process.

@ -74,7 +74,7 @@ in {
];
};
}
];
]
'';
description = ''
Job descriptions for Jenkins Job Builder in Nix format.

@ -60,11 +60,9 @@ with lib;
services.avahi.enable = true;
jobs.fourStoreEndpoint = {
name = "4store-endpoint";
startOn = "ip-up";
exec = ''
systemd.services."4store-endpoint" = {
wantedBy = [ "ip-up.target" ];
script = ''
${run} '${pkgs.rdf4store}/bin/4s-httpd -D ${cfg.options} ${if cfg.listenAddress!=null then "-H ${cfg.listenAddress}" else "" } -p ${toString cfg.port} ${cfg.database}'
'';
};

@ -52,9 +52,8 @@ with lib;
services.avahi.enable = true;
jobs.fourStore = {
name = "4store";
startOn = "ip-up";
systemd.services."4store" = {
wantedBy = [ "ip-up.target" ];
preStart = ''
mkdir -p ${stateDir}/
@ -64,11 +63,9 @@ with lib;
fi
'';
exec = ''
script = ''
${run} -c '${pkgs.rdf4store}/bin/4s-backend -D ${cfg.options} ${cfg.database}'
'';
};
};
}

@ -38,6 +38,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.couchdb;
defaultText = "pkgs.couchdb";
example = literalExample "pkgs.couchdb";
description = ''
CouchDB package to use.

@ -49,6 +49,7 @@ in
package = mkOption {
default = pkgs.firebirdSuper;
defaultText = "pkgs.firebirdSuper";
type = types.package;
/*
Example: <code>package = pkgs.firebirdSuper.override { icu =

@ -44,6 +44,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.hbase;
defaultText = "pkgs.hbase";
example = literalExample "pkgs.hbase";
description = ''
HBase package to use.

@ -120,6 +120,7 @@ in
package = mkOption {
default = pkgs.influxdb;
defaultText = "pkgs.influxdb";
description = "Which influxdb derivation to use";
type = types.package;
};

@ -41,6 +41,7 @@ in
package = mkOption {
default = pkgs.mongodb;
defaultText = "pkgs.mongodb";
type = types.package;
description = "
Which MongoDB derivation to use.

@ -7,7 +7,7 @@ let
serverConfig = pkgs.writeText "neo4j-server.properties" ''
org.neo4j.server.database.location=${cfg.dataDir}/data/graph.db
org.neo4j.server.webserver.address=${cfg.host}
org.neo4j.server.webserver.address=${cfg.listenAddress}
org.neo4j.server.webserver.port=${toString cfg.port}
${optionalString cfg.enableHttps ''
org.neo4j.server.webserver.https.enabled=true
@ -49,10 +49,11 @@ in {
package = mkOption {
description = "Neo4j package to use.";
default = pkgs.neo4j;
defaultText = "pkgs.neo4j";
type = types.package;
};
host = mkOption {
listenAddress = mkOption {
description = "Neo4j listen address.";
default = "127.0.0.1";
type = types.str;

@ -25,22 +25,7 @@ in
description = "
Whether to enable the ldap server.
";
example = literalExample ''
openldap.enable = true;
openldap.extraConfig = '''
include ''${pkgs.openldap.out}/etc/openldap/schema/core.schema
include ''${pkgs.openldap.out}/etc/openldap/schema/cosine.schema
include ''${pkgs.openldap.out}/etc/openldap/schema/inetorgperson.schema
include ''${pkgs.openldap.out}/etc/openldap/schema/nis.schema
database bdb
suffix dc=example,dc=org
rootdn cn=admin,dc=example,dc=org
# NOTE: change after first start
rootpw secret
directory /var/db/openldap
''';
'';
example = true;
};
user = mkOption {
@ -67,6 +52,19 @@ in
description = "
slapd.conf configuration
";
example = ''
include ''${pkgs.openldap}/etc/openldap/schema/core.schema
include ''${pkgs.openldap}/etc/openldap/schema/cosine.schema
include ''${pkgs.openldap}/etc/openldap/schema/inetorgperson.schema
include ''${pkgs.openldap}/etc/openldap/schema/nis.schema
database bdb
suffix dc=example,dc=org
rootdn cn=admin,dc=example,dc=org
# NOTE: change after first start
rootpw secret
directory /var/db/openldap
'';
};
};

@ -26,6 +26,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.opentsdb;
defaultText = "pkgs.opentsdb";
example = literalExample "pkgs.opentsdb";
description = ''
OpenTSDB package to use.

@ -122,8 +122,8 @@ in
example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }).v_2_1_4 ]";
description = ''
When this list contains elements a new store path is created.
PostgreSQL and the elments are symlinked into it. Then pg_config,
postgres and pc_ctl are copied to make them use the new
PostgreSQL and the elements are symlinked into it. Then pg_config,
postgres and pg_ctl are copied to make them use the new
$out/lib directory as pkglibdir. This makes it possible to use postgis
without patching the .sql files which reference $libdir/postgis-1.5.
'';

@ -46,6 +46,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.redis;
defaultText = "pkgs.redis";
description = "Which Redis derivation to use.";
};

@ -29,20 +29,20 @@ with lib;
};
listenAddress = mkOption {
default = "1111";
example = "myserver:1323";
default = "1111";
example = "myserver:1323";
description = "ip:port or port to listen on.";
};
httpListenAddress = mkOption {
default = null;
example = "myserver:8080";
default = null;
example = "myserver:8080";
description = "ip:port or port for Virtuoso HTTP server to listen on.";
};
dirsAllowed = mkOption {
default = null;
example = "/www, /home/";
default = null;
example = "/www, /home/";
description = "A list of directories Virtuoso is allowed to access";
};
};
@ -61,18 +61,17 @@ with lib;
home = stateDir;
};
jobs.virtuoso = {
name = "virtuoso";
startOn = "ip-up";
systemd.services.virtuoso = {
wantedBy = [ "ip-up.target" ];
preStart = ''
mkdir -p ${stateDir}
chown ${virtuosoUser} ${stateDir}
mkdir -p ${stateDir}
chown ${virtuosoUser} ${stateDir}
'';
script = ''
cd ${stateDir}
${pkgs.virtuoso}/bin/virtuoso-t +foreground +configfile ${pkgs.writeText "virtuoso.ini" cfg.config}
cd ${stateDir}
${pkgs.virtuoso}/bin/virtuoso-t +foreground +configfile ${pkgs.writeText "virtuoso.ini" cfg.config}
'';
};

@ -78,8 +78,8 @@ in
bot_replaypath = replays
'';
jobs.ghostOne = {
name = "ghost-one";
systemd.services."ghost-one" = {
wantedBy = [ "multi-user.target" ];
script = ''
mkdir -p ${stateDir}
cd ${stateDir}

@ -20,7 +20,7 @@ let
}
'';
events = [powerEvent lidEvent acEvent];
events = [powerEvent lidEvent acEvent muteEvent volumeDownEvent volumeUpEvent cdPlayEvent cdNextEvent cdPrevEvent];
# Called when the power button is pressed.
powerEvent =
@ -55,6 +55,61 @@ let
'';
};
muteEvent = {
name = "mute";
event = "button/mute.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.muteCommands}
'';
};
volumeDownEvent = {
name = "volume-down";
event = "button/volumedown.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.volumeDownEventCommands}
'';
};
volumeUpEvent = {
name = "volume-up";
event = "button/volumeup.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.volumeUpEventCommands}
'';
};
cdPlayEvent = {
name = "cd-play";
event = "cd/play.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.cdPlayEventCommands}
'';
};
cdNextEvent = {
name = "cd-next";
event = "cd/next.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.cdNextEventCommands}
'';
};
cdPrevEvent = {
name = "cd-prev";
event = "cd/prev.*";
action = ''
#! ${pkgs.bash}/bin/sh
${config.services.acpid.cdPrevEventCommands}
'';
};
in
{
@ -89,6 +144,42 @@ in
description = "Shell commands to execute on an ac_adapter.* event.";
};
muteCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an button/mute.* event.";
};
volumeDownEventCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an button/volumedown.* event.";
};
volumeUpEventCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an button/volumeup.* event.";
};
cdPlayEventCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an cd/play.* event.";
};
cdNextEventCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an cd/next.* event.";
};
cdPrevEventCommands = mkOption {
type = types.lines;
default = "";
description = "Shell commands to execute on an cd/prev.* event.";
};
};
};
@ -98,22 +189,26 @@ in
config = mkIf config.services.acpid.enable {
jobs.acpid =
{ description = "ACPI Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
systemd.services.acpid = {
description = "ACPI Daemon";
path = [ pkgs.acpid ];
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
daemonType = "fork";
path = [ pkgs.acpid ];
exec = "acpid --confdir ${acpiConfDir}";
serviceConfig = {
Type = "forking";
};
unitConfig.ConditionVirtualization = "!systemd-nspawn";
unitConfig.ConditionPathExists = [ "/proc/acpi" ];
unitConfig = {
ConditionVirtualization = "!systemd-nspawn";
ConditionPathExists = [ "/proc/acpi" ];
};
script = "acpid --confdir ${acpiConfDir}";
};
};
}
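A hedged sketch of attaching commands to the new acpid hooks; the amixer calls are illustrative and assume pkgs.alsaUtils is acceptable on the machine:

  services.acpid = {
    enable = true;
    muteCommands = "${pkgs.alsaUtils}/bin/amixer set Master toggle";
    volumeDownEventCommands = "${pkgs.alsaUtils}/bin/amixer set Master 5%-";
    volumeUpEventCommands = "${pkgs.alsaUtils}/bin/amixer set Master 5%+";
  };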

@ -21,6 +21,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.freefall;
defaultText = "pkgs.freefall";
description = ''
freefall derivation to use.
'';

@ -0,0 +1,30 @@
#
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.irqbalance;
in
{
options.services.irqbalance.enable = mkEnableOption "irqbalance daemon";
config = mkIf cfg.enable {
systemd.services = {
irqbalance = {
description = "irqbalance daemon";
path = [ pkgs.irqbalance ];
serviceConfig =
{ ExecStart = "${pkgs.irqbalance}/bin/irqbalance --foreground"; };
wantedBy = [ "multi-user.target" ];
};
};
environment.systemPackages = [ pkgs.irqbalance ];
};
}

@ -35,18 +35,13 @@ with lib;
services.dbus.packages = [ pkgs.pommed ];
jobs.pommed = { name = "pommed";
systemd.services.pommed = {
description = "Pommed hotkey management";
startOn = "started dbus";
wantedBy = [ "multi-user.target" ];
after = [ "dbus.service" ];
postStop = "rm -f /var/run/pommed.pid";
exec = "${pkgs.pommed}/bin/pommed";
daemonType = "fork";
script = "${pkgs.pommed}/bin/pommed";
serviceConfig.Type = "forking";
path = [ pkgs.eject ];
};
};

@ -4,7 +4,9 @@ with lib;
let
pkg = if config.hardware.sane.snapshot then pkgs.saneBackendsGit else pkgs.saneBackends;
pkg = if config.hardware.sane.snapshot
then pkgs.sane-backends-git
else pkgs.sane-backends;
backends = [ pkg ] ++ config.hardware.sane.extraBackends;
saneConfig = pkgs.mkSaneConfig { paths = backends; };

@ -43,13 +43,7 @@ let
sensor ${cfg.sensor} (0, 10, 15, 2, 10, 5, 0, 3, 0, 3)
(0, 0, 55)
(1, 48, 60)
(2, 50, 61)
(3, 52, 63)
(6, 56, 65)
(7, 60, 85)
(127, 80, 32767)
${cfg.levels}
'';
in {
@ -72,6 +66,22 @@ in {
'';
};
levels = mkOption {
default = ''
(0, 0, 55)
(1, 48, 60)
(2, 50, 61)
(3, 52, 63)
(6, 56, 65)
(7, 60, 85)
(127, 80, 32767)
'';
description = ''
Fan levels used by thinkfan, given as (LEVEL, LOW, HIGH) tuples.
'';
};
};
};
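A sketch of overriding the new levels option with a custom fan curve; the tuples are illustrative (LEVEL, LOW, HIGH) values rather than recommendations, and the usual services.thinkfan.enable switch is assumed:

  services.thinkfan = {
    enable = true;
    levels = ''
      (0, 0, 50)
      (3, 45, 65)
      (7, 60, 85)
      (127, 80, 32767)
    '';
  };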

@ -27,6 +27,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.upower;
defaultText = "pkgs.upower";
example = lib.literalExample "pkgs.upower";
description = ''
Which upower package to use.

@ -24,21 +24,14 @@ with lib;
###### implementation
config = mkIf config.services.klogd.enable {
jobs.klogd =
{ description = "Kernel Log Daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.sysklogd ];
unitConfig.ConditionVirtualization = "!systemd-nspawn";
exec =
"klogd -c 1 -2 -n " +
"-k $(dirname $(readlink -f /run/booted-system/kernel))/System.map";
};
systemd.services.klogd = {
description = "Kernel Log Daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.sysklogd ];
unitConfig.ConditionVirtualization = "!systemd-nspawn";
script =
"klogd -c 1 -2 -n " +
"-k $(dirname $(readlink -f /run/booted-system/kernel))/System.map";
};
};
}

@ -13,6 +13,7 @@ in
options = {
services.logrotate = {
enable = mkOption {
type = lib.types.bool;
default = false;
description = ''
Enable the logrotate cron job

@ -33,6 +33,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.logstash;
defaultText = "pkgs.logstash";
example = literalExample "pkgs.logstash";
description = "Logstash package to use.";
};
@ -84,7 +85,7 @@ in
type = types.lines;
default = ''stdin { type => "example" }'';
description = "Logstash input configuration.";
example = literalExample ''
example = ''
# Read from journal
pipe {
command => "''${pkgs.systemd}/bin/journalctl -f -o json"

@ -39,6 +39,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.syslogng;
defaultText = "pkgs.syslogng";
description = ''
The package providing syslog-ng binaries.
'';

@ -90,6 +90,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.dovecot22;
defaultText = "pkgs.dovecot22";
description = "Dovecot package to use.";
};
@ -131,7 +132,7 @@ in
modules = mkOption {
type = types.listOf types.package;
default = [];
example = [ pkgs.dovecot_pigeonhole ];
example = literalExample "[ pkgs.dovecot_pigeonhole ]";
description = ''
Symlinks the contents of lib/dovecot of every given package into
/var/lib/dovecot/modules. This will make the given modules available

@ -0,0 +1,147 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.dspam;
dspam = pkgs.dspam;
defaultSock = "/run/dspam/dspam.sock";
cfgfile = pkgs.writeText "dspam.conf" ''
Home /var/lib/dspam
StorageDriver ${dspam}/lib/dspam/lib${cfg.storageDriver}_drv.so
Trust root
Trust ${cfg.user}
SystemLog on
UserLog on
${optionalString (cfg.domainSocket != null) ''ServerDomainSocketPath "${cfg.domainSocket}"''}
${cfg.extraConfig}
'';
in {
###### interface
options = {
services.dspam = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the dspam spam filter.";
};
user = mkOption {
type = types.str;
default = "dspam";
description = "User for the dspam daemon.";
};
group = mkOption {
type = types.str;
default = "dspam";
description = "Group for the dspam daemon.";
};
storageDriver = mkOption {
type = types.str;
default = "hash";
description = "Storage driver backend to use for dspam.";
};
domainSocket = mkOption {
type = types.nullOr types.path;
default = defaultSock;
description = "Path to local domain socket which is used for communication with the daemon. Set to null to disable UNIX socket.";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional dspam configuration.";
};
maintenanceInterval = mkOption {
type = types.nullOr types.str;
default = null;
description = "If set, maintenance script will be run at specified (in systemd.timer format) interval";
};
};
};
###### implementation
config = mkIf cfg.enable (mkMerge [
{
users.extraUsers = optionalAttrs (cfg.user == "dspam") (singleton
{ name = "dspam";
group = cfg.group;
uid = config.ids.uids.dspam;
});
users.extraGroups = optionalAttrs (cfg.group == "dspam") (singleton
{ name = "dspam";
gid = config.ids.gids.dspam;
});
environment.systemPackages = [ dspam ];
environment.etc."dspam/dspam.conf".source = cfgfile;
systemd.services.dspam = {
description = "dspam spam filtering daemon";
wantedBy = [ "multi-user.target" ];
restartTriggers = [ cfgfile ];
serviceConfig = {
ExecStart = "${dspam}/bin/dspam --daemon --nofork";
User = cfg.user;
Group = cfg.group;
RuntimeDirectory = optional (cfg.domainSocket == defaultSock) "dspam";
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m750 -p /var/lib/dspam
chown -R "${cfg.user}:${cfg.group}" /var/lib/dspam
mkdir -m750 -p /var/log/dspam
chown -R "${cfg.user}:${cfg.group}" /var/log/dspam
'';
};
}
(mkIf (cfg.maintenanceInterval != null) {
systemd.timers.dspam-maintenance = {
description = "Timer for dspam maintenance script";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.maintenanceInterval;
Unit = "dspam-maintenance.service";
};
};
systemd.services.dspam-maintenance = {
description = "dspam maintenance script";
restartTriggers = [ cfgfile ];
serviceConfig = {
ExecStart = "${dspam}/bin/dspam_maintenance";
Type = "oneshot";
User = cfg.user;
Group = cfg.group;
};
};
})
]);
}
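A minimal, hypothetical example of the dspam module with the optional maintenance timer enabled:

  services.dspam = {
    enable = true;
    storageDriver = "hash";
    maintenanceInterval = "daily";   # systemd calendar expression
  };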

@ -72,15 +72,16 @@ in
};
config = mkIf cfg.enable {
jobs.freepopsd = {
systemd.services.freepopsd = {
description = "Freepopsd (webmail over POP3)";
startOn = "ip-up";
exec = ''${pkgs.freepops}/bin/freepopsd \
-p ${toString cfg.port} \
-t ${toString cfg.threads} \
-b ${cfg.bind} \
-vv -l ${cfg.logFile} \
-s ${cfg.suid.user}.${cfg.suid.group}
wantedBy = [ "ip-up.target" ];
script = ''
${pkgs.freepops}/bin/freepopsd \
-p ${toString cfg.port} \
-t ${toString cfg.threads} \
-b ${cfg.bind} \
-vv -l ${cfg.logFile} \
-s ${cfg.suid.user}.${cfg.suid.group}
'';
};
};

@ -0,0 +1,109 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.opendkim;
defaultSock = "local:/run/opendkim/opendkim.sock";
args = [ "-f" "-l"
"-p" cfg.socket
"-d" cfg.domains
"-k" cfg.keyFile
"-s" cfg.selector
] ++ optionals (cfg.configFile != null) [ "-x" cfg.configFile ];
in {
###### interface
options = {
services.opendkim = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the OpenDKIM sender authentication system.";
};
socket = mkOption {
type = types.str;
default = defaultSock;
description = "Socket which is used for communication with OpenDKIM.";
};
user = mkOption {
type = types.str;
default = "opendkim";
description = "User for the daemon.";
};
group = mkOption {
type = types.str;
default = "opendkim";
description = "Group for the daemon.";
};
domains = mkOption {
type = types.str;
description = "Local domains set; messages from them are signed, not verified.";
};
keyFile = mkOption {
type = types.path;
description = "Secret key file used for signing messages.";
};
selector = mkOption {
type = types.str;
description = "Selector to use when signing.";
};
configFile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Additional opendkim configuration.";
};
};
};
###### implementation
config = mkIf cfg.enable {
services.opendkim.domains = mkDefault "csl:${config.networking.hostName}";
users.extraUsers = optionalAttrs (cfg.user == "opendkim") (singleton
{ name = "opendkim";
group = cfg.group;
uid = config.ids.uids.opendkim;
});
users.extraGroups = optionalAttrs (cfg.group == "opendkim") (singleton
{ name = "opendkim";
gid = config.ids.gids.opendkim;
});
environment.systemPackages = [ pkgs.opendkim ];
systemd.services.opendkim = {
description = "OpenDKIM signing and verification daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.opendkim}/bin/opendkim ${concatMapStringsSep " " escapeShellArg args}";
User = cfg.user;
Group = cfg.group;
RuntimeDirectory = optional (cfg.socket == defaultSock) "opendkim";
};
};
};
}
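A hedged usage sketch for the OpenDKIM module; the domain, selector and key path are placeholders, and the signing key itself would be generated outside of this module:

  services.opendkim = {
    enable = true;
    domains = "csl:example.org";                  # hypothetical signed domain
    selector = "mail";
    keyFile = "/var/lib/opendkim/mail.private";   # hypothetical key location
  };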

@ -9,14 +9,14 @@ let
group = cfg.group;
setgidGroup = cfg.setgidGroup;
haveAliases = cfg.postmasterAlias != "" || cfg.rootAlias != "" || cfg.extraAliases != "";
haveTransport = cfg.transport != "";
haveVirtual = cfg.virtual != "";
mainCf =
''
compatibility_level = 2
queue_directory = /var/postfix/queue
command_directory = ${pkgs.postfix}/sbin
daemon_directory = ${pkgs.postfix}/libexec/postfix
mail_owner = ${user}
default_privs = nobody
@ -57,8 +57,6 @@ let
else
"[" + cfg.relayHost + "]"}
alias_maps = hash:/var/postfix/conf/aliases
mail_spool_directory = /var/spool/mail/
setgid_group = ${setgidGroup}
@ -80,7 +78,13 @@ let
+ optionalString (cfg.recipientDelimiter != "") ''
recipient_delimiter = ${cfg.recipientDelimiter}
''
+ optionalString (cfg.virtual != "") ''
+ optionalString haveAliases ''
alias_maps = hash:/etc/postfix/aliases
''
+ optionalString haveTransport ''
transport_maps = hash:/etc/postfix/transport
''
+ optionalString haveVirtual ''
virtual_alias_maps = hash:/etc/postfix/virtual
''
+ cfg.extraConfig;
@ -108,10 +112,14 @@ let
flush unix n - n 1000? 0 flush
proxymap unix - - n - - proxymap
proxywrite unix - - n - 1 proxymap
''
+ optionalString cfg.enableSmtp ''
smtp unix - - n - - smtp
relay unix - - n - - smtp
-o smtp_fallback_relay=
# -o smtp_helo_timeout=5 -o smtp_connect_timeout=5
''
+ ''
showq unix n - n - - showq
error unix - - n - - error
retry unix - - n - - error
@ -138,6 +146,7 @@ let
virtualFile = pkgs.writeText "postfix-virtual" cfg.virtual;
mainCfFile = pkgs.writeText "postfix-main.cf" mainCf;
masterCfFile = pkgs.writeText "postfix-master.cf" masterCf;
transportFile = pkgs.writeText "postfix-transport" cfg.transport;
in
@ -150,26 +159,36 @@ in
services.postfix = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to run the Postfix mail server.";
};
enableSmtp = mkOption {
default = true;
description = "Whether to enable smtp in master.cf.";
};
setSendmail = mkOption {
type = types.bool;
default = true;
description = "Whether to set the system sendmail to postfix's.";
};
user = mkOption {
type = types.str;
default = "postfix";
description = "What to call the Postfix user (must be used only for postfix).";
};
group = mkOption {
type = types.str;
default = "postfix";
description = "What to call the Postfix group (must be used only for postfix).";
};
setgidGroup = mkOption {
type = types.str;
default = "postdrop";
description = "
How to call postfix setgid group (for postdrop). Should
@ -178,6 +197,7 @@ in
};
networks = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
example = ["192.168.0.1/24"];
description = "
@ -188,6 +208,7 @@ in
};
networksStyle = mkOption {
type = types.str;
default = "";
description = "
Name of standard way of trusted network specification to use,
@ -197,6 +218,7 @@ in
};
hostname = mkOption {
type = types.str;
default = "";
description ="
Hostname to use. Leave blank to use just the hostname of machine.
@ -205,6 +227,7 @@ in
};
domain = mkOption {
type = types.str;
default = "";
description ="
Domain to use. Leave blank to use hostname minus first component.
@ -212,6 +235,7 @@ in
};
origin = mkOption {
type = types.str;
default = "";
description ="
Origin to use in outgoing e-mail. Leave blank to use hostname.
@ -219,6 +243,7 @@ in
};
destination = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
example = ["localhost"];
description = "
@ -228,6 +253,7 @@ in
};
relayDomains = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
example = ["localdomain"];
description = "
@ -236,6 +262,7 @@ in
};
relayHost = mkOption {
type = types.str;
default = "";
description = "
Mail relay for outbound mail.
@ -243,6 +270,7 @@ in
};
lookupMX = mkOption {
type = types.bool;
default = false;
description = "
Whether relay specified is just domain whose MX must be used.
@ -250,11 +278,13 @@ in
};
postmasterAlias = mkOption {
type = types.str;
default = "root";
description = "Who should receive postmaster e-mail.";
};
rootAlias = mkOption {
type = types.str;
default = "";
description = "
Who should receive root e-mail. Blank for no redirection.
@ -262,6 +292,7 @@ in
};
extraAliases = mkOption {
type = types.lines;
default = "";
description = "
Additional entries to put verbatim into aliases file, cf. man-page aliases(8).
@ -269,6 +300,7 @@ in
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "
Extra lines to be added verbatim to the main.cf configuration file.
@ -276,21 +308,25 @@ in
};
sslCert = mkOption {
type = types.str;
default = "";
description = "SSL certificate to use.";
};
sslCACert = mkOption {
type = types.str;
default = "";
description = "SSL certificate of CA.";
};
sslKey = mkOption {
type = types.str;
default = "";
description = "SSL key to use.";
};
recipientDelimiter = mkOption {
type = types.str;
default = "";
example = "+";
description = "
@ -299,18 +335,39 @@ in
};
virtual = mkOption {
type = types.lines;
default = "";
description = "
Entries for the virtual alias map, cf. man-page virtual(8).
";
};
transport = mkOption {
default = "";
description = "
Entries for the transport map, cf. man-page transport(8).
";
};
extraMasterConf = mkOption {
type = types.lines;
default = "";
example = "submission inet n - n - - smtpd";
description = "Extra lines to append to the generated master.cf file.";
};
aliasFiles = mkOption {
type = types.attrsOf types.path;
default = {};
description = "Aliases' tables to be compiled and placed into /var/lib/postfix/conf.";
};
mapFiles = mkOption {
type = types.attrsOf types.path;
default = {};
description = "Maps to be compiled and placed into /var/lib/postfix/conf.";
};
};
};
@ -318,90 +375,104 @@ in
###### implementation
config = mkIf config.services.postfix.enable {
config = mkIf config.services.postfix.enable (mkMerge [
{
environment = {
etc = singleton
{ source = "/var/postfix/conf";
target = "postfix";
};
# This makes comfortable for root to run 'postqueue' for example.
systemPackages = [ pkgs.postfix ];
};
environment = {
etc = singleton
{ source = "/var/lib/postfix/conf";
target = "postfix";
};
services.mail.sendmailSetuidWrapper = mkIf config.services.postfix.setSendmail {
program = "sendmail";
source = "${pkgs.postfix}/bin/sendmail";
owner = "nobody";
group = "postdrop";
setuid = false;
setgid = true;
};
# This makes it convenient for root to run 'postqueue', for example.
systemPackages = [ pkgs.postfix ];
};
users.extraUsers = singleton
{ name = user;
description = "Postfix mail server user";
uid = config.ids.uids.postfix;
group = group;
services.mail.sendmailSetuidWrapper = mkIf config.services.postfix.setSendmail {
program = "sendmail";
source = "${pkgs.postfix}/bin/sendmail";
group = setgidGroup;
setuid = false;
setgid = true;
};
users.extraGroups =
[ { name = group;
users.extraUsers = optional (user == "postfix")
{ name = "postfix";
description = "Postfix mail server user";
uid = config.ids.uids.postfix;
group = group;
};
users.extraGroups =
optional (group == "postfix")
{ name = group;
gid = config.ids.gids.postfix;
}
++ optional (setgidGroup == "postdrop")
{ name = setgidGroup;
gid = config.ids.gids.postdrop;
}
];
systemd.services.postfix =
{ description = "Postfix mail server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
Type = "forking";
Restart = "always";
PIDFile = "/var/postfix/queue/pid/master.pid";
};
preStart = ''
${pkgs.coreutils}/bin/mkdir -p /var/spool/mail /var/postfix/conf /var/postfix/queue
${pkgs.coreutils}/bin/chown -R ${user}:${group} /var/postfix
${pkgs.coreutils}/bin/chown -R ${user}:${setgidGroup} /var/postfix/queue
${pkgs.coreutils}/bin/chmod -R ug+rwX /var/postfix/queue
${pkgs.coreutils}/bin/chown root:root /var/spool/mail
${pkgs.coreutils}/bin/chmod a+rwxt /var/spool/mail
${pkgs.coreutils}/bin/ln -sf /var/spool/mail /var/
ln -sf ${pkgs.postfix}/etc/postfix/postfix-files /var/postfix/conf
ln -sf ${aliasesFile} /var/postfix/conf/aliases
ln -sf ${virtualFile} /var/postfix/conf/virtual
ln -sf ${mainCfFile} /var/postfix/conf/main.cf
ln -sf ${masterCfFile} /var/postfix/conf/master.cf
${pkgs.postfix}/sbin/postalias -c /var/postfix/conf /var/postfix/conf/aliases
${pkgs.postfix}/sbin/postmap -c /var/postfix/conf /var/postfix/conf/virtual
'';
script = ''
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf start
'';
reload = ''
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf reload
'';
preStop = ''
${pkgs.postfix}/sbin/postfix -c /var/postfix/conf stop
'';
};
};
systemd.services.postfix =
{ description = "Postfix mail server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [ pkgs.postfix ];
serviceConfig = {
Type = "forking";
Restart = "always";
PIDFile = "/var/lib/postfix/queue/pid/master.pid";
ExecStart = "${pkgs.postfix}/bin/postfix start";
ExecStop = "${pkgs.postfix}/bin/postfix stop";
ExecReload = "${pkgs.postfix}/bin/postfix reload";
};
preStart = ''
# Backwards compatibility
if [ ! -d /var/lib/postfix ] && [ -d /var/postfix ]; then
mkdir -p /var/lib
mv /var/postfix /var/lib/postfix
fi
mkdir -p /var/lib/postfix/data /var/lib/postfix/queue/{pid,public,maildrop}
chown -R ${user}:${group} /var/lib/postfix
chown root /var/lib/postfix/queue
chown root /var/lib/postfix/queue/pid
chgrp -R ${setgidGroup} /var/lib/postfix/queue/{public,maildrop}
chmod 770 /var/lib/postfix/queue/{public,maildrop}
rm -rf /var/lib/postfix/conf
mkdir -p /var/lib/postfix/conf
ln -sf ${mainCfFile} /var/lib/postfix/conf/main.cf
ln -sf ${masterCfFile} /var/lib/postfix/conf/master.cf
${concatStringsSep "\n" (mapAttrsToList (to: from: ''
ln -sf ${from} /var/lib/postfix/conf/${to}
postalias /var/lib/postfix/conf/${to}
'') cfg.aliasFiles)}
${concatStringsSep "\n" (mapAttrsToList (to: from: ''
ln -sf ${from} /var/lib/postfix/conf/${to}
postmap /var/lib/postfix/conf/${to}
'') cfg.mapFiles)}
mkdir -p /var/spool/mail
chown root:root /var/spool/mail
chmod a+rwxt /var/spool/mail
ln -sf /var/spool/mail /var/
'';
};
}
(mkIf haveAliases {
services.postfix.aliasFiles."aliases" = aliasesFile;
})
(mkIf haveTransport {
services.postfix.mapFiles."transport" = transportFile;
})
(mkIf haveVirtual {
services.postfix.mapFiles."virtual" = virtualFile;
})
]);
}
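A short, hypothetical sketch of the new transport/virtual handling and the mapFiles plumbing as seen from a configuration; the domains, addresses and table contents are made up:

  services.postfix = {
    enable = true;
    hostname = "mail.example.org";
    rootAlias = "admin@example.org";
    # Compiled into /var/lib/postfix/conf/transport via mapFiles:
    transport = ''
      example.com  smtp:[relay.example.com]
    '';
    # Compiled into /var/lib/postfix/conf/virtual via mapFiles:
    virtual = ''
      postmaster@example.org  admin@example.org
    '';
  };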

@ -0,0 +1,107 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.postsrsd;
in {
###### interface
options = {
services.postsrsd = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the postsrsd SRS server for Postfix.";
};
domain = mkOption {
type = types.str;
description = "Domain name for rewrite";
};
secretsFile = mkOption {
type = types.path;
default = "/var/lib/postsrsd/postsrsd.secret";
description = "Secret keys used for signing and verification";
};
forwardPort = mkOption {
type = types.int;
default = 10001;
description = "Port for the forward SRS lookup";
};
reversePort = mkOption {
type = types.int;
default = 10002;
description = "Port for the reverse SRS lookup";
};
user = mkOption {
type = types.str;
default = "postsrsd";
description = "User for the daemon";
};
group = mkOption {
type = types.str;
default = "postsrsd";
description = "Group for the daemon";
};
};
};
###### implementation
config = mkIf cfg.enable {
services.postsrsd.domain = mkDefault config.networking.hostName;
users.extraUsers = optionalAttrs (cfg.user == "postsrsd") (singleton
{ name = "postsrsd";
group = cfg.group;
uid = config.ids.uids.postsrsd;
});
users.extraGroups = optionalAttrs (cfg.group == "postsrsd") (singleton
{ name = "postsrsd";
gid = config.ids.gids.postsrsd;
});
systemd.services.postsrsd = {
description = "PostSRSd SRS rewriting server";
after = [ "network.target" ];
before = [ "postfix.service" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.coreutils ];
serviceConfig = {
ExecStart = ''${pkgs.postsrsd}/sbin/postsrsd "-s${cfg.secretsFile}" "-d${cfg.domain}" -f${toString cfg.forwardPort} -r${toString cfg.reversePort}'';
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
};
preStart = ''
if [ ! -e "${cfg.secretsFile}" ]; then
echo "WARNING: secrets file not found, autogenerating!"
mkdir -p -m750 "$(dirname "${cfg.secretsFile}")"
dd if=/dev/random bs=18 count=1 | base64 > "${cfg.secretsFile}"
chmod 600 "${cfg.secretsFile}"
fi
chown "${cfg.user}:${cfg.group}" "${cfg.secretsFile}"
'';
};
};
}
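A minimal sketch of the postsrsd module; the domain is optional since it defaults to networking.hostName, so the value below is purely illustrative:

  services.postsrsd = {
    enable = true;
    domain = "example.org";   # optional, defaults to the machine's hostname
  };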

@ -0,0 +1,189 @@
{ config, lib, pkgs, ... }:
with lib;
let
rspamdCfg = config.services.rspamd;
cfg = config.services.rmilter;
rmilterConf = ''
pidfile = /run/rmilter/rmilter.pid;
bind_socket = ${cfg.bindSocket};
tempdir = /tmp;
'' + (with cfg.rspamd; if enable then ''
spamd {
servers = ${concatStringsSep ", " servers};
connect_timeout = 1s;
results_timeout = 20s;
error_time = 10;
dead_time = 300;
maxerrors = 10;
reject_message = "${rejectMessage}";
${optionalString (length whitelist != 0) "whitelist = ${concatStringsSep ", " whitelist};"}
# rspamd_metric - metric for using with rspamd
# Default: "default"
rspamd_metric = "default";
${extraConfig}
};
'' else "") + cfg.extraConfig;
rmilterConfigFile = pkgs.writeText "rmilter.conf" rmilterConf;
in
{
###### interface
options = {
services.rmilter = {
enable = mkOption {
default = cfg.rspamd.enable;
description = "Whether to run the rmilter daemon.";
};
debug = mkOption {
default = false;
description = "Whether to run the rmilter daemon in debug mode.";
};
user = mkOption {
type = types.string;
default = "rmilter";
description = ''
User to use when no root privileges are required.
'';
};
group = mkOption {
type = types.string;
default = "rmilter";
description = ''
Group to use when no root privileges are required.
'';
};
bindSocket = mkOption {
type = types.string;
default = "unix:/run/rmilter/rmilter.sock";
description = "Socket to listed for MTA requests";
example = ''
"unix:/run/rmilter/rmilter.sock" or
"inet:11990@127.0.0.1"
'';
};
rspamd = {
enable = mkOption {
default = rspamdCfg.enable;
description = "Whether to use rspamd to filter mails";
};
servers = mkOption {
type = types.listOf types.str;
default = ["r:0.0.0.0:11333"];
description = ''
Spamd socket definitions.
If a server name is prefixed with r: it is an rspamd server.
'';
};
whitelist = mkOption {
type = types.listOf types.str;
default = [ ];
description = "list of ips or nets that should be not checked with spamd";
};
rejectMessage = mkOption {
type = types.str;
default = "Spam message rejected; If this is not spam contact abuse";
description = "reject message for spam";
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Custom snippet to append to end of `spamd' section";
};
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Custom snippet to append to rmilter config";
};
postfix = {
enable = mkOption {
type = types.bool;
default = false;
description = "Add rmilter to postfix main.conf";
};
configFragment = mkOption {
type = types.str;
description = "Addon to postfix configuration";
default = ''
smtpd_milters = ${cfg.bindSocket}
# or for TCP socket
# # smtpd_milters = inet:localhost:9900
milter_protocol = 6
milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_authen}
# skip mail without checks if milter will die
milter_default_action = accept
'';
};
};
};
};
###### implementation
config = mkIf cfg.enable {
users.extraUsers = singleton {
name = cfg.user;
description = "rspamd daemon";
uid = config.ids.uids.rmilter;
group = cfg.group;
};
users.extraGroups = singleton {
name = cfg.group;
gid = config.ids.gids.rmilter;
};
systemd.services.rmilter = {
description = "Rmilter Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.rmilter}/bin/rmilter ${optionalString cfg.debug "-d"} -n -c ${rmilterConfigFile}";
User = cfg.user;
Group = cfg.group;
PermissionsStartOnly = true;
Restart = "always";
};
preStart = ''
${pkgs.coreutils}/bin/mkdir -p /run/rmilter
${pkgs.coreutils}/bin/chown ${cfg.user}:${cfg.group} /run/rmilter
'';
};
services.postfix.extraConfig = optionalString cfg.postfix.enable cfg.postfix.configFragment;
};
}
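A hedged example of pairing rmilter with rspamd and letting the new postfix.configFragment default wire it into main.cf:

  services.rspamd.enable = true;
  services.rmilter = {
    enable = true;           # defaults to services.rspamd.enable in any case
    postfix.enable = true;   # appends the milter settings to Postfix's configuration
  };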

@ -0,0 +1,90 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.rspamd;
in
{
###### interface
options = {
services.rspamd = {
enable = mkOption {
default = false;
description = "Whether to run the rspamd daemon.";
};
debug = mkOption {
default = false;
description = "Whether to run the rspamd daemon in debug mode.";
};
user = mkOption {
type = types.string;
default = "rspamd";
description = ''
User to use when no root privileges are required.
'';
};
group = mkOption {
type = types.string;
default = "rspamd";
description = ''
Group to use when no root privileges are required.
'';
};
};
};
###### implementation
config = mkIf cfg.enable {
# Allow users to run 'rspamc' and 'rspamadm'.
environment.systemPackages = [ pkgs.rspamd ];
users.extraUsers = singleton {
name = cfg.user;
description = "rspamd daemon";
uid = config.ids.uids.rspamd;
group = cfg.group;
};
users.extraGroups = singleton {
name = cfg.group;
gid = config.ids.gids.spamd;
};
systemd.services.rspamd = {
description = "Rspamd Service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -f";
RuntimeDirectory = "/var/lib/rspamd";
PermissionsStartOnly = true;
Restart = "always";
};
preStart = ''
${pkgs.coreutils}/bin/mkdir -p /var/{lib,log}/rspamd
${pkgs.coreutils}/bin/chown ${cfg.user}:${cfg.group} /var/lib/rspamd
'';
};
};
}
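And the rspamd module on its own, in its simplest hypothetical form, e.g. when it is driven by something other than rmilter:

  services.rspamd = {
    enable = true;
    debug = false;   # set to true for verbose troubleshooting
  };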

@ -50,15 +50,13 @@ in
gid = config.ids.gids.spamd;
};
jobs.spamd = {
systemd.services.spamd = {
description = "Spam Assassin Server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
exec = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --nouser-config --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/var/run/spamd.pid";
script = "${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --nouser-config --virtual-config-dir=/var/lib/spamassassin/user-%u --allow-tell --pidfile=/var/run/spamd.pid";
};
};
}

@ -118,9 +118,8 @@ in {
package = mkOption {
description = "The kafka package to use";
default = pkgs.apacheKafka;
defaultText = "pkgs.apacheKafka";
type = types.package;
};

@ -27,8 +27,9 @@ in
};
autoMaster = mkOption {
type = types.str;
example = literalExample ''
autoMaster = let
let
mapConf = pkgs.writeText "auto" '''
kernel -ro,soft,intr ftp.kernel.org:/pub/linux
boot -fstype=ext2 :/dev/hda1

@ -41,6 +41,7 @@ in
package = mkOption {
default = pkgs.cgminer;
defaultText = "pkgs.cgminer";
description = "Which cgminer derivation to use.";
type = types.package;
};

@ -64,6 +64,7 @@ in {
package = mkOption {
description = "Confd package to use.";
default = pkgs.confd;
defaultText = "pkgs.confd";
type = types.package;
};
};

@ -51,13 +51,12 @@ with lib;
gid = config.ids.gids.dictd;
};
jobs.dictd =
{ description = "DICT.org Dictionary Server";
startOn = "startup";
environment = { LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive"; };
daemonType = "fork";
exec = "${pkgs.dict}/sbin/dictd -s -c ${dictdb}/share/dictd/dictd.conf --locale en_US.UTF-8";
};
systemd.services.dictd = {
description = "DICT.org Dictionary Server";
wantedBy = [ "multi-user.target" ];
environment = { LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive"; };
serviceConfig.Type = "forking";
script = "${pkgs.dict}/sbin/dictd -s -c ${dictdb}/share/dictd/dictd.conf --locale en_US.UTF-8";
};
};
}

@ -91,7 +91,7 @@ in
( { hostname = config.networking.hostName;
#targetHost = config.deployment.targetHost;
system = if config.nixpkgs.system == "" then builtins.currentSystem else config.nixpkgs.system;
supportedTypes = (import "${pkgs.stdenv.mkDerivation {
name = "supportedtypes";
buildCommand = ''
@ -110,6 +110,7 @@ in
// optionalAttrs (config.services.mysql.enable) { mysqlPort = config.services.mysql.port; }
// optionalAttrs (config.services.tomcat.enable) { tomcatPort = 8080; }
// optionalAttrs (config.services.svnserve.enable) { svnBaseDir = config.services.svnserve.svnBaseDir; }
// optionalAttrs (config.services.ejabberd.enable) { ejabberdUser = config.services.ejabberd.user; }
// optionalAttrs (cfg.publishInfrastructure.enableAuthentication) (
optionalAttrs (config.services.mysql.enable) { mysqlUsername = "root"; mysqlPassword = readFile config.services.mysql.rootPassword; })
)
@ -117,63 +118,61 @@ in
services.disnix.publishInfrastructure.enable = cfg.publishAvahi;
jobs = {
disnix =
{ description = "Disnix server";
wants = [ "dysnomia.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "dbus.service" ]
++ optional config.services.httpd.enable "httpd.service"
++ optional config.services.mysql.enable "mysql.service"
++ optional config.services.postgresql.enable "postgresql.service"
++ optional config.services.tomcat.enable "tomcat.service"
++ optional config.services.svnserve.enable "svnserve.service"
++ optional config.services.mongodb.enable "mongodb.service";
restartIfChanged = false;
path = [ pkgs.nix pkgs.disnix dysnomia "/run/current-system/sw" ];
environment = {
HOME = "/root";
};
preStart = ''
mkdir -p /etc/systemd-mutable/system
if [ ! -f /etc/systemd-mutable/system/dysnomia.target ]
then
( echo "[Unit]"
echo "Description=Services that are activated and deactivated by Dysnomia"
echo "After=final.target"
) > /etc/systemd-mutable/system/dysnomia.target
fi
'';
exec = "disnix-service";
systemd.services = {
disnix = {
description = "Disnix server";
wants = [ "dysnomia.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "dbus.service" ]
++ optional config.services.httpd.enable "httpd.service"
++ optional config.services.mysql.enable "mysql.service"
++ optional config.services.postgresql.enable "postgresql.service"
++ optional config.services.tomcat.enable "tomcat.service"
++ optional config.services.svnserve.enable "svnserve.service"
++ optional config.services.mongodb.enable "mongodb.service";
restartIfChanged = false;
path = [ pkgs.nix pkgs.disnix dysnomia "/run/current-system/sw" ];
environment = {
HOME = "/root";
};
preStart = ''
mkdir -p /etc/systemd-mutable/system
if [ ! -f /etc/systemd-mutable/system/dysnomia.target ]
then
( echo "[Unit]"
echo "Description=Services that are activated and deactivated by Dysnomia"
echo "After=final.target"
) > /etc/systemd-mutable/system/dysnomia.target
fi
'';
script = "disnix-service";
};
} // optionalAttrs cfg.publishAvahi {
disnixAvahi =
{ description = "Disnix Avahi publisher";
startOn = "started avahi-daemon";
exec =
''
${pkgs.avahi}/bin/avahi-publish-service disnix-${config.networking.hostName} _disnix._tcp 22 \
"mem=$(grep 'MemTotal:' /proc/meminfo | sed -e 's/kB//' -e 's/MemTotal://' -e 's/ //g')" \
${concatMapStrings (infrastructureAttrName:
let infrastructureAttrValue = getAttr infrastructureAttrName (cfg.infrastructure);
in
if isInt infrastructureAttrValue then
''${infrastructureAttrName}=${toString infrastructureAttrValue} \
''
else
''${infrastructureAttrName}=\"${infrastructureAttrValue}\" \
''
) (attrNames (cfg.infrastructure))}
'';
};
disnixAvahi = {
description = "Disnix Avahi publisher";
wants = [ "avahi-daemon.service" ];
wantedBy = [ "multi-user.target" ];
script = ''
${pkgs.avahi}/bin/avahi-publish-service disnix-${config.networking.hostName} _disnix._tcp 22 \
"mem=$(grep 'MemTotal:' /proc/meminfo | sed -e 's/kB//' -e 's/MemTotal://' -e 's/ //g')" \
${concatMapStrings (infrastructureAttrName:
let infrastructureAttrValue = getAttr infrastructureAttrName (cfg.infrastructure);
in
if isInt infrastructureAttrValue then
''${infrastructureAttrName}=${toString infrastructureAttrValue} \
''
else
''${infrastructureAttrName}=\"${infrastructureAttrValue}\" \
''
) (attrNames (cfg.infrastructure))}
'';
};
};
};
}

@ -15,7 +15,7 @@ in {
type = types.bool;
};
host = mkOption {
listenAddress = mkOption {
description = "Docker registry host or ip to bind to.";
default = "127.0.0.1";
type = types.str;
@ -50,7 +50,7 @@ in {
after = [ "network.target" ];
environment = {
REGISTRY_HOST = cfg.host;
REGISTRY_HOST = cfg.listenAddress;
REGISTRY_PORT = toString cfg.port;
GUNICORN_OPTS = "[--preload]"; # see https://github.com/docker/docker-registry#sqlalchemy
STORAGE_PATH = cfg.storagePath;
@ -65,7 +65,7 @@ in {
};
postStart = ''
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.host}:${toString cfg.port}/'; do
until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.listenAddress}:${toString cfg.port}/'; do
sleep 1;
done
'';
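After the rename above, configurations set listenAddress where they previously set host. A hedged usage sketch, assuming the services.dockerRegistry attribute path implied by the option names; the port value is illustrative:

  {
    services.dockerRegistry = {
      enable = true;
      listenAddress = "0.0.0.0";   # was: host
      port = 5000;                 # illustrative value
    };
  }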

@@ -77,11 +77,11 @@ in {
default = {};
example = literalExample ''
{
"CORS": "*",
"NAME": "default-name",
"MAX_RESULT_BUFFER": "1024",
"MAX_CLUSTER_SIZE": "9",
"MAX_RETRY_ATTEMPTS": "3"
"CORS" = "*";
"NAME" = "default-name";
"MAX_RESULT_BUFFER" = "1024";
"MAX_CLUSTER_SIZE" = "9";
"MAX_RETRY_ATTEMPTS" = "3";
}
'';
};
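The corrected example uses Nix attribute-set syntax (name = value pairs terminated by semicolons) instead of JSON-style colons and commas, matching the attribute set the option actually expects. A sketch of setting such a value in a configuration; the services.etcd.extraConf path is an assumption, since the hunk shows only the example text:

  {
    services.etcd.extraConf = {   # option path assumed, not shown in the hunk
      "CORS" = "*";
      "MAX_CLUSTER_SIZE" = "9";
    };
  }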

@@ -23,7 +23,9 @@ in
};
bundles = mkOption {
type = types.listOf types.package;
default = [ pkgs.felix_remoteshell ];
defaultText = "[ pkgs.felix_remoteshell ]";
description = "List of bundles that should be activated on startup";
};
@@ -57,54 +59,51 @@
home = "/homeless-shelter";
};
jobs.felix =
{ description = "Felix server";
preStart =
''
# Initialise felix instance on first startup
if [ ! -d /var/felix ]
then
# Symlink system files
mkdir -p /var/felix
chown ${cfg.user}:${cfg.group} /var/felix
for i in ${pkgs.felix}/*
do
if [ "$i" != "${pkgs.felix}/bundle" ]
then
ln -sfn $i /var/felix/$(basename $i)
fi
done
# Symlink bundles
mkdir -p /var/felix/bundle
chown ${cfg.user}:${cfg.group} /var/felix/bundle
for i in ${pkgs.felix}/bundle/* ${toString cfg.bundles}
do
if [ -f $i ]
then
ln -sfn $i /var/felix/bundle/$(basename $i)
elif [ -d $i ]
then
for j in $i/bundle/*
do
ln -sfn $j /var/felix/bundle/$(basename $j)
done
fi
done
fi
'';
script =
''
cd /var/felix
${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c '${pkgs.jre}/bin/java -jar bin/felix.jar'
'';
};
systemd.services.felix = {
description = "Felix server";
wantedBy = [ "multi-user.target" ];
preStart = ''
# Initialise felix instance on first startup
if [ ! -d /var/felix ]
then
# Symlink system files
mkdir -p /var/felix
chown ${cfg.user}:${cfg.group} /var/felix
for i in ${pkgs.felix}/*
do
if [ "$i" != "${pkgs.felix}/bundle" ]
then
ln -sfn $i /var/felix/$(basename $i)
fi
done
# Symlink bundles
mkdir -p /var/felix/bundle
chown ${cfg.user}:${cfg.group} /var/felix/bundle
for i in ${pkgs.felix}/bundle/* ${toString cfg.bundles}
do
if [ -f $i ]
then
ln -sfn $i /var/felix/bundle/$(basename $i)
elif [ -d $i ]
then
for j in $i/bundle/*
do
ln -sfn $j /var/felix/bundle/$(basename $j)
done
fi
done
fi
'';
script = ''
cd /var/felix
${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c '${pkgs.jre}/bin/java -jar bin/felix.jar'
'';
};
};
}
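A hedged sketch of using the Felix module after this change; the services.felix path and the enable option are inferred from the option names above and are not shown verbatim in these hunks:

  {
    services.felix = {
      enable = true;                          # assumed, not visible here
      bundles = [ pkgs.felix_remoteshell ];   # the new default
    };
  }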

@@ -49,26 +49,20 @@ in {
home = stateDir;
};
jobs.foldingAtHome =
{ name = "foldingathome";
startOn = "started network-interfaces";
stopOn = "stopping network-interfaces";
preStart =
''
mkdir -m 0755 -p ${stateDir}
chown ${fahUser} ${stateDir}
cp -f ${pkgs.writeText "client.cfg" cfg.config} ${stateDir}/client.cfg
'';
exec = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${fahUser} -c 'cd ${stateDir}; ${pkgs.foldingathome}/bin/fah6'";
};
services.foldingAtHome.config = ''
[settings]
username=${cfg.nickname}
systemd.services.foldingathome = {
after = [ "network-interfaces.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
mkdir -m 0755 -p ${stateDir}
chown ${fahUser} ${stateDir}
cp -f ${pkgs.writeText "client.cfg" cfg.config} ${stateDir}/client.cfg
'';
script = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${fahUser} -c 'cd ${stateDir}; ${pkgs.foldingathome}/bin/fah6'";
};
services.foldingAtHome.config = ''
[settings]
username=${cfg.nickname}
'';
};
}
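With the unit above, the Folding@home client is managed by systemd like any other service. A hedged sketch of enabling it; the enable option is not visible in this hunk and is assumed, while nickname corresponds to cfg.nickname above:

  {
    services.foldingAtHome = {
      enable = true;            # assumed
      nickname = "Anonymous";
    };
  }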

@@ -35,6 +35,7 @@ let
};
haskellPackages = mkOption {
type = types.attrsOf types.package;
default = pkgs.haskellPackages;
defaultText = "pkgs.haskellPackages";
example = literalExample "pkgs.haskell.packages.ghc784";

@@ -22,6 +22,7 @@ in
};
haskellPackages = mkOption {
type = types.attrsOf types.package;
default = pkgs.haskellPackages;
defaultText = "pkgs.haskellPackages";
example = literalExample "pkgs.haskell.packages.ghc784";
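Both hunks pair default with defaultText: default is the value actually used when the option is unset, while defaultText is the literal string printed in the generated manual, so building the documentation does not have to evaluate and embed the whole package set. A minimal sketch of the pattern with a placeholder option name:

  somePackage = mkOption {          # placeholder name, not from this commit
    type = types.package;
    default = pkgs.hello;
    defaultText = "pkgs.hello";     # shown verbatim in the manual
    description = "Package to use.";
  };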

@@ -0,0 +1,54 @@
{ pkgs, lib, config, ... }:
with lib;
let
cfg = config.services.mathics;
in {
options = {
services.mathics = {
enable = mkEnableOption "Mathics notebook service";
external = mkOption {
type = types.bool;
default = false;
description = "Listen on all interfaces, rather than just localhost?";
};
port = mkOption {
type = types.int;
default = 8000;
description = "TCP port to listen on.";
};
};
};
config = mkIf cfg.enable {
users.extraUsers.mathics = {
group = config.users.extraGroups.mathics.name;
description = "Mathics user";
home = "/var/lib/mathics";
createHome = true;
uid = config.ids.uids.mathics;
};
users.extraGroups.mathics.gid = config.ids.gids.mathics;
systemd.services.mathics = {
description = "Mathics notebook server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
User = config.users.extraUsers.mathics.name;
Group = config.users.extraGroups.mathics.name;
ExecStart = concatStringsSep " " [
"${pkgs.mathics}/bin/mathicsserver"
"--port" (toString cfg.port)
(if cfg.external then "--external" else "")
];
};
};
};
}
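The new module is enabled like any other NixOS service; all three options used below are defined in the file above:

  {
    services.mathics = {
      enable = true;
      external = false;   # keep listening on localhost only
      port = 8000;
    };
  }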

@@ -0,0 +1,25 @@
version: 1
# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
journal_fmt:
format: '%(name)s: [%(request)s] %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
journal:
class: systemd.journal.JournalHandler
formatter: journal_fmt
filters: [context]
SYSLOG_IDENTIFIER: synapse
root:
level: INFO
handlers: [journal]
disable_existing_loggers: False
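This file is a Python logging dictConfig: systemd.journal.JournalHandler ships Synapse's log records straight to the journal, which is why the log level is omitted from the message format. A hedged sketch of how a NixOS module could reference such a file; the option name and path are illustrative, and only the YAML itself is part of this diff:

  {
    services.matrix-synapse.logConfig =
      builtins.readFile ./log_config.yaml;   # illustrative option and path
  }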
