commit
def41fa5ac
@ -0,0 +1,23 @@ |
|||||||
|
--[[ |
||||||
|
Converts Code AST nodes produced by pandoc’s DocBook reader |
||||||
|
from citerefentry elements into AST for corresponding role |
||||||
|
for reStructuredText. |
||||||
|
|
||||||
|
We use subset of MyST syntax (CommonMark with features from rST) |
||||||
|
so let’s use the rST AST for rST features. |
||||||
|
|
||||||
|
Reference: https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage |
||||||
|
]] |
||||||
|
|
||||||
|
function Code(elem) |
||||||
|
elem.classes = elem.classes:map(function (x) |
||||||
|
if x == 'citerefentry' then |
||||||
|
elem.attributes['role'] = 'manpage' |
||||||
|
return 'interpreted-text' |
||||||
|
else |
||||||
|
return x |
||||||
|
end |
||||||
|
end) |
||||||
|
|
||||||
|
return elem |
||||||
|
end |
@ -1,3 +1,13 @@ |
|||||||
|
--[[ |
||||||
|
Converts Link AST nodes with empty label to DocBook xref elements. |
||||||
|
|
||||||
|
This is a temporary script to be able use cross-references conveniently |
||||||
|
using syntax taken from MyST, while we still use docbook-xsl |
||||||
|
for generating the documentation. |
||||||
|
|
||||||
|
Reference: https://myst-parser.readthedocs.io/en/latest/using/syntax.html#targets-and-cross-referencing |
||||||
|
]] |
||||||
|
|
||||||
local function starts_with(start, str) |
local function starts_with(start, str) |
||||||
return str:sub(1, #start) == start |
return str:sub(1, #start) == start |
||||||
end |
end |
@ -0,0 +1,36 @@ |
|||||||
|
--[[ |
||||||
|
Converts AST for reStructuredText roles into corresponding |
||||||
|
DocBook elements. |
||||||
|
|
||||||
|
Currently, only a subset of roles is supported. |
||||||
|
|
||||||
|
Reference: |
||||||
|
List of roles: |
||||||
|
https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html |
||||||
|
manpage: |
||||||
|
https://tdg.docbook.org/tdg/5.1/citerefentry.html |
||||||
|
file: |
||||||
|
https://tdg.docbook.org/tdg/5.1/filename.html |
||||||
|
]] |
||||||
|
|
||||||
|
function Code(elem) |
||||||
|
if elem.classes:includes('interpreted-text') then |
||||||
|
local tag = nil |
||||||
|
local content = elem.text |
||||||
|
if elem.attributes['role'] == 'manpage' then |
||||||
|
tag = 'citerefentry' |
||||||
|
local title, volnum = content:match('^(.+)%((%w+)%)$') |
||||||
|
if title == nil then |
||||||
|
-- No volnum in parentheses. |
||||||
|
title = content |
||||||
|
end |
||||||
|
content = '<refentrytitle>' .. title .. '</refentrytitle>' .. (volnum ~= nil and ('<manvolnum>' .. volnum .. '</manvolnum>') or '') |
||||||
|
elseif elem.attributes['role'] == 'file' then |
||||||
|
tag = 'filename' |
||||||
|
end |
||||||
|
|
||||||
|
if tag ~= nil then |
||||||
|
return pandoc.RawInline('docbook', '<' .. tag .. '>' .. content .. '</' .. tag .. '>') |
||||||
|
end |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,18 @@ |
|||||||
|
--[[ |
||||||
|
Turns a manpage reference into a link, when a mapping is defined |
||||||
|
in the unix-man-urls.lua file. |
||||||
|
]] |
||||||
|
|
||||||
|
local man_urls = { |
||||||
|
["tmpfiles.d(5)"] = "https://www.freedesktop.org/software/systemd/man/tmpfiles.d.html", |
||||||
|
["nix.conf(5)"] = "https://nixos.org/manual/nix/stable/#sec-conf-file", |
||||||
|
["systemd.time(7)"] = "https://www.freedesktop.org/software/systemd/man/systemd.time.html", |
||||||
|
["systemd.timer(5)"] = "https://www.freedesktop.org/software/systemd/man/systemd.timer.html", |
||||||
|
} |
||||||
|
|
||||||
|
function Code(elem) |
||||||
|
local is_man_role = elem.classes:includes('interpreted-text') and elem.attributes['role'] == 'manpage' |
||||||
|
if is_man_role and man_urls[elem.text] ~= nil then |
||||||
|
return pandoc.Link(elem, man_urls[elem.text]) |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,29 @@ |
|||||||
|
--[[ |
||||||
|
Replaces Str AST nodes containing {role}, followed by a Code node |
||||||
|
by a Code node with attrs that would be produced by rST reader |
||||||
|
from the role syntax. |
||||||
|
|
||||||
|
This is to emulate MyST syntax in Pandoc. |
||||||
|
(MyST is a CommonMark flavour with rST features mixed in.) |
||||||
|
|
||||||
|
Reference: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point |
||||||
|
]] |
||||||
|
|
||||||
|
function Inlines(inlines) |
||||||
|
for i = #inlines-1,1,-1 do |
||||||
|
local first = inlines[i] |
||||||
|
local second = inlines[i+1] |
||||||
|
local correct_tags = first.tag == 'Str' and second.tag == 'Code' |
||||||
|
if correct_tags then |
||||||
|
-- docutils supports alphanumeric strings separated by [-._:] |
||||||
|
-- We are slightly more liberal for simplicity. |
||||||
|
local role = first.text:match('^{([-._+:%w]+)}$') |
||||||
|
if role ~= nil then |
||||||
|
inlines:remove(i) |
||||||
|
second.attributes['role'] = role |
||||||
|
second.classes:insert('interpreted-text') |
||||||
|
end |
||||||
|
end |
||||||
|
end |
||||||
|
return inlines |
||||||
|
end |
@ -0,0 +1,25 @@ |
|||||||
|
--[[ |
||||||
|
Replaces Code nodes with attrs that would be produced by rST reader |
||||||
|
from the role syntax by a Str AST node containing {role}, followed by a Code node. |
||||||
|
|
||||||
|
This is to emulate MyST syntax in Pandoc. |
||||||
|
(MyST is a CommonMark flavour with rST features mixed in.) |
||||||
|
|
||||||
|
Reference: https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point |
||||||
|
]] |
||||||
|
|
||||||
|
function Code(elem) |
||||||
|
local role = elem.attributes['role'] |
||||||
|
|
||||||
|
if elem.classes:includes('interpreted-text') and role ~= nil then |
||||||
|
elem.classes = elem.classes:filter(function (c) |
||||||
|
return c ~= 'interpreted-text' |
||||||
|
end) |
||||||
|
elem.attributes['role'] = nil |
||||||
|
|
||||||
|
return { |
||||||
|
pandoc.Str('{' .. role .. '}'), |
||||||
|
elem, |
||||||
|
} |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,18 @@ |
|||||||
|
# /etc files {#etc} |
||||||
|
|
||||||
|
Certain calls in glibc require access to runtime files found in /etc such as `/etc/protocols` or `/etc/services` -- [getprotobyname](https://linux.die.net/man/3/getprotobyname) is one such function. |
||||||
|
|
||||||
|
On non-NixOS distributions these files are typically provided by packages (e.g. [netbase](https://packages.debian.org/sid/netbase)) if not already pre-installed in your distribution. This can cause non-reproducibility for code if they rely on these files being present. |
||||||
|
|
||||||
|
If [iana-etc](https://hydra.nixos.org/job/nixos/trunk-combined/nixpkgs.iana-etc.x86_64-linux) is part of your _buildInputs_ then it will set the environment variables `NIX_ETC_PROTOCOLS` and `NIX_ETC_SERVICES` to the corresponding files in the package through a _setup-hook_. |
||||||
|
|
||||||
|
|
||||||
|
```bash |
||||||
|
> nix-shell -p iana-etc |
||||||
|
|
||||||
|
[nix-shell:~]$ env | grep NIX_ETC |
||||||
|
NIX_ETC_SERVICES=/nix/store/aj866hr8fad8flnggwdhrldm0g799ccz-iana-etc-20210225/etc/services |
||||||
|
NIX_ETC_PROTOCOLS=/nix/store/aj866hr8fad8flnggwdhrldm0g799ccz-iana-etc-20210225/etc/protocols |
||||||
|
``` |
||||||
|
|
||||||
|
Nixpkgs's version of [glibc](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/glibc/default.nix) has been patched to check for the existence of these environment variables. If the environment variables are *not set*, then it will attempt to find the files at the default location within _/etc_. |
@ -0,0 +1,31 @@ |
|||||||
|
|
||||||
|
## `invalidateFetcherByDrvHash` {#sec-pkgs-invalidateFetcherByDrvHash} |
||||||
|
|
||||||
|
Use the derivation hash to invalidate the output via name, for testing. |
||||||
|
|
||||||
|
Type: `(a@{ name, ... } -> Derivation) -> a -> Derivation` |
||||||
|
|
||||||
|
Normally, fixed output derivations can and should be cached by their output |
||||||
|
hash only, but for testing we want to re-fetch every time the fetcher changes. |
||||||
|
|
||||||
|
Changes to the fetcher become apparent in the drvPath, which is a hash of |
||||||
|
how to fetch, rather than a fixed store path. |
||||||
|
By inserting this hash into the name, we can make sure to re-run the fetcher |
||||||
|
every time the fetcher changes. |
||||||
|
|
||||||
|
This relies on the assumption that Nix isn't clever enough to reuse its |
||||||
|
database of local store contents to optimize fetching. |
||||||
|
|
||||||
|
You might notice that the "salted" name derives from the normal invocation, |
||||||
|
not the final derivation. `invalidateFetcherByDrvHash` has to invoke the fetcher |
||||||
|
function twice: once to get a derivation hash, and again to produce the final |
||||||
|
fixed output derivation. |
||||||
|
|
||||||
|
Example: |
||||||
|
|
||||||
|
tests.fetchgit = invalidateFetcherByDrvHash fetchgit { |
||||||
|
name = "nix-source"; |
||||||
|
url = "https://github.com/NixOS/nix"; |
||||||
|
rev = "9d9dbe6ed05854e03811c361a3380e09183f4f4a"; |
||||||
|
sha256 = "sha256-7DszvbCNTjpzGRmpIVAWXk20P0/XTrWZ79KSOGLrUWY="; |
||||||
|
}; |
@ -0,0 +1,91 @@ |
|||||||
|
# Nim {#nim} |
||||||
|
|
||||||
|
## Overview {#nim-overview} |
||||||
|
|
||||||
|
The Nim compiler, a builder function, and some packaged libraries are available |
||||||
|
in Nixpkgs. Until now each compiler release has been effectively backwards |
||||||
|
compatible so only the latest version is available. |
||||||
|
|
||||||
|
## Nim program packages in Nixpkgs {#nim-program-packages-in-nixpkgs} |
||||||
|
|
||||||
|
Nim programs can be built using `nimPackages.buildNimPackage`. In the |
||||||
|
case of packages not containing exported library code the attribute |
||||||
|
`nimBinOnly` should be set to `true`. |
||||||
|
|
||||||
|
The following example shows a Nim program that depends only on Nim libraries: |
||||||
|
|
||||||
|
```nix |
||||||
|
{ lib, nimPackages, fetchurl }: |
||||||
|
|
||||||
|
nimPackages.buildNimPackage rec { |
||||||
|
pname = "hottext"; |
||||||
|
version = "1.4"; |
||||||
|
|
||||||
|
nimBinOnly = true; |
||||||
|
|
||||||
|
src = fetchurl { |
||||||
|
url = "https://git.sr.ht/~ehmry/hottext/archive/v${version}.tar.gz"; |
||||||
|
sha256 = "sha256-hIUofi81zowSMbt1lUsxCnVzfJGN3FEiTtN8CEFpwzY="; |
||||||
|
}; |
||||||
|
|
||||||
|
buildInputs = with nimPackages; [ |
||||||
|
bumpy |
||||||
|
chroma |
||||||
|
flatty |
||||||
|
nimsimd |
||||||
|
pixie |
||||||
|
sdl2 |
||||||
|
typography |
||||||
|
vmath |
||||||
|
zippy |
||||||
|
]; |
||||||
|
} |
||||||
|
|
||||||
|
``` |
||||||
|
|
||||||
|
## Nim library packages in Nixpkgs {#nim-library-packages-in-nixpkgs} |
||||||
|
|
||||||
|
|
||||||
|
Nim libraries can also be built using `nimPackages.buildNimPackage`, but |
||||||
|
often the product of a fetcher is sufficient to satisfy a dependency. |
||||||
|
The `fetchgit`, `fetchFromGitHub`, and `fetchNimble` functions yield an |
||||||
|
output that can be discovered during the `configurePhase` of `buildNimPackage`. |
||||||
|
|
||||||
|
Nim library packages are listed in |
||||||
|
[pkgs/top-level/nim-packages.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/nim-packages.nix) and implemented at |
||||||
|
[pkgs/development/nim-packages](https://github.com/NixOS/nixpkgs/tree/master/pkgs/development/nim-packages). |
||||||
|
|
||||||
|
The following example shows a Nim library that propagates a dependency on a |
||||||
|
non-Nim package: |
||||||
|
```nix |
||||||
|
{ lib, buildNimPackage, fetchNimble, SDL2 }: |
||||||
|
|
||||||
|
buildNimPackage rec { |
||||||
|
pname = "sdl2"; |
||||||
|
version = "2.0.4"; |
||||||
|
src = fetchNimble { |
||||||
|
inherit pname version; |
||||||
|
hash = "sha256-Vtcj8goI4zZPQs2TbFoBFlcR5UqDtOldaXSH/+/xULk="; |
||||||
|
}; |
||||||
|
propagatedBuildInputs = [ SDL2 ]; |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
## `buildNimPackage` parameters {#buildnimpackage-parameters} |
||||||
|
|
||||||
|
All parameters from `stdenv.mkDerivation` function are still supported. The |
||||||
|
following are specific to `buildNimPackage`: |
||||||
|
|
||||||
|
* `nimBinOnly ? false`: If `true` then build only the programs listed in |
||||||
|
the Nimble file in the packages sources. |
||||||
|
* `nimbleFile`: Specify the Nimble file location of the package being built |
||||||
|
rather than discover the file at build-time. |
||||||
|
* `nimRelease ? true`: Build the package in *release* mode. |
||||||
|
* `nimDefines ? []`: A list of Nim defines. Key-value tuples are not supported. |
||||||
|
* `nimFlags ? []`: A list of command line arguments to pass to the Nim compiler. |
||||||
|
Use this to specify defines with arguments in the form of `-d:${name}=${value}`. |
||||||
|
* `nimDoc ? false`: Build and install HTML documentation. |
||||||
|
|
||||||
|
* `buildInputs ? []`: The packages listed here will be searched for `*.nimble` |
||||||
|
files which are used to populate the Nim library path. Otherwise the standard |
||||||
|
behavior is in effect. |
@ -0,0 +1,100 @@ |
|||||||
|
# Octave {#sec-octave} |
||||||
|
|
||||||
|
## Introduction {#ssec-octave-introduction} |
||||||
|
|
||||||
|
Octave is a modular scientific programming language and environment. |
||||||
|
A majority of the packages supported by Octave from their [website](https://octave.sourceforge.io/packages.php) are packaged in nixpkgs. |
||||||
|
|
||||||
|
## Structure {#ssec-octave-structure} |
||||||
|
|
||||||
|
All Octave add-on packages are available in two ways: |
||||||
|
1. Under the top-level `Octave` attribute, `octave.pkgs`. |
||||||
|
2. As a top-level attribute, `octavePackages`. |
||||||
|
|
||||||
|
## Packaging Octave Packages {#ssec-octave-packaging} |
||||||
|
|
||||||
|
Nixpkgs provides a function `buildOctavePackage`, a generic package builder function for any Octave package that complies with the Octave's current packaging format. |
||||||
|
|
||||||
|
All Octave packages are defined in [pkgs/top-level/octave-packages.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/octave-packages.nix) rather than `pkgs/all-packages.nix`. |
||||||
|
Each package is defined in their own file in the [pkgs/development/octave-modules](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/octave-modules) directory. |
||||||
|
Octave packages are made available through `all-packages.nix` through both the attribute `octavePackages` and `octave.pkgs`. |
||||||
|
You can test building an Octave package as follows: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-build -A octavePackages.symbolic |
||||||
|
``` |
||||||
|
|
||||||
|
When building Octave packages with `nix-build`, the `buildOctavePackage` function adds `octave-octaveVersion` to the start of the package's name attribute. |
||||||
|
|
||||||
|
This can be required when installing the package using `nix-env`: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-env -i octave-6.2.0-symbolic |
||||||
|
``` |
||||||
|
|
||||||
|
Although, you can also install it using the attribute name: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-env -i -A octavePackages.symbolic |
||||||
|
``` |
||||||
|
|
||||||
|
You can build Octave with packages by using the `withPackages` passed-through function. |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-shell -p 'octave.withPackages (ps: with ps; [ symbolic ])' |
||||||
|
``` |
||||||
|
|
||||||
|
This will also work in a `shell.nix` file. |
||||||
|
|
||||||
|
```nix |
||||||
|
{ pkgs ? import <nixpkgs> { }}: |
||||||
|
|
||||||
|
pkgs.mkShell { |
||||||
|
nativeBuildInputs = with pkgs; [ |
||||||
|
(octave.withPackages (opkgs: with opkgs; [ symbolic ])) |
||||||
|
]; |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
### `buildOctavePackage` Steps {#sssec-buildOctavePackage-steps} |
||||||
|
|
||||||
|
The `buildOctavePackage` does several things to make sure things work properly. |
||||||
|
|
||||||
|
1. Sets the environment variable `OCTAVE_HISTFILE` to `/dev/null` during package compilation so that the commands run through the Octave interpreter directly are not logged. |
||||||
|
2. Skips the configuration step, because the packages are stored as gzipped tarballs, which Octave itself handles directly. |
||||||
|
3. Change the hierarchy of the tarball so that only a single directory is at the top-most level of the tarball. |
||||||
|
4. Use Octave itself to run the `pkg build` command, which unzips the tarball, extracts the necessary files written in Octave, and compiles any code written in C++ or Fortran, and places the fully compiled artifact in `$out`. |
||||||
|
|
||||||
|
`buildOctavePackage` is built on top of `stdenv` in a standard way, allowing most things to be customized. |
||||||
|
|
||||||
|
### Handling Dependencies {#sssec-octave-handling-dependencies} |
||||||
|
|
||||||
|
In Octave packages, there are four sets of dependencies that can be specified: |
||||||
|
|
||||||
|
`nativeBuildInputs` |
||||||
|
: Just like other packages, `nativeBuildInputs` is intended for architecture-dependent build-time-only dependencies. |
||||||
|
|
||||||
|
`buildInputs` |
||||||
|
: Like other packages, `buildInputs` is intended for architecture-independent build-time-only dependencies. |
||||||
|
|
||||||
|
`propagatedBuildInputs` |
||||||
|
: Similar to other packages, `propagatedBuildInputs` is intended for packages that are required for both building and running of the package. |
||||||
|
See [Symbolic](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/octave-modules/symbolic/default.nix) for how this works and why it is needed. |
||||||
|
|
||||||
|
`requiredOctavePackages` |
||||||
|
: This is a special dependency that ensures the specified Octave packages are dependent on others, and are made available simultaneously when loading them in Octave. |
||||||
|
|
||||||
|
### Installing Octave Packages {#sssec-installing-octave-packages} |
||||||
|
|
||||||
|
By default, the `buildOctavePackage` function does _not_ install the requested package into Octave for use. |
||||||
|
The function will only build the requested package. |
||||||
|
This is due to Octave maintaining a text-based database about which packages are installed where. |
||||||
|
To this end, when all the requested packages have been built, the Octave package and all its add-on packages are put together into an environment, similar to Python. |
||||||
|
|
||||||
|
1. First, all the Octave binaries are wrapped with the environment variable `OCTAVE_SITE_INITFILE` set to a file in `$out`, which is required for Octave to be able to find the non-standard package database location. |
||||||
|
2. Because of the way `buildEnv` works, all tarballs that are present (which should be all Octave packages to install) should be removed. |
||||||
|
3. The path down to the default install location of Octave packages is recreated so that Nix-operated Octave can install the packages. |
||||||
|
4. Install the packages into the `$out` environment while writing package entries to the database file. |
||||||
|
This database file is unique for each different (according to Nix) environment invocation. |
||||||
|
5. Rewrite the Octave-wide startup file to read from the list of packages installed in that particular environment. |
||||||
|
6. Wrap any programs that are required by the Octave packages so that they work with all the paths defined within the environment. |
@ -0,0 +1,25 @@ |
|||||||
|
# Supported systems according to RFC0046's definition. |
||||||
|
# |
||||||
|
# https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md |
||||||
|
{ lib }: |
||||||
|
rec { |
||||||
|
# List of systems that are built by Hydra. |
||||||
|
hydra = tier1 ++ tier2 ++ tier3; |
||||||
|
|
||||||
|
tier1 = [ |
||||||
|
"x86_64-linux" |
||||||
|
]; |
||||||
|
|
||||||
|
tier2 = [ |
||||||
|
"aarch64-linux" |
||||||
|
"x86_64-darwin" |
||||||
|
]; |
||||||
|
|
||||||
|
tier3 = [ |
||||||
|
"aarch64-darwin" |
||||||
|
"armv6l-linux" |
||||||
|
"armv7l-linux" |
||||||
|
"i686-linux" |
||||||
|
"mipsel-linux" |
||||||
|
]; |
||||||
|
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,88 @@ |
|||||||
|
#! /usr/bin/env nix-shell |
||||||
|
#! nix-shell -I nixpkgs=. -i bash -p pandoc |
||||||
|
|
||||||
|
# This script is temporarily needed while we transition the manual to |
||||||
|
# CommonMark. It converts DocBook files into our CommonMark flavour. |
||||||
|
|
||||||
|
debug= |
||||||
|
files=() |
||||||
|
|
||||||
|
while [ "$#" -gt 0 ]; do |
||||||
|
i="$1"; shift 1 |
||||||
|
case "$i" in |
||||||
|
--debug) |
||||||
|
debug=1 |
||||||
|
;; |
||||||
|
*) |
||||||
|
files+=("$i") |
||||||
|
;; |
||||||
|
esac |
||||||
|
done |
||||||
|
|
||||||
|
echo "WARNING: This is an experimental script and might not preserve all formatting." > /dev/stderr |
||||||
|
echo "Please report any issues you discover." > /dev/stderr |
||||||
|
|
||||||
|
outExtension="md" |
||||||
|
if [[ $debug ]]; then |
||||||
|
outExtension="json" |
||||||
|
fi |
||||||
|
|
||||||
|
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" |
||||||
|
|
||||||
|
# NOTE: Keep in sync with Nixpkgs manual (/doc/Makefile). |
||||||
|
# TODO: Remove raw-attribute when we can get rid of DocBook altogether. |
||||||
|
pandoc_commonmark_enabled_extensions=+attributes+fenced_divs+footnotes+bracketed_spans+definition_lists+pipe_tables+raw_attribute |
||||||
|
targetLang="commonmark${pandoc_commonmark_enabled_extensions}+smart" |
||||||
|
if [[ $debug ]]; then |
||||||
|
targetLang=json |
||||||
|
fi |
||||||
|
pandoc_flags=( |
||||||
|
# Not needed: |
||||||
|
# - diagram-generator.lua (we do not support that in NixOS manual to limit dependencies) |
||||||
|
# - media extraction (was only required for diagram generator) |
||||||
|
# - myst-reader/roles.lua (only relevant for MyST → DocBook) |
||||||
|
# - link-unix-man-references.lua (links should only be added to display output) |
||||||
|
# - docbook-writer/rst-roles.lua (only relevant for → DocBook) |
||||||
|
# - docbook-writer/labelless-link-is-xref.lua (only relevant for → DocBook) |
||||||
|
"--lua-filter=$DIR/../../doc/build-aux/pandoc-filters/docbook-reader/citerefentry-to-rst-role.lua" |
||||||
|
"--lua-filter=$DIR/../../doc/build-aux/pandoc-filters/myst-writer/roles.lua" |
||||||
|
"--lua-filter=$DIR/doc/unknown-code-language.lua" |
||||||
|
-f docbook |
||||||
|
-t "$targetLang" |
||||||
|
--tab-stop=2 |
||||||
|
--wrap=none |
||||||
|
) |
||||||
|
|
||||||
|
for file in "${files[@]}"; do |
||||||
|
if [[ ! -f "$file" ]]; then |
||||||
|
echo "db-to-md.sh: $file does not exist" > /dev/stderr |
||||||
|
exit 1 |
||||||
|
else |
||||||
|
rootElement=$(xmllint --xpath 'name(//*)' "$file") |
||||||
|
|
||||||
|
if [[ $rootElement = chapter ]]; then |
||||||
|
extension=".chapter.$outExtension" |
||||||
|
elif [[ $rootElement = section ]]; then |
||||||
|
extension=".section.$outExtension" |
||||||
|
else |
||||||
|
echo "db-to-md.sh: $file contains an unsupported root element $rootElement" > /dev/stderr |
||||||
|
exit 1 |
||||||
|
fi |
||||||
|
|
||||||
|
outFile="${file%".section.xml"}" |
||||||
|
outFile="${outFile%".chapter.xml"}" |
||||||
|
outFile="${outFile%".xml"}$extension" |
||||||
|
temp1=$(mktemp) |
||||||
|
$DIR/doc/escape-code-markup.py "$file" "$temp1" |
||||||
|
if [[ $debug ]]; then |
||||||
|
echo "Converted $file to $temp1" > /dev/stderr |
||||||
|
fi |
||||||
|
temp2=$(mktemp) |
||||||
|
$DIR/doc/replace-xrefs-by-empty-links.py "$temp1" "$temp2" |
||||||
|
if [[ $debug ]]; then |
||||||
|
echo "Converted $temp1 to $temp2" > /dev/stderr |
||||||
|
fi |
||||||
|
pandoc "$temp2" -o "$outFile" "${pandoc_flags[@]}" |
||||||
|
echo "Converted $file to $outFile" > /dev/stderr |
||||||
|
fi |
||||||
|
done |
@ -0,0 +1,97 @@ |
|||||||
|
#! /usr/bin/env nix-shell |
||||||
|
#! nix-shell -I nixpkgs=channel:nixos-unstable -i python3 -p python3 -p python3.pkgs.lxml |
||||||
|
|
||||||
|
""" |
||||||
|
Pandoc will strip any markup within code elements so |
||||||
|
let’s escape them so that they can be handled manually. |
||||||
|
""" |
||||||
|
|
||||||
|
import lxml.etree as ET |
||||||
|
import re |
||||||
|
import sys |
||||||
|
|
||||||
|
def replace_element_by_text(el: ET.Element, text: str) -> None: |
||||||
|
""" |
||||||
|
Author: bernulf |
||||||
|
Source: https://stackoverflow.com/a/10520552/160386 |
||||||
|
SPDX-License-Identifier: CC-BY-SA-3.0 |
||||||
|
""" |
||||||
|
text = text + (el.tail or "") |
||||||
|
parent = el.getparent() |
||||||
|
if parent is not None: |
||||||
|
previous = el.getprevious() |
||||||
|
if previous is not None: |
||||||
|
previous.tail = (previous.tail or "") + text |
||||||
|
else: |
||||||
|
parent.text = (parent.text or "") + text |
||||||
|
parent.remove(el) |
||||||
|
|
||||||
|
DOCBOOK_NS = "http://docbook.org/ns/docbook" |
||||||
|
|
||||||
|
# List of elements that pandoc’s DocBook reader strips markup from. |
||||||
|
# https://github.com/jgm/pandoc/blob/master/src/Text/Pandoc/Readers/DocBook.hs |
||||||
|
code_elements = [ |
||||||
|
# CodeBlock |
||||||
|
"literallayout", |
||||||
|
"screen", |
||||||
|
"programlisting", |
||||||
|
# Code (inline) |
||||||
|
"classname", |
||||||
|
"code", |
||||||
|
"filename", |
||||||
|
"envar", |
||||||
|
"literal", |
||||||
|
"computeroutput", |
||||||
|
"prompt", |
||||||
|
"parameter", |
||||||
|
"option", |
||||||
|
"markup", |
||||||
|
"wordasword", |
||||||
|
"command", |
||||||
|
"varname", |
||||||
|
"function", |
||||||
|
"type", |
||||||
|
"symbol", |
||||||
|
"constant", |
||||||
|
"userinput", |
||||||
|
"systemitem", |
||||||
|
] |
||||||
|
|
||||||
|
XMLNS_REGEX = re.compile(r'\s+xmlns(?::[^=]+)?="[^"]*"') |
||||||
|
ROOT_ELEMENT_REGEX = re.compile(r'^\s*<[^>]+>') |
||||||
|
|
||||||
|
def remove_xmlns(match: re.Match) -> str: |
||||||
|
""" |
||||||
|
Removes xmlns attributes. |
||||||
|
|
||||||
|
Expects a match containing an opening tag. |
||||||
|
""" |
||||||
|
return XMLNS_REGEX.sub('', match.group(0)) |
||||||
|
|
||||||
|
if __name__ == '__main__': |
||||||
|
assert len(sys.argv) >= 3, "usage: escape-code-markup.py <input> <output>" |
||||||
|
|
||||||
|
tree = ET.parse(sys.argv[1]) |
||||||
|
name_predicate = " or ".join([f"local-name()='{el}'" for el in code_elements]) |
||||||
|
|
||||||
|
for markup in tree.xpath(f"//*[({name_predicate}) and namespace-uri()='{DOCBOOK_NS}']/*"): |
||||||
|
text = ET.tostring(markup, encoding=str) |
||||||
|
|
||||||
|
# tostring adds xmlns attributes to the element we want to stringify |
||||||
|
# as if it was supposed to be usable standalone. |
||||||
|
# We are just converting it to CDATA so we do not care. |
||||||
|
# Let’s strip the namespace declarations to keep the code clean. |
||||||
|
# |
||||||
|
# Note that this removes even namespaces that were potentially |
||||||
|
# in the original file. Though, that should be very rare – |
||||||
|
# most of the time, we will stringify empty DocBook elements |
||||||
|
# like <xref> or <co> or, at worst, <link> with xlink:href attribute. |
||||||
|
# |
||||||
|
# Also note that the regex expects the root element to be first |
||||||
|
# thing in the string. But that should be fine, the tostring method |
||||||
|
# does not produce XML declaration or doctype by default. |
||||||
|
text = ROOT_ELEMENT_REGEX.sub(remove_xmlns, text) |
||||||
|
|
||||||
|
replace_element_by_text(markup, text) |
||||||
|
|
||||||
|
tree.write(sys.argv[2]) |
@ -0,0 +1,32 @@ |
|||||||
|
#! /usr/bin/env nix-shell |
||||||
|
#! nix-shell -I nixpkgs=channel:nixos-unstable -i python3 -p python3 -p python3.pkgs.lxml |
||||||
|
|
||||||
|
""" |
||||||
|
Pandoc will try to resolve xrefs and replace them with regular links. |
||||||
|
let’s replace them with links with empty labels which MyST |
||||||
|
and our pandoc filters recognize as cross-references. |
||||||
|
""" |
||||||
|
|
||||||
|
import lxml.etree as ET |
||||||
|
import sys |
||||||
|
|
||||||
|
XLINK_NS = "http://www.w3.org/1999/xlink" |
||||||
|
|
||||||
|
ns = { |
||||||
|
"db": "http://docbook.org/ns/docbook", |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__': |
||||||
|
assert len(sys.argv) >= 3, "usage: replace-xrefs-by-empty-links.py <input> <output>" |
||||||
|
|
||||||
|
tree = ET.parse(sys.argv[1]) |
||||||
|
for xref in tree.findall(".//db:xref", ns): |
||||||
|
text = ET.tostring(xref, encoding=str) |
||||||
|
parent = xref.getparent() |
||||||
|
link = parent.makeelement('link') |
||||||
|
target_name = xref.get("linkend") |
||||||
|
link.set(f"{{{XLINK_NS}}}href", f"#{target_name}") |
||||||
|
parent.replace(xref, link) |
||||||
|
|
||||||
|
tree.write(sys.argv[2]) |
@ -0,0 +1,12 @@ |
|||||||
|
--[[ |
||||||
|
Adds “unknown” class to CodeBlock AST nodes without any classes. |
||||||
|
|
||||||
|
This will cause Pandoc to use fenced code block, which we prefer. |
||||||
|
]] |
||||||
|
|
||||||
|
function CodeBlock(elem) |
||||||
|
if #elem.classes == 0 then |
||||||
|
elem.classes:insert('unknown') |
||||||
|
return elem |
||||||
|
end |
||||||
|
end |
@ -0,0 +1,10 @@ |
|||||||
|
# Nix script to calculate the Haskell dependencies of every haskellPackage. Used by ./hydra-report.hs. |
||||||
|
let |
||||||
|
pkgs = import ../../.. {}; |
||||||
|
inherit (pkgs) lib; |
||||||
|
getDeps = _: pkg: { |
||||||
|
deps = builtins.filter (x: !isNull x) (map (x: x.pname or null) (pkg.propagatedBuildInputs or [])); |
||||||
|
broken = (pkg.meta.hydraPlatforms or [null]) == []; |
||||||
|
}; |
||||||
|
in |
||||||
|
lib.mapAttrs getDeps pkgs.haskellPackages |
@ -0,0 +1,7 @@ |
|||||||
|
# Nix script to lookup maintainer github handles from their email address. Used by ./hydra-report.hs. |
||||||
|
let |
||||||
|
pkgs = import ../../.. {}; |
||||||
|
maintainers = import ../../maintainer-list.nix; |
||||||
|
inherit (pkgs) lib; |
||||||
|
mkMailGithubPair = _: maintainer: if maintainer ? github then { "${maintainer.email}" = maintainer.github; } else {}; |
||||||
|
in lib.zipAttrsWith (_: builtins.head) (lib.mapAttrsToList mkMailGithubPair maintainers) |
@ -0,0 +1,122 @@ |
|||||||
|
#! /usr/bin/env nix-shell |
||||||
|
#! nix-shell -i bash -p git gh -I nixpkgs=. |
||||||
|
# |
||||||
|
# Script to merge the currently open haskell-updates PR into master, bump the |
||||||
|
# Stackage version and Hackage versions, and open the next haskell-updates PR. |
||||||
|
|
||||||
|
set -eu -o pipefail |
||||||
|
|
||||||
|
# exit after printing first argument to this function |
||||||
|
function die { |
||||||
|
# echo the first argument |
||||||
|
echo "ERROR: $1" |
||||||
|
echo "Aborting!" |
||||||
|
|
||||||
|
exit 1 |
||||||
|
} |
||||||
|
|
||||||
|
function help { |
||||||
|
echo "Usage: $0 HASKELL_UPDATES_PR_NUM" |
||||||
|
echo "Merge the currently open haskell-updates PR into master, and open the next one." |
||||||
|
echo |
||||||
|
echo " -h, --help print this help" |
||||||
|
echo " HASKELL_UPDATES_PR_NUM number of the currently open PR on NixOS/nixpkgs" |
||||||
|
echo " for the haskell-updates branch" |
||||||
|
echo |
||||||
|
echo "Example:" |
||||||
|
echo " \$ $0 137340" |
||||||
|
|
||||||
|
exit 1 |
||||||
|
} |
||||||
|
|
||||||
|
# Read in the current haskell-updates PR number from the command line. |
||||||
|
while [[ $# -gt 0 ]]; do |
||||||
|
key="$1" |
||||||
|
|
||||||
|
case $key in |
||||||
|
-h|--help) |
||||||
|
help |
||||||
|
;; |
||||||
|
*) |
||||||
|
curr_haskell_updates_pr_num="$1" |
||||||
|
shift |
||||||
|
;; |
||||||
|
esac |
||||||
|
done |
||||||
|
|
||||||
|
if [[ -z "${curr_haskell_updates_pr_num-}" ]] ; then |
||||||
|
die "You must pass the current haskell-updates PR number as the first argument to this script." |
||||||
|
fi |
||||||
|
|
||||||
|
# Make sure you have gh authentication setup. |
||||||
|
if ! gh auth status 2>/dev/null ; then |
||||||
|
die "You must setup the \`gh\` command. Run \`gh auth login\`." |
||||||
|
fi |
||||||
|
|
||||||
|
# Fetch nixpkgs to get an up-to-date origin/haskell-updates branch. |
||||||
|
echo "Fetching origin..." |
||||||
|
git fetch origin >/dev/null |
||||||
|
|
||||||
|
# Make sure we are currently on a local haskell-updates branch. |
||||||
|
curr_branch="$(git rev-parse --abbrev-ref HEAD)" |
||||||
|
if [[ "$curr_branch" != "haskell-updates" ]]; then |
||||||
|
die "Current branch is not called \"haskell-updates\"." |
||||||
|
fi |
||||||
|
|
||||||
|
# Make sure our local haskell-updates branch is on the same commit as |
||||||
|
# origin/haskell-updates. |
||||||
|
curr_branch_commit="$(git rev-parse haskell-updates)" |
||||||
|
origin_haskell_updates_commit="$(git rev-parse origin/haskell-updates)" |
||||||
|
if [[ "$curr_branch_commit" != "$origin_haskell_updates_commit" ]]; then |
||||||
|
die "Current branch is not at the same commit as origin/haskell-updates" |
||||||
|
fi |
||||||
|
|
||||||
|
# Merge the current open haskell-updates PR. |
||||||
|
echo "Merging https://github.com/NixOS/nixpkgs/pull/${curr_haskell_updates_pr_num}..." |
||||||
|
gh pr merge --repo NixOS/nixpkgs --merge "$curr_haskell_updates_pr_num" |
||||||
|
|
||||||
|
# Update the list of Haskell package versions in NixOS on Hackage. |
||||||
|
echo "Updating list of Haskell package versions in NixOS on Hackage..." |
||||||
|
./maintainers/scripts/haskell/upload-nixos-package-list-to-hackage.sh |
||||||
|
|
||||||
|
# Update stackage, Hackage hashes, and regenerate Haskell package set |
||||||
|
echo "Updating Stackage..." |
||||||
|
./maintainers/scripts/haskell/update-stackage.sh --do-commit |
||||||
|
echo "Updating Hackage hashes..." |
||||||
|
./maintainers/scripts/haskell/update-hackage.sh --do-commit |
||||||
|
echo "Regenerating Hackage packages..." |
||||||
|
./maintainers/scripts/haskell/regenerate-hackage-packages.sh --do-commit |
||||||
|
|
||||||
|
# Push these new commits to the haskell-updates branch |
||||||
|
echo "Pushing commits just created to the remote haskell-updates branch..." |
||||||
|
git push |
||||||
|
|
||||||
|
# Open new PR |
||||||
|
new_pr_body=$(cat <<EOF |
||||||
|
### This Merge |
||||||
|
|
||||||
|
This PR is the regular merge of the \`haskell-updates\` branch into \`master\`. |
||||||
|
|
||||||
|
This branch is being continually built and tested by hydra at https://hydra.nixos.org/jobset/nixpkgs/haskell-updates. You may be able to find an up-to-date Hydra build report at [cdepillabout/nix-haskell-updates-status](https://github.com/cdepillabout/nix-haskell-updates-status). |
||||||
|
|
||||||
|
We roughly aim to merge these \`haskell-updates\` PRs at least once every two weeks. See the @NixOS/haskell [team calendar](https://cloud.maralorn.de/apps/calendar/p/Mw5WLnzsP7fC4Zky) for who is currently in charge of this branch. |
||||||
|
|
||||||
|
### haskellPackages Workflow Summary |
||||||
|
|
||||||
|
Our workflow is currently described in [\`pkgs/development/haskell-modules/HACKING.md\`](https://github.com/NixOS/nixpkgs/blob/haskell-updates/pkgs/development/haskell-modules/HACKING.md). |
||||||
|
|
||||||
|
The short version is this: |
||||||
|
* We regularly update the Stackage and Hackage pins on \`haskell-updates\` (normally at the beginning of a merge window). |
||||||
|
* The community fixes builds of Haskell packages on that branch. |
||||||
|
* We aim at at least one merge of \`haskell-updates\` into \`master\` every two weeks. |
||||||
|
* We only do the merge if the [\`mergeable\`](https://hydra.nixos.org/job/nixpkgs/haskell-updates/mergeable) job is succeeding on hydra. |
||||||
|
* If a [\`maintained\`](https://hydra.nixos.org/job/nixpkgs/haskell-updates/maintained) package is still broken at the time of merge, we will only merge if the maintainer has been pinged 7 days in advance. (If you care about a Haskell package, become a maintainer!) |
||||||
|
|
||||||
|
--- |
||||||
|
|
||||||
|
This is the follow-up to #${curr_haskell_updates_pr_num}. Come to [#haskell:nixos.org](https://matrix.to/#/#haskell:nixos.org) if you have any questions. |
||||||
|
EOF |
||||||
|
) |
||||||
|
|
||||||
|
echo "Opening a PR for the next haskell-updates merge cycle..." |
||||||
|
gh pr create --repo NixOS/nixpkgs --base master --head haskell-updates --title "haskellPackages: update stackage and hackage" --body "$new_pr_body" |
|
@ -1,12 +1,13 @@ |
|||||||
{ nixpkgs ? import ../.. { } |
{ nixpkgs ? import ../.. { } |
||||||
}: |
}: |
||||||
with nixpkgs; |
with nixpkgs; |
||||||
|
let |
||||||
|
pyEnv = python3.withPackages(ps: [ ps.GitPython ]); |
||||||
|
in |
||||||
mkShell { |
mkShell { |
||||||
packages = [ |
packages = [ |
||||||
bash |
pyEnv |
||||||
luarocks-nix |
luarocks-nix |
||||||
nix-prefetch-scripts |
nix-prefetch-scripts |
||||||
parallel |
|
||||||
]; |
]; |
||||||
LUAROCKS_NIXPKGS_PATH = toString nixpkgs.path; |
|
||||||
} |
} |
||||||
|
@ -0,0 +1,62 @@ |
|||||||
|
# Cleaning the Nix Store {#sec-nix-gc} |
||||||
|
|
||||||
|
Nix has a purely functional model, meaning that packages are never |
||||||
|
upgraded in place. Instead new versions of packages end up in a |
||||||
|
different location in the Nix store (`/nix/store`). You should |
||||||
|
periodically run Nix's *garbage collector* to remove old, unreferenced |
||||||
|
packages. This is easy: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-collect-garbage |
||||||
|
``` |
||||||
|
|
||||||
|
Alternatively, you can use a systemd unit that does the same in the |
||||||
|
background: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# systemctl start nix-gc.service |
||||||
|
``` |
||||||
|
|
||||||
|
You can tell NixOS in `configuration.nix` to run this unit automatically |
||||||
|
at certain points in time, for instance, every night at 03:15: |
||||||
|
|
||||||
|
```nix |
||||||
|
nix.gc.automatic = true; |
||||||
|
nix.gc.dates = "03:15"; |
||||||
|
``` |
||||||
|
|
||||||
|
The commands above do not remove garbage collector roots, such as old |
||||||
|
system configurations. Thus they do not remove the ability to roll back |
||||||
|
to previous configurations. The following command deletes old roots, |
||||||
|
removing the ability to roll back to them: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-collect-garbage -d |
||||||
|
``` |
||||||
|
|
||||||
|
You can also do this for specific profiles, e.g. |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-env -p /nix/var/nix/profiles/per-user/eelco/profile --delete-generations old |
||||||
|
``` |
||||||
|
|
||||||
|
Note that NixOS system configurations are stored in the profile |
||||||
|
`/nix/var/nix/profiles/system`. |
||||||
|
|
||||||
|
Another way to reclaim disk space (often as much as 40% of the size of |
||||||
|
the Nix store) is to run Nix's store optimiser, which seeks out |
||||||
|
identical files in the store and replaces them with hard links to a |
||||||
|
single copy. |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ nix-store --optimise |
||||||
|
``` |
||||||
|
|
||||||
|
Since this command needs to read the entire Nix store, it can take quite |
||||||
|
a while to finish. |
||||||
|
|
||||||
|
## NixOS Boot Entries {#sect-nixos-gc-boot-entries} |
||||||
|
|
||||||
|
If your `/boot` partition runs out of space, after clearing old profiles |
||||||
|
you must rebuild your system with `nixos-rebuild` to update the `/boot` |
||||||
|
partition and clear space. |
@ -1,63 +0,0 @@ |
|||||||
<chapter xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-nix-gc"> |
|
||||||
<title>Cleaning the Nix Store</title> |
|
||||||
<para> |
|
||||||
Nix has a purely functional model, meaning that packages are never upgraded |
|
||||||
in place. Instead new versions of packages end up in a different location in |
|
||||||
the Nix store (<filename>/nix/store</filename>). You should periodically run |
|
||||||
Nix’s <emphasis>garbage collector</emphasis> to remove old, unreferenced |
|
||||||
packages. This is easy: |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>nix-collect-garbage |
|
||||||
</screen> |
|
||||||
Alternatively, you can use a systemd unit that does the same in the |
|
||||||
background: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>systemctl start nix-gc.service |
|
||||||
</screen> |
|
||||||
You can tell NixOS in <filename>configuration.nix</filename> to run this unit |
|
||||||
automatically at certain points in time, for instance, every night at 03:15: |
|
||||||
<programlisting> |
|
||||||
<xref linkend="opt-nix.gc.automatic"/> = true; |
|
||||||
<xref linkend="opt-nix.gc.dates"/> = "03:15"; |
|
||||||
</programlisting> |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
The commands above do not remove garbage collector roots, such as old system |
|
||||||
configurations. Thus they do not remove the ability to roll back to previous |
|
||||||
configurations. The following command deletes old roots, removing the ability |
|
||||||
to roll back to them: |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>nix-collect-garbage -d |
|
||||||
</screen> |
|
||||||
You can also do this for specific profiles, e.g. |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>nix-env -p /nix/var/nix/profiles/per-user/eelco/profile --delete-generations old |
|
||||||
</screen> |
|
||||||
Note that NixOS system configurations are stored in the profile |
|
||||||
<filename>/nix/var/nix/profiles/system</filename>. |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
Another way to reclaim disk space (often as much as 40% of the size of the |
|
||||||
Nix store) is to run Nix’s store optimiser, which seeks out identical files |
|
||||||
in the store and replaces them with hard links to a single copy. |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>nix-store --optimise |
|
||||||
</screen> |
|
||||||
Since this command needs to read the entire Nix store, it can take quite a |
|
||||||
while to finish. |
|
||||||
</para> |
|
||||||
<section xml:id="sect-nixos-gc-boot-entries"> |
|
||||||
<title>NixOS Boot Entries</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
If your <filename>/boot</filename> partition runs out of space, after |
|
||||||
clearing old profiles you must rebuild your system with |
|
||||||
<literal>nixos-rebuild</literal> to update the <filename>/boot</filename> |
|
||||||
partition and clear space. |
|
||||||
</para> |
|
||||||
</section> |
|
||||||
</chapter> |
|
@ -0,0 +1,44 @@ |
|||||||
|
# Container Networking {#sec-container-networking} |
||||||
|
|
||||||
|
When you create a container using `nixos-container create`, it gets it |
||||||
|
own private IPv4 address in the range `10.233.0.0/16`. You can get the |
||||||
|
container's IPv4 address as follows: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container show-ip foo |
||||||
|
10.233.4.2 |
||||||
|
|
||||||
|
$ ping -c1 10.233.4.2 |
||||||
|
64 bytes from 10.233.4.2: icmp_seq=1 ttl=64 time=0.106 ms |
||||||
|
``` |
||||||
|
|
||||||
|
Networking is implemented using a pair of virtual Ethernet devices. The |
||||||
|
network interface in the container is called `eth0`, while the matching |
||||||
|
interface in the host is called `ve-container-name` (e.g., `ve-foo`). |
||||||
|
The container has its own network namespace and the `CAP_NET_ADMIN` |
||||||
|
capability, so it can perform arbitrary network configuration such as |
||||||
|
setting up firewall rules, without affecting or having access to the |
||||||
|
host's network. |
||||||
|
|
||||||
|
By default, containers cannot talk to the outside network. If you want |
||||||
|
that, you should set up Network Address Translation (NAT) rules on the |
||||||
|
host to rewrite container traffic to use your external IP address. This |
||||||
|
can be accomplished using the following configuration on the host: |
||||||
|
|
||||||
|
```nix |
||||||
|
networking.nat.enable = true; |
||||||
|
networking.nat.internalInterfaces = ["ve-+"]; |
||||||
|
networking.nat.externalInterface = "eth0"; |
||||||
|
``` |
||||||
|
|
||||||
|
where `eth0` should be replaced with the desired external interface. |
||||||
|
Note that `ve-+` is a wildcard that matches all container interfaces. |
||||||
|
|
||||||
|
If you are using Network Manager, you need to explicitly prevent it from |
||||||
|
managing container interfaces: |
||||||
|
|
||||||
|
```nix |
||||||
|
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ]; |
||||||
|
``` |
||||||
|
|
||||||
|
You may need to restart your system for the changes to take effect. |
@ -1,59 +0,0 @@ |
|||||||
<section xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-container-networking"> |
|
||||||
<title>Container Networking</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
When you create a container using <literal>nixos-container create</literal>, |
|
||||||
it gets it own private IPv4 address in the range |
|
||||||
<literal>10.233.0.0/16</literal>. You can get the container’s IPv4 address |
|
||||||
as follows: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container show-ip foo |
|
||||||
10.233.4.2 |
|
||||||
|
|
||||||
<prompt>$ </prompt>ping -c1 10.233.4.2 |
|
||||||
64 bytes from 10.233.4.2: icmp_seq=1 ttl=64 time=0.106 ms |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
Networking is implemented using a pair of virtual Ethernet devices. The |
|
||||||
network interface in the container is called <literal>eth0</literal>, while |
|
||||||
the matching interface in the host is called |
|
||||||
<literal>ve-<replaceable>container-name</replaceable></literal> (e.g., |
|
||||||
<literal>ve-foo</literal>). The container has its own network namespace and |
|
||||||
the <literal>CAP_NET_ADMIN</literal> capability, so it can perform arbitrary |
|
||||||
network configuration such as setting up firewall rules, without affecting or |
|
||||||
having access to the host’s network. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
By default, containers cannot talk to the outside network. If you want that, |
|
||||||
you should set up Network Address Translation (NAT) rules on the host to |
|
||||||
rewrite container traffic to use your external IP address. This can be |
|
||||||
accomplished using the following configuration on the host: |
|
||||||
<programlisting> |
|
||||||
<xref linkend="opt-networking.nat.enable"/> = true; |
|
||||||
<xref linkend="opt-networking.nat.internalInterfaces"/> = ["ve-+"]; |
|
||||||
<xref linkend="opt-networking.nat.externalInterface"/> = "eth0"; |
|
||||||
</programlisting> |
|
||||||
where <literal>eth0</literal> should be replaced with the desired external |
|
||||||
interface. Note that <literal>ve-+</literal> is a wildcard that matches all |
|
||||||
container interfaces. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
If you are using Network Manager, you need to explicitly prevent it from |
|
||||||
managing container interfaces: |
|
||||||
<programlisting> |
|
||||||
networking.networkmanager.unmanaged = [ "interface-name:ve-*" ]; |
|
||||||
</programlisting> |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
You may need to restart your system for the changes to take effect. |
|
||||||
</para> |
|
||||||
</section> |
|
@ -0,0 +1,28 @@ |
|||||||
|
# Container Management {#ch-containers} |
||||||
|
|
||||||
|
NixOS allows you to easily run other NixOS instances as *containers*. |
||||||
|
Containers are a light-weight approach to virtualisation that runs |
||||||
|
software in the container at the same speed as in the host system. NixOS |
||||||
|
containers share the Nix store of the host, making container creation |
||||||
|
very efficient. |
||||||
|
|
||||||
|
::: {.warning} |
||||||
|
Currently, NixOS containers are not perfectly isolated from the host |
||||||
|
system. This means that a user with root access to the container can do |
||||||
|
things that affect the host. So you should not give container root |
||||||
|
access to untrusted users. |
||||||
|
::: |
||||||
|
|
||||||
|
NixOS containers can be created in two ways: imperatively, using the |
||||||
|
command `nixos-container`, and declaratively, by specifying them in your |
||||||
|
`configuration.nix`. The declarative approach implies that containers |
||||||
|
get upgraded along with your host system when you run `nixos-rebuild`, |
||||||
|
which is often not what you want. By contrast, in the imperative |
||||||
|
approach, containers are configured and updated independently from the |
||||||
|
host system. |
||||||
|
|
||||||
|
```{=docbook} |
||||||
|
<xi:include href="imperative-containers.section.xml" /> |
||||||
|
<xi:include href="declarative-containers.section.xml" /> |
||||||
|
<xi:include href="container-networking.section.xml" /> |
||||||
|
``` |
@ -1,34 +0,0 @@ |
|||||||
<chapter xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="ch-containers"> |
|
||||||
<title>Container Management</title> |
|
||||||
<para> |
|
||||||
NixOS allows you to easily run other NixOS instances as |
|
||||||
<emphasis>containers</emphasis>. Containers are a light-weight approach to |
|
||||||
virtualisation that runs software in the container at the same speed as in |
|
||||||
the host system. NixOS containers share the Nix store of the host, making |
|
||||||
container creation very efficient. |
|
||||||
</para> |
|
||||||
<warning> |
|
||||||
<para> |
|
||||||
Currently, NixOS containers are not perfectly isolated from the host system. |
|
||||||
This means that a user with root access to the container can do things that |
|
||||||
affect the host. So you should not give container root access to untrusted |
|
||||||
users. |
|
||||||
</para> |
|
||||||
</warning> |
|
||||||
<para> |
|
||||||
NixOS containers can be created in two ways: imperatively, using the command |
|
||||||
<command>nixos-container</command>, and declaratively, by specifying them in |
|
||||||
your <filename>configuration.nix</filename>. The declarative approach implies |
|
||||||
that containers get upgraded along with your host system when you run |
|
||||||
<command>nixos-rebuild</command>, which is often not what you want. By |
|
||||||
contrast, in the imperative approach, containers are configured and updated |
|
||||||
independently from the host system. |
|
||||||
</para> |
|
||||||
<xi:include href="imperative-containers.xml" /> |
|
||||||
<xi:include href="declarative-containers.xml" /> |
|
||||||
<xi:include href="container-networking.xml" /> |
|
||||||
</chapter> |
|
@ -0,0 +1,59 @@ |
|||||||
|
# Control Groups {#sec-cgroups} |
||||||
|
|
||||||
|
To keep track of the processes in a running system, systemd uses |
||||||
|
*control groups* (cgroups). A control group is a set of processes used |
||||||
|
to allocate resources such as CPU, memory or I/O bandwidth. There can be |
||||||
|
multiple control group hierarchies, allowing each kind of resource to be |
||||||
|
managed independently. |
||||||
|
|
||||||
|
The command `systemd-cgls` lists all control groups in the `systemd` |
||||||
|
hierarchy, which is what systemd uses to keep track of the processes |
||||||
|
belonging to each service or user session: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ systemd-cgls |
||||||
|
├─user |
||||||
|
│ └─eelco |
||||||
|
│ └─c1 |
||||||
|
│ ├─ 2567 -:0 |
||||||
|
│ ├─ 2682 kdeinit4: kdeinit4 Running... |
||||||
|
│ ├─ ... |
||||||
|
│ └─10851 sh -c less -R |
||||||
|
└─system |
||||||
|
├─httpd.service |
||||||
|
│ ├─2444 httpd -f /nix/store/3pyacby5cpr55a03qwbnndizpciwq161-httpd.conf -DNO_DETACH |
||||||
|
│ └─... |
||||||
|
├─dhcpcd.service |
||||||
|
│ └─2376 dhcpcd --config /nix/store/f8dif8dsi2yaa70n03xir8r653776ka6-dhcpcd.conf |
||||||
|
└─ ... |
||||||
|
``` |
||||||
|
|
||||||
|
Similarly, `systemd-cgls cpu` shows the cgroups in the CPU hierarchy, |
||||||
|
which allows per-cgroup CPU scheduling priorities. By default, every |
||||||
|
systemd service gets its own CPU cgroup, while all user sessions are in |
||||||
|
the top-level CPU cgroup. This ensures, for instance, that a thousand |
||||||
|
run-away processes in the `httpd.service` cgroup cannot starve the CPU |
||||||
|
for one process in the `postgresql.service` cgroup. (By contrast, it |
||||||
|
they were in the same cgroup, then the PostgreSQL process would get |
||||||
|
1/1001 of the cgroup's CPU time.) You can limit a service's CPU share in |
||||||
|
`configuration.nix`: |
||||||
|
|
||||||
|
```nix |
||||||
|
systemd.services.httpd.serviceConfig.CPUShares = 512; |
||||||
|
``` |
||||||
|
|
||||||
|
By default, every cgroup has 1024 CPU shares, so this will halve the CPU |
||||||
|
allocation of the `httpd.service` cgroup. |
||||||
|
|
||||||
|
There also is a `memory` hierarchy that controls memory allocation |
||||||
|
limits; by default, all processes are in the top-level cgroup, so any |
||||||
|
service or session can exhaust all available memory. Per-cgroup memory |
||||||
|
limits can be specified in `configuration.nix`; for instance, to limit |
||||||
|
`httpd.service` to 512 MiB of RAM (excluding swap): |
||||||
|
|
||||||
|
```nix |
||||||
|
systemd.services.httpd.serviceConfig.MemoryLimit = "512M"; |
||||||
|
``` |
||||||
|
|
||||||
|
The command `systemd-cgtop` shows a continuously updated list of all |
||||||
|
cgroups with their CPU and memory usage. |
@ -1,65 +0,0 @@ |
|||||||
<chapter xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-cgroups"> |
|
||||||
<title>Control Groups</title> |
|
||||||
<para> |
|
||||||
To keep track of the processes in a running system, systemd uses |
|
||||||
<emphasis>control groups</emphasis> (cgroups). A control group is a set of |
|
||||||
processes used to allocate resources such as CPU, memory or I/O bandwidth. |
|
||||||
There can be multiple control group hierarchies, allowing each kind of |
|
||||||
resource to be managed independently. |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
The command <command>systemd-cgls</command> lists all control groups in the |
|
||||||
<literal>systemd</literal> hierarchy, which is what systemd uses to keep |
|
||||||
track of the processes belonging to each service or user session: |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>systemd-cgls |
|
||||||
├─user |
|
||||||
│ └─eelco |
|
||||||
│ └─c1 |
|
||||||
│ ├─ 2567 -:0 |
|
||||||
│ ├─ 2682 kdeinit4: kdeinit4 Running... |
|
||||||
│ ├─ <replaceable>...</replaceable> |
|
||||||
│ └─10851 sh -c less -R |
|
||||||
└─system |
|
||||||
├─httpd.service |
|
||||||
│ ├─2444 httpd -f /nix/store/3pyacby5cpr55a03qwbnndizpciwq161-httpd.conf -DNO_DETACH |
|
||||||
│ └─<replaceable>...</replaceable> |
|
||||||
├─dhcpcd.service |
|
||||||
│ └─2376 dhcpcd --config /nix/store/f8dif8dsi2yaa70n03xir8r653776ka6-dhcpcd.conf |
|
||||||
└─ <replaceable>...</replaceable> |
|
||||||
</screen> |
|
||||||
Similarly, <command>systemd-cgls cpu</command> shows the cgroups in the CPU |
|
||||||
hierarchy, which allows per-cgroup CPU scheduling priorities. By default, |
|
||||||
every systemd service gets its own CPU cgroup, while all user sessions are in |
|
||||||
the top-level CPU cgroup. This ensures, for instance, that a thousand |
|
||||||
run-away processes in the <literal>httpd.service</literal> cgroup cannot |
|
||||||
starve the CPU for one process in the <literal>postgresql.service</literal> |
|
||||||
cgroup. (By contrast, it they were in the same cgroup, then the PostgreSQL |
|
||||||
process would get 1/1001 of the cgroup’s CPU time.) You can limit a |
|
||||||
service’s CPU share in <filename>configuration.nix</filename>: |
|
||||||
<programlisting> |
|
||||||
<link linkend="opt-systemd.services._name_.serviceConfig">systemd.services.httpd.serviceConfig</link>.CPUShares = 512; |
|
||||||
</programlisting> |
|
||||||
By default, every cgroup has 1024 CPU shares, so this will halve the CPU |
|
||||||
allocation of the <literal>httpd.service</literal> cgroup. |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
There also is a <literal>memory</literal> hierarchy that controls memory |
|
||||||
allocation limits; by default, all processes are in the top-level cgroup, so |
|
||||||
any service or session can exhaust all available memory. Per-cgroup memory |
|
||||||
limits can be specified in <filename>configuration.nix</filename>; for |
|
||||||
instance, to limit <literal>httpd.service</literal> to 512 MiB of RAM |
|
||||||
(excluding swap): |
|
||||||
<programlisting> |
|
||||||
<link linkend="opt-systemd.services._name_.serviceConfig">systemd.services.httpd.serviceConfig</link>.MemoryLimit = "512M"; |
|
||||||
</programlisting> |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
The command <command>systemd-cgtop</command> shows a continuously updated |
|
||||||
list of all cgroups with their CPU and memory usage. |
|
||||||
</para> |
|
||||||
</chapter> |
|
@ -0,0 +1,48 @@ |
|||||||
|
# Declarative Container Specification {#sec-declarative-containers} |
||||||
|
|
||||||
|
You can also specify containers and their configuration in the host's |
||||||
|
`configuration.nix`. For example, the following specifies that there |
||||||
|
shall be a container named `database` running PostgreSQL: |
||||||
|
|
||||||
|
```nix |
||||||
|
containers.database = |
||||||
|
{ config = |
||||||
|
{ config, pkgs, ... }: |
||||||
|
{ services.postgresql.enable = true; |
||||||
|
services.postgresql.package = pkgs.postgresql_9_6; |
||||||
|
}; |
||||||
|
}; |
||||||
|
``` |
||||||
|
|
||||||
|
If you run `nixos-rebuild switch`, the container will be built. If the |
||||||
|
container was already running, it will be updated in place, without |
||||||
|
rebooting. The container can be configured to start automatically by |
||||||
|
setting `containers.database.autoStart = true` in its configuration. |
||||||
|
|
||||||
|
By default, declarative containers share the network namespace of the |
||||||
|
host, meaning that they can listen on (privileged) ports. However, they |
||||||
|
cannot change the network configuration. You can give a container its |
||||||
|
own network as follows: |
||||||
|
|
||||||
|
```nix |
||||||
|
containers.database = { |
||||||
|
privateNetwork = true; |
||||||
|
hostAddress = "192.168.100.10"; |
||||||
|
localAddress = "192.168.100.11"; |
||||||
|
}; |
||||||
|
``` |
||||||
|
|
||||||
|
This gives the container a private virtual Ethernet interface with IP |
||||||
|
address `192.168.100.11`, which is hooked up to a virtual Ethernet |
||||||
|
interface on the host with IP address `192.168.100.10`. (See the next |
||||||
|
section for details on container networking.) |
||||||
|
|
||||||
|
To disable the container, just remove it from `configuration.nix` and |
||||||
|
run `nixos-rebuild |
||||||
|
switch`. Note that this will not delete the root directory of the |
||||||
|
container in `/var/lib/containers`. Containers can be destroyed using |
||||||
|
the imperative method: `nixos-container destroy foo`. |
||||||
|
|
||||||
|
Declarative containers can be started and stopped using the |
||||||
|
corresponding systemd service, e.g. |
||||||
|
`systemctl start container@database`. |
@ -1,60 +0,0 @@ |
|||||||
<section xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-declarative-containers"> |
|
||||||
<title>Declarative Container Specification</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
You can also specify containers and their configuration in the host’s |
|
||||||
<filename>configuration.nix</filename>. For example, the following specifies |
|
||||||
that there shall be a container named <literal>database</literal> running |
|
||||||
PostgreSQL: |
|
||||||
<programlisting> |
|
||||||
containers.database = |
|
||||||
{ config = |
|
||||||
{ config, pkgs, ... }: |
|
||||||
{ <xref linkend="opt-services.postgresql.enable"/> = true; |
|
||||||
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_6; |
|
||||||
}; |
|
||||||
}; |
|
||||||
</programlisting> |
|
||||||
If you run <literal>nixos-rebuild switch</literal>, the container will be |
|
||||||
built. If the container was already running, it will be updated in place, |
|
||||||
without rebooting. The container can be configured to start automatically by |
|
||||||
setting <literal>containers.database.autoStart = true</literal> in its |
|
||||||
configuration. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
By default, declarative containers share the network namespace of the host, |
|
||||||
meaning that they can listen on (privileged) ports. However, they cannot |
|
||||||
change the network configuration. You can give a container its own network as |
|
||||||
follows: |
|
||||||
<programlisting> |
|
||||||
containers.database = { |
|
||||||
<link linkend="opt-containers._name_.privateNetwork">privateNetwork</link> = true; |
|
||||||
<link linkend="opt-containers._name_.hostAddress">hostAddress</link> = "192.168.100.10"; |
|
||||||
<link linkend="opt-containers._name_.localAddress">localAddress</link> = "192.168.100.11"; |
|
||||||
}; |
|
||||||
</programlisting> |
|
||||||
This gives the container a private virtual Ethernet interface with IP address |
|
||||||
<literal>192.168.100.11</literal>, which is hooked up to a virtual Ethernet |
|
||||||
interface on the host with IP address <literal>192.168.100.10</literal>. (See |
|
||||||
the next section for details on container networking.) |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
To disable the container, just remove it from |
|
||||||
<filename>configuration.nix</filename> and run <literal>nixos-rebuild |
|
||||||
switch</literal>. Note that this will not delete the root directory of the |
|
||||||
container in <literal>/var/lib/containers</literal>. Containers can be |
|
||||||
destroyed using the imperative method: <literal>nixos-container destroy |
|
||||||
foo</literal>. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
Declarative containers can be started and stopped using the corresponding |
|
||||||
systemd service, e.g. <literal>systemctl start container@database</literal>. |
|
||||||
</para> |
|
||||||
</section> |
|
@ -0,0 +1,115 @@ |
|||||||
|
# Imperative Container Management {#sec-imperative-containers} |
||||||
|
|
||||||
|
We'll cover imperative container management using `nixos-container` |
||||||
|
first. Be aware that container management is currently only possible as |
||||||
|
`root`. |
||||||
|
|
||||||
|
You create a container with identifier `foo` as follows: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container create foo |
||||||
|
``` |
||||||
|
|
||||||
|
This creates the container's root directory in `/var/lib/containers/foo` |
||||||
|
and a small configuration file in `/etc/containers/foo.conf`. It also |
||||||
|
builds the container's initial system configuration and stores it in |
||||||
|
`/nix/var/nix/profiles/per-container/foo/system`. You can modify the |
||||||
|
initial configuration of the container on the command line. For |
||||||
|
instance, to create a container that has `sshd` running, with the given |
||||||
|
public key for `root`: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container create foo --config ' |
||||||
|
services.openssh.enable = true; |
||||||
|
users.users.root.openssh.authorizedKeys.keys = ["ssh-dss AAAAB3N…"]; |
||||||
|
' |
||||||
|
``` |
||||||
|
|
||||||
|
By default the next free address in the `10.233.0.0/16` subnet will be |
||||||
|
chosen as container IP. This behavior can be altered by setting |
||||||
|
`--host-address` and `--local-address`: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container create test --config-file test-container.nix \ |
||||||
|
--local-address 10.235.1.2 --host-address 10.235.1.1 |
||||||
|
``` |
||||||
|
|
||||||
|
Creating a container does not start it. To start the container, run: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container start foo |
||||||
|
``` |
||||||
|
|
||||||
|
This command will return as soon as the container has booted and has |
||||||
|
reached `multi-user.target`. On the host, the container runs within a |
||||||
|
systemd unit called `container@container-name.service`. Thus, if |
||||||
|
something went wrong, you can get status info using `systemctl`: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# systemctl status container@foo |
||||||
|
``` |
||||||
|
|
||||||
|
If the container has started successfully, you can log in as root using |
||||||
|
the `root-login` operation: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container root-login foo |
||||||
|
[root@foo:~]# |
||||||
|
``` |
||||||
|
|
||||||
|
Note that only root on the host can do this (since there is no |
||||||
|
authentication). You can also get a regular login prompt using the |
||||||
|
`login` operation, which is available to all users on the host: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container login foo |
||||||
|
foo login: alice |
||||||
|
Password: *** |
||||||
|
``` |
||||||
|
|
||||||
|
With `nixos-container run`, you can execute arbitrary commands in the |
||||||
|
container: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container run foo -- uname -a |
||||||
|
Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux |
||||||
|
``` |
||||||
|
|
||||||
|
There are several ways to change the configuration of the container. |
||||||
|
First, on the host, you can edit |
||||||
|
`/var/lib/container/name/etc/nixos/configuration.nix`, and run |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container update foo |
||||||
|
``` |
||||||
|
|
||||||
|
This will build and activate the new configuration. You can also specify |
||||||
|
a new configuration on the command line: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container update foo --config ' |
||||||
|
services.httpd.enable = true; |
||||||
|
services.httpd.adminAddr = "foo@example.org"; |
||||||
|
networking.firewall.allowedTCPPorts = [ 80 ]; |
||||||
|
' |
||||||
|
|
||||||
|
# curl http://$(nixos-container show-ip foo)/ |
||||||
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">… |
||||||
|
``` |
||||||
|
|
||||||
|
However, note that this will overwrite the container's |
||||||
|
`/etc/nixos/configuration.nix`. |
||||||
|
|
||||||
|
Alternatively, you can change the configuration from within the |
||||||
|
container itself by running `nixos-rebuild switch` inside the container. |
||||||
|
Note that the container by default does not have a copy of the NixOS |
||||||
|
channel, so you should run `nix-channel --update` first. |
||||||
|
|
||||||
|
Containers can be stopped and started using `nixos-container |
||||||
|
stop` and `nixos-container start`, respectively, or by using |
||||||
|
`systemctl` on the container's service unit. To destroy a container, |
||||||
|
including its file system, do |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-container destroy foo |
||||||
|
``` |
@ -1,123 +0,0 @@ |
|||||||
<section xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-imperative-containers"> |
|
||||||
<title>Imperative Container Management</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
We’ll cover imperative container management using |
|
||||||
<command>nixos-container</command> first. Be aware that container management |
|
||||||
is currently only possible as <literal>root</literal>. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
You create a container with identifier <literal>foo</literal> as follows: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container create <replaceable>foo</replaceable> |
|
||||||
</screen> |
|
||||||
This creates the container’s root directory in |
|
||||||
<filename>/var/lib/containers/<replaceable>foo</replaceable></filename> and a small configuration file |
|
||||||
in <filename>/etc/containers/<replaceable>foo</replaceable>.conf</filename>. It also builds the |
|
||||||
container’s initial system configuration and stores it in |
|
||||||
<filename>/nix/var/nix/profiles/per-container/<replaceable>foo</replaceable>/system</filename>. You can |
|
||||||
modify the initial configuration of the container on the command line. For |
|
||||||
instance, to create a container that has <command>sshd</command> running, |
|
||||||
with the given public key for <literal>root</literal>: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container create <replaceable>foo</replaceable> --config ' |
|
||||||
<xref linkend="opt-services.openssh.enable"/> = true; |
|
||||||
<link linkend="opt-users.users._name_.openssh.authorizedKeys.keys">users.users.root.openssh.authorizedKeys.keys</link> = ["ssh-dss AAAAB3N…"]; |
|
||||||
' |
|
||||||
</screen> |
|
||||||
By default the next free address in the <literal>10.233.0.0/16</literal> subnet will be chosen |
|
||||||
as container IP. This behavior can be altered by setting <literal>--host-address</literal> and |
|
||||||
<literal>--local-address</literal>: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container create test --config-file test-container.nix \ |
|
||||||
--local-address 10.235.1.2 --host-address 10.235.1.1 |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
Creating a container does not start it. To start the container, run: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container start <replaceable>foo</replaceable> |
|
||||||
</screen> |
|
||||||
This command will return as soon as the container has booted and has reached |
|
||||||
<literal>multi-user.target</literal>. On the host, the container runs within |
|
||||||
a systemd unit called |
|
||||||
<literal>container@<replaceable>container-name</replaceable>.service</literal>. |
|
||||||
Thus, if something went wrong, you can get status info using |
|
||||||
<command>systemctl</command>: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>systemctl status container@<replaceable>foo</replaceable> |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
If the container has started successfully, you can log in as root using the |
|
||||||
<command>root-login</command> operation: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container root-login <replaceable>foo</replaceable> |
|
||||||
<prompt>[root@foo:~]#</prompt> |
|
||||||
</screen> |
|
||||||
Note that only root on the host can do this (since there is no |
|
||||||
authentication). You can also get a regular login prompt using the |
|
||||||
<command>login</command> operation, which is available to all users on the |
|
||||||
host: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container login <replaceable>foo</replaceable> |
|
||||||
foo login: alice |
|
||||||
Password: *** |
|
||||||
</screen> |
|
||||||
With <command>nixos-container run</command>, you can execute arbitrary |
|
||||||
commands in the container: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container run <replaceable>foo</replaceable> -- uname -a |
|
||||||
Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
There are several ways to change the configuration of the container. First, |
|
||||||
on the host, you can edit |
|
||||||
<literal>/var/lib/container/<replaceable>name</replaceable>/etc/nixos/configuration.nix</literal>, |
|
||||||
and run |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container update <replaceable>foo</replaceable> |
|
||||||
</screen> |
|
||||||
This will build and activate the new configuration. You can also specify a |
|
||||||
new configuration on the command line: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container update <replaceable>foo</replaceable> --config ' |
|
||||||
<xref linkend="opt-services.httpd.enable"/> = true; |
|
||||||
<xref linkend="opt-services.httpd.adminAddr"/> = "foo@example.org"; |
|
||||||
<xref linkend="opt-networking.firewall.allowedTCPPorts"/> = [ 80 ]; |
|
||||||
' |
|
||||||
|
|
||||||
<prompt># </prompt>curl http://$(nixos-container show-ip <replaceable>foo</replaceable>)/ |
|
||||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">… |
|
||||||
</screen> |
|
||||||
However, note that this will overwrite the container’s |
|
||||||
<filename>/etc/nixos/configuration.nix</filename>. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
Alternatively, you can change the configuration from within the container |
|
||||||
itself by running <command>nixos-rebuild switch</command> inside the |
|
||||||
container. Note that the container by default does not have a copy of the |
|
||||||
NixOS channel, so you should run <command>nix-channel --update</command> |
|
||||||
first. |
|
||||||
</para> |
|
||||||
|
|
||||||
<para> |
|
||||||
Containers can be stopped and started using <literal>nixos-container |
|
||||||
stop</literal> and <literal>nixos-container start</literal>, respectively, or |
|
||||||
by using <command>systemctl</command> on the container’s service unit. To |
|
||||||
destroy a container, including its file system, do |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-container destroy <replaceable>foo</replaceable> |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
</section> |
|
@ -0,0 +1,38 @@ |
|||||||
|
# Logging {#sec-logging} |
||||||
|
|
||||||
|
System-wide logging is provided by systemd's *journal*, which subsumes |
||||||
|
traditional logging daemons such as syslogd and klogd. Log entries are |
||||||
|
kept in binary files in `/var/log/journal/`. The command `journalctl` |
||||||
|
allows you to see the contents of the journal. For example, |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ journalctl -b |
||||||
|
``` |
||||||
|
|
||||||
|
shows all journal entries since the last reboot. (The output of |
||||||
|
`journalctl` is piped into `less` by default.) You can use various |
||||||
|
options and match operators to restrict output to messages of interest. |
||||||
|
For instance, to get all messages from PostgreSQL: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ journalctl -u postgresql.service |
||||||
|
-- Logs begin at Mon, 2013-01-07 13:28:01 CET, end at Tue, 2013-01-08 01:09:57 CET. -- |
||||||
|
... |
||||||
|
Jan 07 15:44:14 hagbard postgres[2681]: [2-1] LOG: database system is shut down |
||||||
|
-- Reboot -- |
||||||
|
Jan 07 15:45:10 hagbard postgres[2532]: [1-1] LOG: database system was shut down at 2013-01-07 15:44:14 CET |
||||||
|
Jan 07 15:45:13 hagbard postgres[2500]: [1-1] LOG: database system is ready to accept connections |
||||||
|
``` |
||||||
|
|
||||||
|
Or to get all messages since the last reboot that have at least a |
||||||
|
"critical" severity level: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ journalctl -b -p crit |
||||||
|
Dec 17 21:08:06 mandark sudo[3673]: pam_unix(sudo:auth): auth could not identify password for [alice] |
||||||
|
Dec 29 01:30:22 mandark kernel[6131]: [1053513.909444] CPU6: Core temperature above threshold, cpu clock throttled (total events = 1) |
||||||
|
``` |
||||||
|
|
||||||
|
The system journal is readable by root and by users in the `wheel` and |
||||||
|
`systemd-journal` groups. All users have a private journal that can be |
||||||
|
read using `journalctl`. |
@ -1,43 +0,0 @@ |
|||||||
<chapter xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-logging"> |
|
||||||
<title>Logging</title> |
|
||||||
<para> |
|
||||||
System-wide logging is provided by systemd’s <emphasis>journal</emphasis>, |
|
||||||
which subsumes traditional logging daemons such as syslogd and klogd. Log |
|
||||||
entries are kept in binary files in <filename>/var/log/journal/</filename>. |
|
||||||
The command <literal>journalctl</literal> allows you to see the contents of |
|
||||||
the journal. For example, |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>journalctl -b |
|
||||||
</screen> |
|
||||||
shows all journal entries since the last reboot. (The output of |
|
||||||
<command>journalctl</command> is piped into <command>less</command> by |
|
||||||
default.) You can use various options and match operators to restrict output |
|
||||||
to messages of interest. For instance, to get all messages from PostgreSQL: |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>journalctl -u postgresql.service |
|
||||||
-- Logs begin at Mon, 2013-01-07 13:28:01 CET, end at Tue, 2013-01-08 01:09:57 CET. -- |
|
||||||
... |
|
||||||
Jan 07 15:44:14 hagbard postgres[2681]: [2-1] LOG: database system is shut down |
|
||||||
-- Reboot -- |
|
||||||
Jan 07 15:45:10 hagbard postgres[2532]: [1-1] LOG: database system was shut down at 2013-01-07 15:44:14 CET |
|
||||||
Jan 07 15:45:13 hagbard postgres[2500]: [1-1] LOG: database system is ready to accept connections |
|
||||||
</screen> |
|
||||||
Or to get all messages since the last reboot that have at least a |
|
||||||
“critical” severity level: |
|
||||||
<screen> |
|
||||||
<prompt>$ </prompt>journalctl -b -p crit |
|
||||||
Dec 17 21:08:06 mandark sudo[3673]: pam_unix(sudo:auth): auth could not identify password for [alice] |
|
||||||
Dec 29 01:30:22 mandark kernel[6131]: [1053513.909444] CPU6: Core temperature above threshold, cpu clock throttled (total events = 1) |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
The system journal is readable by root and by users in the |
|
||||||
<literal>wheel</literal> and <literal>systemd-journal</literal> groups. All |
|
||||||
users have a private journal that can be read using |
|
||||||
<command>journalctl</command>. |
|
||||||
</para> |
|
||||||
</chapter> |
|
@ -0,0 +1,11 @@ |
|||||||
|
# Maintenance Mode {#sec-maintenance-mode} |
||||||
|
|
||||||
|
You can enter rescue mode by running: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# systemctl rescue |
||||||
|
``` |
||||||
|
|
||||||
|
This will eventually give you a single-user root shell. Systemd will |
||||||
|
stop (almost) all system services. To get out of maintenance mode, just |
||||||
|
exit from the rescue shell. |
@ -1,16 +0,0 @@ |
|||||||
<section xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-maintenance-mode"> |
|
||||||
<title>Maintenance Mode</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
You can enter rescue mode by running: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>systemctl rescue</screen> |
|
||||||
This will eventually give you a single-user root shell. Systemd will stop |
|
||||||
(almost) all system services. To get out of maintenance mode, just exit from |
|
||||||
the rescue shell. |
|
||||||
</para> |
|
||||||
</section> |
|
@ -0,0 +1,21 @@ |
|||||||
|
# Network Problems {#sec-nix-network-issues} |
||||||
|
|
||||||
|
Nix uses a so-called *binary cache* to optimise building a package from |
||||||
|
source into downloading it as a pre-built binary. That is, whenever a |
||||||
|
command like `nixos-rebuild` needs a path in the Nix store, Nix will try |
||||||
|
to download that path from the Internet rather than build it from |
||||||
|
source. The default binary cache is `https://cache.nixos.org/`. If this |
||||||
|
cache is unreachable, Nix operations may take a long time due to HTTP |
||||||
|
connection timeouts. You can disable the use of the binary cache by |
||||||
|
adding `--option use-binary-caches false`, e.g. |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-rebuild switch --option use-binary-caches false |
||||||
|
``` |
||||||
|
|
||||||
|
If you have an alternative binary cache at your disposal, you can use it |
||||||
|
instead: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-rebuild switch --option binary-caches http://my-cache.example.org/ |
||||||
|
``` |
@ -1,27 +0,0 @@ |
|||||||
<section xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-nix-network-issues"> |
|
||||||
<title>Network Problems</title> |
|
||||||
|
|
||||||
<para> |
|
||||||
Nix uses a so-called <emphasis>binary cache</emphasis> to optimise building a |
|
||||||
package from source into downloading it as a pre-built binary. That is, |
|
||||||
whenever a command like <command>nixos-rebuild</command> needs a path in the |
|
||||||
Nix store, Nix will try to download that path from the Internet rather than |
|
||||||
build it from source. The default binary cache is |
|
||||||
<uri>https://cache.nixos.org/</uri>. If this cache is unreachable, Nix |
|
||||||
operations may take a long time due to HTTP connection timeouts. You can |
|
||||||
disable the use of the binary cache by adding <option>--option |
|
||||||
use-binary-caches false</option>, e.g. |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-rebuild switch --option use-binary-caches false |
|
||||||
</screen> |
|
||||||
If you have an alternative binary cache at your disposal, you can use it |
|
||||||
instead: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>nixos-rebuild switch --option binary-caches <replaceable>http://my-cache.example.org/</replaceable> |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
</section> |
|
@ -0,0 +1,30 @@ |
|||||||
|
# Rebooting and Shutting Down {#sec-rebooting} |
||||||
|
|
||||||
|
The system can be shut down (and automatically powered off) by doing: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# shutdown |
||||||
|
``` |
||||||
|
|
||||||
|
This is equivalent to running `systemctl poweroff`. |
||||||
|
|
||||||
|
To reboot the system, run |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# reboot |
||||||
|
``` |
||||||
|
|
||||||
|
which is equivalent to `systemctl reboot`. Alternatively, you can |
||||||
|
quickly reboot the system using `kexec`, which bypasses the BIOS by |
||||||
|
directly loading the new kernel into memory: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# systemctl kexec |
||||||
|
``` |
||||||
|
|
||||||
|
The machine can be suspended to RAM (if supported) using `systemctl suspend`, |
||||||
|
and suspended to disk using `systemctl hibernate`. |
||||||
|
|
||||||
|
These commands can be run by any user who is logged in locally, i.e. on |
||||||
|
a virtual console or in X11; otherwise, the user is asked for |
||||||
|
authentication. |
@ -1,35 +0,0 @@ |
|||||||
<chapter xmlns="http://docbook.org/ns/docbook" |
|
||||||
xmlns:xlink="http://www.w3.org/1999/xlink" |
|
||||||
xmlns:xi="http://www.w3.org/2001/XInclude" |
|
||||||
version="5.0" |
|
||||||
xml:id="sec-rebooting"> |
|
||||||
<title>Rebooting and Shutting Down</title> |
|
||||||
<para> |
|
||||||
The system can be shut down (and automatically powered off) by doing: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>shutdown |
|
||||||
</screen> |
|
||||||
This is equivalent to running <command>systemctl poweroff</command>. |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
To reboot the system, run |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>reboot |
|
||||||
</screen> |
|
||||||
which is equivalent to <command>systemctl reboot</command>. Alternatively, |
|
||||||
you can quickly reboot the system using <literal>kexec</literal>, which |
|
||||||
bypasses the BIOS by directly loading the new kernel into memory: |
|
||||||
<screen> |
|
||||||
<prompt># </prompt>systemctl kexec |
|
||||||
</screen> |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
The machine can be suspended to RAM (if supported) using <command>systemctl |
|
||||||
suspend</command>, and suspended to disk using <command>systemctl |
|
||||||
hibernate</command>. |
|
||||||
</para> |
|
||||||
<para> |
|
||||||
These commands can be run by any user who is logged in locally, i.e. on a |
|
||||||
virtual console or in X11; otherwise, the user is asked for authentication. |
|
||||||
</para> |
|
||||||
</chapter> |
|
@ -0,0 +1,38 @@ |
|||||||
|
# Rolling Back Configuration Changes {#sec-rollback} |
||||||
|
|
||||||
|
After running `nixos-rebuild` to switch to a new configuration, you may |
||||||
|
find that the new configuration doesn't work very well. In that case, |
||||||
|
there are several ways to return to a previous configuration. |
||||||
|
|
||||||
|
First, the GRUB boot manager allows you to boot into any previous |
||||||
|
configuration that hasn't been garbage-collected. These configurations |
||||||
|
can be found under the GRUB submenu "NixOS - All configurations". This |
||||||
|
is especially useful if the new configuration fails to boot. After the |
||||||
|
system has booted, you can make the selected configuration the default |
||||||
|
for subsequent boots: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# /run/current-system/bin/switch-to-configuration boot |
||||||
|
``` |
||||||
|
|
||||||
|
Second, you can switch to the previous configuration in a running |
||||||
|
system: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# nixos-rebuild switch --rollback |
||||||
|
``` |
||||||
|
|
||||||
|
This is equivalent to running: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
# /nix/var/nix/profiles/system-N-link/bin/switch-to-configuration switch |
||||||
|
``` |
||||||
|
|
||||||
|
where `N` is the number of the NixOS system configuration. To get a |
||||||
|
list of the available configurations, do: |
||||||
|
|
||||||
|
```ShellSession |
||||||
|
$ ls -l /nix/var/nix/profiles/system-*-link |
||||||
|
... |
||||||
|
lrwxrwxrwx 1 root root 78 Aug 12 13:54 /nix/var/nix/profiles/system-268-link -> /nix/store/202b...-nixos-13.07pre4932_5a676e4-4be1055 |
||||||
|
``` |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue