nixos/hadoop: replace "enabled" options with "enable" options

The module has been using "enabled" in place of "enable" since init (0c10b2baa6).
Branch: main
Author: illustris, 3 years ago
Parent: 9ca4363191
Commit: c3d147f507
Changed files:
  1. nixos/modules/services/cluster/hadoop/hdfs.nix (24 lines changed)
  2. nixos/modules/services/cluster/hadoop/yarn.nix (10 lines changed)
  3. nixos/tests/hadoop/hadoop.nix (22 lines changed)
  4. nixos/tests/hadoop/hdfs.nix (6 lines changed)
  5. nixos/tests/hadoop/yarn.nix (4 lines changed)
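
For downstream configurations the change is a mechanical one-for-one rename. A minimal sketch of the user-facing difference (the `coreSite` value below is illustrative, borrowed from the test setups further down):

```nix
{
  services.hadoop = {
    coreSite = { "fs.defaultFS" = "hdfs://namenode:8020"; };

    # before this commit:
    #   hdfs.namenode.enabled = true;
    # after this commit:
    hdfs.namenode.enable = true;
  };
}
```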

@@ -17,7 +17,7 @@ in
 {
   options.services.hadoop.hdfs = {
     namenode = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -43,7 +43,7 @@ in
       };
     };
     datanode = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -60,7 +60,7 @@ in
       };
     };
     journalnode = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -77,7 +77,7 @@ in
       };
     };
     zkfc = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -87,7 +87,7 @@ in
       inherit restartIfChanged;
     };
     httpfs = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -106,7 +106,7 @@ in
   };
   config = mkMerge [
-    (mkIf cfg.hdfs.namenode.enabled {
+    (mkIf cfg.hdfs.namenode.enable {
       systemd.services.hdfs-namenode = {
         description = "Hadoop HDFS NameNode";
         wantedBy = [ "multi-user.target" ];
@@ -130,7 +130,7 @@ in
         8022 # namenode. servicerpc-address
       ]);
     })
-    (mkIf cfg.hdfs.datanode.enabled {
+    (mkIf cfg.hdfs.datanode.enable {
       systemd.services.hdfs-datanode = {
         description = "Hadoop HDFS DataNode";
         wantedBy = [ "multi-user.target" ];
@@ -150,7 +150,7 @@ in
         9867 # datanode.ipc.address
       ]);
     })
-    (mkIf cfg.hdfs.journalnode.enabled {
+    (mkIf cfg.hdfs.journalnode.enable {
      systemd.services.hdfs-journalnode = {
         description = "Hadoop HDFS JournalNode";
         wantedBy = [ "multi-user.target" ];
@@ -169,7 +169,7 @@ in
         8485 # dfs.journalnode.rpc-address
       ]);
     })
-    (mkIf cfg.hdfs.zkfc.enabled {
+    (mkIf cfg.hdfs.zkfc.enable {
       systemd.services.hdfs-zkfc = {
         description = "Hadoop HDFS ZooKeeper failover controller";
         wantedBy = [ "multi-user.target" ];
@@ -183,7 +183,7 @@ in
         };
       };
     })
-    (mkIf cfg.hdfs.httpfs.enabled {
+    (mkIf cfg.hdfs.httpfs.enable {
       systemd.services.hdfs-httpfs = {
         description = "Hadoop httpfs";
         wantedBy = [ "multi-user.target" ];
@@ -209,7 +209,7 @@ in
       ]);
     })
     (mkIf (
-      cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled || cfg.hdfs.journalnode.enabled || cfg.hdfs.zkfc.enabled
+      cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
     ) {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
@@ -217,7 +217,7 @@ in
         uid = config.ids.uids.hdfs;
       };
     })
-    (mkIf cfg.hdfs.httpfs.enabled {
+    (mkIf cfg.hdfs.httpfs.enable {
       users.users.httpfs = {
         description = "Hadoop HTTPFS user";
         group = "hadoop";

@@ -17,7 +17,7 @@ in
 {
   options.services.hadoop.yarn = {
     resourcemanager = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -34,7 +34,7 @@ in
       };
     };
     nodemanager = {
-      enabled = mkOption {
+      enable = mkOption {
         type = types.bool;
         default = false;
         description = ''
@@ -62,7 +62,7 @@ in
   config = mkMerge [
     (mkIf (
-      cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
+      cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
     ) {
       users.users.yarn = {
@@ -72,7 +72,7 @@ in
       };
     })
-    (mkIf cfg.yarn.resourcemanager.enabled {
+    (mkIf cfg.yarn.resourcemanager.enable {
       systemd.services.yarn-resourcemanager = {
         description = "Hadoop YARN ResourceManager";
         wantedBy = [ "multi-user.target" ];
@@ -95,7 +95,7 @@ in
       ]);
     })
-    (mkIf cfg.yarn.nodemanager.enabled {
+    (mkIf cfg.yarn.nodemanager.enable {
       # Needed because yarn hardcodes /bin/bash in container start scripts
       # These scripts can't be patched, they are generated at runtime
       systemd.tmpfiles.rules = [
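
Because the `enabled` options are removed outright, configurations that still set the old names will fail evaluation with a "does not exist" error rather than being silently ignored. This commit adds no aliases; if backwards compatibility were wanted, the usual mechanism would be `lib.mkRenamedOptionModule`, sketched here for the two YARN options (hypothetical, not part of this change):

```nix
# Hypothetical compatibility shim mapping the old "enabled" paths to the new "enable" options.
{ lib, ... }:
{
  imports = [
    (lib.mkRenamedOptionModule
      [ "services" "hadoop" "yarn" "resourcemanager" "enabled" ]
      [ "services" "hadoop" "yarn" "resourcemanager" "enable" ])
    (lib.mkRenamedOptionModule
      [ "services" "hadoop" "yarn" "nodemanager" "enabled" ]
      [ "services" "hadoop" "yarn" "nodemanager" "enable" ])
  ];
}
```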

@@ -55,41 +55,41 @@ import ../make-test-python.nix ({pkgs, ...}: {
     nn1 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.namenode.enabled = true;
-        hdfs.zkfc.enabled = true;
+        hdfs.namenode.enable = true;
+        hdfs.zkfc.enable = true;
       };
     };
     nn2 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.namenode.enabled = true;
-        hdfs.zkfc.enabled = true;
+        hdfs.namenode.enable = true;
+        hdfs.zkfc.enable = true;
       };
     };
     jn1 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.journalnode.enabled = true;
+        hdfs.journalnode.enable = true;
       };
     };
     jn2 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.journalnode.enabled = true;
+        hdfs.journalnode.enable = true;
       };
     };
     jn3 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.journalnode.enabled = true;
+        hdfs.journalnode.enable = true;
       };
     };
     dn1 = {pkgs, options, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
-        hdfs.datanode.enabled = true;
+        hdfs.datanode.enable = true;
       };
     };
@@ -99,7 +99,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
         yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enabled = true;
+        yarn.resourcemanager.enable = true;
       };
     };
     rm2 = {pkgs, options, ...}: {
@@ -107,7 +107,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
         yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enabled = true;
+        yarn.resourcemanager.enable = true;
       };
     };
     nm1 = {pkgs, options, ...}: {
@@ -115,7 +115,7 @@ import ../make-test-python.nix ({pkgs, ...}: {
       services.hadoop = {
         inherit package coreSite hdfsSite;
         yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.nodemanager.enabled = true;
+        yarn.nodemanager.enable = true;
       };
     };
   };
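
The `yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;` lines above are untouched by this commit, but for readers unfamiliar with Nix: `//` is the attribute-set update operator, with keys from the right-hand set winning. A tiny self-contained illustration (the values are made up):

```nix
# `//` merges two attribute sets; on key collisions the right-hand side wins.
let
  defaults  = { "yarn.resourcemanager.hostname" = "rm1"; "yarn.nodemanager.log-dirs" = "/tmp/userlogs"; };
  overrides = { "yarn.resourcemanager.hostname" = "rm2"; };
in
  defaults // overrides
# evaluates to:
#   { "yarn.nodemanager.log-dirs" = "/tmp/userlogs"; "yarn.resourcemanager.hostname" = "rm2"; }
```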

@@ -7,10 +7,10 @@ import ../make-test-python.nix ({...}: {
         package = pkgs.hadoop;
         hdfs = {
           namenode = {
-            enabled = true;
+            enable = true;
             formatOnInit = true;
           };
-          httpfs.enabled = true;
+          httpfs.enable = true;
         };
         coreSite = {
           "fs.defaultFS" = "hdfs://namenode:8020";
@@ -22,7 +22,7 @@ import ../make-test-python.nix ({...}: {
     datanode = {pkgs, ...}: {
       services.hadoop = {
         package = pkgs.hadoop;
-        hdfs.datanode.enabled = true;
+        hdfs.datanode.enable = true;
         coreSite = {
           "fs.defaultFS" = "hdfs://namenode:8020";
           "hadoop.proxyuser.httpfs.groups" = "*";

@@ -3,14 +3,14 @@ import ../make-test-python.nix ({...}: {
   nodes = {
     resourcemanager = {pkgs, ...}: {
       services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.resourcemanager.enabled = true;
+      services.hadoop.yarn.resourcemanager.enable = true;
       services.hadoop.yarnSite = {
         "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
       };
     };
     nodemanager = {pkgs, ...}: {
       services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.nodemanager.enabled = true;
+      services.hadoop.yarn.nodemanager.enable = true;
       services.hadoop.yarnSite = {
         "yarn.resourcemanager.hostname" = "resourcemanager";
         "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
