author    Bernardo Meurer <bernardo@meurer.org>    2021-11-04 23:30:47 -0700
committer GitHub <noreply@github.com>              2021-11-04 23:30:47 -0700
commit    1403ce522da36e28811f0e4748bd457e149647a4 (patch)
tree      0dc4a7934f24c30c83362c6c21908564a0b5a066 /nixos
parent    fd6e9350d07360b25fe197ec9a9061e3105837c0 (diff)
parent    57225575dfa9f3630ba495a9fa9c57b576a23dff (diff)
Merge pull request #144239 from illustris/hadoop
nixos/hadoop: Add HA services and HTTPFS
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/from_md/release-notes/rl-2111.section.xml |  23
-rw-r--r--  nixos/doc/manual/release-notes/rl-2111.section.md          |   5
-rw-r--r--  nixos/modules/services/cluster/hadoop/conf.nix             |   1
-rw-r--r--  nixos/modules/services/cluster/hadoop/default.nix          |  42
-rw-r--r--  nixos/modules/services/cluster/hadoop/hdfs.nix             | 117
-rw-r--r--  nixos/modules/services/cluster/hadoop/yarn.nix             |  23
-rw-r--r--  nixos/tests/hadoop/hadoop.nix                              | 240
-rw-r--r--  nixos/tests/hadoop/hdfs.nix                                |  38
-rw-r--r--  nixos/tests/hadoop/yarn.nix                                |  15
9 files changed, 401 insertions, 103 deletions
diff --git a/nixos/doc/manual/from_md/release-notes/rl-2111.section.xml b/nixos/doc/manual/from_md/release-notes/rl-2111.section.xml
index 39251105ab78..329ea1409c9a 100644
--- a/nixos/doc/manual/from_md/release-notes/rl-2111.section.xml
+++ b/nixos/doc/manual/from_md/release-notes/rl-2111.section.xml
@@ -52,6 +52,29 @@
</listitem>
<listitem>
<para>
+ Improvements have been made to the Hadoop module and package:
+ </para>
+ <itemizedlist spacing="compact">
+ <listitem>
+ <para>
+ HDFS and YARN now support production-ready highly
+ available deployments with automatic failover.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ Hadoop now defaults to Hadoop 3, updated from 2.
+ </para>
+ </listitem>
+ <listitem>
+ <para>
+ JournalNode, ZKFC and HTTPFS services have been added.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem>
+ <para>
Activation scripts can now opt in to be run when running
<literal>nixos-rebuild dry-activate</literal> and detect the
dry activation by reading <literal>$NIXOS_ACTION</literal>.
diff --git a/nixos/doc/manual/release-notes/rl-2111.section.md b/nixos/doc/manual/release-notes/rl-2111.section.md
index ef93e5c44d54..17f01d15b6fd 100644
--- a/nixos/doc/manual/release-notes/rl-2111.section.md
+++ b/nixos/doc/manual/release-notes/rl-2111.section.md
@@ -18,6 +18,11 @@ In addition to numerous new and upgraded packages, this release has the followin
- spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
+- Improvements have been made to the Hadoop module and package:
+ - HDFS and YARN now support production-ready highly available deployments with automatic failover.
+ - Hadoop now defaults to Hadoop 3, updated from 2.
+- JournalNode, ZKFC and HTTPFS services have been added.
+
- Activation scripts can now opt in to be run when running `nixos-rebuild dry-activate` and detect the dry activation by reading `$NIXOS_ACTION`.
This allows activation scripts to output what they would change if the activation was really run.
The users/modules activation script supports this and outputs some of its actions.
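
Taken together, the release-note items above correspond to module options like the following. A minimal single-node sketch (illustrative only; real HA layouts spread these services across hosts, as in the tests further down):

{ pkgs, ... }: {
  services.hadoop = {
    package = pkgs.hadoop;                        # now defaults to Hadoop 3
    coreSite."fs.defaultFS" = "hdfs://localhost";
    hdfs = {
      namenode = {
        enable = true;                            # renamed from `enabled`
        formatOnInit = true;                      # new: single-namenode clusters only
      };
      datanode.enable = true;
      httpfs.enable = true;                       # new HTTPFS REST gateway
    };
    yarn = {
      resourcemanager.enable = true;
      nodemanager.enable = true;
    };
  };
}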
diff --git a/nixos/modules/services/cluster/hadoop/conf.nix b/nixos/modules/services/cluster/hadoop/conf.nix
index 69472408cabe..0caec5cfc203 100644
--- a/nixos/modules/services/cluster/hadoop/conf.nix
+++ b/nixos/modules/services/cluster/hadoop/conf.nix
@@ -35,6 +35,7 @@ pkgs.runCommand "hadoop-conf" {} ''
cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
+ cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
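
With this change, anything assigned to the new `httpfsSite` option is rendered into an httpfs-site.xml in the generated configuration directory, next to the other site files. A sketch of the mapping, reusing the `hadoop.http.max.threads` example from default.nix (the rendered shape is the standard Hadoop property XML):

{ ... }: {
  services.hadoop.httpfsSite = {
    # Rendered into httpfs-site.xml roughly as:
    #   <property>
    #     <name>hadoop.http.max.threads</name>
    #     <value>500</value>
    #   </property>
    "hadoop.http.max.threads" = 500;
  };
}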
diff --git a/nixos/modules/services/cluster/hadoop/default.nix b/nixos/modules/services/cluster/hadoop/default.nix
index da3e47b95d4d..90f22c48e055 100644
--- a/nixos/modules/services/cluster/hadoop/default.nix
+++ b/nixos/modules/services/cluster/hadoop/default.nix
@@ -15,7 +15,10 @@ with lib;
"fs.defaultFS" = "hdfs://localhost";
}
'';
- description = "Hadoop core-site.xml definition";
+ description = ''
+ Hadoop core-site.xml definition
+ <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
+ '';
};
hdfsSite = mkOption {
@@ -28,7 +31,10 @@ with lib;
"dfs.nameservices" = "namenode1";
}
'';
- description = "Hadoop hdfs-site.xml definition";
+ description = ''
+ Hadoop hdfs-site.xml definition
+ <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
+ '';
};
mapredSite = mkOption {
@@ -44,7 +50,10 @@ with lib;
"mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
}
'';
- description = "Hadoop mapred-site.xml definition";
+ description = ''
+ Hadoop mapred-site.xml definition
+ <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
+ '';
};
yarnSite = mkOption {
@@ -67,7 +76,24 @@ with lib;
"yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
}
'';
- description = "Hadoop yarn-site.xml definition";
+ description = ''
+ Hadoop yarn-site.xml definition
+ <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
+ '';
+ };
+
+ httpfsSite = mkOption {
+ default = { };
+ type = types.attrsOf types.anything;
+ example = literalExpression ''
+ {
+ "hadoop.http.max.threads" = 500;
+ }
+ '';
+ description = ''
+ Hadoop httpfs-site.xml definition
+ <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-hdfs-httpfs/httpfs-default.html"/>
+ '';
};
log4jProperties = mkOption {
@@ -92,7 +118,10 @@ with lib;
"feature.terminal.enabled" = 0;
}
'';
- description = "Yarn container-executor.cfg definition";
+ description = ''
+ Yarn container-executor.cfg definition
+ <link xlink:href="https://hadoop.apache.org/docs/r2.7.2/hadoop-yarn/hadoop-yarn-site/SecureContainer.html"/>
+ '';
};
extraConfDirs = mkOption {
@@ -118,7 +147,8 @@ with lib;
config = mkMerge [
(mkIf (builtins.hasAttr "yarn" config.users.users ||
- builtins.hasAttr "hdfs" config.users.users) {
+ builtins.hasAttr "hdfs" config.users.users ||
+ builtins.hasAttr "httpfs" config.users.users) {
users.groups.hadoop = {
gid = config.ids.gids.hadoop;
};
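
One operational note the module leaves implicit: HTTPFS impersonates the requesting user when talking to HDFS, so core-site needs proxy-user grants for the new `httpfs` user. A sketch using the wildcard grants from the updated hdfs.nix test further down (wildcards are fine for tests; tighten them in production):

{ ... }: {
  services.hadoop.coreSite = {
    "fs.defaultFS" = "hdfs://namenode:8020";
    # Allow the httpfs service user to impersonate any user from any host.
    "hadoop.proxyuser.httpfs.groups" = "*";
    "hadoop.proxyuser.httpfs.hosts" = "*";
  };
}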
diff --git a/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixos/modules/services/cluster/hadoop/hdfs.nix
index e347b682b902..be667aa82d8a 100644
--- a/nixos/modules/services/cluster/hadoop/hdfs.nix
+++ b/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -17,11 +17,14 @@ in
{
options.services.hadoop.hdfs = {
namenode = {
- enabled = mkOption {
+ enable = mkEnableOption "HDFS NameNode";
+ formatOnInit = mkOption {
type = types.bool;
default = false;
description = ''
- Whether to run the HDFS NameNode
+ Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
+ For HA clusters, initialization involves multiple steps across multiple nodes. Follow the guide at
+ <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
+ to initialize an HA cluster manually.
'';
};
inherit restartIfChanged;
@@ -34,34 +37,61 @@ in
};
};
datanode = {
- enabled = mkOption {
+ enable = mkEnableOption "HDFS DataNode";
+ inherit restartIfChanged;
+ openFirewall = mkOption {
type = types.bool;
- default = false;
+ default = true;
description = ''
- Whether to run the HDFS DataNode
+ Open firewall ports for datanode
'';
};
+ };
+ journalnode = {
+ enable = mkEnableOption "HDFS JournalNode";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
default = true;
description = ''
- Open firewall ports for datanode
+ Open firewall ports for journalnode
+ '';
+ };
+ };
+ zkfc = {
+ enable = mkEnableOption "HDFS ZooKeeper failover controller (ZKFC)";
+ inherit restartIfChanged;
+ };
+ httpfs = {
+ enable = mkEnableOption "HDFS HTTPFS server";
+ tempPath = mkOption {
+ type = types.path;
+ default = "/tmp/hadoop/httpfs";
+ description = ''
+ HTTPFS_TEMP path used by HTTPFS
+ '';
+ };
+ inherit restartIfChanged;
+ openFirewall = mkOption {
+ type = types.bool;
+ default = true;
+ description = ''
+ Open firewall ports for HTTPFS
'';
};
};
};
config = mkMerge [
- (mkIf cfg.hdfs.namenode.enabled {
+ (mkIf cfg.hdfs.namenode.enable {
systemd.services.hdfs-namenode = {
description = "Hadoop HDFS NameNode";
wantedBy = [ "multi-user.target" ];
inherit (cfg.hdfs.namenode) restartIfChanged;
- preStart = ''
+ preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
- '';
+ '');
serviceConfig = {
User = "hdfs";
@@ -74,9 +104,10 @@ in
networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
9870 # namenode.http-address
8020 # namenode.rpc-address
+ 8022 # namenode.servicerpc-address
]);
})
- (mkIf cfg.hdfs.datanode.enabled {
+ (mkIf cfg.hdfs.datanode.enable {
systemd.services.hdfs-datanode = {
description = "Hadoop HDFS DataNode";
wantedBy = [ "multi-user.target" ];
@@ -96,8 +127,64 @@ in
9867 # datanode.ipc.address
]);
})
+ (mkIf cfg.hdfs.journalnode.enable {
+ systemd.services.hdfs-journalnode = {
+ description = "Hadoop HDFS JournalNode";
+ wantedBy = [ "multi-user.target" ];
+ inherit (cfg.hdfs.journalnode) restartIfChanged;
+
+ serviceConfig = {
+ User = "hdfs";
+ SyslogIdentifier = "hdfs-journalnode";
+ ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
+ Restart = "always";
+ };
+ };
+
+ networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
+ 8480 # dfs.journalnode.http-address
+ 8485 # dfs.journalnode.rpc-address
+ ]);
+ })
+ (mkIf cfg.hdfs.zkfc.enable {
+ systemd.services.hdfs-zkfc = {
+ description = "Hadoop HDFS ZooKeeper failover controller";
+ wantedBy = [ "multi-user.target" ];
+ inherit (cfg.hdfs.zkfc) restartIfChanged;
+
+ serviceConfig = {
+ User = "hdfs";
+ SyslogIdentifier = "hdfs-zkfc";
+ ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
+ Restart = "always";
+ };
+ };
+ })
+ (mkIf cfg.hdfs.httpfs.enable {
+ systemd.services.hdfs-httpfs = {
+ description = "Hadoop HTTPFS";
+ wantedBy = [ "multi-user.target" ];
+ inherit (cfg.hdfs.httpfs) restartIfChanged;
+
+ environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
+
+ preStart = ''
+ mkdir -p $HTTPFS_TEMP
+ '';
+
+ serviceConfig = {
+ User = "httpfs";
+ SyslogIdentifier = "hdfs-httpfs";
+ ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
+ Restart = "always";
+ };
+ };
+ networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
+ 14000 # httpfs.http.port
+ ]);
+ })
(mkIf (
- cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled
+ cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
) {
users.users.hdfs = {
description = "Hadoop HDFS user";
@@ -105,6 +192,12 @@ in
uid = config.ids.uids.hdfs;
};
})
-
+ (mkIf cfg.hdfs.httpfs.enable {
+ users.users.httpfs = {
+ description = "Hadoop HTTPFS user";
+ group = "hadoop";
+ isSystemUser = true;
+ };
+ })
];
}
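
Putting the new HDFS options together, one NameNode host of an HA pair looks roughly like this sketch, mirroring the hadoop.nix test below. The hdfsSite attrset must additionally carry the dfs.nameservices and dfs.ha.* keys shown there, and formatOnInit stays at its default (false) because HA clusters are initialized manually (formatZK, namenode -format, namenode -bootstrapStandby, as in the test script):

{ pkgs, ... }: {
  services.hadoop = {
    package = pkgs.hadoop;
    coreSite."fs.defaultFS" = "hdfs://ns1";   # a nameservice, not a single host
    # hdfsSite = { ... };  # the HA keys from the hadoop.nix test go here
    hdfs = {
      namenode.enable = true;                 # formatOnInit left off for HA
      zkfc.enable = true;                     # automatic failover controller
    };
  };
}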
diff --git a/nixos/modules/services/cluster/hadoop/yarn.nix b/nixos/modules/services/cluster/hadoop/yarn.nix
index 0086a53e3b74..37c26ea10f76 100644
--- a/nixos/modules/services/cluster/hadoop/yarn.nix
+++ b/nixos/modules/services/cluster/hadoop/yarn.nix
@@ -17,13 +17,7 @@ in
{
options.services.hadoop.yarn = {
resourcemanager = {
- enabled = mkOption {
- type = types.bool;
- default = false;
- description = ''
- Whether to run the Hadoop YARN ResourceManager
- '';
- };
+ enable = mkEnableOption "Hadoop YARN ResourceManager";
inherit restartIfChanged;
openFirewall = mkOption {
type = types.bool;
@@ -34,13 +28,7 @@ in
};
};
nodemanager = {
- enabled = mkOption {
- type = types.bool;
- default = false;
- description = ''
- Whether to run the Hadoop YARN NodeManager
- '';
- };
+ enable = mkEnableOption "Hadoop YARN NodeManager";
inherit restartIfChanged;
addBinBash = mkOption {
type = types.bool;
@@ -62,7 +50,7 @@ in
config = mkMerge [
(mkIf (
- cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
+ cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
) {
users.users.yarn = {
@@ -72,7 +60,7 @@ in
};
})
- (mkIf cfg.yarn.resourcemanager.enabled {
+ (mkIf cfg.yarn.resourcemanager.enable {
systemd.services.yarn-resourcemanager = {
description = "Hadoop YARN ResourceManager";
wantedBy = [ "multi-user.target" ];
@@ -91,10 +79,11 @@ in
8030 # resourcemanager.scheduler.address
8031 # resourcemanager.resource-tracker.address
8032 # resourcemanager.address
+ 8033 # resourcemanager.admin.address
]);
})
- (mkIf cfg.yarn.nodemanager.enabled {
+ (mkIf cfg.yarn.nodemanager.enable {
# Needed because yarn hardcodes /bin/bash in container start scripts
# These scripts can't be patched, they are generated at runtime
systemd.tmpfiles.rules = [
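
On the YARN side, an HA ResourceManager node looks roughly like this sketch; the HA keys are taken from the yarnSiteHA set in the hadoop.nix test below (the zk1, rm1 and rm2 hostnames come from that test):

{ options, pkgs, ... }: {
  services.hadoop = {
    package = pkgs.hadoop;
    yarn.resourcemanager.enable = true;   # renamed from `enabled`
    # Extend the default yarn-site with the HA keys, as the test does.
    yarnSite = options.services.hadoop.yarnSite.default // {
      "yarn.resourcemanager.ha.enabled" = "true";
      "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
      "yarn.resourcemanager.hostname.rm1" = "rm1";
      "yarn.resourcemanager.hostname.rm2" = "rm2";
      "yarn.resourcemanager.cluster-id" = "cluster1";
      "yarn.resourcemanager.zk-address" = "zk1:2181";
    };
  };
}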
diff --git a/nixos/tests/hadoop/hadoop.nix b/nixos/tests/hadoop/hadoop.nix
index 46dfac26e065..b4ed0e17a852 100644
--- a/nixos/tests/hadoop/hadoop.nix
+++ b/nixos/tests/hadoop/hadoop.nix
@@ -1,70 +1,230 @@
+# This test is comprehensive: it checks that all Hadoop services work with each other.
+# Run it when updating the Hadoop package or making significant changes to the Hadoop module.
+# For more basic tests, see hdfs.nix and yarn.nix.
import ../make-test-python.nix ({pkgs, ...}: {
nodes = let
package = pkgs.hadoop;
coreSite = {
- "fs.defaultFS" = "hdfs://master";
+ "fs.defaultFS" = "hdfs://ns1";
+ };
+ hdfsSite = {
+ "dfs.namenode.rpc-bind-host" = "0.0.0.0";
+ "dfs.namenode.http-bind-host" = "0.0.0.0";
+ "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
+
+ # HA Quorum Journal Manager configuration
+ "dfs.nameservices" = "ns1";
+ "dfs.ha.namenodes.ns1" = "nn1,nn2";
+ "dfs.namenode.shared.edits.dir.ns1.nn1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
+ "dfs.namenode.shared.edits.dir.ns1.nn2" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
+ "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
+ "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
+ "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
+ "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
+ "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
+ "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
+
+ # Automatic failover configuration
+ "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+ "dfs.ha.automatic-failover.enabled.ns1" = "true";
+ "dfs.ha.fencing.methods" = "shell(true)";
+ "ha.zookeeper.quorum" = "zk1:2181";
+ };
+ yarnSiteHA = {
+ "yarn.resourcemanager.zk-address" = "zk1:2181";
+ "yarn.resourcemanager.ha.enabled" = "true";
+ "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
+ "yarn.resourcemanager.hostname.rm1" = "rm1";
+ "yarn.resourcemanager.hostname.rm2" = "rm2";
+ "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
+ "yarn.resourcemanager.cluster-id" = "cluster1";
+ # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
+ # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
+ # that causes AM containers to fail otherwise.
+ "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
+ "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
};
in {
- master = {pkgs, options, ...}: {
+ zk1 = { ... }: {
+ services.zookeeper.enable = true;
+ networking.firewall.allowedTCPPorts = [ 2181 ];
+ };
+
+ # HDFS cluster
+ nn1 = {pkgs, options, ...}: {
services.hadoop = {
- inherit package coreSite;
- hdfs.namenode.enabled = true;
- yarn.resourcemanager.enabled = true;
+ inherit package coreSite hdfsSite;
+ hdfs.namenode.enable = true;
+ hdfs.zkfc.enable = true;
+ };
+ };
+ nn2 = {pkgs, options, ...}: {
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ hdfs.namenode.enable = true;
+ hdfs.zkfc.enable = true;
+ };
+ };
+
+ jn1 = {pkgs, options, ...}: {
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ hdfs.journalnode.enable = true;
+ };
+ };
+ jn2 = {pkgs, options, ...}: {
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ hdfs.journalnode.enable = true;
+ };
+ };
+ jn3 = {pkgs, options, ...}: {
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ hdfs.journalnode.enable = true;
};
- virtualisation.memorySize = 1024;
};
- worker = {pkgs, options, ...}: {
+ dn1 = {pkgs, options, ...}: {
services.hadoop = {
- inherit package coreSite;
- hdfs.datanode.enabled = true;
- yarn.nodemanager.enabled = true;
- yarnSite = options.services.hadoop.yarnSite.default // {
- "yarn.resourcemanager.hostname" = "master";
- };
+ inherit package coreSite hdfsSite;
+ hdfs.datanode.enable = true;
};
+ };
+
+ # YARN cluster
+ rm1 = {pkgs, options, ...}: {
+ virtualisation.memorySize = 1024;
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+ yarn.resourcemanager.enable = true;
+ };
+ };
+ rm2 = {pkgs, options, ...}: {
+ virtualisation.memorySize = 1024;
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+ yarn.resourcemanager.enable = true;
+ };
+ };
+ nm1 = {pkgs, options, ...}: {
virtualisation.memorySize = 2048;
+ services.hadoop = {
+ inherit package coreSite hdfsSite;
+ yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+ yarn.nodemanager.enable = true;
+ };
};
};
testScript = ''
start_all()
- master.wait_for_unit("network.target")
- master.wait_for_unit("hdfs-namenode")
+ #### HDFS tests ####
+
+ zk1.wait_for_unit("network.target")
+ jn1.wait_for_unit("network.target")
+ jn2.wait_for_unit("network.target")
+ jn3.wait_for_unit("network.target")
+ nn1.wait_for_unit("network.target")
+ nn2.wait_for_unit("network.target")
+ dn1.wait_for_unit("network.target")
+
+ zk1.wait_for_unit("zookeeper")
+ jn1.wait_for_unit("hdfs-journalnode")
+ jn2.wait_for_unit("hdfs-journalnode")
+ jn3.wait_for_unit("hdfs-journalnode")
+
+ zk1.wait_for_open_port(2181)
+ jn1.wait_for_open_port(8480)
+ jn1.wait_for_open_port(8485)
+ jn2.wait_for_open_port(8480)
+ jn2.wait_for_open_port(8485)
+
+ # Namenodes must be stopped before initializing the cluster
+ nn1.succeed("systemctl stop hdfs-namenode")
+ nn2.succeed("systemctl stop hdfs-namenode")
+ nn1.succeed("systemctl stop hdfs-zkfc")
+ nn2.succeed("systemctl stop hdfs-zkfc")
+
+ # Initialize zookeeper for failover controller
+ nn1.succeed("sudo -u hdfs hdfs zkfc -formatZK 2>&1 | systemd-cat")
+
+ # Format NN1 and start it
+ nn1.succeed("sudo -u hdfs hadoop namenode -format 2>&1 | systemd-cat")
+ nn1.succeed("systemctl start hdfs-namenode")
+ nn1.wait_for_open_port(9870)
+ nn1.wait_for_open_port(8022)
+ nn1.wait_for_open_port(8020)
+
+ # Bootstrap NN2 from NN1 and start it
+ nn2.succeed("sudo -u hdfs hdfs namenode -bootstrapStandby 2>&1 | systemd-cat")
+ nn2.succeed("systemctl start hdfs-namenode")
+ nn2.wait_for_open_port(9870)
+ nn2.wait_for_open_port(8022)
+ nn2.wait_for_open_port(8020)
+ nn1.succeed("netstat -tulpne | systemd-cat")
+
+ # Start failover controllers
+ nn1.succeed("systemctl start hdfs-zkfc")
+ nn2.succeed("systemctl start hdfs-zkfc")
- master.wait_for_open_port(8020)
- master.wait_for_open_port(9870)
+ # DN should have started by now, but confirm anyway
+ dn1.wait_for_unit("hdfs-datanode")
+ # Print states of namenodes
+ dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+ # Wait for cluster to exit safemode
+ dn1.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+ dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+ # test R/W
+ dn1.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+ assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
- worker.wait_for_unit("network.target")
- worker.wait_for_unit("hdfs-datanode")
- worker.wait_for_open_port(9864)
- worker.wait_for_open_port(9866)
- worker.wait_for_open_port(9867)
+ # Test NN failover
+ nn1.succeed("systemctl stop hdfs-namenode")
+ assert "active" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+ dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+ assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
- master.succeed("curl -f http://worker:9864")
- worker.succeed("curl -f http://master:9870")
+ nn1.succeed("systemctl start hdfs-namenode")
+ nn1.wait_for_open_port(9870)
+ nn1.wait_for_open_port(8022)
+ nn1.wait_for_open_port(8020)
+ assert "standby" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+ dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
- worker.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+ #### YARN tests ####
- master.wait_for_unit("yarn-resourcemanager")
+ rm1.wait_for_unit("network.target")
+ rm2.wait_for_unit("network.target")
+ nm1.wait_for_unit("network.target")
- master.wait_for_open_port(8030)
- master.wait_for_open_port(8031)
- master.wait_for_open_port(8032)
- master.wait_for_open_port(8088)
- worker.succeed("curl -f http://master:8088")
+ rm1.wait_for_unit("yarn-resourcemanager")
+ rm1.wait_for_open_port(8088)
+ rm2.wait_for_unit("yarn-resourcemanager")
+ rm2.wait_for_open_port(8088)
- worker.wait_for_unit("yarn-nodemanager")
- worker.wait_for_open_port(8042)
- worker.wait_for_open_port(8040)
- master.succeed("curl -f http://worker:8042")
+ nm1.wait_for_unit("yarn-nodemanager")
+ nm1.wait_for_open_port(8042)
+ nm1.wait_for_open_port(8040)
+ nm1.wait_until_succeeds("yarn node -list | grep Nodes:1")
+ nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+ nm1.succeed("sudo -u yarn yarn node -list | systemd-cat")
- assert "Total Nodes:1" in worker.succeed("yarn node -list")
+ # Test RM failover
+ rm1.succeed("systemctl stop yarn-resourcemanager")
+ assert "standby" not in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+ nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+ rm1.succeed("systemctl start yarn-resourcemanager")
+ rm1.wait_for_unit("yarn-resourcemanager")
+ rm1.wait_for_open_port(8088)
+ assert "standby" in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+ nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
- assert "Estimated value of Pi is" in worker.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
- assert "SUCCEEDED" in worker.succeed("yarn application -list -appStates FINISHED")
- worker.succeed("sudo -u hdfs hdfs dfs -ls / | systemd-cat")
+ assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
+ assert "SUCCEEDED" in nm1.succeed("yarn application -list -appStates FINISHED")
'';
- })
+})
diff --git a/nixos/tests/hadoop/hdfs.nix b/nixos/tests/hadoop/hdfs.nix
index f5907185c039..360dbd60ed27 100644
--- a/nixos/tests/hadoop/hdfs.nix
+++ b/nixos/tests/hadoop/hdfs.nix
@@ -1,36 +1,34 @@
+# Test a minimal HDFS cluster with no HA
import ../make-test-python.nix ({...}: {
nodes = {
namenode = {pkgs, ...}: {
+ virtualisation.memorySize = 1024;
services.hadoop = {
package = pkgs.hadoop;
- hdfs.namenode.enabled = true;
+ hdfs = {
+ namenode = {
+ enable = true;
+ formatOnInit = true;
+ };
+ httpfs.enable = true;
+ };
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
- };
- hdfsSite = {
- "dfs.replication" = 1;
- "dfs.namenode.rpc-bind-host" = "0.0.0.0";
- "dfs.namenode.http-bind-host" = "0.0.0.0";
+ "hadoop.proxyuser.httpfs.groups" = "*";
+ "hadoop.proxyuser.httpfs.hosts" = "*";
};
};
- networking.firewall.allowedTCPPorts = [
- 9870 # namenode.http-address
- 8020 # namenode.rpc-address
- ];
};
datanode = {pkgs, ...}: {
services.hadoop = {
package = pkgs.hadoop;
- hdfs.datanode.enabled = true;
+ hdfs.datanode.enable = true;
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
+ "hadoop.proxyuser.httpfs.groups" = "*";
+ "hadoop.proxyuser.httpfs.hosts" = "*";
};
};
- networking.firewall.allowedTCPPorts = [
- 9864 # datanode.http.address
- 9866 # datanode.address
- 9867 # datanode.ipc.address
- ];
};
};
@@ -50,5 +48,13 @@ import ../make-test-python.nix ({...}: {
namenode.succeed("curl -f http://namenode:9870")
datanode.succeed("curl -f http://datanode:9864")
+
+ datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+ datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+ assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+
+ namenode.wait_for_unit("hdfs-httpfs")
+ namenode.wait_for_open_port(14000)
+ assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
'';
})
diff --git a/nixos/tests/hadoop/yarn.nix b/nixos/tests/hadoop/yarn.nix
index fbbb293eecd6..09bdb35791c7 100644
--- a/nixos/tests/hadoop/yarn.nix
+++ b/nixos/tests/hadoop/yarn.nix
@@ -1,28 +1,20 @@
+# This only tests whether YARN can start its services.
import ../make-test-python.nix ({...}: {
nodes = {
resourcemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
- services.hadoop.yarn.resourcemanager.enabled = true;
+ services.hadoop.yarn.resourcemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
};
- networking.firewall.allowedTCPPorts = [
- 8088 # resourcemanager.webapp.address
- 8031 # resourcemanager.resource-tracker.address
- ];
};
nodemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
- services.hadoop.yarn.nodemanager.enabled = true;
+ services.hadoop.yarn.nodemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.hostname" = "resourcemanager";
"yarn.nodemanager.log-dirs" = "/tmp/userlogs";
- "yarn.nodemanager.address" = "0.0.0.0:8041";
};
- networking.firewall.allowedTCPPorts = [
- 8042 # nodemanager.webapp.address
- 8041 # nodemanager.address
- ];
};
};
@@ -38,7 +30,6 @@ import ../make-test-python.nix ({...}: {
nodemanager.wait_for_unit("yarn-nodemanager")
nodemanager.wait_for_unit("network.target")
nodemanager.wait_for_open_port(8042)
- nodemanager.wait_for_open_port(8041)
resourcemanager.succeed("curl -f http://localhost:8088")
nodemanager.succeed("curl -f http://localhost:8042")