author     Joel Thompson <joel@jthompson.io>    2017-07-11 10:56:58 -0400
committer  Joel Thompson <joel@jthompson.io>    2017-07-21 09:45:37 -0400
commit     4b42fc4b8a969dc4621c76d556309021591a17a2 (patch)
tree       d917e44b3f47bc6197b689157a97b129f80b545f
parent     91dc81156647c5d28e77dfd20d5c2f4b7253c742 (diff)
exhibitor: init at 1.5.6
Initial Exhibitor Nix package and NixOS module for Netflix's Exhibitor, a manager for Apache ZooKeeper.
-rw-r--r--  nixos/modules/module-list.nix                1
-rw-r--r--  nixos/modules/services/misc/exhibitor.nix  361
-rw-r--r--  pkgs/servers/exhibitor/default.nix          54
-rw-r--r--  pkgs/servers/zookeeper/default.nix           7
-rw-r--r--  pkgs/top-level/all-packages.nix              2
5 files changed, 423 insertions, 2 deletions
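For orientation, a minimal sketch of what enabling the new module could look like in a
NixOS configuration, using the "zookeeper" shared-config type (host names illustrative;
the full set of options is in the module below):

    services.exhibitor = {
      enable = true;
      configType = "zookeeper";
      zkConfigConnect = [ "zk1.example.com:2181" "zk2.example.com:2181" ];
      zkConfigZPath = "/exhibitor/config";
      zkServersSpec = [ "S:1:zk1.example.com" "S:2:zk2.example.com" ];
    };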
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 8f78652fbfd6..b97c3b0d816b 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -287,6 +287,7 @@
./services/misc/emby.nix
./services/misc/errbot.nix
./services/misc/etcd.nix
+ ./services/misc/exhibitor.nix
./services/misc/felix.nix
./services/misc/folding-at-home.nix
./services/misc/fstrim.nix
diff --git a/nixos/modules/services/misc/exhibitor.nix b/nixos/modules/services/misc/exhibitor.nix
new file mode 100644
index 000000000000..33580962bf71
--- /dev/null
+++ b/nixos/modules/services/misc/exhibitor.nix
@@ -0,0 +1,361 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.services.exhibitor;
+  exhibitor = pkgs.exhibitor;
+ exhibitorConfig = ''
+ zookeeper-install-directory=${cfg.baseDir}/zookeeper
+ zookeeper-data-directory=${cfg.zkDataDir}
+ zookeeper-log-directory=${cfg.zkLogDir}
+ zoo-cfg-extra=${cfg.zkExtraCfg}
+ client-port=${toString cfg.zkClientPort}
+ connect-port=${toString cfg.zkConnectPort}
+ election-port=${toString cfg.zkElectionPort}
+ cleanup-period-ms=${toString cfg.zkCleanupPeriod}
+ servers-spec=${concatStringsSep "," cfg.zkServersSpec}
+    auto-manage-instances=${if cfg.autoManageInstances then "1" else "0"}
+ ${cfg.extraConf}
+ '';
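+  # Illustrative only: with the module defaults (baseDir = /var/exhibitor and the
+  # zk* ports below), the file above renders roughly as:
+  #   zookeeper-install-directory=/var/exhibitor/zookeeper
+  #   zookeeper-data-directory=/var/exhibitor/zkData
+  #   client-port=2181
+  #   connect-port=2888
+  #   election-port=3888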
+ configDir = pkgs.writeTextDir "exhibitor.properties" exhibitorConfig;
+ cliOptionsCommon = {
+ configtype = cfg.configType;
+ defaultconfig = "${configDir}/exhibitor.properties";
+ port = toString cfg.port;
+ hostname = cfg.hostname;
+ };
+ s3CommonOptions = { s3region = cfg.s3Region; s3credentials = cfg.s3Credentials; };
+ cliOptionsPerConfig = {
+ s3 = {
+ s3config = "${cfg.s3Config.bucketName}:${cfg.s3Config.objectKey}";
+ s3configprefix = cfg.s3Config.configPrefix;
+ };
+ zookeeper = {
+ zkconfigconnect = concatStringsSep "," cfg.zkConfigConnect;
+ zkconfigexhibitorpath = cfg.zkConfigExhibitorPath;
+ zkconfigpollms = toString cfg.zkConfigPollMs;
+ zkconfigretry = "${toString cfg.zkConfigRetry.sleepMs}:${toString cfg.zkConfigRetry.retryQuantity}";
+ zkconfigzpath = cfg.zkConfigZPath;
+ zkconfigexhibitorport = toString cfg.zkConfigExhibitorPort; # NB: This might be null
+ };
+ file = {
+ fsconfigdir = cfg.fsConfigDir;
+ fsconfiglockprefix = cfg.fsConfigLockPrefix;
+      fsconfigname = cfg.fsConfigName;
+ };
+ none = {
+ noneconfigdir = configDir;
+ };
+ };
+ cliOptions = concatStringsSep " " (mapAttrsToList (k: v: "--${k} ${v}") (filterAttrs (k: v: v != null && v != "") (cliOptionsCommon //
+ cliOptionsPerConfig."${cfg.configType}" //
+ s3CommonOptions //
+ optionalAttrs cfg.s3Backup { s3backup = "true"; } //
+ optionalAttrs cfg.fileSystemBackup { filesystembackup = "true"; }
+ )));
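+  # Illustrative only: with configType = "zookeeper" and the defaults, the cliOptions
+  # above evaluate to something like (paths and hosts abridged; flags come out in
+  # attribute-name order):
+  #   --configtype zookeeper --defaultconfig /nix/store/...-exhibitor.properties/exhibitor.properties
+  #   --port 8080 --zkconfigconnect host1:2181,host2:2181 --zkconfigexhibitorpath /
+  #   --zkconfigpollms 10000 --zkconfigretry 1000:3 --zkconfigzpath /exhibitor/config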
+in
+{
+ options = {
+ services.exhibitor = {
+ enable = mkOption {
+ type = types.bool;
+ default = false;
+ description = "
+ Whether to enable the exhibitor server.
+ ";
+ };
+ # See https://github.com/soabase/exhibitor/wiki/Running-Exhibitor for what these mean
+ # General options for any type of config
+ port = mkOption {
+ type = types.int;
+ default = 8080;
+ description = ''
+ The port for exhibitor to listen on and communicate with other exhibitors.
+ '';
+ };
+ baseDir = mkOption {
+ type = types.str;
+ default = "/var/exhibitor";
+ description = ''
+ Baseline directory for exhibitor runtime config.
+ '';
+ };
+ configType = mkOption {
+ type = types.enum [ "file" "s3" "zookeeper" "none" ];
+ description = ''
+ Which configuration type you want to use. Additional config will be
+ required depending on which type you are using.
+ '';
+ };
+ hostname = mkOption {
+ type = types.nullOr types.str;
+ description = ''
+ Hostname to use and advertise
+ '';
+ default = null;
+ };
+ autoManageInstances = mkOption {
+ type = types.bool;
+ description = ''
+ Automatically manage ZooKeeper instances in the ensemble
+ '';
+ default = false;
+ };
+ zkDataDir = mkOption {
+ type = types.str;
+ default = "${cfg.baseDir}/zkData";
+ description = ''
+          The ZooKeeper data directory
+ '';
+ };
+ zkLogDir = mkOption {
+ type = types.path;
+ default = "${cfg.baseDir}/zkLogs";
+ description = ''
+          The ZooKeeper logs directory
+ '';
+ };
+ extraConf = mkOption {
+ type = types.str;
+ default = "";
+ description = ''
+ Extra Exhibitor configuration to put in the ZooKeeper config file.
+ '';
+ };
+ zkExtraCfg = mkOption {
+ type = types.str;
+ default = ''initLimit=5&syncLimit=2&tickTime=2000'';
+ description = ''
+          Extra options to pass into ZooKeeper
+ '';
+ };
+ zkClientPort = mkOption {
+ type = types.int;
+ default = 2181;
+ description = ''
+          ZooKeeper client port
+ '';
+ };
+ zkConnectPort = mkOption {
+ type = types.int;
+ default = 2888;
+ description = ''
+ The port to use for followers to talk to each other.
+ '';
+ };
+ zkElectionPort = mkOption {
+ type = types.int;
+ default = 3888;
+ description = ''
+          The port ZooKeeper servers use for leader election.
+ '';
+ };
+ zkCleanupPeriod = mkOption {
+ type = types.int;
+ default = 0;
+ description = ''
+          How often (in milliseconds) to run the ZooKeeper log cleanup task.
+ '';
+ };
+ zkServersSpec = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = ''
+          ZooKeeper server specs for all servers in the ensemble.
+ '';
+ example = [ "S:1:zk1.example.com" "S:2:zk2.example.com" "S:3:zk3.example.com" "O:4:zk-observer.example.com" ];
+ };
+
+ # Backup options
+ s3Backup = mkOption {
+ type = types.bool;
+ default = false;
+ description = ''
+ Whether to enable backups to S3
+ '';
+ };
+ fileSystemBackup = mkOption {
+ type = types.bool;
+ default = false;
+ description = ''
+ Enables file system backup of ZooKeeper log files
+ '';
+ };
+
+ # Options for using zookeeper configType
+ zkConfigConnect = mkOption {
+ type = types.listOf types.str;
+ description = ''
+ The initial connection string for ZooKeeper shared config storage
+ '';
+ example = ["host1:2181" "host2:2181"];
+ };
+ zkConfigExhibitorPath = mkOption {
+        type = types.str;
+ description = ''
+ If the ZooKeeper shared config is also running Exhibitor, the URI path for the REST call
+ '';
+ default = "/";
+ };
+ zkConfigExhibitorPort = mkOption {
+ type = types.nullOr types.int;
+ description = ''
+ If the ZooKeeper shared config is also running Exhibitor, the port that
+ Exhibitor is listening on. IMPORTANT: if this value is not set it implies
+ that Exhibitor is not being used on the ZooKeeper shared config.
+        '';
+        default = null;
+      };
+ zkConfigPollMs = mkOption {
+ type = types.int;
+ description = ''
+ The period in ms to check for changes in the config ensemble
+ '';
+ default = 10000;
+ };
+ zkConfigRetry = mkOption {
+ type = types.submodule {
+ options = {
+ sleepMs = mkOption {
+ type = types.int;
+ };
+ retryQuantity = mkOption {
+ type = types.int;
+ };
+ };
+ };
+ description = ''
+ The retry values to use
+ '';
+ default = { sleepMs = 1000; retryQuantity = 3; };
+ };
+ zkConfigZPath = mkOption {
+ type = types.str;
+ description = ''
+ The base ZPath that Exhibitor should use
+ '';
+ example = "/exhibitor/config";
+ };
+
+ # Config options for s3 configType
+ s3Config = mkOption {
+ type = types.submodule {
+ options = {
+ bucketName = mkOption {
+ type = types.str;
+ description = ''
+ Bucket name to store config
+ '';
+ };
+ objectKey = mkOption {
+ type = types.str;
+ description = ''
+ S3 key name to store the config
+ '';
+ };
+ configPrefix = mkOption {
+ type = types.str;
+ description = ''
+ When using AWS S3 shared config files, the prefix to use for values such as locks
+ '';
+ default = "exhibitor-";
+ };
+ };
+ };
+ };
+
+ # The next two are used for either s3backup or s3 configType
+ s3Credentials = mkOption {
+ type = types.nullOr types.path;
+ description = ''
+ Optional credentials to use for s3backup or s3config. Argument is the path
+ to an AWS credential properties file with two properties:
+ com.netflix.exhibitor.s3.access-key-id and com.netflix.exhibitor.s3.access-secret-key
+ '';
+ default = null;
+ };
+ s3Region = mkOption {
+ type = types.nullOr types.str;
+ description = ''
+ Optional region for S3 calls
+ '';
+ default = null;
+ };
+
+ # Config options for file config type
+ fsConfigDir = mkOption {
+ type = types.path;
+ description = ''
+ Directory to store Exhibitor properties (cannot be used with s3config).
+ Exhibitor uses file system locks so you can specify a shared location
+ so as to enable complete ensemble management.
+ '';
+ };
+ fsConfigLockPrefix = mkOption {
+ type = types.str;
+ description = ''
+ A prefix for a locking mechanism used in conjunction with fsconfigdir
+ '';
+ default = "exhibitor-lock-";
+ };
+ fsConfigName = mkOption {
+ type = types.str;
+ description = ''
+ The name of the file to store config in
+ '';
+ default = "exhibitor.properties";
+ };
+ };
+ };
+
+ config = mkIf cfg.enable {
+ systemd.services.exhibitor = {
+ description = "Exhibitor Daemon";
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
+ environment = {
+ ZOO_LOG_DIR = cfg.baseDir;
+ };
+ serviceConfig = {
+      /***
+        Exhibitor is a bit un-nixy. It wants to present a user interface for mutating the
+        configuration of both itself and ZooKeeper, and to coordinate changes among the
+        members of the ZooKeeper ensemble. I'm going for a different approach here: manage
+        all the configuration via Nix, have it write out the configuration files that
+        Exhibitor will use, and reduce the amount of inter-Exhibitor orchestration.
+      ***/
+ ExecStart = ''
+        ${exhibitor}/bin/startExhibitor.sh ${cliOptions}
+ '';
+ User = "zookeeper";
+ PermissionsStartOnly = true;
+ };
+      # This is a bit wonky, but the reason for it is that Exhibitor tries to write to
+      # ${cfg.baseDir}/zookeeper/bin/../conf/zoo.cfg
+      # I want everything but the conf directory to live in the immutable Nix store, and I
+      # want the defaults to come from the Nix store too.
+      # If I symlink the bin directory in, then bin/../ will resolve to the parent of the
+      # symlink in the immutable Nix store. Bind-mounting a writable conf over the existing
+      # conf might work, but it gets very messy when trying to copy the existing conf out
+      # to a mutable location.
+      # Another option is to patch upstream Exhibitor, but the current package just pulls
+      # the prebuilt JARs down from Maven rather than building them ourselves, as Maven
+      # support in Nix isn't very mature. So a reasonable compromise is to copy out of the
+      # immutable store just before the service starts: we still run binaries from the
+      # immutable store, but we work around Exhibitor's desire to mutate its installation.
+ preStart = ''
+ mkdir -m 0700 -p ${cfg.baseDir}/zookeeper
+ # Not doing a chown -R to keep the base ZK files owned by root
+ chown zookeeper ${cfg.baseDir} ${cfg.baseDir}/zookeeper
+ cp -Rf ${pkgs.zookeeper}/* ${cfg.baseDir}/zookeeper
+ chown -R zookeeper ${cfg.baseDir}/zookeeper/conf
+ chmod -R u+w ${cfg.baseDir}/zookeeper/conf
+ '';
+ };
+ users.extraUsers = singleton {
+ name = "zookeeper";
+ uid = config.ids.uids.zookeeper;
+ description = "Zookeeper daemon user";
+ home = cfg.baseDir;
+ };
+ };
+}
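For comparison with the zookeeper-type sketch above, the same module driven by S3 shared
config might look like this (bucket, key, and region are hypothetical):

    services.exhibitor = {
      enable = true;
      configType = "s3";
      s3Config = {
        bucketName = "my-exhibitor-config";  # hypothetical bucket
        objectKey = "exhibitor.properties";
      };
      s3Region = "us-east-1";
      s3Backup = true;
    };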
diff --git a/pkgs/servers/exhibitor/default.nix b/pkgs/servers/exhibitor/default.nix
new file mode 100644
index 000000000000..445f52e360bc
--- /dev/null
+++ b/pkgs/servers/exhibitor/default.nix
@@ -0,0 +1,54 @@
+{ fetchFromGitHub, buildMaven, maven, jdk, makeWrapper, stdenv, ... }:
+stdenv.mkDerivation rec {
+ name = "exhibitor-${version}";
+ version = "1.5.6";
+
+ src = fetchFromGitHub {
+ owner = "soabase";
+ repo = "exhibitor";
+ sha256 = "07vikhkldxy51jbpy3jgva6wz75jksch6bjd6dqkagfgqd6baw45";
+ rev = "5fcdb411d06e8638c2380f7acb72a8a6909739cd";
+ };
+ mavenDependenciesSha256 = "00r69n9hwvrn5cbhxklx7w00sjmqvcxs7gvhbm150ggy7bc865qv";
+ # This is adapted from https://github.com/volth/nixpkgs/blob/6aa470dfd57cae46758b62010a93c5ff115215d7/pkgs/applications/networking/cluster/hadoop/default.nix#L20-L32
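+  # fetchedMavenDeps is a fixed-output derivation: it gets network access during
+  # its build, and in exchange its output must hash to outputHash
+  # (mavenDependenciesSha256), which keeps the overall build reproducible.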
+ fetchedMavenDeps = stdenv.mkDerivation {
+ name = "exhibitor-${version}-maven-deps";
+ inherit src nativeBuildInputs;
+ buildPhase = ''
+ cd $pomFileDir;
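+      # `timeout` exits with status 124 when it kills a hung command, so this loop
+      # keeps restarting mvn until it finishes on its own (either passing or failing).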
+ while timeout --kill-after=21m 20m mvn package -Dmaven.repo.local=$out/.m2; [ $? = 124 ]; do
+ echo "maven hangs while downloading :("
+ done
+ '';
+ installPhase = ''find $out/.m2 -type f \! -regex '.+\(pom\|jar\|xml\|sha1\)' -delete''; # delete files with lastModified timestamps inside
+ outputHashAlgo = "sha256";
+ outputHashMode = "recursive";
+ outputHash = mavenDependenciesSha256;
+ };
+
+ # The purpose of this is to fetch the jar file out of public Maven and use Maven
+ # to build a monolithic, standalone jar, rather than build everything from source
+ # (given the state of Maven support in Nix). We're not actually building any java
+ # source here.
+ pomFileDir = "exhibitor-standalone/src/main/resources/buildscripts/standalone/maven";
+ nativeBuildInputs = [ maven ];
+ buildInputs = [ makeWrapper ];
+ buildPhase = ''
+ cd $pomFileDir
+ mvn package --offline -Dmaven.repo.local=$(cp -dpR ${fetchedMavenDeps}/.m2 ./ && chmod +w -R .m2 && pwd)/.m2
+ '';
+  installPhase = ''
+    mkdir -p $out/bin
+    mkdir -p $out/share/java
+    mv target/$name.jar $out/share/java/
+    makeWrapper ${jdk}/bin/java $out/bin/startExhibitor.sh --add-flags "-jar $out/share/java/$name.jar" --suffix PATH : ${stdenv.lib.makeBinPath [ jdk ]}
+  '';
+
+  meta = with stdenv.lib; {
+    homepage = "https://github.com/soabase/exhibitor";
+    description = "ZooKeeper co-process for instance monitoring, backup/recovery, cleanup and visualization";
+    license = licenses.asl20;
+    platforms = platforms.unix;
+  };
+
+}
diff --git a/pkgs/servers/zookeeper/default.nix b/pkgs/servers/zookeeper/default.nix
index 16807a6571cd..b3a9ad444849 100644
--- a/pkgs/servers/zookeeper/default.nix
+++ b/pkgs/servers/zookeeper/default.nix
@@ -1,4 +1,4 @@
-{ stdenv, fetchurl, jre, makeWrapper, bash }:
+{ stdenv, fetchurl, jre, makeWrapper, bash, coreutils }:
stdenv.mkDerivation rec {
name = "zookeeper-${version}";
@@ -17,12 +17,15 @@ stdenv.mkDerivation rec {
mkdir -p $out
cp -R conf docs lib ${name}.jar $out
mkdir -p $out/bin
- cp -R bin/{zkCli,zkCleanup,zkEnv}.sh $out/bin
+ cp -R bin/{zkCli,zkCleanup,zkEnv,zkServer}.sh $out/bin
for i in $out/bin/{zkCli,zkCleanup}.sh; do
wrapProgram $i \
--set JAVA_HOME "${jre}" \
--prefix PATH : "${bash}/bin"
done
+ substituteInPlace $out/bin/zkServer.sh \
+ --replace /bin/echo ${coreutils}/bin/echo \
+ --replace "/usr/bin/env bash" ${bash}/bin/bash
chmod -x $out/bin/zkEnv.sh
mkdir -p $out/share/zooinspector
diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix
index cca84222050a..dfbcf5d1f596 100644
--- a/pkgs/top-level/all-packages.nix
+++ b/pkgs/top-level/all-packages.nix
@@ -11048,6 +11048,8 @@ with pkgs;
ejabberd = callPackage ../servers/xmpp/ejabberd { };
+ exhibitor = callPackage ../servers/exhibitor { };
+
prosody = callPackage ../servers/xmpp/prosody {
lua5 = lua5_1;
inherit (lua51Packages) luasocket luasec luaexpat luafilesystem luabitop luaevent luazlib;