author    Matthias Beyer <mail@beyermatthias.de>  2021-10-03 13:13:07 +0200
committer Matthias Beyer <mail@beyermatthias.de>  2021-10-03 19:00:47 +0200
commit    ace5563a8f8428483c050759274d0a32de9f67d9 (patch)
tree      aec23eadacd1dacfaec667bab601d583956e9216
parent    13045eb086f5afbe2ed5a1b5daaa870005809ddd (diff)
Add first working setup for running kind cluster
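
The setup builds docker images for the three example services with
dockerTools.buildLayeredImage, renders the kubernetes manifests via
kubenix, and adds helper scripts for spinning everything up in a local
kind cluster.

Rough workflow (a sketch; the attribute names are the ones defined in
default.nix):

    nix-shell          # dev shell with kind, kubectl, rust toolchain, ...
    deploy-to-kind     # recreate the cluster, load images, apply the config
    test-deployment    # check that the hello service answers "Hello World"
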
Signed-off-by: Matthias Beyer <mail@beyermatthias.de>
-rw-r--r--  configuration.nix        | 169
-rw-r--r--  default.nix              |  90
-rw-r--r--  nix/deploy-to-kind.nix   |  51
-rw-r--r--  nix/kubenix.nix          |   8
-rw-r--r--  nix/test-deployment.nix  |  40
-rw-r--r--  shell.nix                |   1
6 files changed, 359 insertions, 0 deletions
diff --git a/configuration.nix b/configuration.nix
new file mode 100644
index 0000000..d39fb56
--- /dev/null
+++ b/configuration.nix
@@ -0,0 +1,169 @@
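+# Kubernetes configuration, written as a kubenix module.
+#
+# `type` toggles between the dev profile (small CPU request, images
+# pre-loaded into the cluster, hence imagePullPolicy "Never") and a
+# production-ish profile ("1000m" CPU, "IfNotPresent").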
+{ type ? "dev" }:
+
+let
+ kubeVersion = "1.18";
+
+ helloApp = rec {
+ label = "hello";
+ port = 3000;
+
+ cpu = if type == "dev"
+ then "100m"
+ else "1000m";
+
+ imagePolicy = if type == "dev"
+ then "Never"
+ else "IfNotPresent";
+
+ env = [
+ { name = "APP_PORT"; value = "${toString port}"; }
+ { name = "HOST"; value = "0.0.0.0"; }
+ { name = "PORT"; value = "${toString port}"; }
+ ];
+ };
+
+ worldApp = rec {
+ label = "world";
+ port = 3000;
+
+ cpu = if type == "dev"
+ then "100m"
+ else "1000m";
+
+ imagePolicy = if type == "dev"
+ then "Never"
+ else "IfNotPresent";
+
+ env = [
+ { name = "APP_PORT"; value = "${toString port}"; }
+ { name = "HOST"; value = "0.0.0.0"; }
+ { name = "PORT"; value = "${toString port}"; }
+ ];
+ };
+
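+  # Same shape as hello/world, but the joiner also gets the addresses
+  # of the other two services: inside the cluster each service is
+  # reachable under its label, so "<label>:<port>" is passed in via
+  # environment variables.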
+ joinerApp = rec {
+ label = "joiner";
+ port = 3000;
+
+ cpu = if type == "dev"
+ then "100m"
+ else "1000m";
+
+ imagePolicy = if type == "dev"
+ then "Never"
+ else "IfNotPresent";
+
+ env = [
+ { name = "APP_PORT"; value = "${toString port}"; }
+ { name = "HOST"; value = "0.0.0.0"; }
+ { name = "PORT"; value = "${toString port}"; }
+ { name = "HELLO_SERVICE"; value = "${helloApp.label}:${toString helloApp.port}"; }
+ { name = "WORLD_SERVICE"; value = "${worldApp.label}:${toString worldApp.port}"; }
+ ];
+ };
+in rec
+{
+ kubernetes.version = kubeVersion;
+
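+  # One single-replica deployment per service. The images are built and
+  # loaded into kind locally (see nix/deploy-to-kind.nix), which is why
+  # dev deployments use imagePullPolicy "Never".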
+ kubernetes.resources.deployments = {
+ "${helloApp.label}" = {
+ metadata.labels.app = helloApp.label;
+
+ spec = {
+ replicas = 1;
+ selector.matchLabels.app = helloApp.label;
+ template = {
+ metadata.labels.app = helloApp.label;
+ spec.containers."${helloApp.label}" = {
+            name = helloApp.label;
+ image = "hello-service:latest";
+ imagePullPolicy = helloApp.imagePolicy;
+ env = helloApp.env;
+ resources.requests.cpu = helloApp.cpu;
+ ports."${toString helloApp.port}" = {};
+ };
+ };
+      };
+    };
+
+ "${worldApp.label}" = {
+ metadata.labels.app = worldApp.label;
+
+ spec = {
+ replicas = 1;
+ selector.matchLabels.app = worldApp.label;
+ template = {
+ metadata.labels.app = worldApp.label;
+ spec.containers."${worldApp.label}" = {
+            name = worldApp.label;
+ image = "world-service:latest";
+ imagePullPolicy = worldApp.imagePolicy;
+ env = worldApp.env;
+ resources.requests.cpu = worldApp.cpu;
+ ports."${toString worldApp.port}" = {};
+ };
+ };
+ };
+ };
+
+ "${joinerApp.label}" = {
+ metadata.labels.app = joinerApp.label;
+
+ spec = {
+ replicas = 1;
+ selector.matchLabels.app = joinerApp.label;
+ template = {
+ metadata.labels.app = joinerApp.label;
+ spec.containers."${joinerApp.label}" = {
+            name = joinerApp.label;
+ image = "joiner-service:latest";
+ imagePullPolicy = joinerApp.imagePolicy;
+ env = joinerApp.env;
+ resources.requests.cpu = joinerApp.cpu;
+ ports."${toString joinerApp.port}" = {};
+ };
+ };
+ };
+ };
+ };
+
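+  # Expose each deployment under its app label. hello and world keep
+  # their container port; the joiner is published on 8080 to match the
+  # ingress below.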
+ kubernetes.resources.services = {
+ "${helloApp.label}" = {
+      spec.selector.app = helloApp.label;
+ spec.ports."${toString helloApp.port}".targetPort = helloApp.port;
+ };
+
+ "${worldApp.label}" = {
+      spec.selector.app = worldApp.label;
+ spec.ports."${toString worldApp.port}".targetPort = worldApp.port;
+ };
+
+ "${joinerApp.label}" = {
+      spec.selector.app = joinerApp.label;
+ spec.ports."8080".targetPort = joinerApp.port;
+ };
+ };
+
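+  # Route everything on localhost to the joiner service. Combined with
+  # the hostPort mappings in nix/deploy-to-kind.nix, the app should be
+  # reachable from the host on http://localhost:8080.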
+ kubernetes.resources.ingresses = {
+ "app-ingress" = {
+ spec.rules = [
+ {
+ host = "localhost";
+ http.paths = [
+ {
+ pathType = "Prefix";
+ path = "/";
+              backend = {
+                serviceName = joinerApp.label;
+                servicePort = 8080;
+              };
+ }
+ ];
+ }
+ ];
+ };
+ };
+}
diff --git a/default.nix b/default.nix
new file mode 100644
index 0000000..e8cbc19
--- /dev/null
+++ b/default.nix
@@ -0,0 +1,90 @@
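+# Entry points (all via this file):
+#
+#   nix-build -A helloImage / worldImage / joinerImage -- docker images
+#   nix-build -A deploy-to-kind                        -- cluster setup script
+#   nix-build -A test-deployment                       -- smoke test script
+#   nix-shell                                          -- dev shell (see shell.nix)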
+{ channel ? "stable", pkgs ? import <nixpkgs> {} }:
+
+let
+ helloService = pkgs.callPackage ./service-hello {};
+ worldService = pkgs.callPackage ./service-world {};
+ joinerService = pkgs.callPackage ./service-joiner {};
+ kubenix = pkgs.callPackage ./nix/kubenix.nix {};
+
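+  # kubenix renders the module in ./configuration.nix into a kubernetes
+  # manifest file; deploy-to-kind and redeploy below apply it with kubectl.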
+ buildConfig = t: kubenix.buildResources { configuration = import ./configuration.nix { type = t; }; };
+
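+  # buildLayeredImage produces a gzipped image tarball; deploy-to-kind
+  # decompresses it before handing it to `kind load image-archive`.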
+ helloImage = pkgs.dockerTools.buildLayeredImage {
+ name = "hello-service";
+ tag = "latest";
+ config.Cmd = [ "${helloService}/bin/service-hello" ];
+ };
+
+ worldImage = pkgs.dockerTools.buildLayeredImage {
+ name = "world-service";
+ tag = "latest";
+ config.Cmd = [ "${worldService}/bin/service-world" ];
+ };
+
+ joinerImage = pkgs.dockerTools.buildLayeredImage {
+ name = "joiner-service";
+ tag = "latest";
+ config.Cmd = [ "${joinerService}/bin/service-joiner" ];
+ };
+in rec {
+ inherit helloImage worldImage joinerImage;
+
+ test-deployment = pkgs.callPackage ./nix/test-deployment.nix { };
+
+  config = buildConfig "dev";
+
+  deploy-to-kind = pkgs.callPackage ./nix/deploy-to-kind.nix {
+    inherit helloImage worldImage joinerImage config;
+  };
+
+  redeploy = pkgs.writeScriptBin "redeploy" ''
+    #! ${pkgs.runtimeShell}
+    ${pkgs.kubectl}/bin/kubectl apply -f ${config}
+  '';
+
+  deploy-and-test = pkgs.mkShell {
+    buildInputs = [ pkgs.kind pkgs.kubectl deploy-to-kind test-deployment ];
+
+    shellHook = ''
+      deploy-to-kind
+      export KUBECONFIG=$(kind get kubeconfig-path)
+      # wait-for-deployment is not packaged here; kubectl wait does the job
+      kubectl wait --for=condition=available --timeout=120s deployment/hello
+      test-deployment
+      kind delete cluster
+    '';
+  };
+
+ shell = let
+ moz_overlay = import (
+      builtins.fetchTarball "https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz"
+ );
+
+ pkgs = import <nixpkgs> { overlays = [ moz_overlay ]; };
+ rustchannel = pkgs.rustChannelOf { inherit channel; };
+ in
+ pkgs.mkShell {
+ buildInputs = with pkgs; [
+ kind
+ deploy-to-kind
+ test-deployment
+ redeploy
+ curl
+ docker
+ kubectl
+
+ rustchannel.rust-std
+ rustchannel.rust
+ rustchannel.rustc
+ rustchannel.cargo
+
+ cmake
+ gcc
+ openssl
+ pkgconfig
+ ];
+
+ LIBCLANG_PATH = "${pkgs.llvmPackages.libclang}/lib";
+ };
+
+}
diff --git a/nix/deploy-to-kind.nix b/nix/deploy-to-kind.nix
new file mode 100644
index 0000000..d40fd06
--- /dev/null
+++ b/nix/deploy-to-kind.nix
@@ -0,0 +1,51 @@
+{ kind
+, helloImage
+, worldImage
+, joinerImage
+, clusterName ? "kind"  # test-deployment expects the default kind cluster name
+, config
+, pkgs }:
+
+let
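+  # A single control-plane node, labelled so the ingress-nginx kind
+  # deployment (applied at the end of the script) schedules onto it,
+  # with the ingress ports 80/443 mapped to 8080/8443 on the host.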
+ kindConfig = pkgs.writeText "kind-config" ''
+ kind: Cluster
+ apiVersion: kind.x-k8s.io/v1alpha4
+ nodes:
+ - role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 8080
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 8443
+ protocol: TCP
+ '';
+in
+
+pkgs.writeScriptBin "deploy-to-kind" ''
+ #! ${pkgs.runtimeShell}
+ set -euo pipefail
+
+  ${kind}/bin/kind delete cluster --name ${clusterName} || true
+  ${kind}/bin/kind create cluster --name ${clusterName} --config ${kindConfig}
+
+  echo "Loading the docker images into the kind cluster ..."
+
+  ${kind}/bin/kind load image-archive --name ${clusterName} <(${pkgs.gzip}/bin/gzip --decompress --stdout ${helloImage})
+  ${kind}/bin/kind load image-archive --name ${clusterName} <(${pkgs.gzip}/bin/gzip --decompress --stdout ${worldImage})
+  ${kind}/bin/kind load image-archive --name ${clusterName} <(${pkgs.gzip}/bin/gzip --decompress --stdout ${joinerImage})
+
+  echo "Applying the configuration ..."
+  ${pkgs.jq}/bin/jq "." ${config}
+  ${pkgs.kubectl}/bin/kubectl apply -f ${config}
+
+  echo "Applying nginx ingress ..."
+  ${pkgs.kubectl}/bin/kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
+''
+
diff --git a/nix/kubenix.nix b/nix/kubenix.nix
new file mode 100644
index 0000000..c09161e
--- /dev/null
+++ b/nix/kubenix.nix
@@ -0,0 +1,8 @@
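+# Pin kubenix to a fixed revision so the rendered manifests stay
+# reproducible.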
+{ pkgs }:
+
+import (pkgs.fetchFromGitHub {
+ owner = "xtruder";
+ repo = "kubenix";
+ rev = "473a18371d2c0bb22bf060f750589695fb7d3100";
+ sha256 = "sha256:0j3mzg81s8pd8gsa3lx7bmhawjpb3f5li2irh55kr7b9kyxr2nsy";
+ }) { inherit pkgs; }
diff --git a/nix/test-deployment.nix b/nix/test-deployment.nix
new file mode 100644
index 0000000..d10e5e6
--- /dev/null
+++ b/nix/test-deployment.nix
@@ -0,0 +1,40 @@
+{ kind, pkgs }:
+
+pkgs.writeScriptBin "test-deployment" ''
+ #! ${pkgs.runtimeShell}
+ set -euo pipefail
+
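+  # Reach the hello service through the API server's service proxy,
+  # which `kubectl proxy` (started below) exposes on localhost:8001.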
+ SERVICE_URL=http://localhost:8001/api/v1/namespaces/default/services/hello:3000/proxy/
+
+  export KUBECONFIG=$(${kind}/bin/kind get kubeconfig-path --name="kind")
+  PROXY_PID=""
+
+  function cleanup {
+    if [ -n "$PROXY_PID" ]; then
+      kill -9 "$PROXY_PID"
+    fi
+  }
+  trap cleanup EXIT
+
+ CLUSTERS=$(${kind}/bin/kind get clusters)
+ if ! [ "$CLUSTERS" = "kind" ]; then
+ echo "Error: kind cluster not running"
+ exit 1
+ fi
+
+ echo "- Cluster seems to be up and running ✓"
+ ${pkgs.kubectl}/bin/kubectl proxy >/dev/null &
+
+ PROXY_PID=$!
+ sleep 3
+
+  RESPONSE=$(${pkgs.curl}/bin/curl --silent "$SERVICE_URL")
+
+  if ! [ "$RESPONSE" = "Hello World" ]; then
+    echo "Error: did not get expected response from service:"
+    echo "$RESPONSE"
+    exit 1
+  fi
+ echo "- Service returns expected response ✓"
+''
+
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 0000000..0d9af5e
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1 @@
+(import ./. {}).shell