From 06fda438335b35a79d6dce92bffaf44ac468daca Mon Sep 17 00:00:00 2001
From: Sergey Yakubov <sergey.yakubov@desy.de>
Date: Tue, 3 Sep 2019 12:16:21 +0200
Subject: [PATCH] start preparing docker image

---
 .../build_env}/Ubuntu16.04/Dockerfile         |   0
 .../build_env}/Ubuntu16.04/build.sh           |   0
 .../build_env}/Ubuntu16.04/build_image.sh     |   0
 .../build_env}/Ubuntu16.04/install_curl.sh    |   0
 deploy/docker/cluster/Dockerfile              |  43 +++++
 deploy/docker/cluster/build_image.sh          |   3 +
 deploy/docker/cluster/jobs/asapo-brokers.nmd  |  75 ++++++++
 deploy/docker/cluster/jobs/asapo-logging.nmd  | 179 ++++++++++++++++++
 deploy/docker/cluster/jobs/asapo-mongo.nmd    |  59 ++++++
 deploy/docker/cluster/jobs/asapo-nginx.nmd    |  70 +++++++
 .../docker/cluster/jobs/asapo-perfmetrics.nmd | 108 +++++++++++
 .../docker/cluster/jobs/asapo-receivers.nmd   |  74 ++++++++
 deploy/docker/cluster/jobs/asapo-services.nmd | 128 +++++++++++++
 .../docker/cluster/jobs/authorizer.json.tpl   |  12 ++
 deploy/docker/cluster/jobs/broker.json.tpl    |   8 +
 deploy/docker/cluster/jobs/discovery.json.tpl |  10 +
 deploy/docker/cluster/jobs/fluentd.conf       |  55 ++++++
 deploy/docker/cluster/jobs/kibana.yml         |   6 +
 deploy/docker/cluster/jobs/nginx.conf.tpl     |  91 +++++++++
 deploy/docker/cluster/jobs/receiver.json.tpl  |  22 +++
 deploy/docker/cluster/run.sh                  |   1 +
 deploy/docker/cluster/supervisord.conf        |  16 ++
 .../full_chain/simple_chain/check_linux.sh    |  12 +-
 23 files changed, 966 insertions(+), 6 deletions(-)
 rename deploy/{docker_buildenv => docker/build_env}/Ubuntu16.04/Dockerfile (100%)
 rename deploy/{docker_buildenv => docker/build_env}/Ubuntu16.04/build.sh (100%)
 rename deploy/{docker_buildenv => docker/build_env}/Ubuntu16.04/build_image.sh (100%)
 rename deploy/{docker_buildenv => docker/build_env}/Ubuntu16.04/install_curl.sh (100%)
 create mode 100644 deploy/docker/cluster/Dockerfile
 create mode 100755 deploy/docker/cluster/build_image.sh
 create mode 100644 deploy/docker/cluster/jobs/asapo-brokers.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-logging.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-mongo.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-nginx.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-perfmetrics.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-receivers.nmd
 create mode 100644 deploy/docker/cluster/jobs/asapo-services.nmd
 create mode 100644 deploy/docker/cluster/jobs/authorizer.json.tpl
 create mode 100644 deploy/docker/cluster/jobs/broker.json.tpl
 create mode 100644 deploy/docker/cluster/jobs/discovery.json.tpl
 create mode 100644 deploy/docker/cluster/jobs/fluentd.conf
 create mode 100644 deploy/docker/cluster/jobs/kibana.yml
 create mode 100644 deploy/docker/cluster/jobs/nginx.conf.tpl
 create mode 100644 deploy/docker/cluster/jobs/receiver.json.tpl
 create mode 100755 deploy/docker/cluster/run.sh
 create mode 100644 deploy/docker/cluster/supervisord.conf

diff --git a/deploy/docker_buildenv/Ubuntu16.04/Dockerfile b/deploy/docker/build_env/Ubuntu16.04/Dockerfile
similarity index 100%
rename from deploy/docker_buildenv/Ubuntu16.04/Dockerfile
rename to deploy/docker/build_env/Ubuntu16.04/Dockerfile
diff --git a/deploy/docker_buildenv/Ubuntu16.04/build.sh b/deploy/docker/build_env/Ubuntu16.04/build.sh
similarity index 100%
rename from deploy/docker_buildenv/Ubuntu16.04/build.sh
rename to deploy/docker/build_env/Ubuntu16.04/build.sh
diff --git a/deploy/docker_buildenv/Ubuntu16.04/build_image.sh b/deploy/docker/build_env/Ubuntu16.04/build_image.sh
similarity index 100%
rename from deploy/docker_buildenv/Ubuntu16.04/build_image.sh
rename to deploy/docker/build_env/Ubuntu16.04/build_image.sh
diff --git a/deploy/docker_buildenv/Ubuntu16.04/install_curl.sh b/deploy/docker/build_env/Ubuntu16.04/install_curl.sh
similarity index 100%
rename from deploy/docker_buildenv/Ubuntu16.04/install_curl.sh
rename to deploy/docker/build_env/Ubuntu16.04/install_curl.sh
diff --git a/deploy/docker/cluster/Dockerfile b/deploy/docker/cluster/Dockerfile
new file mode 100644
index 000000000..1b8cdcd67
--- /dev/null
+++ b/deploy/docker/cluster/Dockerfile
@@ -0,0 +1,43 @@
+FROM ubuntu:18.04
+
+LABEL maintainer="DESY IT"
+
+ENV CONSUL_VERSION=1.6.0
+ENV NOMAD_VERSION=0.9.5
+
+ENV HASHICORP_RELEASES=https://releases.hashicorp.com
+
+RUN apt-get update && apt-get install -y supervisor apt-transport-https \
+        ca-certificates \
+        curl \
+        gnupg-agent \
+        software-properties-common
+
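+# Add Docker's official GPG key and apt repository so the Docker CLI can be installed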
+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+
+RUN add-apt-repository \
+   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+   $(lsb_release -cs) \
+   stable"
+
+RUN apt-get update && apt-get install -y docker-ce-cli wget unzip
+
+
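+# Download the pinned Consul and Nomad releases and unpack them into /bin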
+RUN set -eux && \
+    mkdir -p /tmp/build && \
+    cd /tmp/build && \
+    wget ${HASHICORP_RELEASES}/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip && \
+    unzip -d /bin consul_${CONSUL_VERSION}_linux_amd64.zip && \
+    wget ${HASHICORP_RELEASES}/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_linux_amd64.zip && \
+    unzip -d /bin nomad_${NOMAD_VERSION}_linux_amd64.zip && \
+    cd /tmp && \
+    rm -rf /tmp/build && \
+# tiny smoke test to ensure the binaries we downloaded run
+    consul version && \
+    nomad version
+
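+# supervisord acts as the container entrypoint and launches the Consul and Nomad agents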
+ADD supervisord.conf /etc/
+
+RUN mkdir -p /var/log/supervisord/
+
+ENTRYPOINT ["supervisord", "--configuration", "/etc/supervisord.conf"]
diff --git a/deploy/docker/cluster/build_image.sh b/deploy/docker/cluster/build_image.sh
new file mode 100755
index 000000000..ddc6bd83c
--- /dev/null
+++ b/deploy/docker/cluster/build_image.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+docker build -t yakser/asapo-cluster .
+
diff --git a/deploy/docker/cluster/jobs/asapo-brokers.nmd b/deploy/docker/cluster/jobs/asapo-brokers.nmd
new file mode 100644
index 000000000..eacb8592c
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-brokers.nmd
@@ -0,0 +1,75 @@
+job "asapo-brokers" {
+  datacenters = ["dc1"]
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "brokers" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "fail"
+    }
+
+    task "brokers" {
+      driver = "docker"
+      config {
+        network_mode = "host"
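+        # use the node-local DNS server so that Consul service names (*.service.asapo) resolve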
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-broker-dev:feature_virtualized-deployment.latest"
+	    force_pull = true
+        volumes = ["local/config.json:/var/lib/broker/config.json"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                fluentd-async-connect = true
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        network {
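+          # no static port: Nomad assigns a dynamic one, published via the service below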
+          port "broker" {}
+        }
+      }
+
+      service {
+        port = "broker"
+        name = "asapo-broker"
+        check {
+          name     = "asapo-broker-alive"
+          type     = "http"
+          path     = "/health"
+          interval = "10s"
+          timeout  = "2s"
+        }
+        check_restart {
+          limit = 2
+          grace = "90s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "/usr/local/nomad_jobs/broker.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+
+      template {
+        source        = "/usr/local/nomad_jobs/auth_secret.key"
+        destination   = "secrets/secret.key"
+        change_mode   = "restart"
+      }
+   } #task brokers
+  }
+}
diff --git a/deploy/docker/cluster/jobs/asapo-logging.nmd b/deploy/docker/cluster/jobs/asapo-logging.nmd
new file mode 100644
index 000000000..fead57065
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-logging.nmd
@@ -0,0 +1,179 @@
+job "asapo-logging" {
+  datacenters = ["dc1"]
+
+#  update {
+#    max_parallel = 1
+#    min_healthy_time = "10s"
+#    healthy_deadline = "3m"
+#    auto_revert = false
+#  }
+
+  group "fluentd" {
+    count = 1
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "fluentd" {
+      driver = "docker"
+
+      meta {
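+        # bump this value and re-submit the job to force a task restart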
+        change_me_to_restart = 1
+      }
+
+      config {
+        dns_servers = ["127.0.0.1"]
+        network_mode = "host"
+        image = "yakser/fluentd_elastic"
+        volumes = ["local/fluentd.conf:/fluentd/etc/fluent.conf",
+        "/${meta.shared_storage}/fluentd:/shared"]
+      }
+
+      resources {
+        cpu    = 500
+        memory = 256
+        network {
+          mbits = 10
+          port "fluentd" {
+          static = 9880
+          }
+        }
+      }
+
+      service {
+        port = "fluentd"
+        name = "fluentd"
+        check {
+          name     = "alive"
+          type     = "script"
+          command  = "/bin/pidof"
+          args     = ["ruby2.3"]
+          timeout  = "2s"
+	      interval = "10s"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+      template {
+         source        = "/usr/local/nomad_jobs/fluentd.conf"
+         destination   = "local/fluentd.conf"
+         change_mode   = "restart"
+      }
+   }
+  }
+#elasticsearch
+  group "elk" {
+    count = 1
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "elasticsearch" {
+      driver = "docker"
+
+      env {
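+        # single-node setup: lock memory and cap the JVM heap at 512 MB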
+        "bootstrap.memory_lock" = "true"
+        "cluster.name" = "asapo-logging"
+        ES_JAVA_OPTS = "-Xms512m -Xmx512m"
+      }
+
+      config {
+        ulimit {
+          memlock = "-1:-1"
+          nofile = "65536:65536"
+          nproc = "8192"
+        }
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "docker.elastic.co/elasticsearch/elasticsearch:6.3.0"
+        volumes = ["/${meta.shared_storage}/esdatadir:/usr/share/elasticsearch/data"]
+      }
+
+      resources {
+        #MHz
+        cpu = 4000
+        #MB
+        memory = 2048
+        network {
+          mbits = 10
+          port "elasticsearch" {
+            static = 9200
+          }
+         }
+      }
+
+      service {
+        port = "elasticsearch"
+        name = "elasticsearch"
+        check {
+            name = "alive"
+            type     = "http"
+	        path     = "/_cluster/health"
+            interval = "10s"
+            timeout  = "1s"
+        }
+        check_restart {
+          limit = 2
+          grace = "90s"
+          ignore_warnings = false
+        }
+      }
+   }
+#kibana
+   task "kibana" {
+     driver = "docker"
+
+     config {
+       network_mode = "host"
+       dns_servers = ["127.0.0.1"]
+       image = "docker.elastic.co/kibana/kibana:6.3.0"
+       volumes = ["local/kibana.yml:/usr/share/kibana/config/kibana.yml"]
+     }
+
+      template {
+         source        = "/usr/local/nomad_jobs/kibana.yml"
+         destination   = "local/kibana.yml"
+         change_mode   = "restart"
+      }
+
+     resources {
+       cpu = 256
+       memory = 1024
+       network {
+         mbits = 10
+         port "kibana" {
+           static = 5601
+         }
+        }
+     }
+
+     service {
+       port = "kibana"
+       name = "kibana"
+       check {
+           name = "alive"
+           type     = "http"
+           path     = "/logsview"
+           interval = "10s"
+           timeout  = "1s"
+       }
+       check_restart {
+         limit = 2
+         grace = "90s"
+         ignore_warnings = false
+       }
+     }
+  }
+
+  }
+
+}
diff --git a/deploy/docker/cluster/jobs/asapo-mongo.nmd b/deploy/docker/cluster/jobs/asapo-mongo.nmd
new file mode 100644
index 000000000..be05168e3
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-mongo.nmd
@@ -0,0 +1,59 @@
+job "asapo-mongo" {
+  datacenters = ["dc1"]
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "mongo" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "mongo" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        image = "mongo:4.0.0"
+        volumes = ["/${meta.shared_storage}/mongodb:/data/db"]
+      }
+
+      resources {
+        cpu    = 1500
+        memory = 12560
+        network {
+          port "mongo" {
+          static = 27017
+          }
+        }
+      }
+
+      service {
+        port = "mongo"
+        name = "mongo"
+        check {
+          type     = "script"
+          name     = "alive"
+          command  = "mongo"
+          args     = ["--eval","db.version()"]
+          interval = "10s"
+          timeout  = "5s"
+        }
+        check_restart {
+          limit = 2
+          grace = "90s"
+          ignore_warnings = false
+        }
+      }
+    }
+  }
+}
diff --git a/deploy/docker/cluster/jobs/asapo-nginx.nmd b/deploy/docker/cluster/jobs/asapo-nginx.nmd
new file mode 100644
index 000000000..3e17c3739
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-nginx.nmd
@@ -0,0 +1,70 @@
+job "asapo-nginx" {
+  datacenters = ["dc1"]
+
+  type = "system"
+
+#  update {
+#    max_parallel = 1
+#    min_healthy_time = "10s"
+#    healthy_deadline = "3m"
+#    auto_revert = false
+#  }
+
+  group "nginx" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "nginx" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        image = "nginx:1.14"
+        volumes = ["local/nginx.conf:/etc/nginx/nginx.conf"]
+      }
+
+      resources {
+        cpu    = 500
+        memory = 256
+        network {
+          mbits = 10
+          port "nginx" {
+          static = 8400
+          }
+        }
+      }
+
+      service {
+        port = "nginx"
+        name = "nginx"
+        check {
+          name     = "alive"
+          type     = "http"
+	      path     = "/nginx-health"
+          timeout  = "2s"
+	      interval = "10s"
+        }
+
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "/usr/local/nomad_jobs/nginx.conf.tpl"
+         destination   = "local/nginx.conf"
+         change_mode   = "restart"
+      }
+
+
+   }
+  }
+}
diff --git a/deploy/docker/cluster/jobs/asapo-perfmetrics.nmd b/deploy/docker/cluster/jobs/asapo-perfmetrics.nmd
new file mode 100644
index 000000000..bb8db1b2b
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-perfmetrics.nmd
@@ -0,0 +1,108 @@
+job "asapo-perfmetrics" {
+  datacenters = ["dc1"]
+
+#  update {
+#    max_parallel = 1
+#    min_healthy_time = "10s"
+#    healthy_deadline = "3m"
+#    auto_revert = false
+#  }
+
+  group "perfmetrics" {
+    count = 1
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "influxdb" {
+      driver = "docker"
+
+      config {
+        dns_servers = ["127.0.0.1"]
+        network_mode = "host"
+        image = "influxdb"
+        volumes = ["/${meta.shared_storage}/influxdb:/var/lib/influxdb"]
+      }
+
+      resources {
+        cpu    = 1500
+        memory = 32000
+        network {
+          mbits = 10
+          port "influxdb" {
+          static = 8086
+          }
+        }
+      }
+
+     service {
+       port = "influxdb"
+       name = "influxdb"
+       check {
+           name = "alive"
+           type     = "http"
+           path     = "/ping"
+           interval = "10s"
+           timeout  = "1s"
+       }
+       check_restart {
+         limit = 2
+         grace = "90s"
+         ignore_warnings = false
+       }
+     }
+
+   } #influxdb
+
+
+    task "grafana" {
+      driver = "docker"
+
+      env {
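+        # serve Grafana under /performance/ behind the nginx reverse proxy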
+        GF_SERVER_DOMAIN = "${attr.unique.hostname}"
+        GF_SERVER_ROOT_URL = "%(protocol)s://%(domain)s/performance/"
+      }
+
+      config {
+        dns_servers = ["127.0.0.1"]
+        network_mode = "host"
+        image = "grafana/grafana"
+        volumes = ["/${meta.shared_storage}/grafana:/var/lib/grafana"]
+      }
+
+      resources {
+        cpu    = 1500
+        memory = 2560
+        network {
+          mbits = 10
+          port "grafana" {
+          static = 3000
+          }
+        }
+      }
+
+     service {
+       port = "grafana"
+       name = "grafana"
+       check {
+           name = "alive"
+           type     = "http"
+           path     = "/api/health"
+           interval = "10s"
+           timeout  = "1s"
+       }
+       check_restart {
+         limit = 2
+         grace = "90s"
+         ignore_warnings = false
+       }
+     }
+
+   } #grafana
+
+
+  }
+}
diff --git a/deploy/docker/cluster/jobs/asapo-receivers.nmd b/deploy/docker/cluster/jobs/asapo-receivers.nmd
new file mode 100644
index 000000000..1d3cf0cb2
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-receivers.nmd
@@ -0,0 +1,74 @@
+job "asapo-receivers" {
+  datacenters = ["dc1"]
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "receivers" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "fail"
+    }
+
+    task "receivers" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-receiver-dev:feature_virtualized-deployment.latest"
+	    force_pull = true
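+        # mount the generated config and the beamline data directory into the container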
+        volumes = ["local/config.json:/var/lib/receiver/config.json",
+                   "/bldocuments/support/asapo/data:/var/lib/receiver/data"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        memory = 40000
+        network {
+          port "recv" {}
+          port "recv_ds" {}
+        }
+      }
+
+      service {
+        name = "asapo-receiver"
+        port = "recv"
+        check {
+          name     = "asapo-receiver-alive"
+          type     = "script"
+          command  = "/bin/ps"
+          args     = ["-fC","receiver"]
+          interval = "10s"
+          timeout  = "2s"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "/usr/local/nomad_jobs/receiver.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+   } #task receivers
+  }
+}
+
diff --git a/deploy/docker/cluster/jobs/asapo-services.nmd b/deploy/docker/cluster/jobs/asapo-services.nmd
new file mode 100644
index 000000000..483adb57f
--- /dev/null
+++ b/deploy/docker/cluster/jobs/asapo-services.nmd
@@ -0,0 +1,128 @@
+job "asapo-services" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "asapo-authorizer" {
+    count = 1
+
+    task "asapo-authorizer" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-authorizer-dev:feature_virtualized-deployment.latest"
+	    force_pull = true
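+        # mount the generated config plus the beamtime/beamline mapping files from the host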
+        volumes = ["local/config.json:/var/lib/authorizer/config.json",
+                   "/bldocuments/support/asapo/beamtime_beamline_mapping.txt:/var/lib/authorizer/beamtime_beamline_mapping.txt",
+                   "/bldocuments/support/asapo/ip_beamtime_mapping:/var/lib/authorizer/ip_beamtime_mapping"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                fluentd-async-connect = true
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "authorizer" {
+            static = "5007"
+          }
+        }
+      }
+
+      service {
+        name = "asapo-authorizer"
+        port = "authorizer"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/health-check"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "/usr/local/nomad_jobs/authorizer.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+      template {
+        source        = "/usr/local/nomad_jobs/auth_secret.key"
+        destination   = "secrets/secret.key"
+        change_mode   = "restart"
+      }
+   }
+  } #authorizer
+  group "asapo-discovery" {
+    count = 1
+
+    task "asapo-discovery" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-discovery-dev:feature_virtualized-deployment.latest"
+	    force_pull = true
+        volumes = ["local/config.json:/var/lib/discovery/config.json"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                fluentd-async-connect = true
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "discovery" {
+            static = "5006"
+          }
+        }
+      }
+
+      service {
+        name = "asapo-discovery"
+        port = "discovery"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/receivers"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+
+      }
+
+      template {
+         source        = "/usr/local/nomad_jobs/discovery.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+   }
+  }
+}
diff --git a/deploy/docker/cluster/jobs/authorizer.json.tpl b/deploy/docker/cluster/jobs/authorizer.json.tpl
new file mode 100644
index 000000000..838627963
--- /dev/null
+++ b/deploy/docker/cluster/jobs/authorizer.json.tpl
@@ -0,0 +1,12 @@
+{
+  "Port": {{ env "NOMAD_PORT_authorizer" }},
+  "LogLevel":"debug",
+  "AlwaysAllowedBeamtimes":[{"BeamtimeId":"asapo_test","Beamline":"test"},
+  {"BeamtimeId":"asapo_test1","Beamline":"test1"},
+  {"BeamtimeId":"asapo_test2","Beamline":"test2"}],
+  "BeamtimeBeamlineMappingFile":"//var//lib//authorizer//beamtime_beamline_mapping.txt",
+  "IpBeamlineMappingFolder":"//var//lib//authorizer//ip_beamtime_mapping",
+  "SecretFile":"/secrets/secret.key"
+}
+
+
diff --git a/deploy/docker/cluster/jobs/broker.json.tpl b/deploy/docker/cluster/jobs/broker.json.tpl
new file mode 100644
index 000000000..9bc9f2edb
--- /dev/null
+++ b/deploy/docker/cluster/jobs/broker.json.tpl
@@ -0,0 +1,8 @@
+{
+  "BrokerDbAddress":"localhost:8400/mongo",
+  "MonitorDbAddress":"localhost:8400/influxdb",
+  "MonitorDbName": "asapo_brokers",
+  "port":{{ env "NOMAD_PORT_broker" }},
+  "LogLevel":"info",
+  "SecretFile":"/secrets/secret.key"
+}
diff --git a/deploy/docker/cluster/jobs/discovery.json.tpl b/deploy/docker/cluster/jobs/discovery.json.tpl
new file mode 100644
index 000000000..fdb277328
--- /dev/null
+++ b/deploy/docker/cluster/jobs/discovery.json.tpl
@@ -0,0 +1,10 @@
+{
+  "Mode": "consul",
+  "Receiver": {
+    "MaxConnections": 32
+  },
+  "Port": {{ env "NOMAD_PORT_discovery" }},
+  "LogLevel": "{{ keyOrDefault "log_level" "info" }}"
+}
+
+
diff --git a/deploy/docker/cluster/jobs/fluentd.conf b/deploy/docker/cluster/jobs/fluentd.conf
new file mode 100644
index 000000000..948c5109d
--- /dev/null
+++ b/deploy/docker/cluster/jobs/fluentd.conf
@@ -0,0 +1,55 @@
+<source>
+  @type forward
+  port 24224
+  source_hostname_key source_addr
+  bind 0.0.0.0
+</source>
+
+<source>
+  @type http
+  port 9880
+  bind 0.0.0.0
+  add_remote_addr true
+  format json
+  time_format %Y-%m-%d %H:%M:%S.%N
+</source>
+
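+# Docker's fluentd log driver wraps each record in a "log" field; parse it as JSON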
+<filter asapo.docker>
+  @type parser
+  key_name log
+  format json
+  time_format %Y-%m-%d %H:%M:%S.%N
+  reserve_data true
+</filter>
+
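+# keep only the host part of source_addr and drop Docker bookkeeping fields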
+<filter asapo.docker>
+  @type record_transformer
+  enable_ruby
+  remove_keys ["log","container_id","container_name"]
+  <record>
+    source_addr ${record["source_addr"].split('.')[0]}
+  </record>
+</filter>
+
+<match asapo.**>
+@type copy
+<store>
+  @type elasticsearch
+  hosts localhost:8400/elasticsearch
+  flush_interval 5s
+  logstash_format true
+  time_key_format %Y-%m-%dT%H:%M:%S.%N
+  time_key time
+  time_key_exclude_timestamp true
+  buffer_type memory
+  flush_interval 1s
+  </store>
+  <store>
+  @type file
+  flush_interval 1s
+  append true
+  buffer_type memory
+  path /shared/asapo-logs
+  </store>
+</match>
+
diff --git a/deploy/docker/cluster/jobs/kibana.yml b/deploy/docker/cluster/jobs/kibana.yml
new file mode 100644
index 000000000..10675706c
--- /dev/null
+++ b/deploy/docker/cluster/jobs/kibana.yml
@@ -0,0 +1,6 @@
+elasticsearch:
+  url: "http://localhost:8400/elasticsearch"
+server:
+  basePath:   "/logsview"
+  rewriteBasePath: true
+  host: "0.0.0.0"
diff --git a/deploy/docker/cluster/jobs/nginx.conf.tpl b/deploy/docker/cluster/jobs/nginx.conf.tpl
new file mode 100644
index 000000000..9c5f88b89
--- /dev/null
+++ b/deploy/docker/cluster/jobs/nginx.conf.tpl
@@ -0,0 +1,91 @@
+worker_processes  1;
+
+events {
+    worker_connections  1024;
+}
+
+http {
+#    include       mime.types;
+#    default_type  application/octet-stream;
+
+#    sendfile        on;
+#    tcp_nopush     on;
+
+#    keepalive_timeout  0;
+#    keepalive_timeout  65;
+
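+    # 127.0.0.1:8600 is the local Consul DNS endpoint; valid=1s keeps entries fresh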
+    resolver 127.0.0.1:8600 valid=1s;
+    server {
+          listen {{ env "NOMAD_PORT_nginx" }};
+          set $discovery_endpoint asapo-discovery.service.asapo;
+          set $authorizer_endpoint asapo-authorizer.service.asapo;
+          set $fluentd_endpoint fluentd.service.asapo;
+          set $kibana_endpoint kibana.service.asapo;
+          set $grafana_endpoint grafana.service.asapo;
+          set $mongo_endpoint mongo.service.asapo;
+          set $influxdb_endpoint influxdb.service.asapo;
+          set $elasticsearch_endpoint elasticsearch.service.asapo;
+
+          location /mongo/ {
+            rewrite ^/mongo(/.*) $1 break;
+            proxy_pass http://$mongo_endpoint:27017$uri$is_args$args;
+          }
+
+          location /influxdb/ {
+            rewrite ^/influxdb(/.*) $1 break;
+            proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
+          }
+
+          location /elasticsearch/ {
+            rewrite ^/elasticsearch(/.*) $1 break;
+            proxy_pass http://$elasticsearch_endpoint:9200$uri$is_args$args;
+          }
+
+          location /discovery/ {
+            rewrite ^/discovery(/.*) $1 break;
+            proxy_pass http://$discovery_endpoint:5006$uri$is_args$args;
+          }
+
+          location /logs/ {
+              rewrite ^/logs(/.*) $1 break;
+              proxy_pass http://$fluentd_endpoint:9880$uri$is_args$args;
+          }
+
+          location /logsview/ {
+            proxy_pass http://$kibana_endpoint:5601$uri$is_args$args;
+            proxy_set_header  X-Real-IP  $remote_addr;
+            proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header  Host $http_host;
+          }
+
+          location /performance/ {
+            rewrite ^/performance(/.*) $1 break;
+            proxy_pass http://$grafana_endpoint:3000$uri$is_args$args;
+          }
+
+          location /authorizer/ {
+             rewrite ^/authorizer(/.*) $1 break;
+             proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
+          }
+
+          location /nginx-health {
+            return 200 "healthy\n";
+          }
+    }
+}
+
+stream {
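+    # raw TCP proxy: forward local port 9881 to fluentd's forward-protocol port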
+    resolver 127.0.0.1:8600 valid=1s;
+
+    map $remote_addr $upstream {
+        default fluentd.service.asapo;
+    }
+
+
+    server {
+        listen     9881;
+        proxy_pass $upstream:24224;
+    }
+}
+
+
diff --git a/deploy/docker/cluster/jobs/receiver.json.tpl b/deploy/docker/cluster/jobs/receiver.json.tpl
new file mode 100644
index 000000000..4410725d8
--- /dev/null
+++ b/deploy/docker/cluster/jobs/receiver.json.tpl
@@ -0,0 +1,22 @@
+{
+  "MonitorDbAddress":"localhost:8400/influxdb",
+  "MonitorDbName": "asapo_receivers",
+  "BrokerDbAddress":"localhost:8400/mongo",
+  "AuthorizationServer": "localhost:8400/authorizer",
+  "AuthorizationInterval": 10000,
+  "ListenPort": {{ env "NOMAD_PORT_recv" }},
+  "DataServer": {
+    "NThreads": 2,
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+  },
+  "DataCache": {
+    "Use": true,
+    "SizeGB": 30,
+    "ReservedShare": 10
+  },
+  "Tag": "{{ env "NOMAD_ADDR_recv" }}",
+  "WriteToDisk":true,
+  "WriteToDb":true,
+  "LogLevel": "{{ keyOrDefault "receiver_log_level" "info" }}",
+  "RootFolder" : "/var/lib/receiver/data"
+}
diff --git a/deploy/docker/cluster/run.sh b/deploy/docker/cluster/run.sh
new file mode 100755
index 000000000..a704681f7
--- /dev/null
+++ b/deploy/docker/cluster/run.sh
@@ -0,0 +1 @@
+docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock -v $(pwd)/jobs:/usr/local/nomad_jobs --name asapo --net=host -v /tmp/nomad:/tmp/nomad -v /var/lib/docker:/var/lib/docker -d yakser/asapo-cluster
diff --git a/deploy/docker/cluster/supervisord.conf b/deploy/docker/cluster/supervisord.conf
new file mode 100644
index 000000000..cabe55263
--- /dev/null
+++ b/deploy/docker/cluster/supervisord.conf
@@ -0,0 +1,16 @@
+[supervisord]
+nodaemon = true
+logfile=/var/log/supervisord/supervisord.log    ; supervisord log file
+logfile_maxbytes=50MB                           ; maximum size of logfile before rotation
+logfile_backups=10                              ; number of backed up logfiles
+loglevel=info                                   ; info, debug, warn, trace
+childlogdir=/var/log/supervisord/               ; where child log files will live
+user=root
+
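+; both agents run in dev mode; -client/-bind 0.0.0.0 make them reachable beyond loopback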
+[program:consul]
+command=/bin/consul agent -dev -client 0.0.0.0
+#-config-dir=/etc/consul.d
+
+[program:nomad]
+command=/bin/nomad agent -data-dir=/tmp/nomad -dev -client -bind 0.0.0.0
+# -config=/etc/nomad.d
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index b38f7abe8..faafb60cc 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -23,7 +23,7 @@ Cleanup() {
     nomad stop broker
     nomad stop authorizer
     rm -rf out
-#    kill $producerid
+ #   kill $producerid
     echo "db.dropDatabase()" | mongo ${beamtime_id}_detector
     influx -execute "drop database ${monitor_database_name}"
 }
@@ -41,11 +41,11 @@ sleep 1
 
 #producer
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 0 4 0 100
+$1 localhost:8400 ${beamtime_id} 100 1000 4 0 100
 #producerid=`echo $!`
 
 
-#$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
-#cat out
-#cat out   | grep "Processed 1000 file(s)"
-#cat out | grep "Cannot get metadata"
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
+cat out
+cat out   | grep "Processed 1000 file(s)"
+cat out | grep "Cannot get metadata"
-- 
GitLab