From ed7c1f2a22a1e68af6e0476f57431399500be9fe Mon Sep 17 00:00:00 2001
From: Sergey Yakubov <sergey.yakubov@desy.de>
Date: Mon, 9 Sep 2019 15:13:27 +0200
Subject: [PATCH] discover mongodb via discovery service, not dns

---
 broker/src/asapo_broker/main/broker.go        |  2 +
 .../asapo_broker/server/process_request.go    |  4 +-
 .../src/asapo_broker/server/request_common.go |  2 +-
 broker/src/asapo_broker/server/server.go      | 47 ++++++++--
 .../asapo_broker/server/server_nottested.go   | 16 ++--
 broker/src/asapo_broker/server/server_test.go | 26 +++++-
 broker/src/asapo_broker/server/statistics.go  |  2 +-
 .../asapo_broker/server/statistics_writers.go |  4 +-
 common/go/src/asapo_common/utils/stucts.go    |  8 +-
 deploy/docker/cluster/Dockerfile              | 17 ++--
 deploy/docker/cluster/init_influxdb.sh        | 10 +++
 deploy/docker/cluster/jobs/asapo.tf           | 50 -----------
 deploy/docker/cluster/jobs/broker.json.tpl    |  8 --
 deploy/docker/cluster/run.sh                  | 19 +++-
 .../asapo-brokers.nmd.tpl}                    | 25 +++---
 .../{jobs => scripts}/asapo-logging.nmd       |  0
 .../asapo-mongo.nmd.tpl}                      |  9 +-
 .../{jobs => scripts}/asapo-nginx.nmd.tpl     |  2 +-
 .../asapo-perfmetrics.nmd.tpl}                | 28 +++---
 .../asapo-receivers.nmd.tpl}                  | 28 ++++--
 .../{jobs => scripts}/asapo-services.nmd.tpl  | 20 +++--
 .../{jobs => scripts}/asapo.auto.tfvars       |  3 +
 .../cluster/{jobs => scripts}/auth_secret.key |  0
 .../{jobs => scripts}/authorizer.json.tpl     |  0
 deploy/docker/cluster/scripts/broker.json.tpl |  9 ++
 .../{jobs => scripts}/discovery.json.tpl      |  0
 .../cluster/{jobs => scripts}/fluentd.conf    |  0
 .../cluster/{jobs => scripts}/kibana.yml      |  0
 .../cluster/{jobs => scripts}/nginx.conf.tpl  |  7 --
 deploy/docker/cluster/scripts/provider.tf     |  4 +
 .../{jobs => scripts}/receiver.json.tpl       |  9 +-
 deploy/docker/cluster/scripts/resources.tf    | 23 +++++
 deploy/docker/cluster/scripts/templates.tf    | 63 +++++++++++++
 deploy/docker/cluster/scripts/vars.tf         | 74 +++++++++++++++
 deploy/docker/cluster/supervisord.conf        |  2 +-
 deploy/docker/cluster/tf_run.sh               |  5 ++
 deploy/nomad_jobs/broker.json.tpl             |  7 +-
 deploy/nomad_jobs/nginx.conf.tpl              |  6 --
 deploy/nomad_jobs/receiver.json.tpl           |  7 +-
 .../request_handler/request_handler.go        |  1 +
 .../request_handler/request_handler_consul.go | 39 +++++++-
 .../request_handler_consul_test.go            | 51 +++++++++++
 .../request_handler/request_handler_static.go |  8 ++
 .../request_handler_static_test.go            |  9 +-
 .../asapo_discovery/server/get_receivers.go   | 11 +++
 .../src/asapo_discovery/server/listroutes.go  |  6 ++
 .../src/asapo_discovery/server/routes_test.go | 10 ++-
 .../asapo_discovery/server/settings_test.go   |  7 ++
 receiver/src/receiver_config.cpp              |  7 +-
 receiver/src/receiver_config.h                |  7 +-
 receiver/src/receiver_error.h                 |  9 +-
 receiver/src/request_handler_db.cpp           | 38 +++++++-
 receiver/src/request_handler_db.h             |  7 +-
 .../src/request_handler_db_meta_write.cpp     |  2 +-
 receiver/src/request_handler_db_write.cpp     |  4 +-
 receiver/src/statistics_sender_influx_db.cpp  |  8 +-
 receiver/unittests/mock_receiver_config.cpp   |  8 +-
 receiver/unittests/test_config.cpp            | 24 ++---
 .../unittests/test_request_handler_db.cpp     | 89 ++++++++++++++++++-
 .../test_request_handler_db_meta_writer.cpp   |  4 +-
 .../test_request_handler_db_writer.cpp        | 15 +++-
 .../test_statistics_sender_fluentd.cpp        |  4 +-
 .../test_statistics_sender_influx_db.cpp      | 10 +--
 .../broker_mongo_restart/check_linux.sh       |  2 +-
 .../receiver_mongo_restart/check_linux.sh     |  2 +-
 tests/automatic/settings/broker_settings.json |  6 +-
 .../settings/broker_settings.json.tpl         |  7 +-
 .../settings/discovery_fixed_settings.json    | 10 ---
 .../settings/discovery_settings.json.tpl      |  3 +
 tests/automatic/settings/nginx.conf.tpl       |  6 --
 .../settings/receiver.json.tpl.lin.in         |  7 +-
 .../settings/receiver.json.tpl.win.in         |  7 +-
 tests/manual/performance_broker/settings.json |  6 +-
 tests/manual/performance_broker/test.sh       |  2 +-
 .../performance_full_chain_simple/broker.json |  6 +-
 .../receiver.json                             |  7 +-
 .../performance_full_chain_simple/test.sh     |  4 +-
 .../receiver.json                             |  7 +-
 .../performance_producer_receiver/test.sh     |  2 +-
 .../python_tests/producer/receiver.json.tpl   |  7 +-
 .../manual/receiver_debug_local/receiver.json |  7 +-
 81 files changed, 757 insertions(+), 265 deletions(-)
 create mode 100755 deploy/docker/cluster/init_influxdb.sh
 delete mode 100644 deploy/docker/cluster/jobs/asapo.tf
 delete mode 100644 deploy/docker/cluster/jobs/broker.json.tpl
 rename deploy/docker/cluster/{jobs/asapo-brokers.nmd => scripts/asapo-brokers.nmd.tpl} (69%)
 rename deploy/docker/cluster/{jobs => scripts}/asapo-logging.nmd (100%)
 rename deploy/docker/cluster/{jobs/asapo-mongo.nmd => scripts/asapo-mongo.nmd.tpl} (83%)
 rename deploy/docker/cluster/{jobs => scripts}/asapo-nginx.nmd.tpl (94%)
 rename deploy/docker/cluster/{jobs/asapo-perfmetrics.nmd => scripts/asapo-perfmetrics.nmd.tpl} (74%)
 rename deploy/docker/cluster/{jobs/asapo-receivers.nmd => scripts/asapo-receivers.nmd.tpl} (69%)
 rename deploy/docker/cluster/{jobs => scripts}/asapo-services.nmd.tpl (86%)
 rename deploy/docker/cluster/{jobs => scripts}/asapo.auto.tfvars (97%)
 rename deploy/docker/cluster/{jobs => scripts}/auth_secret.key (100%)
 rename deploy/docker/cluster/{jobs => scripts}/authorizer.json.tpl (100%)
 create mode 100644 deploy/docker/cluster/scripts/broker.json.tpl
 rename deploy/docker/cluster/{jobs => scripts}/discovery.json.tpl (100%)
 rename deploy/docker/cluster/{jobs => scripts}/fluentd.conf (100%)
 rename deploy/docker/cluster/{jobs => scripts}/kibana.yml (100%)
 rename deploy/docker/cluster/{jobs => scripts}/nginx.conf.tpl (92%)
 create mode 100644 deploy/docker/cluster/scripts/provider.tf
 rename deploy/docker/cluster/{jobs => scripts}/receiver.json.tpl (66%)
 create mode 100644 deploy/docker/cluster/scripts/resources.tf
 create mode 100644 deploy/docker/cluster/scripts/templates.tf
 create mode 100644 deploy/docker/cluster/scripts/vars.tf
 create mode 100755 deploy/docker/cluster/tf_run.sh
 delete mode 100644 tests/automatic/settings/discovery_fixed_settings.json

diff --git a/broker/src/asapo_broker/main/broker.go b/broker/src/asapo_broker/main/broker.go
index f9a41942c..202022471 100644
--- a/broker/src/asapo_broker/main/broker.go
+++ b/broker/src/asapo_broker/main/broker.go
@@ -39,6 +39,8 @@ func main() {
 
 	log.SetLevel(logLevel)
 
+	server.CreateDiscoveryService()
+
 	err = server.InitDB(NewDefaultDatabase())
 	if err != nil {
 		log.Fatal(err.Error())
diff --git a/broker/src/asapo_broker/server/process_request.go b/broker/src/asapo_broker/server/process_request.go
index 39d6bef85..27c23cec4 100644
--- a/broker/src/asapo_broker/server/process_request.go
+++ b/broker/src/asapo_broker/server/process_request.go
@@ -30,7 +30,7 @@ func checkGroupID(w http.ResponseWriter, needGroupID bool, group_id string, db_n
 	}
 	if _, err := xid.FromString(group_id); err != nil {
 		err_str := "wrong groupid " + group_id
-		log_str := "processing get " + op + " request in " + db_name + " at " + settings.BrokerDbAddress + ": " + err_str
+		log_str := "processing get " + op + " request in " + db_name + " at " + settings.DatabaseServer + ": " + err_str
 		logger.Error(log_str)
 		fmt.Println(log_str)
 		w.WriteHeader(http.StatusBadRequest)
@@ -90,7 +90,7 @@ func processRequestInDb(db_name string, group_id string, op string, extra_param
 	defer db_new.Close()
 	statistics.IncreaseCounter()
 	answer, err := db_new.ProcessRequest(db_name, group_id, op, extra_param)
-	log_str := "processing request " + op + " in " + db_name + " at " + settings.BrokerDbAddress
+	log_str := "processing request " + op + " in " + db_name + " at " + settings.DatabaseServer
 	if err != nil {
 		return returnError(err, log_str)
 	}
diff --git a/broker/src/asapo_broker/server/request_common.go b/broker/src/asapo_broker/server/request_common.go
index 8cda3b996..d53a67d59 100644
--- a/broker/src/asapo_broker/server/request_common.go
+++ b/broker/src/asapo_broker/server/request_common.go
@@ -7,7 +7,7 @@ import (
 )
 
 func writeAuthAnswer(w http.ResponseWriter, requestName string, db_name string, err string) {
-	log_str := "processing " + requestName + " request in " + db_name + " at " + settings.BrokerDbAddress
+	log_str := "processing " + requestName + " request in " + db_name + " at " + settings.DatabaseServer
 	logger.Error(log_str + " - " + err)
 	w.WriteHeader(http.StatusUnauthorized)
 	w.Write([]byte(err))
diff --git a/broker/src/asapo_broker/server/server.go b/broker/src/asapo_broker/server/server.go
index 39213b7b1..1e9b930b8 100644
--- a/broker/src/asapo_broker/server/server.go
+++ b/broker/src/asapo_broker/server/server.go
@@ -2,15 +2,20 @@ package server
 
 import (
 	"asapo_broker/database"
+	log "asapo_common/logger"
 	"asapo_common/utils"
+	"errors"
+	"io/ioutil"
+	"net/http"
 )
 
 var db database.Agent
 
 type serverSettings struct {
-	BrokerDbAddress  string
-	MonitorDbAddress string
-	MonitorDbName    string
-	SecretFile       string
-	Port             int
-	LogLevel         string
+	DiscoveryServer     string
+	DatabaseServer      string
+	PerformanceDbServer string
+	PerformanceDbName   string
+	SecretFile          string
+	Port                int
+	LogLevel            string
@@ -20,12 +25,44 @@ var settings serverSettings
 var statistics serverStatistics
 var auth utils.Auth
 
-func InitDB(dbAgent database.Agent) error {
+type discoveryAPI struct {
+	Client  *http.Client
+	baseURL string
+}
+
+var discoveryService discoveryAPI
+
+func (api *discoveryAPI) GetMongoDbAddress() (string, error) {
+	resp, err := api.Client.Get(api.baseURL + "/mongo")
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	return string(body), err
+}
+
+func InitDB(dbAgent database.Agent) (err error) {
 	db = dbAgent
-	err := db.Connect(settings.BrokerDbAddress)
+	if settings.DatabaseServer == "auto" {
+		settings.DatabaseServer, err = discoveryService.GetMongoDbAddress()
+		if err != nil {
+			return err
+		}
+		if settings.DatabaseServer == "" {
+			return errors.New("no database servers found")
+		}
+		log.Debug("Got mongodb server: " + settings.DatabaseServer)
+	}
+
+	err = db.Connect(settings.DatabaseServer)
 	return err
 }
 
+func CreateDiscoveryService() {
+	discoveryService = discoveryAPI{&http.Client{}, "http://" + settings.DiscoveryServer}
+}
+
 func CleanupDB() {
 	if db != nil {
 		db.Close()
diff --git a/broker/src/asapo_broker/server/server_nottested.go b/broker/src/asapo_broker/server/server_nottested.go
index 943277aa7..990cf1409 100644
--- a/broker/src/asapo_broker/server/server_nottested.go
+++ b/broker/src/asapo_broker/server/server_nottested.go
@@ -38,20 +38,24 @@ func ReadConfig(fname string) (log.Level, error) {
 		return log.FatalLevel, err
 	}
 
-	if settings.BrokerDbAddress == "" {
-		return log.FatalLevel, errors.New("BrokerDbAddress not set")
+	if settings.DatabaseServer == "" {
+		return log.FatalLevel, errors.New("DatabaseServer not set")
 	}
 
-	if settings.MonitorDbAddress == "" {
-		return log.FatalLevel, errors.New("MonitorDbAddress not set")
+	if settings.PerformanceDbServer == "" {
+		return log.FatalLevel, errors.New("PerformanceDbServer not set")
+	}
+
+	if settings.DatabaseServer == "auto" && settings.DiscoveryServer == "" {
+		return log.FatalLevel, errors.New("DiscoveryServer not set for auto DatabaseServer")
 	}
 
 	if settings.Port == 0 {
 		return log.FatalLevel, errors.New("Server port not set")
 	}
 
-	if settings.MonitorDbName == "" {
-		return log.FatalLevel, errors.New("MonitorDbName not set")
+	if settings.PerformanceDbName == "" {
+		return log.FatalLevel, errors.New("PerformanceDbName not set")
 	}
 
 	if settings.SecretFile == "" {
diff --git a/broker/src/asapo_broker/server/server_test.go b/broker/src/asapo_broker/server/server_test.go
index 6654b6715..6f35c3a3f 100644
--- a/broker/src/asapo_broker/server/server_test.go
+++ b/broker/src/asapo_broker/server/server_test.go
@@ -6,6 +6,8 @@ import (
 	"errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
+	"net/http"
+	"net/http/httptest"
 	"testing"
 )
 
@@ -37,7 +39,7 @@ func TestInitDBWithWrongAddress(t *testing.T) {
 
 	mock_db.ExpectedCalls = nil
 
-	settings.BrokerDbAddress = "0.0.0.0:0000"
+	settings.DatabaseServer = "0.0.0.0:0000"
 
 	for _, test := range initDBTests {
 		mock_db.On("Connect", "0.0.0.0:0000").Return(test.answer)
@@ -50,6 +52,28 @@ func TestInitDBWithWrongAddress(t *testing.T) {
 	db = nil
 }
 
+func TestInitDBWithAutoAddress(t *testing.T) {
+	mock_db := setup()
+
+	mock_db.ExpectedCalls = nil
+
+	settings.DatabaseServer = "auto"
+	mock_server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		assert.Equal(t, "/mongo", req.URL.String(), "request string")
+		rw.Write([]byte("0.0.0.0:0000"))
+	}))
+	defer mock_server.Close()
+
+	discoveryService = discoveryAPI{mock_server.Client(), mock_server.URL}
+
+	mock_db.On("Connect", "0.0.0.0:0000").Return(nil)
+	err := InitDB(mock_db)
+
+	assert.Equal(t, nil, err, "auto connect ok")
+	assertExpectations(t, mock_db)
+	db = nil
+}
+
 func TestCleanupDBWithoutInit(t *testing.T) {
 	mock_db := setup()
 
diff --git a/broker/src/asapo_broker/server/statistics.go b/broker/src/asapo_broker/server/statistics.go
index 97c6896f7..273e24919 100644
--- a/broker/src/asapo_broker/server/statistics.go
+++ b/broker/src/asapo_broker/server/statistics.go
@@ -47,7 +47,7 @@ func (st *serverStatistics) WriteStatistic() (err error) {
 func (st *serverStatistics) Monitor() {
 	for {
 		time.Sleep(1000 * time.Millisecond)
-		logstr := "sending statistics to " + settings.MonitorDbAddress + ", dbname: " + settings.MonitorDbName
+		logstr := "sending statistics to " + settings.PerformanceDbServer + ", dbname: " + settings.PerformanceDbName
 		if err := st.WriteStatistic(); err != nil {
 			log.Error(logstr + " - " + err.Error())
 		} else {
diff --git a/broker/src/asapo_broker/server/statistics_writers.go b/broker/src/asapo_broker/server/statistics_writers.go
index 4ac374603..cdad6dcb3 100644
--- a/broker/src/asapo_broker/server/statistics_writers.go
+++ b/broker/src/asapo_broker/server/statistics_writers.go
@@ -21,7 +21,7 @@ type StatisticInfluxDbWriter struct {
 
 func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error {
 	c, err := client.NewHTTPClient(client.HTTPConfig{
-		Addr: "http://"+ settings.MonitorDbAddress,
+		Addr: "http://"+ settings.PerformanceDbServer,
 	})
 	if err != nil {
 		return err
@@ -29,7 +29,7 @@ func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error
 	defer c.Close()
 
 	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Database:  settings.MonitorDbName,
+		Database:  settings.PerformanceDbName,
 	})
 
 	tags := map[string]string{"Group ID": "0"}
diff --git a/common/go/src/asapo_common/utils/stucts.go b/common/go/src/asapo_common/utils/stucts.go
index f9bf6c316..01b9bb636 100644
--- a/common/go/src/asapo_common/utils/stucts.go
+++ b/common/go/src/asapo_common/utils/stucts.go
@@ -11,10 +11,14 @@ type BrokerInfo struct {
 	StaticEndpoint		 string
 }
 
+type MongoInfo struct {
+	StaticEndpoint		 string
+}
 
 type Settings struct {
 	Receiver 		ReceiverInfo
 	Broker 		    BrokerInfo
+	Mongo 			MongoInfo
 	ConsulEndpoints []string
 	Mode			string
 	Port            int
@@ -23,8 +27,8 @@ type Settings struct {
 
 func (settings *Settings) Validate() error {
 	if settings.Mode != "consul"{
-		if len(settings.Receiver.StaticEndpoints) == 0 || len(settings.Broker.StaticEndpoint) == 0 {
-		return errors.New("receiver or broker endpoints not set")
+		if len(settings.Receiver.StaticEndpoints) == 0 || len(settings.Broker.StaticEndpoint) == 0 || len(settings.Mongo.StaticEndpoint) == 0 {
+			return errors.New("static endpoints not set")
 		}
 	}
 
diff --git a/deploy/docker/cluster/Dockerfile b/deploy/docker/cluster/Dockerfile
index 94c777c5b..c0182e934 100644
--- a/deploy/docker/cluster/Dockerfile
+++ b/deploy/docker/cluster/Dockerfile
@@ -2,12 +2,6 @@ FROM ubuntu:18.04
 
 MAINTAINER DESY IT
 
-ENV CONSUL_VERSION=1.6.0
-ENV NOMAD_VERSION=0.9.5
-ENV TERRAFORM_VERSION=0.12.7
-
-ENV HASHICORP_RELEASES=https://releases.hashicorp.com
-
 RUN apt-get update && apt-get install -y supervisor apt-transport-https \
         ca-certificates \
         curl \
@@ -24,6 +18,11 @@ RUN add-apt-repository \
 RUN apt-get update && apt-get install -y docker-ce-cli wget unzip
 
 
+ENV CONSUL_VERSION=1.6.0
+ENV NOMAD_VERSION=0.9.5
+ENV TERRAFORM_VERSION=0.12.8
+ENV HASHICORP_RELEASES=https://releases.hashicorp.com
+
 RUN set -eux && \
     mkdir -p /tmp/build && \
     cd /tmp/build && \
@@ -37,10 +36,14 @@ RUN set -eux && \
     rm -rf /tmp/build && \
 # tiny smoke test to ensure the binary we downloaded runs
     consul version && \
-    nomad version
+    nomad version && \
+    terraform version
 
 ADD supervisord.conf /etc/
 
 RUN mkdir -p /var/log/supervisord/
 
+COPY scripts/ /var/run/asapo/
+
+
 ENTRYPOINT ["supervisord", "--configuration", "/etc/supervisord.conf"]
diff --git a/deploy/docker/cluster/init_influxdb.sh b/deploy/docker/cluster/init_influxdb.sh
new file mode 100755
index 000000000..bda6960f2
--- /dev/null
+++ b/deploy/docker/cluster/init_influxdb.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+influx=`dig +short @127.0.0.1 -p 8600 influxdb.service.asapo | head -1`
+
+databases="asapo_receivers asapo_brokers"
+
+for database in $databases
+do
+    curl -i -XPOST http://${influx}:8086/query --data-urlencode "q=CREATE DATABASE $database"
+done
diff --git a/deploy/docker/cluster/jobs/asapo.tf b/deploy/docker/cluster/jobs/asapo.tf
deleted file mode 100644
index 8e0496b35..000000000
--- a/deploy/docker/cluster/jobs/asapo.tf
+++ /dev/null
@@ -1,50 +0,0 @@
-provider "nomad" {
-  address = "http://localhost:4646"
-}
-
-variable "fluentd_logs" {
- default = true
-}
-
-variable "nginx_version" {
-  default = "latest"
-}
-
-variable "asapo_imagename_suffix" {
-  default = ""
-}
-
-variable "asapo_image_tag" {
-  default = "latest"
-}
-
-variable "shared_dir" {
-  default = "/tmp"
-}
-
-data "template_file" "nginx" {
-  template = "${file("./asapo-nginx.nmd.tpl")}"
-  vars = {
-    nginx_version = "${var.nginx_version}"
-  }
-}
-
-data "template_file" "asapo_services" {
-  template = "${file("./asapo-services.nmd.tpl")}"
-  vars = {
-    image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
-    shared_dir = "${var.shared_dir}"
-    fluentd_logs = "${var.fluentd_logs}"
-  }
-}
-
-resource "nomad_job" "asapo-nginx" {
-  jobspec = "${data.template_file.nginx.rendered}"
-}
-
-resource "nomad_job" "asapo-services" {
-  jobspec = "${data.template_file.asapo_services.rendered}"
-}
-
-
-
diff --git a/deploy/docker/cluster/jobs/broker.json.tpl b/deploy/docker/cluster/jobs/broker.json.tpl
deleted file mode 100644
index 9bc9f2edb..000000000
--- a/deploy/docker/cluster/jobs/broker.json.tpl
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "BrokerDbAddress":"localhost:8400/mongo",
-  "MonitorDbAddress":"localhost:8400/influxdb",
-  "MonitorDbName": "asapo_brokers",
-  "port":{{ env "NOMAD_PORT_broker" }},
-  "LogLevel":"info",
-  "SecretFile":"/secrets/secret.key"
-}
diff --git a/deploy/docker/cluster/run.sh b/deploy/docker/cluster/run.sh
index 64d07fb89..f41fc9497 100755
--- a/deploy/docker/cluster/run.sh
+++ b/deploy/docker/cluster/run.sh
@@ -1,2 +1,19 @@
-docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`/jobs:/usr/local/nomad_jobs --name asapo --net=host -v /tmp/nomad:/tmp/nomad -v /var/lib/docker:/var/lib/docker -d yakser/asapo-cluster
+#!/usr/bin/env bash
+
+NOMAD_ALLOC_HOST_SHARED=/tmp/asapo/container_host_shared/nomad_alloc
+SERVICE_DATA_CLUSTER_SHARED=/tmp/asapo/asapo_cluster_shared/service_data
+DATA_GLOBAL_SHARED=/tmp/asapo/global_shared/data
+
+mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
+chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
+
+docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
+ 	-v /var/lib/docker:/var/lib/docker \
+	-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
+	-v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
+	-v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
+	-e NOMAD_ALLOC=$NOMAD_ALLOC_HOST_SHARED \
+	-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
+	-e TF_VAR_data_dir=$DATA_GLOBAL_SHARED \
+ 	--name asapo --net=host -d yakser/asapo-cluster
 
diff --git a/deploy/docker/cluster/jobs/asapo-brokers.nmd b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
similarity index 69%
rename from deploy/docker/cluster/jobs/asapo-brokers.nmd
rename to deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
index eacb8592c..6de159576 100644
--- a/deploy/docker/cluster/jobs/asapo-brokers.nmd
+++ b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
@@ -22,18 +22,19 @@ job "asapo-brokers" {
       driver = "docker"
       config {
         network_mode = "host"
-        dns_servers = ["127.0.0.1"]
-        image = "yakser/asapo-broker-dev:feature_virtualized-deployment.latest"
+        image = "yakser/asapo-broker${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/broker/config.json"]
-        logging {
-            type = "fluentd"
-            config {
-                fluentd-address = "localhost:9881"
-                fluentd-async-connect = true
-                tag = "asapo.docker"
-            }
+        %{ if fluentd_logs }
+          logging {
+          type = "fluentd"
+          config {
+            fluentd-address = "localhost:9881"
+            fluentd-async-connect = true
+            tag = "asapo.docker"
+          }
         }
+        %{endif}
       }
 
       resources {
@@ -60,14 +61,14 @@ job "asapo-brokers" {
       }
 
       template {
-         source        = "/usr/local/nomad_jobs/broker.json.tpl"
+         source        = "${scripts_dir}/broker.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
 
       template {
-        source        = "/usr/local/nomad_jobs/auth_secret.key"
-        destination   = "secrets/secret.key"
+        source        = "${scripts_dir}/auth_secret.key"
+        destination   = "local/secret.key"
         change_mode   = "restart"
       }
    } #task brokers
diff --git a/deploy/docker/cluster/jobs/asapo-logging.nmd b/deploy/docker/cluster/scripts/asapo-logging.nmd
similarity index 100%
rename from deploy/docker/cluster/jobs/asapo-logging.nmd
rename to deploy/docker/cluster/scripts/asapo-logging.nmd
diff --git a/deploy/docker/cluster/jobs/asapo-mongo.nmd b/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
similarity index 83%
rename from deploy/docker/cluster/jobs/asapo-mongo.nmd
rename to deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
index be05168e3..444afddb2 100644
--- a/deploy/docker/cluster/jobs/asapo-mongo.nmd
+++ b/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
@@ -23,16 +23,15 @@ job "asapo-mongo" {
 
       config {
         network_mode = "host"
-        image = "mongo:4.0.0"
-        volumes = ["/${meta.shared_storage}/mongodb:/data/db"]
+        image = "mongo:${mongo_version}"
+        volumes = ["/${service_dir}/mongodb:/data/db"]
       }
 
       resources {
-        cpu    = 1500
-        memory = 12560
+        memory = "${mongo_total_memory_size}"
         network {
           port "mongo" {
-          static = 27017
+          static = "${mongo_port}"
           }
         }
       }
diff --git a/deploy/docker/cluster/jobs/asapo-nginx.nmd.tpl b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
similarity index 94%
rename from deploy/docker/cluster/jobs/asapo-nginx.nmd.tpl
rename to deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
index 79c972024..b48075b7d 100644
--- a/deploy/docker/cluster/jobs/asapo-nginx.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
@@ -59,7 +59,7 @@ job "asapo-nginx" {
       }
 
       template {
-         source        = "/usr/local/nomad_jobs/nginx.conf.tpl"
+         source        = "${scripts_dir}/nginx.conf.tpl"
          destination   = "local/nginx.conf"
          change_mode   = "restart"
       }
diff --git a/deploy/docker/cluster/jobs/asapo-perfmetrics.nmd b/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
similarity index 74%
rename from deploy/docker/cluster/jobs/asapo-perfmetrics.nmd
rename to deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
index bb8db1b2b..b9f97011b 100644
--- a/deploy/docker/cluster/jobs/asapo-perfmetrics.nmd
+++ b/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
@@ -21,19 +21,20 @@ job "asapo-perfmetrics" {
       driver = "docker"
 
       config {
-        dns_servers = ["127.0.0.1"]
         network_mode = "host"
-        image = "influxdb"
-        volumes = ["/${meta.shared_storage}/influxdb:/var/lib/influxdb"]
+        image = "influxdb:${influxdb_version}"
+        volumes = ["/${service_dir}/influxdb:/var/lib/influxdb"]
+      }
+
+      env {
+        PRE_CREATE_DB = "asapo_receivers;asapo_brokers"
       }
 
       resources {
-        cpu    = 1500
-        memory = 32000
+        memory = "${influxdb_total_memory_size}"
         network {
-          mbits = 10
           port "influxdb" {
-          static = 8086
+          static = "${influxdb_port}"
           }
         }
       }
@@ -62,24 +63,21 @@ job "asapo-perfmetrics" {
       driver = "docker"
 
       env {
-        GF_SERVER_DOMAIN = "${attr.unique.hostname}"
+        GF_SERVER_DOMAIN = "$${attr.unique.hostname}"
         GF_SERVER_ROOT_URL = "%(protocol)s://%(domain)s/performance/"
       }
 
       config {
-        dns_servers = ["127.0.0.1"]
         network_mode = "host"
-        image = "grafana/grafana"
-        volumes = ["/${meta.shared_storage}/grafana:/var/lib/grafana"]
+        image = "grafana/grafana:${grafana_version}"
+        volumes = ["/${service_dir}/grafana:/var/lib/grafana"]
       }
 
       resources {
-        cpu    = 1500
-        memory = 2560
+        memory = "${grafana_total_memory_size}"
         network {
-          mbits = 10
           port "grafana" {
-          static = 3000
+          static = "${grafana_port}"
           }
         }
       }
diff --git a/deploy/docker/cluster/jobs/asapo-receivers.nmd b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
similarity index 69%
rename from deploy/docker/cluster/jobs/asapo-receivers.nmd
rename to deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
index 1d3cf0cb2..83b5aeb60 100644
--- a/deploy/docker/cluster/jobs/asapo-receivers.nmd
+++ b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
@@ -24,17 +24,20 @@ job "asapo-receivers" {
       config {
         network_mode = "host"
         dns_servers = ["127.0.0.1"]
-        image = "yakser/asapo-receiver-dev:feature_virtualized-deployment.latest"
+        image = "yakser/asapo-receiver${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/receiver/config.json",
-                   "/bldocuments/support/asapo/data:/var/lib/receiver/data"]
+                   "${data_dir}:/var/lib/receiver/data"]
+        %{ if fluentd_logs }
         logging {
-            type = "fluentd"
-            config {
-                fluentd-address = "localhost:9881"
-                tag = "asapo.docker"
-            }
+        type = "fluentd"
+        config {
+        fluentd-address = "localhost:9881"
+        fluentd-async-connect = true
+        tag = "asapo.docker"
         }
+        }
+        %{endif}
       }
 
       resources {
@@ -42,9 +45,11 @@ job "asapo-receivers" {
           port "recv" {}
           port "recv_ds" {}
         }
-          memory = 40000
+          memory = "${receiver_total_memory_size}"
       }
 
+
+
       service {
         name = "asapo-receiver"
         port = "recv"
@@ -63,8 +68,13 @@ job "asapo-receivers" {
         }
       }
 
+      meta {
+        receiver_dataserver_cache_size = "${receiver_dataserver_cache_size}"
+      }
+
+
       template {
-         source        = "/usr/local/nomad_jobs/receiver.json.tpl"
+         source        = "${scripts_dir}/receiver.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
diff --git a/deploy/docker/cluster/jobs/asapo-services.nmd.tpl b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl
similarity index 86%
rename from deploy/docker/cluster/jobs/asapo-services.nmd.tpl
rename to deploy/docker/cluster/scripts/asapo-services.nmd.tpl
index 38056a13e..3922fb8a5 100644
--- a/deploy/docker/cluster/jobs/asapo-services.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl
@@ -56,12 +56,12 @@ job "asapo-services" {
       }
 
       template {
-         source        = "/usr/local/nomad_jobs/authorizer.json.tpl"
+         source        = "${scripts_dir}/authorizer.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
       template {
-        source        = "/usr/local/nomad_jobs/auth_secret.key"
+        source        = "${scripts_dir}/auth_secret.key"
         destination   = "local/secret.key"
         change_mode   = "restart"
       }
@@ -79,14 +79,16 @@ job "asapo-services" {
         image = "yakser/asapo-discovery${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/discovery/config.json"]
+        %{ if fluentd_logs }
         logging {
-            type = "fluentd"
-            config {
-                fluentd-address = "localhost:9881"
-                fluentd-async-connect = true
-                tag = "asapo.docker"
-            }
+        type = "fluentd"
+        config {
+        fluentd-address = "localhost:9881"
+        fluentd-async-connect = true
+        tag = "asapo.docker"
+        }
         }
+        %{endif}
       }
 
       resources {
@@ -119,7 +121,7 @@ job "asapo-services" {
       }
 
       template {
-         source        = "/usr/local/nomad_jobs/discovery.json.tpl"
+         source        = "${scripts_dir}/discovery.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
diff --git a/deploy/docker/cluster/jobs/asapo.auto.tfvars b/deploy/docker/cluster/scripts/asapo.auto.tfvars
similarity index 97%
rename from deploy/docker/cluster/jobs/asapo.auto.tfvars
rename to deploy/docker/cluster/scripts/asapo.auto.tfvars
index 060182276..62a3bf074 100644
--- a/deploy/docker/cluster/jobs/asapo.auto.tfvars
+++ b/deploy/docker/cluster/scripts/asapo.auto.tfvars
@@ -1,4 +1,7 @@
 nginx_version = "1.14"
+
+
+
 asapo_imagename_suffix="-dev"
 asapo_image_tag = "feature_virtualized-deployment.latest"
 
diff --git a/deploy/docker/cluster/jobs/auth_secret.key b/deploy/docker/cluster/scripts/auth_secret.key
similarity index 100%
rename from deploy/docker/cluster/jobs/auth_secret.key
rename to deploy/docker/cluster/scripts/auth_secret.key
diff --git a/deploy/docker/cluster/jobs/authorizer.json.tpl b/deploy/docker/cluster/scripts/authorizer.json.tpl
similarity index 100%
rename from deploy/docker/cluster/jobs/authorizer.json.tpl
rename to deploy/docker/cluster/scripts/authorizer.json.tpl
diff --git a/deploy/docker/cluster/scripts/broker.json.tpl b/deploy/docker/cluster/scripts/broker.json.tpl
new file mode 100644
index 000000000..f924faa3a
--- /dev/null
+++ b/deploy/docker/cluster/scripts/broker.json.tpl
@@ -0,0 +1,9 @@
+{
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
+  "PerformanceDbServer":"localhost:8400/influxdb",
+  "PerformanceDbName": "asapo_brokers",
+  "port":{{ env "NOMAD_PORT_broker" }},
+  "LogLevel":"info",
+  "SecretFile":"/local/secret.key"
+}
diff --git a/deploy/docker/cluster/jobs/discovery.json.tpl b/deploy/docker/cluster/scripts/discovery.json.tpl
similarity index 100%
rename from deploy/docker/cluster/jobs/discovery.json.tpl
rename to deploy/docker/cluster/scripts/discovery.json.tpl
diff --git a/deploy/docker/cluster/jobs/fluentd.conf b/deploy/docker/cluster/scripts/fluentd.conf
similarity index 100%
rename from deploy/docker/cluster/jobs/fluentd.conf
rename to deploy/docker/cluster/scripts/fluentd.conf
diff --git a/deploy/docker/cluster/jobs/kibana.yml b/deploy/docker/cluster/scripts/kibana.yml
similarity index 100%
rename from deploy/docker/cluster/jobs/kibana.yml
rename to deploy/docker/cluster/scripts/kibana.yml
diff --git a/deploy/docker/cluster/jobs/nginx.conf.tpl b/deploy/docker/cluster/scripts/nginx.conf.tpl
similarity index 92%
rename from deploy/docker/cluster/jobs/nginx.conf.tpl
rename to deploy/docker/cluster/scripts/nginx.conf.tpl
index 9c5f88b89..72a7cefa1 100644
--- a/deploy/docker/cluster/jobs/nginx.conf.tpl
+++ b/deploy/docker/cluster/scripts/nginx.conf.tpl
@@ -22,15 +22,9 @@ http {
           set $fluentd_endpoint fluentd.service.asapo;
           set $kibana_endpoint kibana.service.asapo;
           set $grafana_endpoint grafana.service.asapo;
-          set $mongo_endpoint mongo.service.asapo;
           set $influxdb_endpoint influxdb.service.asapo;
           set $elasticsearch_endpoint elasticsearch.service.asapo;
 
-   		  location /mongo/ {
-            rewrite ^/mongo(/.*) $1 break;
-            proxy_pass http://$mongo_endpoint:27017$uri$is_args$args;
-          }
-
    		  location /influxdb/ {
             rewrite ^/influxdb(/.*) $1 break;
             proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
@@ -81,7 +75,6 @@ stream {
         default fluentd.service.asapo;
     }
 
-
     server {
         listen     9881;
         proxy_pass $upstream:24224;
diff --git a/deploy/docker/cluster/scripts/provider.tf b/deploy/docker/cluster/scripts/provider.tf
new file mode 100644
index 000000000..19058d847
--- /dev/null
+++ b/deploy/docker/cluster/scripts/provider.tf
@@ -0,0 +1,4 @@
+provider "nomad" {
+  address = "http://localhost:4646"
+}
+
diff --git a/deploy/docker/cluster/jobs/receiver.json.tpl b/deploy/docker/cluster/scripts/receiver.json.tpl
similarity index 66%
rename from deploy/docker/cluster/jobs/receiver.json.tpl
rename to deploy/docker/cluster/scripts/receiver.json.tpl
index 4410725d8..b1948f8b1 100644
--- a/deploy/docker/cluster/jobs/receiver.json.tpl
+++ b/deploy/docker/cluster/scripts/receiver.json.tpl
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8400/influxdb",
-  "MonitorDbName": "asapo_receivers",
-  "BrokerDbAddress":"localhost:8400/mongo",
+  "PerformanceDbServer":"localhost:8400/influxdb",
+  "PerformanceDbName": "asapo_receivers",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
   "AuthorizationServer": "localhost:8400/authorizer",
   "AuthorizationInterval": 10000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
@@ -11,7 +12,7 @@
   },
   "DataCache": {
     "Use": true,
-    "SizeGB": 30,
+    "SizeGB": {{ env "NOMAD_META_receiver_dataserver_cache_size" }},
     "ReservedShare": 10
   },
   "Tag": "{{ env "NOMAD_ADDR_recv" }}",
diff --git a/deploy/docker/cluster/scripts/resources.tf b/deploy/docker/cluster/scripts/resources.tf
new file mode 100644
index 000000000..274425056
--- /dev/null
+++ b/deploy/docker/cluster/scripts/resources.tf
@@ -0,0 +1,23 @@
+resource "nomad_job" "asapo-perfmetrics" {
+  jobspec = "${data.template_file.asapo_perfmetrics.rendered}"
+}
+
+resource "nomad_job" "asapo-mongo" {
+  jobspec = "${data.template_file.asapo_mongo.rendered}"
+}
+
+resource "nomad_job" "asapo-nginx" {
+  jobspec = "${data.template_file.nginx.rendered}"
+}
+
+resource "nomad_job" "asapo-services" {
+  jobspec = "${data.template_file.asapo_services.rendered}"
+}
+
+resource "nomad_job" "asapo-receivers" {
+  jobspec = "${data.template_file.asapo_receivers.rendered}"
+}
+
+resource "nomad_job" "asapo-brokers" {
+  jobspec = "${data.template_file.asapo_brokers.rendered}"
+}
diff --git a/deploy/docker/cluster/scripts/templates.tf b/deploy/docker/cluster/scripts/templates.tf
new file mode 100644
index 000000000..edf06bbad
--- /dev/null
+++ b/deploy/docker/cluster/scripts/templates.tf
@@ -0,0 +1,63 @@
+data "template_file" "nginx" {
+  template = "${file("${var.job_scripts_dir}/asapo-nginx.nmd.tpl")}"
+  vars = {
+    scripts_dir = "${var.job_scripts_dir}"
+    nginx_version = "${var.nginx_version}"
+  }
+}
+
+data "template_file" "asapo_services" {
+  template = "${file("${var.job_scripts_dir}/asapo-services.nmd.tpl")}"
+  vars = {
+    scripts_dir = "${var.job_scripts_dir}"
+    image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
+    fluentd_logs = "${var.fluentd_logs}"
+  }
+}
+
+data "template_file" "asapo_receivers" {
+  template = "${file("${var.job_scripts_dir}/asapo-receivers.nmd.tpl")}"
+  vars = {
+    scripts_dir = "${var.job_scripts_dir}"
+    data_dir = "${var.data_dir}"
+    image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
+    fluentd_logs = "${var.fluentd_logs}"
+    receiver_total_memory_size = "${var.receiver_total_memory_size}"
+    receiver_dataserver_cache_size = "${var.receiver_dataserver_cache_size}"
+  }
+}
+
+data "template_file" "asapo_brokers" {
+  template = "${file("${var.job_scripts_dir}/asapo-brokers.nmd.tpl")}"
+  vars = {
+    scripts_dir = "${var.job_scripts_dir}"
+    image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
+    fluentd_logs = "${var.fluentd_logs}"
+  }
+}
+
+
+data "template_file" "asapo_perfmetrics" {
+  template = "${file("${var.job_scripts_dir}/asapo-perfmetrics.nmd.tpl")}"
+  vars = {
+    service_dir = "${var.service_dir}"
+    influxdb_version = "${var.influxdb_version}"
+    grafana_version = "${var.grafana_version}"
+    grafana_total_memory_size = "${var.grafana_total_memory_size}"
+    grafana_port = "${var.grafana_port}"
+    influxdb_total_memory_size = "${var.influxdb_total_memory_size}"
+    influxdb_port = "${var.influxdb_port}"
+
+    }
+}
+
+
+data "template_file" "asapo_mongo" {
+  template = "${file("${var.job_scripts_dir}/asapo-mongo.nmd.tpl")}"
+  vars = {
+    service_dir = "${var.service_dir}"
+    mongo_version = "${var.mongo_version}"
+    mongo_total_memory_size = "${var.mongo_total_memory_size}"
+    mongo_port = "${var.mongo_port}"
+  }
+}
diff --git a/deploy/docker/cluster/scripts/vars.tf b/deploy/docker/cluster/scripts/vars.tf
new file mode 100644
index 000000000..8144db2fb
--- /dev/null
+++ b/deploy/docker/cluster/scripts/vars.tf
@@ -0,0 +1,74 @@
+variable "fluentd_logs" {
+ default = true
+}
+
+variable "nginx_version" {
+  default = "latest"
+}
+
+variable "grafana_version" {
+  default = "latest"
+}
+
+
+variable "influxdb_version" {
+  default = "latest"
+}
+
+variable "asapo_imagename_suffix" {
+  default = ""
+}
+
+variable "asapo_image_tag" {
+#  default = "latest"
+}
+
+variable "job_scripts_dir" {
+  default = "/var/run/asapo"
+}
+
+variable "service_dir" {
+}
+
+variable "data_dir" {
+}
+
+
+variable "receiver_total_memory_size" {
+  default = "2000" #mb
+}
+
+variable "receiver_dataserver_cache_size" {
+  default = "1"
+}
+
+variable "grafana_total_memory_size" {
+  default = "2000" #mb
+}
+
+variable "influxdb_total_memory_size" {
+  default = "2000" #mb
+}
+
+variable "grafana_port" {
+  default = "3000"
+}
+
+variable "influxdb_port" {
+  default = "8086"
+}
+
+
+variable "mongo_port" {
+  default = "27017"
+}
+
+
+variable "mongo_version" {
+  default = "4.0.0"
+}
+
+variable "mongo_total_memory_size" {
+  default = "300"
+}
+
diff --git a/deploy/docker/cluster/supervisord.conf b/deploy/docker/cluster/supervisord.conf
index 0c5c6c7ba..d6cc9070e 100644
--- a/deploy/docker/cluster/supervisord.conf
+++ b/deploy/docker/cluster/supervisord.conf
@@ -12,5 +12,5 @@ command=/bin/consul agent -dev -client 0.0.0.0 -domain asapo -recursor=8.8.8.8
 #-config-dir=/etc/consul.d
 
 [program:nomad]
-command=/bin/nomad agent -data-dir=/tmp/nomad -dev -client -bind 0.0.0.0
+command=/bin/nomad agent -dev -client -bind 0.0.0.0 -alloc-dir=%(ENV_NOMAD_ALLOC)s
 # -config=/etc/nomad.d
diff --git a/deploy/docker/cluster/tf_run.sh b/deploy/docker/cluster/tf_run.sh
new file mode 100755
index 000000000..13bab997e
--- /dev/null
+++ b/deploy/docker/cluster/tf_run.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+docker exec -w /var/run/asapo asapo terraform init
+
+docker exec -w /var/run/asapo asapo terraform apply -auto-approve -var fluentd_logs=false
\ No newline at end of file
diff --git a/deploy/nomad_jobs/broker.json.tpl b/deploy/nomad_jobs/broker.json.tpl
index ddaeb1486..88f2df6be 100644
--- a/deploy/nomad_jobs/broker.json.tpl
+++ b/deploy/nomad_jobs/broker.json.tpl
@@ -1,7 +1,8 @@
 {
-  "BrokerDbAddress":"localhost:8400/mongo",
-  "MonitorDbAddress":"localhost:8400/influxdb",
-  "MonitorDbName": "asapo_brokers",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
+  "PerformanceDbServer":"localhost:8400/influxdb",
+  "PerformanceDbName": "asapo_brokers",
   "port":{{ env "NOMAD_PORT_broker" }},
   "LogLevel":"info",
   "SecretFile":"/secrets/secret.key"
diff --git a/deploy/nomad_jobs/nginx.conf.tpl b/deploy/nomad_jobs/nginx.conf.tpl
index c53d8c503..20dccdcd4 100644
--- a/deploy/nomad_jobs/nginx.conf.tpl
+++ b/deploy/nomad_jobs/nginx.conf.tpl
@@ -22,15 +22,9 @@ http {
           set $fluentd_endpoint fluentd.service.asapo;
           set $kibana_endpoint kibana.service.asapo;
           set $grafana_endpoint grafana.service.asapo;
-          set $mongo_endpoint mongo.service.asapo;
           set $influxdb_endpoint influxdb.service.asapo;
           set $elasticsearch_endpoint elasticsearch.service.asapo;
 
-   		  location /mongo/ {
-            rewrite ^/mongo(/.*) $1 break;
-            proxy_pass http://$mongo_endpoint:27017$uri$is_args$args;
-          }
-
    		  location /influxdb/ {
             rewrite ^/influxdb(/.*) $1 break;
             proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
diff --git a/deploy/nomad_jobs/receiver.json.tpl b/deploy/nomad_jobs/receiver.json.tpl
index 4410725d8..648f8b59f 100644
--- a/deploy/nomad_jobs/receiver.json.tpl
+++ b/deploy/nomad_jobs/receiver.json.tpl
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8400/influxdb",
-  "MonitorDbName": "asapo_receivers",
-  "BrokerDbAddress":"localhost:8400/mongo",
+  "PerformanceDbServer":"localhost:8400/influxdb",
+  "PerformanceDbName": "asapo_receivers",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
   "AuthorizationServer": "localhost:8400/authorizer",
   "AuthorizationInterval": 10000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler.go b/discovery/src/asapo_discovery/request_handler/request_handler.go
index 7c33af3f8..f3a036597 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler.go
@@ -5,6 +5,7 @@ import "asapo_common/utils"
 type Agent interface {
 	GetReceivers() ([]byte, error)
 	GetBroker() ([]byte, error)
+	GetMongo() ([]byte, error)
 	Init(settings utils.Settings) error
 }
 
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
index 9c2125045..ceda776a0 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
@@ -12,6 +12,7 @@ import (
 type ConsulRequestHandler struct {
 	MaxConnections int
 	client         *api.Client
+	staticHandler *StaticRequestHandler
 }
 
 type Responce struct {
@@ -48,20 +49,30 @@ func (rh *ConsulRequestHandler) GetServices(name string) ([]string, error) {
 }
 
 func (rh *ConsulRequestHandler) GetReceivers() ([]byte, error) {
+	if len(rh.staticHandler.receiverResponce.Uris)>0 {
+		return rh.staticHandler.GetReceivers()
+	}
+
+
+	var response Responce
+	response.MaxConnections = rh.MaxConnections
+
 	if (rh.client == nil) {
 		return nil, errors.New("consul client not connected")
 	}
-	var response Responce
 	var err error
 	response.Uris, err = rh.GetServices("asapo-receiver")
 	if err != nil {
 		return nil, err
 	}
-	response.MaxConnections = rh.MaxConnections
 	return utils.MapToJson(&response)
 }
 
 func (rh *ConsulRequestHandler) GetBroker() ([]byte, error) {
+	if len(rh.staticHandler.broker)>0 {
+		return rh.staticHandler.GetBroker()
+	}
+
 	if (rh.client == nil) {
 		return nil, errors.New("consul client not connected")
 	}
@@ -78,6 +89,28 @@ func (rh *ConsulRequestHandler) GetBroker() ([]byte, error) {
 	return nil, nil
 }
 
+func (rh *ConsulRequestHandler) GetMongo() ([]byte, error) {
+	if len(rh.staticHandler.mongo)>0 {
+		return rh.staticHandler.GetMongo()
+	}
+
+	if (rh.client == nil) {
+		return nil, errors.New("consul client not connected")
+	}
+	response, err := rh.GetServices("asapo-mongo")
+	if err != nil {
+		return nil, err
+	}
+	size := len(response)
+	if size ==0 {
+		return []byte(""),nil
+	}else {
+		return []byte(response[counter.Next(size)]),nil
+	}
+	return nil, nil
+}
+
+
 func (rh *ConsulRequestHandler) connectClient(uri string) (client *api.Client, err error) {
 	config := api.DefaultConfig()
 	if len(uri) > 0 {
@@ -94,6 +127,8 @@ func (rh *ConsulRequestHandler) connectClient(uri string) (client *api.Client, e
 }
 
 func (rh *ConsulRequestHandler) Init(settings utils.Settings) (err error) {
+	rh.staticHandler = new(StaticRequestHandler)
+	rh.staticHandler.Init(settings)
 	rh.MaxConnections = settings.Receiver.MaxConnections
 	if len(settings.ConsulEndpoints) == 0 {
 		rh.client, err = rh.connectClient("")
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
index 002afc6e0..e1f9d47b0 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
@@ -51,6 +51,7 @@ func (suite *ConsulHandlerTestSuite) SetupTest() {
 
 	suite.registerAgents("asapo-receiver")
 	suite.registerAgents("asapo-broker")
+	suite.registerAgents("asapo-mongo")
 
 }
 
@@ -59,6 +60,9 @@ func (suite *ConsulHandlerTestSuite) TearDownTest() {
 	suite.client.Agent().ServiceDeregister("asapo-receiver1235")
 	suite.client.Agent().ServiceDeregister("asapo-broker1234")
 	suite.client.Agent().ServiceDeregister("asapo-broker1235")
+	suite.client.Agent().ServiceDeregister("asapo-mongo1234")
+	suite.client.Agent().ServiceDeregister("asapo-mongo1235")
+
 }
 
 func (suite *ConsulHandlerTestSuite) TestInitDefaultUri() {
@@ -95,6 +99,14 @@ func (suite *ConsulHandlerTestSuite) TestGetReceivers() {
 	suite.Equal("{\"MaxConnections\":10,\"Uris\":[\"127.0.0.1:1234\",\"127.0.0.1:1235\"]}", string(res), "uris")
 }
 
+func (suite *ConsulHandlerTestSuite) TestGetReceiversStatic() {
+	consul_settings.Receiver.StaticEndpoints= []string{"127.0.0.1:0000"}
+	suite.handler.Init(consul_settings)
+	res, err := suite.handler.GetReceivers()
+	suite.NoError(err, "")
+	suite.Equal("{\"MaxConnections\":10,\"Uris\":[\"127.0.0.1:0000\"]}", string(res), "uris")
+}
+
 func (suite *ConsulHandlerTestSuite) TestGetReceiversWhenNotConnected() {
 	consul_settings.ConsulEndpoints = []string{"blabla"}
 	suite.handler.Init(consul_settings)
@@ -126,6 +138,45 @@ func (suite *ConsulHandlerTestSuite) TestGetBrokerRoundRobin() {
 }
 
 
+func (suite *ConsulHandlerTestSuite) TestGetMongoRoundRobin() {
+	suite.handler.Init(consul_settings)
+	res, err := suite.handler.GetMongo()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:1235", string(res), "uris")
+
+	res, err = suite.handler.GetMongo()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:1234", string(res), "uris")
+
+	res, err = suite.handler.GetMongo()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:1235", string(res), "uris")
+}
+
+func (suite *ConsulHandlerTestSuite) TestGetMongoStatic() {
+	consul_settings.Mongo.StaticEndpoint="127.0.0.1:0000"
+	suite.handler.Init(consul_settings)
+	res, err := suite.handler.GetMongo()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:0000", string(res), "uris")
+
+	res, err = suite.handler.GetMongo()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:0000", string(res), "uris")
+}
+
+func (suite *ConsulHandlerTestSuite) TestGetBrokerStatic() {
+	consul_settings.Broker.StaticEndpoint="127.0.0.1:0000"
+	suite.handler.Init(consul_settings)
+	res, err := suite.handler.GetBroker()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:0000", string(res), "uris")
+
+	res, err = suite.handler.GetBroker()
+	suite.NoError(err, "")
+	suite.Equal("127.0.0.1:0000", string(res), "uris")
+}
+
 func (suite *ConsulHandlerTestSuite) TestGetBrokerEmpty() {
 	suite.client.Agent().ServiceDeregister("asapo-broker1234")
 	suite.client.Agent().ServiceDeregister("asapo-broker1235")
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_static.go b/discovery/src/asapo_discovery/request_handler/request_handler_static.go
index f9668a541..8db0d0eb3 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_static.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_static.go
@@ -7,6 +7,7 @@ import (
 type StaticRequestHandler struct {
 	receiverResponce Responce
 	broker string
+	mongo string
 }
 
 
@@ -18,10 +19,17 @@ func (rh *StaticRequestHandler) GetBroker() ([]byte, error) {
 	return []byte(rh.broker),nil
 }
 
+func (rh *StaticRequestHandler) GetMongo() ([]byte, error) {
+	return []byte(rh.mongo),nil
+}
+
+
 
 func (rh *StaticRequestHandler) Init(settings utils.Settings) error {
 	rh.receiverResponce.MaxConnections = settings.Receiver.MaxConnections
 	rh.receiverResponce.Uris = settings.Receiver.StaticEndpoints
 	rh.broker = settings.Broker.StaticEndpoint
+	rh.mongo = settings.Mongo.StaticEndpoint
+
 	return nil
 }
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
index eaeed6fab..1fab0ce8e 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
@@ -11,7 +11,7 @@ var uris = []string{"ip1","ip2"}
 const max_conn = 1
 
 var static_settings utils.Settings= utils.Settings{Receiver:utils.ReceiverInfo{MaxConnections:max_conn,StaticEndpoints:uris},Broker:utils.BrokerInfo{
-	StaticEndpoint:"ip_broker"}}
+	StaticEndpoint:"ip_broker"}, Mongo:utils.MongoInfo{StaticEndpoint:"ip_mongo"}}
 
 
 
@@ -35,3 +35,10 @@ func TestStaticHandlerGetBrokerOK(t *testing.T) {
 	assert.Equal(t,string(res), "ip_broker")
 	assert.Nil(t, err)
 }
+
+func TestStaticHandlerGetMongoOK(t *testing.T) {
+	rh.Init(static_settings)
+	res,err := rh.GetMongo()
+	assert.Equal(t,string(res), "ip_mongo")
+	assert.Nil(t, err)
+}
diff --git a/discovery/src/asapo_discovery/server/get_receivers.go b/discovery/src/asapo_discovery/server/get_receivers.go
index 8f946dd7b..df0e5aa3b 100644
--- a/discovery/src/asapo_discovery/server/get_receivers.go
+++ b/discovery/src/asapo_discovery/server/get_receivers.go
@@ -15,6 +15,10 @@ func getService(service string) (answer []byte, code int) {
 	case "broker":
 		answer, err = requestHandler.GetBroker()
 		break
+	case "mongo":
+		answer, err = requestHandler.GetMongo()
+		break
+
 	default:
 		err = errors.New("wrong request: "+service)
 	}
@@ -42,3 +46,10 @@ func routeGetBroker(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(code)
 	w.Write(answer)
 }
+
+func routeGetMongo(w http.ResponseWriter, r *http.Request) {
+	r.Header.Set("Content-type", "application/json")
+	answer,code := getService("mongo")
+	w.WriteHeader(code)
+	w.Write(answer)
+}
diff --git a/discovery/src/asapo_discovery/server/listroutes.go b/discovery/src/asapo_discovery/server/listroutes.go
index b6f36de2d..4eb1f5641 100644
--- a/discovery/src/asapo_discovery/server/listroutes.go
+++ b/discovery/src/asapo_discovery/server/listroutes.go
@@ -17,5 +17,11 @@ var listRoutes = utils.Routes{
 		"/broker",
 		routeGetBroker,
 	},
+	utils.Route{
+		"GetMongo",
+		"Get",
+		"/mongo",
+		routeGetMongo,
+	},
 
 }
diff --git a/discovery/src/asapo_discovery/server/routes_test.go b/discovery/src/asapo_discovery/server/routes_test.go
index a983fa2b9..d838ee181 100644
--- a/discovery/src/asapo_discovery/server/routes_test.go
+++ b/discovery/src/asapo_discovery/server/routes_test.go
@@ -31,7 +31,7 @@ type GetReceiversTestSuite struct {
 func (suite *GetReceiversTestSuite) SetupTest() {
 	requestHandler = new(request_handler.StaticRequestHandler)
 	var s utils.Settings= utils.Settings{Receiver:utils.ReceiverInfo{MaxConnections:10,StaticEndpoints:[]string{"ip1","ip2"}},
-	Broker:utils.BrokerInfo{StaticEndpoint:"ip_broker"}}
+	Broker:utils.BrokerInfo{StaticEndpoint:"ip_broker"},Mongo:utils.MongoInfo{StaticEndpoint:"ip_mongo"}}
 
 	requestHandler.Init(s)
 	logger.SetMockLog()
@@ -77,4 +77,12 @@ func (suite *GetReceiversTestSuite) TestGetBroker() {
 	assertExpectations(suite.T())
 }
 
+func (suite *GetReceiversTestSuite) TestGetMongo() {
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get mongo")))
 
+	w := doRequest("/mongo")
+
+	suite.Equal(http.StatusOK, w.Code, "code ok")
+	suite.Equal(w.Body.String(), "ip_mongo", "result")
+	assertExpectations(suite.T())
+}
diff --git a/discovery/src/asapo_discovery/server/settings_test.go b/discovery/src/asapo_discovery/server/settings_test.go
index 50307b06a..b6025b25a 100644
--- a/discovery/src/asapo_discovery/server/settings_test.go
+++ b/discovery/src/asapo_discovery/server/settings_test.go
@@ -14,6 +14,7 @@ func fillSettings(mode string) utils.Settings {
 	settings.LogLevel = "info"
 	settings.Receiver.StaticEndpoints=[]string{"ip1","ip2"}
 	settings.Broker.StaticEndpoint="ip_b"
+	settings.Mongo.StaticEndpoint="ip_m"
 	settings.ConsulEndpoints=[]string{"ipc1","ipc2"}
 	return settings
 }
@@ -44,6 +45,12 @@ func TestSettingsStaticModeNoBrokerEndpoints(t *testing.T) {
 	assert.NotNil(t, err)
 }
 
+func TestSettingsStaticModeNoMongoEndpoints(t *testing.T) {
+	settings := fillSettings("static")
+	settings.Mongo.StaticEndpoint=""
+	err := settings.Validate()
+	assert.NotNil(t, err)
+}
 
 
 func TestSettingsConsulModeNoEndpoints(t *testing.T) {
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index ef8c56b3a..ae55ea992 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -18,7 +18,7 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
     std::string log_level;
     Error err;
 
-    (err = parser.GetString("MonitorDbAddress", &config.monitor_db_uri)) ||
+    (err = parser.GetString("PerformanceDbServer", &config.performance_db_uri)) ||
     (err = parser.GetUInt64("ListenPort", &config.listen_port)) ||
     (err = parser.Embedded("DataServer").GetUInt64("ListenPort", &config.dataserver.listen_port)) ||
     (err = parser.Embedded("DataServer").GetUInt64("NThreads", &config.dataserver.nthreads)) ||
@@ -27,12 +27,13 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
     (err = parser.Embedded("DataCache").GetBool("Use", &config.use_datacache)) ||
     (err = parser.Embedded("DataCache").GetUInt64("SizeGB", &config.datacache_size_gb)) ||
     (err = parser.Embedded("DataCache").GetUInt64("ReservedShare", &config.datacache_reserved_share)) ||
-    (err = parser.GetString("BrokerDbAddress", &config.broker_db_uri)) ||
+    (err = parser.GetString("DatabaseServer", &config.database_uri)) ||
+    (err = parser.GetString("DiscoveryServer", &config.discovery_server)) ||
     (err = parser.GetString("Tag", &config.tag)) ||
     (err = parser.GetString("AuthorizationServer", &config.authorization_server)) ||
     (err = parser.GetUInt64("AuthorizationInterval", &config.authorization_interval_ms)) ||
     (err = parser.GetString("RootFolder", &config.root_folder)) ||
-    (err = parser.GetString("MonitorDbName", &config.monitor_db_name)) ||
+    (err = parser.GetString("PerformanceDbName", &config.performance_db_name)) ||
     (err = parser.GetString("LogLevel", &log_level));
 
     if (err) {
diff --git a/receiver/src/receiver_config.h b/receiver/src/receiver_config.h
index 0181c86f2..7052ac71e 100644
--- a/receiver/src/receiver_config.h
+++ b/receiver/src/receiver_config.h
@@ -9,9 +9,9 @@
 namespace asapo {
 
 struct ReceiverConfig {
-    std::string monitor_db_uri;
-    std::string monitor_db_name;
-    std::string broker_db_uri;
+    std::string performance_db_uri;
+    std::string performance_db_name;
+    std::string database_uri;
     std::string root_folder;
     uint64_t listen_port = 0;
     std::string authorization_server;
@@ -25,6 +25,7 @@ struct ReceiverConfig {
     std::string tag;
     std::string source_host;
     ReceiverDataCenterConfig dataserver;
+    std::string discovery_server;
 };
 
 const ReceiverConfig* GetReceiverConfig();
diff --git a/receiver/src/receiver_error.h b/receiver/src/receiver_error.h
index 1a12952e3..ccfffa5f7 100644
--- a/receiver/src/receiver_error.h
+++ b/receiver/src/receiver_error.h
@@ -9,7 +9,8 @@ enum class ReceiverErrorType {
     kInvalidOpCode,
     kBadRequest,
     kReject,
-    kAuthorizationFailure
+    kAuthorizationFailure,
+    kCannotConnectToDatabase
 };
 
 using ReceiverErrorTemplate = ServiceErrorTemplate<ReceiverErrorType, ErrorType::kReceiverError>;
@@ -24,6 +25,12 @@ auto const kReject = ReceiverErrorTemplate{
     "request rejected", ReceiverErrorType::kReject
 };
 
+
+auto const kCannotConnectToDatabase = ReceiverErrorTemplate{
+    "cannot connect to database", ReceiverErrorType::kCannotConnectToDatabase
+};
+
+
 auto const kBadRequest = ReceiverErrorTemplate{
     "Bad request", ReceiverErrorType::kBadRequest
 };
diff --git a/receiver/src/request_handler_db.cpp b/receiver/src/request_handler_db.cpp
index d01410d4c..059a693ba 100644
--- a/receiver/src/request_handler_db.cpp
+++ b/receiver/src/request_handler_db.cpp
@@ -17,6 +17,7 @@ Error RequestHandlerDb::ProcessRequest(Request* request) const {
 }
 
 RequestHandlerDb::RequestHandlerDb(std::string collection_name): log__{GetDefaultReceiverLogger()},
+    http_client__{DefaultHttpClient()},
     collection_name_{std::move(collection_name)} {
     DatabaseFactory factory;
     Error err;
@@ -27,10 +28,42 @@ StatisticEntity RequestHandlerDb::GetStatisticEntity() const {
     return StatisticEntity::kDatabase;
 }
 
+
+Error RequestHandlerDb::GetDatabaseServerUri(std::string* uri) const {
+    if (GetReceiverConfig()->database_uri != "auto") {
+        *uri = GetReceiverConfig()->database_uri;
+        return nullptr;
+    }
+
+    HttpCode code;
+    Error http_err;
+    *uri = http_client__->Get(GetReceiverConfig()->discovery_server + "/mongo", &code, &http_err);
+    if (http_err) {
+        log__->Error(std::string{"http error when discover database server "} + " from " + GetReceiverConfig()->discovery_server
+                     + " : " + http_err->Explain());
+        return ReceiverErrorTemplates::kCannotConnectToDatabase.Generate(http_err->Explain());
+    }
+
+    if (code != HttpCode::OK) {
+        log__->Error(std::string{"http error when discover database server "} + " from " + GetReceiverConfig()->discovery_server
+                     + " : http code" + std::to_string((int)code));
+        return ReceiverErrorTemplates::kCannotConnectToDatabase.Generate("error from discovery service");
+    }
+
+    log__->Debug(std::string{"found database server "} + *uri);
+
+    return nullptr;
+}
+
+
 Error RequestHandlerDb::ConnectToDbIfNeeded() const {
     if (!connected_to_db) {
-        Error err = db_client__->Connect(GetReceiverConfig()->broker_db_uri, db_name_,
-                                         collection_name_);
+        std::string uri;
+        auto err = GetDatabaseServerUri(&uri);
+        if (err) {
+            return err;
+        }
+        err = db_client__->Connect(uri, db_name_, collection_name_);
         if (err) {
             return err;
         }
@@ -39,4 +72,5 @@ Error RequestHandlerDb::ConnectToDbIfNeeded() const {
     return nullptr;
 }
 
+
 }
\ No newline at end of file
diff --git a/receiver/src/request_handler_db.h b/receiver/src/request_handler_db.h
index dcf0a0ed6..d25b6d1fa 100644
--- a/receiver/src/request_handler_db.h
+++ b/receiver/src/request_handler_db.h
@@ -4,12 +4,13 @@
 #include "request_handler.h"
 #include "database/database.h"
 #include "logger/logger.h"
+#include "http_client/http_client.h"
 
 #include "io/io.h"
 
 namespace asapo {
 
-class RequestHandlerDb: public ReceiverRequestHandler {
+class RequestHandlerDb : public ReceiverRequestHandler {
   public:
     RequestHandlerDb() = delete;
     RequestHandlerDb(std::string collection_name);
@@ -17,14 +18,16 @@ class RequestHandlerDb: public ReceiverRequestHandler {
     Error ProcessRequest(Request* request) const override;
     std::unique_ptr<Database> db_client__;
     const AbstractLogger* log__;
+    std::unique_ptr<HttpClient> http_client__;
   protected:
     Error ConnectToDbIfNeeded() const;
     mutable bool connected_to_db = false;
     mutable std::string db_name_;
     std::string collection_name_;
+  private:
+    Error GetDatabaseServerUri(std::string* uri) const ;
 };
 
 }
 
-
 #endif //ASAPO_REQUEST_HANDLER_DB_H
diff --git a/receiver/src/request_handler_db_meta_write.cpp b/receiver/src/request_handler_db_meta_write.cpp
index 74113d8ea..25e0277ce 100644
--- a/receiver/src/request_handler_db_meta_write.cpp
+++ b/receiver/src/request_handler_db_meta_write.cpp
@@ -20,7 +20,7 @@ Error RequestHandlerDbMetaWrite::ProcessRequest(Request* request) const {
     if (!err) {
         log__->Debug(std::string{"insert beamtime meta"} + " to " + collection_name_ + " in " +
                      db_name_ +
-                     " at " + GetReceiverConfig()->broker_db_uri);
+                     " at " + GetReceiverConfig()->database_uri);
     }
     return err;
 }
diff --git a/receiver/src/request_handler_db_write.cpp b/receiver/src/request_handler_db_write.cpp
index f1491480c..19670a85b 100644
--- a/receiver/src/request_handler_db_write.cpp
+++ b/receiver/src/request_handler_db_write.cpp
@@ -34,7 +34,7 @@ Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
         if (!err) {
             log__->Debug(std::string{"insert record id "} + std::to_string(file_info.id) + " to " + collection_name_ + " in " +
                          db_name_ +
-                         " at " + GetReceiverConfig()->broker_db_uri);
+                         " at " + GetReceiverConfig()->database_uri);
         }
     } else {
         auto subset_id = request->GetCustomData()[1];
@@ -44,7 +44,7 @@ Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
             log__->Debug(std::string{"insert record as subset id "} + std::to_string(subset_id) + ", id: " +
                          std::to_string(file_info.id) + " to " + collection_name_ + " in " +
                          db_name_ +
-                         " at " + GetReceiverConfig()->broker_db_uri);
+                         " at " + GetReceiverConfig()->database_uri);
         }
     }
     return err;
diff --git a/receiver/src/statistics_sender_influx_db.cpp b/receiver/src/statistics_sender_influx_db.cpp
index 06338e849..3598ca27b 100644
--- a/receiver/src/statistics_sender_influx_db.cpp
+++ b/receiver/src/statistics_sender_influx_db.cpp
@@ -21,11 +21,11 @@ void StatisticsSenderInfluxDb::SendStatistics(const StatisticsToSend& statistic)
     //todo: send statistics async
     HttpCode code;
     Error err;
-    auto response = httpclient__->Post(GetReceiverConfig()->monitor_db_uri + "/write?db=" +
-                                       GetReceiverConfig()->monitor_db_name, StatisticsToString(statistic),
+    auto response = httpclient__->Post(GetReceiverConfig()->performance_db_uri + "/write?db=" +
+                                       GetReceiverConfig()->performance_db_name, StatisticsToString(statistic),
                                        &code, &err);
-    std::string msg = "sending statistics to " + GetReceiverConfig()->monitor_db_name + " at " +
-                      GetReceiverConfig()->monitor_db_uri;
+    std::string msg = "sending statistics to " + GetReceiverConfig()->performance_db_name + " at " +
+                      GetReceiverConfig()->performance_db_uri;
     if (err) {
         log__->Error(msg + " - " + err->Explain());
         return;
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 1c00babde..29805ebde 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -41,9 +41,11 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
         break;
     }
 
-    auto config_string = std::string("{") + Key("MonitorDbAddress", error_field) + "\"" + config.monitor_db_uri + "\"";
-    config_string += "," + Key("MonitorDbName", error_field) + "\"" + config.monitor_db_name + "\"";
-    config_string += "," + Key("BrokerDbAddress", error_field) + "\"" + config.broker_db_uri + "\"";
+    auto config_string = std::string("{") + Key("PerformanceDbServer",
+                                                error_field) + "\"" + config.performance_db_uri + "\"";
+    config_string += "," + Key("PerformanceDbName", error_field) + "\"" + config.performance_db_name + "\"";
+    config_string += "," + Key("DatabaseServer", error_field) + "\"" + config.database_uri + "\"";
+    config_string += "," + Key("DiscoveryServer", error_field) + "\"" + config.discovery_server + "\"";
     config_string += "," + Key("ListenPort", error_field) + std::to_string(config.listen_port);
     config_string += "," + Key("DataServer", error_field) + "{";
     config_string += Key("ListenPort", error_field) + std::to_string(config.dataserver.listen_port);
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index 8b967a7dc..6231fed90 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -47,11 +47,11 @@ class ConfigTests : public Test {
         test_config.listen_port = 4200;
         test_config.dataserver.listen_port = 4201;
         test_config.tag = "receiver1";
-        test_config.monitor_db_name = "db_test";
-        test_config.monitor_db_uri = "localhost:8086";
+        test_config.performance_db_name = "db_test";
+        test_config.performance_db_uri = "localhost:8086";
         test_config.write_to_disk = true;
         test_config.write_to_db = true;
-        test_config.broker_db_uri = "localhost:27017";
+        test_config.database_uri = "localhost:27017";
         test_config.log_level = asapo::LogLevel::Error;
         test_config.root_folder = "test_fodler";
         test_config.authorization_interval_ms = 10000;
@@ -61,6 +61,7 @@ class ConfigTests : public Test {
         test_config.datacache_size_gb = 2;
         test_config.source_host = "host";
         test_config.dataserver.nthreads = 5;
+        test_config.discovery_server = "discovery";
     }
 
 };
@@ -74,9 +75,9 @@ TEST_F(ConfigTests, ReadSettings) {
     auto config = GetReceiverConfig();
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(config->monitor_db_uri, Eq("localhost:8086"));
-    ASSERT_THAT(config->monitor_db_name, Eq("db_test"));
-    ASSERT_THAT(config->broker_db_uri, Eq("localhost:27017"));
+    ASSERT_THAT(config->performance_db_uri, Eq("localhost:8086"));
+    ASSERT_THAT(config->performance_db_name, Eq("db_test"));
+    ASSERT_THAT(config->database_uri, Eq("localhost:27017"));
     ASSERT_THAT(config->listen_port, Eq(4200));
     ASSERT_THAT(config->dataserver.listen_port, Eq(4201));
     ASSERT_THAT(config->authorization_interval_ms, Eq(10000));
@@ -92,18 +93,17 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->source_host, Eq("host"));
     ASSERT_THAT(config->dataserver.nthreads, Eq(5));
     ASSERT_THAT(config->dataserver.tag, Eq("receiver1_ds"));
-
-
+    ASSERT_THAT(config->discovery_server, Eq("discovery"));
 }
 
 
 TEST_F(ConfigTests, ErrorReadSettings) {
     PrepareConfig();
 
-    std::vector<std::string>fields {"MonitorDbAddress", "ListenPort", "DataServer", "ListenPort", "WriteToDisk",
-                                    "WriteToDb", "DataCache", "Use", "SizeGB", "ReservedShare", "BrokerDbAddress", "Tag",
-                                    "AuthorizationServer", "AuthorizationInterval", "RootFolder", "MonitorDbName", "LogLevel",
-                                    "SourceHost", "NThreads"};
+    std::vector<std::string>fields {"PerformanceDbServer", "ListenPort", "DataServer", "ListenPort", "WriteToDisk",
+                                    "WriteToDb", "DataCache", "Use", "SizeGB", "ReservedShare", "DatabaseServer", "Tag",
+                                    "AuthorizationServer", "AuthorizationInterval", "RootFolder", "PerformanceDbName", "LogLevel",
+                                    "SourceHost", "NThreads", "DiscoveryServer"};
     for (const auto& field : fields) {
         auto err = asapo::SetReceiverConfig(test_config, field);
         ASSERT_THAT(err, Ne(nullptr));
diff --git a/receiver/unittests/test_request_handler_db.cpp b/receiver/unittests/test_request_handler_db.cpp
index 6e5b7c9c5..c0163e949 100644
--- a/receiver/unittests/test_request_handler_db.cpp
+++ b/receiver/unittests/test_request_handler_db.cpp
@@ -4,6 +4,7 @@
 #include "unittests/MockIO.h"
 #include "unittests/MockDatabase.h"
 #include "unittests/MockLogger.h"
+#include "unittests/MockHttpClient.h"
 
 #include "../src/receiver_error.h"
 #include "../src/request.h"
@@ -46,9 +47,12 @@ using asapo::RequestHandlerDb;
 using ::asapo::GenericRequestHeader;
 
 using asapo::MockDatabase;
+using ::asapo::MockHttpClient;
+
 using asapo::RequestFactory;
 using asapo::SetReceiverConfig;
 using asapo::ReceiverConfig;
+using asapo::HttpCode;
 
 
 namespace {
@@ -64,24 +68,65 @@ class DbHandlerTests : public Test {
     std::string expected_beamtime_id = "beamtime_id";
     std::string expected_stream = "stream";
     std::string expected_default_stream = "detector";
+    std::string expected_discovery_server = "discovery";
+    std::string expected_database_server = "127.0.0.1:27017";
+
+    MockHttpClient mock_http_client;
 
     void SetUp() override {
         GenericRequestHeader request_header;
         request_header.data_id = 2;
         handler.db_client__ = std::unique_ptr<asapo::Database> {&mock_db};
         handler.log__ = &mock_logger;
+        handler.http_client__ = std::unique_ptr<asapo::HttpClient> {&mock_http_client};
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, ""});
         ON_CALL(*mock_request, GetBeamtimeId()).WillByDefault(ReturnRef(expected_beamtime_id));
         ON_CALL(*mock_request, GetStream()).WillByDefault(ReturnRef(expected_stream));
 
     }
     void TearDown() override {
+        handler.http_client__.release();
         handler.db_client__.release();
     }
-
+    void MockAuthRequest(bool error, HttpCode code = HttpCode::OK);
 
 };
 
+
+void DbHandlerTests::MockAuthRequest(bool error, HttpCode code) {
+    if (error) {
+        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/mongo",  _, _)).
+        WillOnce(
+            DoAll(SetArgPointee<2>(new asapo::SimpleError("http error")),
+                  Return("")
+                 ));
+        EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("discover database server"),
+                                             HasSubstr("http error"),
+                                             HasSubstr(expected_discovery_server))));
+
+    } else {
+        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/mongo", _, _)).
+        WillOnce(
+            DoAll(
+                SetArgPointee<1>(code),
+                SetArgPointee<2>(nullptr),
+                Return(expected_database_server)
+            ));
+        if (code != HttpCode::OK) {
+            EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("failure discover database server"),
+                                                 HasSubstr("http code"),
+                                                 HasSubstr(std::to_string(int(code))),
+                                                 HasSubstr(expected_discovery_server))));
+        } else {
+            EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("found database server"),
+                                                 HasSubstr(expected_database_server))));
+        }
+    }
+
+
+}
+
+
 TEST(DbHandler, Constructor) {
     RequestHandlerDb handler{""};
     ASSERT_THAT(dynamic_cast<asapo::MongoDBClient*>(handler.db_client__.get()), Ne(nullptr));
@@ -95,8 +140,48 @@ TEST_F(DbHandlerTests, CheckStatisticEntity) {
 }
 
 
+TEST_F(DbHandlerTests, ProcessRequestDiscoversMongoDbAddress) {
+    config.database_uri = "auto";
+    config.discovery_server = expected_discovery_server;
+    SetReceiverConfig(config, "none");
+
+    MockAuthRequest(false, HttpCode::OK);
+
+    EXPECT_CALL(*mock_request, GetBeamtimeId())
+    .WillOnce(ReturnRef(expected_beamtime_id))
+    ;
+
+    EXPECT_CALL(*mock_request, GetStream())
+    .WillOnce(ReturnRef(expected_stream))
+    ;
+
+
+    EXPECT_CALL(mock_db, Connect_t(expected_database_server, expected_beamtime_id + "_" + expected_stream,
+                                   expected_collection_name)).
+    WillOnce(testing::Return(nullptr));
+
+    auto err = handler.ProcessRequest(mock_request.get());
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST_F(DbHandlerTests, ProcessRequestErrorDiscoversMongoDbAddress) {
+    config.database_uri = "auto";
+    config.discovery_server = expected_discovery_server;
+    SetReceiverConfig(config, "none");
+
+
+    MockAuthRequest(true, HttpCode::BadRequest);
+
+    EXPECT_CALL(mock_db, Connect_t(_, _, _)).Times(0);
+
+    auto err = handler.ProcessRequest(mock_request.get());
+    ASSERT_THAT(err, Eq(asapo::ReceiverErrorTemplates::kCannotConnectToDatabase));
+}
+
+
+
 TEST_F(DbHandlerTests, ProcessRequestCallsConnectDbWhenNotConnected) {
-    config.broker_db_uri = "127.0.0.1:27017";
+    config.database_uri = "127.0.0.1:27017";
     SetReceiverConfig(config, "none");
 
 
diff --git a/receiver/unittests/test_request_handler_db_meta_writer.cpp b/receiver/unittests/test_request_handler_db_meta_writer.cpp
index 00aaee3ce..a81ff983d 100644
--- a/receiver/unittests/test_request_handler_db_meta_writer.cpp
+++ b/receiver/unittests/test_request_handler_db_meta_writer.cpp
@@ -96,7 +96,7 @@ TEST_F(DbMetaWriterHandlerTests, CallsUpdate) {
     ;
 
 
-    EXPECT_CALL(mock_db, Connect_t(config.broker_db_uri, expected_beamtime_id + "_" + expected_stream,
+    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_stream,
                                    expected_collection_name)).
     WillOnce(testing::Return(nullptr));
 
@@ -113,7 +113,7 @@ TEST_F(DbMetaWriterHandlerTests, CallsUpdate) {
     WillOnce(testing::Return(nullptr));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("insert beamtime meta"),
-                                         HasSubstr(config.broker_db_uri),
+                                         HasSubstr(config.database_uri),
                                          HasSubstr(expected_beamtime_id),
                                          HasSubstr(expected_collection_name)
                                         )
diff --git a/receiver/unittests/test_request_handler_db_writer.cpp b/receiver/unittests/test_request_handler_db_writer.cpp
index 5f83e9964..11047e9e9 100644
--- a/receiver/unittests/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/test_request_handler_db_writer.cpp
@@ -53,6 +53,13 @@ using asapo::ReceiverConfig;
 
 namespace {
 
+TEST(DbWriterHandler, Constructor) {
+    RequestHandlerDbWrite handler{""};
+    ASSERT_THAT(dynamic_cast<asapo::HttpClient*>(handler.http_client__.get()), Ne(nullptr));
+}
+
+
+
 class DbWriterHandlerTests : public Test {
   public:
     std::string expected_collection_name = asapo::kDBDataCollectionName;
@@ -81,7 +88,7 @@ class DbWriterHandlerTests : public Test {
         handler.db_client__ = std::unique_ptr<asapo::Database> {&mock_db};
         handler.log__ = &mock_logger;
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, ""});
-        config.broker_db_uri = "127.0.0.1:27017";
+        config.database_uri = "127.0.0.1:27017";
         config.source_host = expected_hostname;
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
@@ -127,7 +134,7 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     std::string db_name = expected_beamtime_id;
     db_name += "_" + stream;
 
-    EXPECT_CALL(mock_db, Connect_t(config.broker_db_uri, db_name, expected_collection_name)).
+    EXPECT_CALL(mock_db, Connect_t(config.database_uri, db_name, expected_collection_name)).
     WillOnce(testing::Return(nullptr));
 
     EXPECT_CALL(*mock_request, GetDataSize())
@@ -180,7 +187,7 @@ TEST_F(DbWriterHandlerTests, CallsInsert) {
     WillOnce(testing::Return(nullptr));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("insert record"),
-                                         HasSubstr(config.broker_db_uri),
+                                         HasSubstr(config.database_uri),
                                          HasSubstr(expected_beamtime_id),
                                          HasSubstr(expected_stream),
                                          HasSubstr(expected_collection_name)
@@ -201,7 +208,7 @@ TEST_F(DbWriterHandlerTests, CallsInsertSubset) {
     WillOnce(testing::Return(nullptr));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("insert record"),
-                                         HasSubstr(config.broker_db_uri),
+                                         HasSubstr(config.database_uri),
                                          HasSubstr(expected_beamtime_id),
                                          HasSubstr(expected_collection_name)
                                         )
diff --git a/receiver/unittests/test_statistics_sender_fluentd.cpp b/receiver/unittests/test_statistics_sender_fluentd.cpp
index 137bb4f84..306050755 100644
--- a/receiver/unittests/test_statistics_sender_fluentd.cpp
+++ b/receiver/unittests/test_statistics_sender_fluentd.cpp
@@ -65,8 +65,8 @@ class SenderFluentdTests : public Test {
         statistics.tags.push_back(std::make_pair("name1", "value1"));
         statistics.tags.push_back(std::make_pair("name2", "value2"));
 
-        config.monitor_db_uri = "test_uri";
-        config.monitor_db_name = "test_name";
+        config.performance_db_uri = "test_uri";
+        config.performance_db_name = "test_name";
         SetReceiverConfig(config, "none");
 
         sender.statistics_log__.reset(&mock_logger);
diff --git a/receiver/unittests/test_statistics_sender_influx_db.cpp b/receiver/unittests/test_statistics_sender_influx_db.cpp
index 97fe93076..d4e293c0c 100644
--- a/receiver/unittests/test_statistics_sender_influx_db.cpp
+++ b/receiver/unittests/test_statistics_sender_influx_db.cpp
@@ -66,8 +66,8 @@ class SenderInfluxDbTests : public Test {
         statistics.tags.push_back(std::make_pair("name1", "value1"));
         statistics.tags.push_back(std::make_pair("name2", "value2"));
 
-        config.monitor_db_uri = "test_uri";
-        config.monitor_db_name = "test_name";
+        config.performance_db_uri = "test_uri";
+        config.performance_db_name = "test_name";
         SetReceiverConfig(config, "none");
 
         sender.httpclient__.reset(&mock_http_client);
@@ -88,7 +88,7 @@ TEST_F(SenderInfluxDbTests, SendStatisticsCallsPost) {
               Return("")
              ));
 
-    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("sending statistics"), HasSubstr(config.monitor_db_uri))));
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("sending statistics"), HasSubstr(config.performance_db_uri))));
 
 
     sender.SendStatistics(statistics);
@@ -113,8 +113,8 @@ TEST_F(SenderInfluxDbTests, LogDebugSendStatistics) {
              ));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("sending statistics"),
-                                         HasSubstr(config.monitor_db_uri),
-                                         HasSubstr(config.monitor_db_name)
+                                         HasSubstr(config.performance_db_uri),
+                                         HasSubstr(config.performance_db_name)
                                         )
                                   )
                );
diff --git a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
index 8e7f70f9a..86ced0726 100644
--- a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
+++ b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
@@ -55,7 +55,7 @@ Cleanup() {
 influx -execute "create database ${monitor_database_name}"
 
 sed -i 's/27017/27016/g' receiver.json.tpl
-sed -i 's/27017/27016/g' broker.json.tpl
+sed -i 's/27017/27016/g' discovery.json.tpl
 sed -i 's/info/debug/g' broker.json.tpl
 
 start_mongo
diff --git a/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh b/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
index 4a5c6de8e..f8f9a7639 100644
--- a/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
+++ b/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
@@ -54,7 +54,7 @@ wait_mongo
 # create db before worker starts reading it. todo: git rid of it
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo --port 27016 ${beamtime_id}_detector
 
-sed -i 's/27017/27016/g' receiver.json.tpl
+sed -i 's/27017/27016/g' discovery.json.tpl
 
 
 nomad run authorizer.nmd
diff --git a/tests/automatic/settings/broker_settings.json b/tests/automatic/settings/broker_settings.json
index 9d95a7c87..dc2498380 100644
--- a/tests/automatic/settings/broker_settings.json
+++ b/tests/automatic/settings/broker_settings.json
@@ -1,7 +1,7 @@
 {
-  "BrokerDbAddress":"127.0.0.1:27017",
-  "MonitorDbAddress": "localhost:8086",
-  "MonitorDbName": "db_test",
+  "DatabaseServer":"127.0.0.1:27017",
+  "PerformanceDbServer": "localhost:8086",
+  "PerformanceDbName": "db_test",
   "port":5005,
   "LogLevel":"info",
   "SecretFile":"auth_secret.key"
diff --git a/tests/automatic/settings/broker_settings.json.tpl b/tests/automatic/settings/broker_settings.json.tpl
index c1cfd68aa..0ca5866f9 100644
--- a/tests/automatic/settings/broker_settings.json.tpl
+++ b/tests/automatic/settings/broker_settings.json.tpl
@@ -1,7 +1,8 @@
 {
-  "BrokerDbAddress":"127.0.0.1:27017",
-  "MonitorDbAddress": "localhost:8086",
-  "MonitorDbName": "db_test",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
+  "PerformanceDbServer": "localhost:8086",
+  "PerformanceDbName": "db_test",
   "port":{{ env "NOMAD_PORT_broker" }},
   "LogLevel":"info",
   "SecretFile":"auth_secret.key"
diff --git a/tests/automatic/settings/discovery_fixed_settings.json b/tests/automatic/settings/discovery_fixed_settings.json
deleted file mode 100644
index 25cba6782..000000000
--- a/tests/automatic/settings/discovery_fixed_settings.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "Mode": "consul",
-  "Receiver": {
-    "MaxConnections": 32
-  },
-  "Port": {{ env "NOMAD_PORT_discovery" }},
-  "LogLevel":"debug"
-}
-
-
diff --git a/tests/automatic/settings/discovery_settings.json.tpl b/tests/automatic/settings/discovery_settings.json.tpl
index 25cba6782..2095ef388 100644
--- a/tests/automatic/settings/discovery_settings.json.tpl
+++ b/tests/automatic/settings/discovery_settings.json.tpl
@@ -3,6 +3,9 @@
   "Receiver": {
     "MaxConnections": 32
   },
+  "Mongo": {
+    "StaticEndpoint": "127.0.0.1:27017"
+  },
   "Port": {{ env "NOMAD_PORT_discovery" }},
   "LogLevel":"debug"
 }
diff --git a/tests/automatic/settings/nginx.conf.tpl b/tests/automatic/settings/nginx.conf.tpl
index 8f9608e57..7d6c84e2c 100644
--- a/tests/automatic/settings/nginx.conf.tpl
+++ b/tests/automatic/settings/nginx.conf.tpl
@@ -22,15 +22,9 @@ http {
           set $fluentd_endpoint fluentd.service.asapo;
           set $kibana_endpoint kibana.service.asapo;
           set $grafana_endpoint grafana.service.asapo;
-          set $mongo_endpoint mongo.service.asapo;
           set $influxdb_endpoint influxdb.service.asapo;
           set $elasticsearch_endpoint elasticsearch.service.asapo;
 
-   		  location /mongo/ {
-            rewrite ^/mongo(/.*) $1 break;
-            proxy_pass http://$mongo_endpoint:27017$uri$is_args$args;
-          }
-
    		  location /influxdb/ {
             rewrite ^/influxdb(/.*) $1 break;
             proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
diff --git a/tests/automatic/settings/receiver.json.tpl.lin.in b/tests/automatic/settings/receiver.json.tpl.lin.in
index cfe057742..2f27019b7 100644
--- a/tests/automatic/settings/receiver.json.tpl.lin.in
+++ b/tests/automatic/settings/receiver.json.tpl.lin.in
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
diff --git a/tests/automatic/settings/receiver.json.tpl.win.in b/tests/automatic/settings/receiver.json.tpl.win.in
index 39ad1b7c1..7fcedb822 100644
--- a/tests/automatic/settings/receiver.json.tpl.win.in
+++ b/tests/automatic/settings/receiver.json.tpl.win.in
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/discovery",
   "AuthorizationServer": "localhost:8400/authorizer",
   "AuthorizationInterval": 10000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
diff --git a/tests/manual/performance_broker/settings.json b/tests/manual/performance_broker/settings.json
index 4385f5a65..72e89b21f 100644
--- a/tests/manual/performance_broker/settings.json
+++ b/tests/manual/performance_broker/settings.json
@@ -1,7 +1,7 @@
 {
-  "BrokerDbAddress":"localhost:27017",
-  "MonitorDbAddress": "localhost:8086",
-  "MonitorDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
+  "PerformanceDbServer": "localhost:8086",
+  "PerformanceDbName": "db_test",
   "port":5005,
   "LogLevel":"info",
   "SecretFile":"auth_secret.key"
diff --git a/tests/manual/performance_broker/test.sh b/tests/manual/performance_broker/test.sh
index 268483698..d1685e15e 100755
--- a/tests/manual/performance_broker/test.sh
+++ b/tests/manual/performance_broker/test.sh
@@ -22,7 +22,7 @@ worker_dir=~/broker_test
 service_dir=~/broker_test
 
 
-cat settings.json | jq ".MonitorDbAddress = \"${monitor_node}:${monitor_port}\"" > settings_tmp.json
+cat settings.json | jq ".PerformanceDbServer = \"${monitor_node}:${monitor_port}\"" > settings_tmp.json
 
 cat discovery.json | jq ".Broker.StaticEndpoint = \"${service_node}:5005\"" > discovery_tmp.json
 
diff --git a/tests/manual/performance_full_chain_simple/broker.json b/tests/manual/performance_full_chain_simple/broker.json
index 4385f5a65..72e89b21f 100644
--- a/tests/manual/performance_full_chain_simple/broker.json
+++ b/tests/manual/performance_full_chain_simple/broker.json
@@ -1,7 +1,7 @@
 {
-  "BrokerDbAddress":"localhost:27017",
-  "MonitorDbAddress": "localhost:8086",
-  "MonitorDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
+  "PerformanceDbServer": "localhost:8086",
+  "PerformanceDbName": "db_test",
   "port":5005,
   "LogLevel":"info",
   "SecretFile":"auth_secret.key"
diff --git a/tests/manual/performance_full_chain_simple/receiver.json b/tests/manual/performance_full_chain_simple/receiver.json
index b9bcf2a58..71aea67fd 100644
--- a/tests/manual/performance_full_chain_simple/receiver.json
+++ b/tests/manual/performance_full_chain_simple/receiver.json
@@ -1,8 +1,9 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
   "AuthorizationServer": "localhost:5007",
+  "DiscoveryServer": "localhost:8400/discovery",
   "AuthorizationInterval": 10000,
   "ListenPort":4200,
   "DataServer": {
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index fb9f6a2af..1786a56a4 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -43,7 +43,7 @@ ssh ${receiver_node} mkdir -p ${receiver_dir}/files/${beamline}/${beamtime_id}
 scp ../../../cmake-build-release/receiver/receiver ${receiver_node}:${receiver_dir}
 cat receiver.json |
   jq "to_entries |
-       map(if .key == \"MonitorDbAddress\"
+       map(if .key == \"PerformanceDbServer\"
           then . + {value:\"${monitor_node}:${monitor_port}\"}
           elif .key == \"ListenPort\"
           then . + {value:${receiver_port}}
@@ -96,7 +96,7 @@ broker_dir=~/fullchain_tests
 ssh ${broker_node} mkdir -p ${broker_dir}/logs
 cat broker.json |
   jq "to_entries |
-       map(if .key == \"MonitorDbAddress\"
+       map(if .key == \"PerformanceDbServer\"
           then . + {value:\"${monitor_node}:${monitor_port}\"}
           else .
           end
diff --git a/tests/manual/performance_producer_receiver/receiver.json b/tests/manual/performance_producer_receiver/receiver.json
index b9bcf2a58..1e1ae4d70 100644
--- a/tests/manual/performance_producer_receiver/receiver.json
+++ b/tests/manual/performance_producer_receiver/receiver.json
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
+  "DiscoveryServer": "localhost:8400/discovery",
   "AuthorizationServer": "localhost:5007",
   "AuthorizationInterval": 10000,
   "ListenPort":4200,
diff --git a/tests/manual/performance_producer_receiver/test.sh b/tests/manual/performance_producer_receiver/test.sh
index faf249347..251f9d18a 100755
--- a/tests/manual/performance_producer_receiver/test.sh
+++ b/tests/manual/performance_producer_receiver/test.sh
@@ -54,7 +54,7 @@ scp ../../../cmake-build-release/examples/producer/dummy-data-producer/dummy-dat
 function do_work {
 cat receiver.json |
   jq "to_entries |
-       map(if .key == \"MonitorDbAddress\"
+       map(if .key == \"PerformanceDbServer\"
           then . + {value:\"${monitor_node}:${monitor_port}\"}
           elif .key == \"ListenPort\"
           then . + {value:${receiver_port}}
diff --git a/tests/manual/python_tests/producer/receiver.json.tpl b/tests/manual/python_tests/producer/receiver.json.tpl
index b361ba851..88be63ae3 100644
--- a/tests/manual/python_tests/producer/receiver.json.tpl
+++ b/tests/manual/python_tests/producer/receiver.json.tpl
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
+  "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
diff --git a/tests/manual/receiver_debug_local/receiver.json b/tests/manual/receiver_debug_local/receiver.json
index 0f6bdad83..abf1f8b09 100644
--- a/tests/manual/receiver_debug_local/receiver.json
+++ b/tests/manual/receiver_debug_local/receiver.json
@@ -1,7 +1,8 @@
 {
-  "MonitorDbAddress":"localhost:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress":"localhost:27017",
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"localhost:27017",
+  "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
     "NThreads": 2,
     "ListenPort": 22000
-- 
GitLab