diff --git a/common/cpp/include/common/io_error.h b/common/cpp/include/common/io_error.h
index f6fa351fa6727fcef518ee6d10b9f5f70c2b5d82..8e8c6442af0364ff127138746b8451256c4ef676 100644
--- a/common/cpp/include/common/io_error.h
+++ b/common/cpp/include/common/io_error.h
@@ -51,6 +51,7 @@ auto const kBadFileNumber = IOErrorTemplate {
 auto const kResourceTemporarilyUnavailable = IOErrorTemplate {
     "Resource temporarily unavailable", IOErrorType::kResourceTemporarilyUnavailable
 };
+
 auto const kPermissionDenied = IOErrorTemplate {
     "Permission denied", IOErrorType::kPermissionDenied
 };
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index 63f69cd3f4e73ca6ced7d0f3db9cda7ef9c32b85..66c8b02cf95c507a7e01db91a6bdf5da56d897f5 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -236,14 +236,16 @@ Error MongoDBClient::InsertAsSubset(const FileInfo& file,
     if (err) {
         return err;
     }
-    auto query = BCON_NEW ("$and","[","{","_id", BCON_INT64(subset_id),"}","{","images._id","{","$ne",BCON_INT64(file.id),"}","}","]");
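+    // match the subset document by id only if its images array does not already contain this file id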
+    auto query = BCON_NEW ("$and", "[", "{", "_id", BCON_INT64(subset_id), "}", "{", "images._id", "{", "$ne",
+                           BCON_INT64(file.id), "}", "}", "]");
     auto update = BCON_NEW ("$setOnInsert", "{",
                             "size", BCON_INT64 (subset_size),
                             "}",
                             "$addToSet", "{",
                             "images", BCON_DOCUMENT(document.get()), "}");
 
-    err = AddBsonDocumentToArray(query, update,ignore_duplicates);
+    err = AddBsonDocumentToArray(query, update, ignore_duplicates);
 
     bson_destroy (query);
     bson_destroy (update);
diff --git a/common/cpp/src/request/request_pool.cpp b/common/cpp/src/request/request_pool.cpp
index 03f1e9bffa525be677097245191a0bc3edb9b386..163ad4599f385253876302b2054a12ba30858df6 100644
--- a/common/cpp/src/request/request_pool.cpp
+++ b/common/cpp/src/request/request_pool.cpp
@@ -1,5 +1,3 @@
-#include <request/request_pool.h>
-#include <request/request_pool.h>
 #include "request/request_pool.h"
 
 namespace asapo {
diff --git a/common/cpp/src/system_io/system_io.cpp b/common/cpp/src/system_io/system_io.cpp
index db0a2760cb0467d3a2a6e8a156615fa19593a996..0764fcad7a9e60419ed6f6a157de308c77685323 100644
--- a/common/cpp/src/system_io/system_io.cpp
+++ b/common/cpp/src/system_io/system_io.cpp
@@ -290,10 +290,13 @@ std::unique_ptr<sockaddr_in> SystemIO::BuildSockaddrIn(const std::string& addres
     std::string host;
     uint16_t port = 0;
     std::tie(host, port) = *hostname_port_tuple;
-    host = ResolveHostnameToIp(host, err);
-    if (*err != nullptr) {
-        return nullptr;
-    }
+
+// This call is not thread safe, and we should not resolve the hostname here - the address already contains an IP.
+// todo: remove this
+//    host = ResolveHostnameToIp(host, err);
+//    if (*err != nullptr) {
+//        return nullptr;
+//    }
 
     short family = AddressFamilyToPosixFamily(AddressFamilies::INET);
     if (family == -1) {
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index 74d41e9e57c8a6ab0764d0ac281647c108f7cab8..e7978a953a949b2ad3f027747a7c42fe65ea8efc 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -35,6 +35,9 @@ Error GetLastErrorFromErrno() {
         return IOErrorTemplates::kBadFileNumber.Generate();
     case EAGAIN:
         return IOErrorTemplates::kResourceTemporarilyUnavailable.Generate();
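+    // a network that is unreachable is reported as an invalid address (e.g. when connecting to a bogus IP)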
+    case ENETUNREACH:
+        return IOErrorTemplates::kAddressNotValid.Generate();
     case ENOENT:
     case ENOTDIR:
         return IOErrorTemplates::kFileNotFound.Generate();
diff --git a/deploy/docker/cluster/Dockerfile b/deploy/docker/cluster/Dockerfile
index d979ca5fd97feb22ed3099ea83d01982a09d69b1..427524d878de46464d7953c947c3ef840be022ae 100644
--- a/deploy/docker/cluster/Dockerfile
+++ b/deploy/docker/cluster/Dockerfile
@@ -16,7 +16,7 @@ RUN add-apt-repository \
    $(lsb_release -cs) \
    stable"
 
-RUN apt-get update && apt-get install -y docker-ce-cli wget unzip iproute2
+RUN apt-get update && apt-get install -y docker-ce-cli wget unzip iproute2 vim
 
 
 ENV CONSUL_VERSION=1.6.0
@@ -50,6 +50,8 @@ RUN cd /var/run/asapo asapo && terraform init
 
 COPY asapo-*  /usr/bin/
 
+COPY *.sh  asapo_overwrite_vars.tfvars /tmp/asapo_runscripts/
+
 COPY *.py  /etc/asapo/
 COPY *.hcl.tpl  /etc/asapo/
 
diff --git a/deploy/docker/cluster/asapo-start b/deploy/docker/cluster/asapo-start
index 6b93af566596adcd14a643e6f4a1daafab0942c4..8d0be8b80414b3c97375c02025467f3f7b4fd5ab 100755
--- a/deploy/docker/cluster/asapo-start
+++ b/deploy/docker/cluster/asapo-start
@@ -6,4 +6,9 @@ if [ ! -f  /var/nomad/token ]; then
 	cp /var/nomad/token $TF_VAR_service_dir/nomad_token
 fi
 
-cd /var/run/asapo && terraform apply -auto-approve "$@"
\ No newline at end of file
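+# pass optional user overrides to terraform (run_maxwell.sh mounts them at this path)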
+if [ -f /var/run/asapo/user_vars.tfvars ]; then
+  USER_VAR_FILE="-var-file=/var/run/asapo/user_vars.tfvars"
+fi
+
+cd /var/run/asapo && terraform apply -auto-approve $USER_VAR_FILE "$@"
\ No newline at end of file
diff --git a/deploy/docker/cluster/asapo_overwrite_vars.tfvars b/deploy/docker/cluster/asapo_overwrite_vars.tfvars
new file mode 100644
index 0000000000000000000000000000000000000000..aa170da0c38c1728593440d2f41876d759621c2a
--- /dev/null
+++ b/deploy/docker/cluster/asapo_overwrite_vars.tfvars
@@ -0,0 +1,17 @@
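+# user overrides for the cluster deployment; run_maxwell.sh mounts this file into the container as user_vars.tfvars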
+elk_logs = true
+
+receiver_total_memory_size = 35000
+receiver_dataserver_cache_size = 30 #gb
+
+grafana_total_memory_size = 2000
+influxdb_total_memory_size = 2000
+fluentd_total_memory_size = 1000
+elasticsearch_total_memory_size = 3000
+kibana_total_memory_size = 1000
+mongo_total_memory_size = 20000
+authorizer_total_memory_size = 512
+discovery_total_memory_size = 512
+
+n_receivers = 1
+n_brokers = 1
\ No newline at end of file
diff --git a/deploy/docker/cluster/consul.hcl.tpl b/deploy/docker/cluster/consul.hcl.tpl
index d87a2c9dc60497cd6db628dbdb1b4d8fa9cd8d86..33764c77cbdabe350a4a838a62fcdbfe5211b381 100644
--- a/deploy/docker/cluster/consul.hcl.tpl
+++ b/deploy/docker/cluster/consul.hcl.tpl
@@ -22,8 +22,9 @@ node_meta = {
   ib_address = "$ib_address"
 }
 
-server =  $is_server
-bootstrap_expect = $n_servers
+server = $is_server
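+# $bootstrap_expect_string expands to "bootstrap_expect = <n_servers>" on server nodes and to an empty string on clients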
+$bootstrap_expect_string
 
 rejoin_after_leave = true
 retry_join = $server_adresses
diff --git a/deploy/docker/cluster/job.sh b/deploy/docker/cluster/job.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b809a675fb02c33708fc7a1be482bd7f9e86b289
--- /dev/null
+++ b/deploy/docker/cluster/job.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+#SBATCH --nodes=1
+#SBATCH -t 00:40:00
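+# runs run_maxwell.sh once on every allocated node (one task per node)
+# submit with, e.g., "sbatch --nodes=3 job.sh" to override the node count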
+
+srun --ntasks=$SLURM_JOB_NUM_NODES --ntasks-per-node=1 ./run_maxwell.sh
+
diff --git a/deploy/docker/cluster/nomad.hcl.tpl b/deploy/docker/cluster/nomad.hcl.tpl
index 6b1a19b3e74acaf7e5df04fc1aeacb5187b0afdc..fae4e73534cf1e34fa4718878133d021c4058a00 100644
--- a/deploy/docker/cluster/nomad.hcl.tpl
+++ b/deploy/docker/cluster/nomad.hcl.tpl
@@ -9,8 +9,8 @@ acl {
 }
 
 server {
-  enabled          = $is_server
-  bootstrap_expect = $n_servers
+  enabled = $is_server
+  $bootstrap_expect_string
 }
 
 data_dir = "/var/nomad"
@@ -18,7 +18,25 @@ data_dir = "/var/nomad"
 client {
   enabled       = true
   alloc_dir="$nomad_alloc_dir"
+  meta {
+      "asapo_service" = $is_asapo_lightweight_service_node
+      "ib_address" = "$ib_address"
+  }
 }
 
+plugin "docker" {
+  config {
+    endpoint = "$docker_endpoint"
 
+    tls {
+      cert = "/etc/nomad/cert.pem"
+      key  = "/etc/nomad/key.pem"
+      ca   = "/etc/nomad/ca.pem"
+    }
+
+    allow_privileged = true
+
+  }
+}
 
diff --git a/deploy/docker/cluster/orchestr_config.py b/deploy/docker/cluster/orchestr_config.py
index 29086301c70f5d408b25b6d31411537802c36bb3..f865f6dc65e385648ecb95c0aebf4514ce3b1f8e 100644
--- a/deploy/docker/cluster/orchestr_config.py
+++ b/deploy/docker/cluster/orchestr_config.py
@@ -5,9 +5,9 @@ import socket
 import json
 import os
 
-def is_server(ip,server_names):
+def in_server_list(ip, server_names, check_single=False):
     servers = json.loads(server_names)
-    if len(servers) == 1:
+    if len(servers) == 1 and not check_single:
         return "true"
     for server in json.loads(server_names):
         try:
@@ -33,13 +33,22 @@ def set_parameters():
     except:
         print ("cannot define own ip")
         my_ip = "127.0.0.1"
+
+    d['docker_endpoint']=my_get_env('DOCKER_ENDPOINT',"unix:///var/run/docker.sock")
     d['advertise_ip']=my_get_env('ADVERTISE_IP',my_ip)
     d['n_servers']=my_get_env('N_SERVERS',1)
     d['server_adresses']=my_get_env('SERVER_ADRESSES','["'+socket.gethostname()+'"]')
-    d['is_server']=is_server(d['advertise_ip'],d['server_adresses'])
+    d['is_server']=in_server_list(d['advertise_ip'],d['server_adresses'])
+    if d['is_server']=="true":
+        d['bootstrap_expect_string'] = "bootstrap_expect = "+ str(d['n_servers'])
+    else:
+        d['bootstrap_expect_string'] = ""
     d['ib_address']=my_get_env('IB_ADDRESS',"none")
     d['nomad_alloc_dir']=my_get_env('NOMAD_ALLOC_DIR','')
     d['recursors']=my_get_env('RECURSORS','["8.8.8.8"]')
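+    # check_single=True skips the single-entry shortcut, so the ip is always compared against the list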
+    lightweight_service_nodes=my_get_env('ASAPO_LIGHTWEIGHT_SERVICE_NODES','[]')
+    d['is_asapo_lightweight_service_node']=in_server_list(d['advertise_ip'],lightweight_service_nodes, True)
     return d
 
 def process_file(file_in,file_out):
diff --git a/deploy/docker/cluster/run_maxwell.sh b/deploy/docker/cluster/run_maxwell.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0f38142cf0f1752dddec2373ab05956f5e0830de
--- /dev/null
+++ b/deploy/docker/cluster/run_maxwell.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+#folders
+NOMAD_ALLOC_HOST_SHARED=/tmp/asapo/container_host_shared/nomad_alloc
+SERVICE_DATA_CLUSTER_SHARED=/home/yakubov/asapo/asapo_cluster_shared/service_data
+DATA_GLOBAL_SHARED=/home/yakubov/asapo/global_shared/data
+
+#service distribution
+MAX_NOMAD_SERVERS=3 # the rest are clients
+N_ASAPO_LIGHTWEIGHT_SERVICE_NODES=1 # where to put influxdb, elk, ...; the rest are receivers, brokers, mongodb
+
+#DESY stuff
+RECURSORS=["\"131.169.40.200\"","\"131.169.194.200\""]
+
+ASAPO_USER=`id -u`:`id -g`
+
+ASAPO_VAR_FILE=`pwd`/asapo_overwrite_vars.tfvars
+
+
+# use the ib interface for service discovery (all communication goes through this interface)
+# todo: use ib only for communication with the receiver (the asapo discovery service should return the correct ip using the node meta IB_ADDRESS)
+USE_IP_OVER_IB=true
+
+#docker stuff
+DOCKER_ENDPOINT="127.0.0.1:2376" # comment out to use unix sockets
+DOCKER_TLS_CA=/data/netapp/docker/certs/ca.pem
+DOCKER_TLS_KEY=/data/netapp/docker/certs/$USER/key.pem
+DOCKER_TLS_CERT=/data/netapp/docker/certs/$USER/cert.pem
+
+IB_HOSTNAME=`hostname --short`-ib
+IB_ADDRESS=`getent hosts $IB_HOSTNAME | awk '{ print $1 }'`
+
+if [ "$USE_IP_OVER_IB" == "true" ]; then
+  ADVERTISE_IP=$IB_ADDRESS
+  HOSTNAME_SUFFIX=-ib
+fi
+
+#prepare env variables based on the above input
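+# Nomad/Consul get at most MAX_NOMAD_SERVERS server nodes; the remaining SLURM nodes join as clients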
+N_SERVERS=$(( $SLURM_JOB_NUM_NODES > $MAX_NOMAD_SERVERS ? $MAX_NOMAD_SERVERS : $SLURM_JOB_NUM_NODES ))
+
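+# scontrol expands the SLURM nodelist; awk renders the first N hostnames as a JSON-style list, e.g. ["node1-ib","node2-ib"]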
+SERVER_ADRESSES=`scontrol show hostnames $SLURM_JOB_NODELIST | head -$N_SERVERS | awk -v suf=$HOSTNAME_SUFFIX 'BEGIN{printf "["} {printf "%s\"%s%s\"",sep,$0,suf; sep=","} END{print "]"}'`
+ASAPO_LIGHTWEIGHT_SERVICE_NODES=`scontrol show hostnames $SLURM_JOB_NODELIST | head -$N_ASAPO_LIGHTWEIGHT_SERVICE_NODES | awk -v suf=$HOSTNAME_SUFFIX 'BEGIN{printf "["} {printf "%s\"%s%s\"",sep,$0,suf; sep=","} END{print "]"}'`
+
+# make folders if not exist
+mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
+chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
+cd $SERVICE_DATA_CLUSTER_SHARED
+mkdir -p esdatadir fluentd grafana influxdb mongodb
+chmod 777 *
+
+#todo: elasticsearch check
+mmc=`cat /proc/sys/vm/max_map_count`
+if (( mmc < 262144 )); then
+	echo "consider increasing vm.max_map_count - needed for elasticsearch"
+#    exit 1
+fi
+
+docker rm -f asapo
+
+docker pull yakser/asapo-cluster
+
+if [ -f $ASAPO_VAR_FILE ]; then
+  MOUNT_VAR_FILE="-v $ASAPO_VAR_FILE:/var/run/asapo/user_vars.tfvars"
+fi
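+# asapo-start inside the container forwards this file to terraform via -var-file when it is present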
+
+docker run --rm  \
+	-u $ASAPO_USER \
+ 	-v /scratch/docker/100000.100000:/scratch/docker/100000.100000 \
+	-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
+	-v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
+	-v $DOCKER_TLS_CA:/etc/nomad/ca.pem \
+	-v $DOCKER_TLS_KEY:/etc/nomad/key.pem \
+	-v $DOCKER_TLS_CERT:/etc/nomad/cert.pem \
+	-v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
+	 $MOUNT_VAR_FILE \
+	-e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
+	-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
+	-e TF_VAR_data_dir=$DATA_GLOBAL_SHARED \
+	-e ADVERTISE_IP=$ADVERTISE_IP \
+	-e RECURSORS=$RECURSORS \
+	-e TF_VAR_asapo_user=$ASAPO_USER \
+	-e IB_ADDRESS=$IB_ADDRESS \
+	-e SERVER_ADRESSES=$SERVER_ADRESSES \
+	-e ASAPO_LIGHTWEIGHT_SERVICE_NODES=$ASAPO_LIGHTWEIGHT_SERVICE_NODES \
+	-e DOCKER_ENDPOINT=$DOCKER_ENDPOINT \
+	-e N_SERVERS=$N_SERVERS \
+ 	--name asapo yakser/asapo-cluster
+
diff --git a/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
index 3be9e92489f0150fe1d36c548a38d1002a071096..2bca8c1758c853ba19d7d46f003ee4052cefdde8 100644
--- a/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
@@ -1,5 +1,10 @@
 job "asapo-brokers" {
   datacenters = ["dc1"]
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "false"
+    weight    = 100
+  }
 
   update {
     max_parallel = 1
@@ -9,8 +14,7 @@ job "asapo-brokers" {
   }
 
   group "brokers" {
-    count = 1
-
+    count = ${n_brokers}
     restart {
       attempts = 2
       interval = "3m"
@@ -23,6 +27,9 @@ job "asapo-brokers" {
       user = "${asapo_user}"
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/asapo-broker${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/broker/config.json"]
diff --git a/deploy/docker/cluster/scripts/asapo-logging.nmd.tpl b/deploy/docker/cluster/scripts/asapo-logging.nmd.tpl
index 5bfe365ff9e24fa9f892271fe30317c2e4cd9bc0..960395a8a682a707b2871bc107d332d29ae003f2 100644
--- a/deploy/docker/cluster/scripts/asapo-logging.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-logging.nmd.tpl
@@ -1,5 +1,11 @@
 job "asapo-logging" {
   datacenters = ["dc1"]
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "true"
+    weight    = 100
+  }
+
 
 #  update {
 #    max_parallel = 1
@@ -27,6 +33,9 @@ job "asapo-logging" {
       }
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/fluentd_elastic"
         volumes = ["local/fluentd.conf:/fluentd/etc/fluent.conf",
         "/${service_dir}/fluentd:/shared"]
@@ -95,6 +104,9 @@ job "asapo-logging" {
           nproc = "8192"
         }
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/elasticsearch:${elasticsearch_version}"
         volumes = ["/${service_dir}/esdatadir:/usr/share/elasticsearch/data"]
       }
@@ -131,6 +143,9 @@ job "asapo-logging" {
      user = "${asapo_user}"
      config {
        network_mode = "host"
+       privileged = true
+	   security_opt = ["no-new-privileges"]
+	   userns_mode = "host"
        image = "yakser/kibana:${kibana_version}"
        volumes = ["local/kibana.yml:/usr/share/kibana/config/kibana.yml"]
      }
diff --git a/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl b/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
index ae0631c8584650cdf8b7a702588889f9c1724a11..5d3146382e72f011e89dca18c7d85b53d688099a 100644
--- a/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-mongo.nmd.tpl
@@ -1,6 +1,10 @@
 job "asapo-mongo" {
   datacenters = ["dc1"]
-
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "false"
+    weight    = 100
+  }
   update {
     max_parallel = 1
     min_healthy_time = "10s"
@@ -24,6 +28,9 @@ job "asapo-mongo" {
 
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "mongo:${mongo_version}"
         volumes = ["/${service_dir}/mongodb:/data/db"]
       }
diff --git a/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
index 796991b8e9418c99c1ad70311ea7dd53b3020b6c..c76ec47f73ae4c924c74229dad633cf686b7709d 100644
--- a/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl
@@ -38,6 +38,9 @@ job "asapo-nginx" {
 
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "nginx:${nginx_version}"
         volumes = [
           "local/nginx.conf:/etc/nginx/nginx.conf"
diff --git a/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl b/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
index f699f42bfdb967c36710c4e6943bfe6407d64663..d4b69b7055913154a5ebb67ef997050eca4b506e 100644
--- a/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-perfmetrics.nmd.tpl
@@ -1,5 +1,10 @@
 job "asapo-perfmetrics" {
   datacenters = ["dc1"]
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "true"
+    weight    = 100
+  }
 
 #  update {
 #    max_parallel = 1
@@ -22,6 +27,9 @@ job "asapo-perfmetrics" {
       user = "${asapo_user}"
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "influxdb:${influxdb_version}"
         volumes = ["/${service_dir}/influxdb:/var/lib/influxdb"]
       }
@@ -69,6 +77,9 @@ job "asapo-perfmetrics" {
 
       config {
         network_mode = "host"
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "grafana/grafana:${grafana_version}"
         volumes = ["/${service_dir}/grafana:/var/lib/grafana"]
       }
diff --git a/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
index 5cc18d078daee4a26bd8e5857b0ce51142d19540..3eca7a63b427804231d9c42e3e7e11dee98f7be0 100644
--- a/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl
@@ -1,5 +1,10 @@
 job "asapo-receivers" {
   datacenters = ["dc1"]
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "false"
+    weight    = 100
+  }
 
   update {
     max_parallel = 1
@@ -9,7 +14,7 @@ job "asapo-receivers" {
   }
 
   group "receivers" {
-    count = 1
+    count = ${n_receivers}
 
     restart {
       attempts = 2
@@ -23,7 +28,9 @@ job "asapo-receivers" {
       user = "${asapo_user}"
       config {
         network_mode = "host"
-        dns_servers = ["127.0.0.1"]
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/asapo-receiver${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/receiver/config.json",
diff --git a/deploy/docker/cluster/scripts/asapo-services.nmd.tpl b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl
index 344ebe7f28017f54893f064aabb07e456d765adc..040a674643c2a86b38d633ce8caabfd70e284848 100644
--- a/deploy/docker/cluster/scripts/asapo-services.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl
@@ -1,5 +1,10 @@
 job "asapo-services" {
   datacenters = ["dc1"]
+  affinity {
+    attribute = "$${meta.asapo_service}"
+    value     = "true"
+    weight    = 100
+  }
 
   type = "service"
 
@@ -11,7 +16,9 @@ job "asapo-services" {
       user = "${asapo_user}"
       config {
         network_mode = "host"
-        dns_servers = ["127.0.0.1"]
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/asapo-authorizer${image_suffix}"
 	force_pull = true
         volumes = ["local/config.json:/var/lib/authorizer/config.json"]
@@ -74,7 +81,9 @@ job "asapo-services" {
       user = "${asapo_user}"
       config {
         network_mode = "host"
-        dns_servers = ["127.0.0.1"]
+	    privileged = true
+	    security_opt = ["no-new-privileges"]
+	    userns_mode = "host"
         image = "yakser/asapo-discovery${image_suffix}"
 	    force_pull = true
         volumes = ["local/config.json:/var/lib/discovery/config.json"]
diff --git a/deploy/docker/cluster/scripts/asapo.auto.tfvars.in b/deploy/docker/cluster/scripts/asapo.auto.tfvars.in
index dead45d6882c4bfe1cb3bde94c43038ef1c80f78..2e378e19cfc6039f27776f222aabdacbcda5fc16 100644
--- a/deploy/docker/cluster/scripts/asapo.auto.tfvars.in
+++ b/deploy/docker/cluster/scripts/asapo.auto.tfvars.in
@@ -39,4 +39,5 @@ discovery_port = 5006
 authorizer_port = 5007
 
 
-
+n_receivers = 1
+n_brokers = 1
\ No newline at end of file
diff --git a/deploy/docker/cluster/scripts/fluentd.conf.tpl b/deploy/docker/cluster/scripts/fluentd.conf.tpl
index 839bfedce01e70e7f28271a8b59ea4f5d1f0ff7a..a46d063838dc19462084e5a60a60b886f6f5c659 100644
--- a/deploy/docker/cluster/scripts/fluentd.conf.tpl
+++ b/deploy/docker/cluster/scripts/fluentd.conf.tpl
@@ -17,6 +17,7 @@
 <filter asapo.docker>
   @type parser
   key_name log
+
   format json
   time_format %Y-%m-%d %H:%M:%S.%N
   reserve_data true
@@ -37,14 +38,16 @@
 {{ if eq $use_logs "true"}}
   <store>
   @type elasticsearch
-  hosts localhost:8400/elasticsearch
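+  # assumption: separate host/port/path fields are handled more reliably by the elasticsearch output than a single hosts URL with a path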
+  host localhost
+  port 8400
+  path /elasticsearch/
   flush_interval 5s
   logstash_format true
   time_key_format %Y-%m-%dT%H:%M:%S.%N
   time_key time
   time_key_exclude_timestamp true
   buffer_type memory
-  flush_interval 1s
   </store>
 {{ end }}
   <store>
diff --git a/deploy/docker/cluster/scripts/kibana.yml b/deploy/docker/cluster/scripts/kibana.yml
index 10675706cbd4efa98311e7f74815c705c4f82c23..3e8d29c5f54df50bc5a22958691ee7f7dd2e5765 100644
--- a/deploy/docker/cluster/scripts/kibana.yml
+++ b/deploy/docker/cluster/scripts/kibana.yml
@@ -1,5 +1,6 @@
 elasticsearch:
-  url: "http://localhost:8400/elasticsearch"
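+  # "hosts" (a list) replaces the single "url" setting in newer kibana versions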
+  hosts: ["http://localhost:8400/elasticsearch"]
 server:
   basePath:   "/logsview"
   rewriteBasePath: true
diff --git a/deploy/docker/cluster/scripts/receiver.json.tpl b/deploy/docker/cluster/scripts/receiver.json.tpl
index b1948f8b1e9d216887c26447220816a062a567da..e2770b2451c54189e6e5aa09c6ce6feb4428e723 100644
--- a/deploy/docker/cluster/scripts/receiver.json.tpl
+++ b/deploy/docker/cluster/scripts/receiver.json.tpl
@@ -1,4 +1,5 @@
 {
+  "AdvertiseIP": "{{ if or (env "meta.ib_address") "none" | regexMatch "none" }}{{ env "NOMAD_IP_recv" }}{{ else }}{{ env "meta.ib_address" }}{{ end }}",
   "PerformanceDbServer":"localhost:8400/influxdb",
   "PerformanceDbName": "asapo_receivers",
   "DatabaseServer":"auto",
@@ -15,7 +16,7 @@
     "SizeGB": {{ env "NOMAD_META_receiver_dataserver_cache_size" }},
     "ReservedShare": 10
   },
-  "Tag": "{{ env "NOMAD_ADDR_recv" }}",
+  "Tag": "{{ env "attr.unique.hostname" }}",
   "WriteToDisk":true,
   "WriteToDb":true,
   "LogLevel": "{{ keyOrDefault "receiver_log_level" "info" }}",
diff --git a/deploy/docker/cluster/scripts/resources_services.tf b/deploy/docker/cluster/scripts/resources_services.tf
index 63d18ae2801d3b8557dbd06439876e5b86f370d5..dc18b817b76c141fd3142108d133c47ce51ee8a1 100644
--- a/deploy/docker/cluster/scripts/resources_services.tf
+++ b/deploy/docker/cluster/scripts/resources_services.tf
@@ -17,7 +17,7 @@ resource "null_resource" "fluentd" {
   provisioner "local-exec" {
     command = "asapo-wait-service fluentd ${! var.nomad_logs}"
   }
-  depends_on = [nomad_job.asapo-logging]
+  depends_on = [nomad_job.asapo-logging,null_resource.elasticsearch]
 }
 
 resource "null_resource" "mongo" {
diff --git a/deploy/docker/cluster/scripts/templates.tf b/deploy/docker/cluster/scripts/templates.tf
index 8d6cbd1150fcb5c78ede42a21cb51124833ae223..b74df81275e7436182e25d98017647174e0ceb9f 100644
--- a/deploy/docker/cluster/scripts/templates.tf
+++ b/deploy/docker/cluster/scripts/templates.tf
@@ -39,6 +39,7 @@ data "template_file" "asapo_receivers" {
     receiver_total_memory_size = "${var.receiver_total_memory_size}"
     receiver_dataserver_cache_size = "${var.receiver_dataserver_cache_size}"
     asapo_user = "${var.asapo_user}"
+    n_receivers = "${var.n_receivers}"
   }
 }
 
@@ -49,6 +50,7 @@ data "template_file" "asapo_brokers" {
     image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
     nomad_logs = "${var.nomad_logs}"
     asapo_user = "${var.asapo_user}"
+    n_brokers = "${var.n_brokers}"
   }
 }
 
diff --git a/deploy/docker/cluster/scripts/vars.tf b/deploy/docker/cluster/scripts/vars.tf
index 39571a802c761a3ec77ace3f93020e9d6cb62609..9d6036284790989c895f0dfa264dfecdca33554a 100644
--- a/deploy/docker/cluster/scripts/vars.tf
+++ b/deploy/docker/cluster/scripts/vars.tf
@@ -62,4 +62,8 @@ variable "kibana_port" {}
 
 variable "discovery_port" {}
 
-variable "authorizer_port" {}
\ No newline at end of file
+variable "authorizer_port" {}
+
+variable "n_receivers" {}
+
+variable "n_brokers" {}
\ No newline at end of file
diff --git a/deploy/nomad_jobs/receiver.json.tpl b/deploy/nomad_jobs/receiver.json.tpl
index 648f8b59fae7f8b41176b64b48023d0cbfe4a266..8db2c97b3a93093f52e6132e2da859bf9c79aaa9 100644
--- a/deploy/nomad_jobs/receiver.json.tpl
+++ b/deploy/nomad_jobs/receiver.json.tpl
@@ -1,4 +1,5 @@
 {
+  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8400/influxdb",
   "PerformanceDbName": "asapo_receivers",
   "DatabaseServer":"auto",
diff --git a/examples/consumer/getnext_broker/check_linux.sh b/examples/consumer/getnext_broker/check_linux.sh
index 9fef9b8be3f24dfc3f6075139c71d4851773da95..10c158e9f4bcc0c34e604f5cf84576aea1d22016 100644
--- a/examples/consumer/getnext_broker/check_linux.sh
+++ b/examples/consumer/getnext_broker/check_linux.sh
@@ -30,7 +30,7 @@ done
 
 sleep 1
 
-$@ 127.0.0.1:8400 $source_path $beamtime_id 2 $token_test_run 1000 1  | tee /dev/stderr | grep "Processed 3 file(s)"
+$@ 127.0.0.1:8400 $source_path $beamtime_id 2 $token_test_run 12000 1  | tee /dev/stderr | grep "Processed 3 file(s)"
 
 
 
diff --git a/examples/consumer/getnext_broker/check_windows.bat b/examples/consumer/getnext_broker/check_windows.bat
index fb2e3816a0f446df3e0bbef9d2a06ef6aa88ef86..336e62956f067d0b482616909c02b9332bd7f169 100644
--- a/examples/consumer/getnext_broker/check_windows.bat
+++ b/examples/consumer/getnext_broker/check_windows.bat
@@ -16,7 +16,7 @@ ping 1.0.0.0 -n 10 -w 100 > nul
 for /l %%x in (1, 1, 3) do echo db.data.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
 
-"%1" 127.0.0.1:8400 %source_path% %beamtime_id% 1 %token_test_run% 1000 1 | findstr /c:"Processed 3 file" || goto :error
+"%1" 127.0.0.1:8400 %source_path% %beamtime_id% 1 %token_test_run% 12000 1 | findstr /c:"Processed 3 file" || goto :error
 goto :clean
 
 :error
diff --git a/examples/consumer/getnext_broker/getnext_broker.cpp b/examples/consumer/getnext_broker/getnext_broker.cpp
index 48844bbec8f6124941301894b559c0f739516f13..4f16ab67bcb06edfb4e8024bdb9992ece395af65 100644
--- a/examples/consumer/getnext_broker/getnext_broker.cpp
+++ b/examples/consumer/getnext_broker/getnext_broker.cpp
@@ -98,7 +98,8 @@ std::vector<std::thread> StartThreads(const Args& params,
 
             if (err) {
                 (*errors)[i] += ProcessError(err);
-                if (err != asapo::ConsumerErrorTemplates::kNoData ) {
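+                // break on any error now - even "no data" ends the thread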
+                if (err) {
                     std::cout << "Thread exit: " << i << std::endl;
                     break;
                 }
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index a9384b2a2b8bd3188d5c9e1452196d876209e3c2..8f62763ddeb4cdb66ac5d848fe332b4581eca83e 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -71,7 +71,7 @@ void ProcessCommandArguments(int argc, char* argv[], Args* args) {
         std::cout <<
                   "Usage: " << argv[0] <<
                   " <destination> <beamtime_id[%<stream>%<token>]> <number_of_byte> <iterations> <nthreads>"
-                  " <mode 0 -t tcp, 1 - filesystem> <timeout (sec)> [n images in set (default 1)]"
+                  " <mode x0 -t tcp, x1 - filesystem, 0x - write files, 1x - do not write files> <timeout (sec)> [n images in set (default 1)]"
                   << std::endl;
         exit(EXIT_FAILURE);
     }
@@ -127,7 +127,7 @@ asapo::FileData CreateMemoryBuffer(size_t size) {
 
 
 bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations, uint64_t images_in_set,
-                   const std::string& stream) {
+                   const std::string& stream, bool write_files) {
 
     asapo::Error err;
     if (iterations > 0) { // send wrong meta, for negative integration tests
@@ -149,7 +149,8 @@ bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t it
         }
         event_header.user_metadata = std::move(meta);
         if (images_in_set == 1) {
-            auto err = producer->SendData(event_header, std::move(buffer), asapo::kDefaultIngestMode, &ProcessAfterSend);
+            auto err = producer->SendData(event_header, std::move(buffer), write_files ? asapo::kDefaultIngestMode :
+                                          asapo::kTransferData, &ProcessAfterSend);
             if (err) {
                 std::cerr << "Cannot send file: " << err << std::endl;
                 return false;
@@ -165,7 +166,8 @@ bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t it
                     event_header.file_name = stream + "/" + event_header.file_name;
                 }
                 event_header.user_metadata = meta;
-                auto err = producer->SendData(event_header, std::move(buffer), asapo::kDefaultIngestMode, &ProcessAfterSend);
+                auto err = producer->SendData(event_header, std::move(buffer), write_files ? asapo::kDefaultIngestMode :
+                                              asapo::kTransferData, &ProcessAfterSend);
                 if (err) {
                     std::cerr << "Cannot send file: " << err << std::endl;
                     return false;
@@ -179,7 +181,8 @@ bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t it
 std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
     asapo::Error err;
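+    // the ones digit of args.mode selects the transport (x0 - tcp, x1 - filesystem); the tens digit toggles file writing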
     auto producer = asapo::Producer::Create(args.receiver_address, args.nthreads,
-                                            args.mode == 0 ? asapo::RequestHandlerType::kTcp : asapo::RequestHandlerType::kFilesystem,
+                                            args.mode % 10 == 0 ? asapo::RequestHandlerType::kTcp : asapo::RequestHandlerType::kFilesystem,
                                             asapo::SourceCredentials{args.beamtime_id, args.stream, args.token }, &err);
     if(err) {
         std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
@@ -231,7 +233,8 @@ int main (int argc, char* argv[]) {
 
     system_clock::time_point start_time = system_clock::now();
 
-    if(!SendDummyData(producer.get(), args.number_of_bytes, args.iterations, args.images_in_set, args.stream)) {
+    if(!SendDummyData(producer.get(), args.number_of_bytes, args.iterations, args.images_in_set, args.stream,
+                      args.mode / 10 == 0)) {
         return EXIT_FAILURE;
     }
 
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index ae55ea992df809839004dbc981d001d8a81c2681..8401555dfe9d798b6f8eb1611875a0a249553aa0 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -34,6 +34,7 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
     (err = parser.GetUInt64("AuthorizationInterval", &config.authorization_interval_ms)) ||
     (err = parser.GetString("RootFolder", &config.root_folder)) ||
     (err = parser.GetString("PerformanceDbName", &config.performance_db_name)) ||
+    (err = parser.GetString("AdvertiseIP", &config.advertise_ip)) ||
     (err = parser.GetString("LogLevel", &log_level));
 
     if (err) {
@@ -42,11 +43,6 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
 
     config.dataserver.tag = config.tag + "_ds";
 
-    config.source_host = io__->GetHostName(&err);
-    if (err) {
-        return err;
-    }
-
     config.log_level = StringToLogLevel(log_level, &err);
     return err;
 
diff --git a/receiver/src/receiver_config.h b/receiver/src/receiver_config.h
index 7052ac71e260a8f553366b6f4a22a3daab5c7539..831e7223262eb6bce5ac86329c0127bd3f98f6a4 100644
--- a/receiver/src/receiver_config.h
+++ b/receiver/src/receiver_config.h
@@ -23,7 +23,7 @@ struct ReceiverConfig {
     uint64_t datacache_reserved_share = 0;
     LogLevel log_level = LogLevel::Info;
     std::string tag;
-    std::string source_host;
+    std::string advertise_ip;
     ReceiverDataCenterConfig dataserver;
     std::string discovery_server;
 };
diff --git a/receiver/src/request_handler_db_write.cpp b/receiver/src/request_handler_db_write.cpp
index 19670a85b3b8f48dc0451e92f9e8301c2f910e3a..91580bfae63e1f59d0cc9635b560da855351ee7b 100644
--- a/receiver/src/request_handler_db_write.cpp
+++ b/receiver/src/request_handler_db_write.cpp
@@ -56,7 +56,7 @@ FileInfo RequestHandlerDbWrite::PrepareFileInfo(const Request* request) const {
     file_info.size = request->GetDataSize();
     file_info.id = request->GetDataID();
     file_info.buf_id = request->GetSlotId();
-    file_info.source = GetReceiverConfig()->source_host + ":" + string_format("%ld",
+    file_info.source = GetReceiverConfig()->advertise_ip + ":" + string_format("%ld",
                        GetReceiverConfig()->dataserver.listen_port);
     file_info.metadata = request->GetMetaData();
     return file_info;
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 29805ebde5ef0de2d1d1aada86537fcbacb82a6d..1ddc3ed4c83dc520f874c93ea1d759a067556f85 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -61,6 +61,7 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
     config_string += "," +  Key("WriteToDisk", error_field) + (config.write_to_disk ? "true" : "false");
     config_string += "," +  Key("WriteToDb", error_field) + (config.write_to_db ? "true" : "false");
     config_string += "," +  Key("LogLevel", error_field) + "\"" + log_level + "\"";
+    config_string += "," +  Key("AdvertiseIP", error_field) + "\"" + config.advertise_ip + "\"";
     config_string += "," +  Key("Tag", error_field) + "\"" + config.tag + "\"";
     config_string += "," +  Key("RootFolder", error_field) + "\"" + config.root_folder + "\"";
     config_string += "}";
@@ -70,20 +71,6 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
         testing::Return(config_string)
     );
 
-    if (error_field == "SourceHost") {
-        EXPECT_CALL(mock_io, GetHostName_t(_)).
-        WillOnce(
-            DoAll(SetArgPointee<0>(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()),
-                  Return("")
-                 ));
-    } else if (error_field == "none") {
-        EXPECT_CALL(mock_io, GetHostName_t(_)).
-        WillOnce(
-            DoAll(SetArgPointee<0>(nullptr),
-                  Return(config.source_host)
-                 ));
-    }
-
+    printf("%s\n", config_string.c_str());
     auto err = config_factory.SetConfig("fname");
 
     config_factory.io__.release();
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index 6231fed9089ba3bfbe11804e1d3543aa6b7450a1..aca938cff1aae4909ce0dc7e873677d5c856db05 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -59,9 +59,9 @@ class ConfigTests : public Test {
         test_config.use_datacache = false;
         test_config.datacache_reserved_share = 10;
         test_config.datacache_size_gb = 2;
-        test_config.source_host = "host";
+        test_config.advertise_ip = "host";
         test_config.dataserver.nthreads = 5;
         test_config.discovery_server = "discovery";
+        test_config.advertise_ip = "0.0.0.1";
     }
 
 };
@@ -90,10 +91,11 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->use_datacache, Eq(false));
     ASSERT_THAT(config->datacache_reserved_share, Eq(10));
     ASSERT_THAT(config->datacache_size_gb, Eq(2));
-    ASSERT_THAT(config->source_host, Eq("host"));
     ASSERT_THAT(config->dataserver.nthreads, Eq(5));
     ASSERT_THAT(config->dataserver.tag, Eq("receiver1_ds"));
     ASSERT_THAT(config->discovery_server, Eq("discovery"));
+    ASSERT_THAT(config->advertise_ip, Eq("0.0.0.1"));
+
 }
 
 
@@ -103,7 +105,7 @@ TEST_F(ConfigTests, ErrorReadSettings) {
     std::vector<std::string>fields {"PerformanceDbServer", "ListenPort", "DataServer", "ListenPort", "WriteToDisk",
                                     "WriteToDb", "DataCache", "Use", "SizeGB", "ReservedShare", "DatabaseServer", "Tag",
                                     "AuthorizationServer", "AuthorizationInterval", "RootFolder", "PerformanceDbName", "LogLevel",
-                                    "SourceHost", "NThreads", "DiscoveryServer"};
+                                    "NThreads", "DiscoveryServer", "AdvertiseIP"};
     for (const auto& field : fields) {
         auto err = asapo::SetReceiverConfig(test_config, field);
         ASSERT_THAT(err, Ne(nullptr));
diff --git a/receiver/unittests/test_request_handler_db_writer.cpp b/receiver/unittests/test_request_handler_db_writer.cpp
index 11047e9e98c819aee04be24e531d259fbb343f00..19c1ec322e678642ece25429416c87db5cb77dde 100644
--- a/receiver/unittests/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/test_request_handler_db_writer.cpp
@@ -71,7 +71,7 @@ class DbWriterHandlerTests : public Test {
     std::string expected_beamtime_id = "beamtime_id";
     std::string expected_default_stream = "detector";
     std::string expected_stream = "stream";
-    std::string expected_hostname = "host";
+    std::string expected_host_ip = "127.0.0.1";
     uint64_t expected_port = 1234;
     uint64_t expected_buf_id = 18446744073709551615ull;
     std::string expected_file_name = "2";
@@ -89,7 +89,7 @@ class DbWriterHandlerTests : public Test {
         handler.log__ = &mock_logger;
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, ""});
         config.database_uri = "127.0.0.1:27017";
-        config.source_host = expected_hostname;
+        config.advertise_ip = expected_host_ip;
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
 
@@ -173,7 +173,7 @@ FileInfo DbWriterHandlerTests::PrepareFileInfo() {
     file_info.name = expected_file_name;
     file_info.id = expected_id;
     file_info.buf_id = expected_buf_id;
-    file_info.source = expected_hostname + ":" + std::to_string(expected_port);
+    file_info.source = expected_host_ip + ":" + std::to_string(expected_port);
     file_info.metadata = expected_metadata;
     return file_info;
 }
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index ec92a792852ece7b7a2037f9325c722296eaa05d..dcae8ebd0cdc880436a17d9d5e30d3d6dc374f8a 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -55,5 +55,5 @@ $1 localhost:8400 ${beamtime_id2} 100 900 4 0 100 &
 #producerid=`echo $!`
 
 #consumers
-$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 10000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
-$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 10000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
+$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 12000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
+$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 12000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
diff --git a/tests/automatic/full_chain/two_beamlines/check_windows.bat b/tests/automatic/full_chain/two_beamlines/check_windows.bat
index 16e353f58f7d7a3db29d5ebd731985716a067d38..be98dc0f373142fdd8d8aabe83a4d302e5d05dc9 100644
--- a/tests/automatic/full_chain/two_beamlines/check_windows.bat
+++ b/tests/automatic/full_chain/two_beamlines/check_windows.bat
@@ -36,11 +36,11 @@ start /B "" "%1" %proxy_address% %beamtime_id2% 100 900 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM consumer
-"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 10000  0 > out1.txt
+"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 12000  0 > out1.txt
 type out1.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out1.txt || goto :error
 
-"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 10000  0 > out2.txt
+"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 12000  0 > out2.txt
 type out2.txt
 findstr /i /l /c:"Processed 900 file(s)"  out2.txt || goto :error
 
diff --git a/tests/automatic/full_chain/two_streams/check_windows.bat b/tests/automatic/full_chain/two_streams/check_windows.bat
index af2f4ec61a64feb5a83d26b72ec541cd48136eea..7dc456fa294a9253a86ac2a0fdf04976c3981be1 100644
--- a/tests/automatic/full_chain/two_streams/check_windows.bat
+++ b/tests/automatic/full_chain/two_streams/check_windows.bat
@@ -31,11 +31,11 @@ start /B "" "%1" %proxy_address% %beamtime_id%%%%stream2% 100 900 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM consumer
-"%2" %proxy_address% %receiver_folder% %beamtime_id%%%%stream1% 2 %token% 10000  0 > out1.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id%%%%stream1% 2 %token% 12000  0 > out1.txt
 type out1.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out1.txt || goto :error
 
-"%2" %proxy_address% %receiver_folder% %beamtime_id%%%%stream2% 2 %token% 10000  0 > out2.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id%%%%stream2% 2 %token% 12000  0 > out2.txt
 type out2.txt
 findstr /i /l /c:"Processed 900 file(s)"  out2.txt || goto :error
 
diff --git a/tests/automatic/mongo_db/insert_dataset/insert_dataset_mongodb.cpp b/tests/automatic/mongo_db/insert_dataset/insert_dataset_mongodb.cpp
index 2d595be84c638c923eccd7fde6559281241ae3eb..c15b83406905c4a5de98c1d4b8c80e98a639404b 100644
--- a/tests/automatic/mongo_db/insert_dataset/insert_dataset_mongodb.cpp
+++ b/tests/automatic/mongo_db/insert_dataset/insert_dataset_mongodb.cpp
@@ -46,7 +46,7 @@ int main(int argc, char* argv[]) {
     fi.source = "host:1234";
     fi.id = 1;
 
-    uint64_t subset_size=2;
+    uint64_t subset_size = 2;
 
     if (args.keyword != "Notconnected") {
         db.Connect("127.0.0.1", "data", "test");
diff --git a/tests/automatic/settings/receiver.json.tpl.lin.in b/tests/automatic/settings/receiver.json.tpl.lin.in
index 2f27019b74b1e721ecc52ea1a4b28c1c5e46e6a3..86fa61c0dbb173a77071e26568cb9913d5f8a4e1 100644
--- a/tests/automatic/settings/receiver.json.tpl.lin.in
+++ b/tests/automatic/settings/receiver.json.tpl.lin.in
@@ -1,4 +1,5 @@
 {
+  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"auto",
diff --git a/tests/automatic/settings/receiver.json.tpl.win.in b/tests/automatic/settings/receiver.json.tpl.win.in
index 7fcedb822a21daa57a82a0d3526bda85cbd26ca2..4cc81d11dafa9d9522dd01da192a3efa850a49c0 100644
--- a/tests/automatic/settings/receiver.json.tpl.win.in
+++ b/tests/automatic/settings/receiver.json.tpl.win.in
@@ -1,4 +1,5 @@
 {
+  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"auto",
diff --git a/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp b/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
index 8c304bcfd7bf0542c729ac6cd54f618178082be0..f5a880beea506ae2ace3c7d225fe10c6210c1d6e 100644
--- a/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
+++ b/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
@@ -16,7 +16,8 @@ using asapo::M_AssertEq;
 
 using namespace std::chrono;
 
-static const std::unique_ptr<asapo::IO> io(asapo::GenerateDefaultIO());
+static const std::unique_ptr<asapo::IO>
+    io(asapo::GenerateDefaultIO());
 static const std::string kListenAddress = "127.0.0.1:60123";
 static std::promise<void> kThreadStarted;
 static const int kNumberOfChecks = 2;
@@ -158,7 +159,7 @@ int main(int argc, char* argv[]) {
 
     std::cout << "[META] Check unknown host" << std::endl;
     io->CreateAndConnectIPTCPSocket("some-host-that-might-not-exists.aa:1234", &err);
-    if(asapo::IOErrorTemplates::kUnableToResolveHostname != err) {
+    if(err != asapo::IOErrorTemplates::kAddressNotValid) {
         ExitIfErrIsNotOk(&err, 303);
     }
 
diff --git a/tests/manual/python_tests/producer/receiver.json.tpl b/tests/manual/python_tests/producer/receiver.json.tpl
index 88be63ae321b4974c7c440f7f479dc7682acb806..0eaaa9848c36a37e666c5abadf9b856701581820 100644
--- a/tests/manual/python_tests/producer/receiver.json.tpl
+++ b/tests/manual/python_tests/producer/receiver.json.tpl
@@ -1,4 +1,5 @@
 {
+  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"localhost:27017",
diff --git a/tests/manual/receiver_debug_local/discovery.json b/tests/manual/receiver_debug_local/discovery.json
new file mode 100644
index 0000000000000000000000000000000000000000..89b991729d2c089f7bea1a0ce7523ca065a25d9d
--- /dev/null
+++ b/tests/manual/receiver_debug_local/discovery.json
@@ -0,0 +1,19 @@
+{
+  "Mode": "static",
+  "Receiver": {
+    "StaticEndpoints": [
+      "localhost:22001"
+    ],
+    "MaxConnections": 32
+  },
+  "Broker": {
+    "StaticEndpoint": "localhost:5005"
+  },
+  "Mongo": {
+    "StaticEndpoint": "127.0.0.1:27017"
+  },
+  "Port": 5900,
+  "LogLevel":"debug"
+}
+
+
diff --git a/tests/manual/receiver_debug_local/discovery.json.tpl b/tests/manual/receiver_debug_local/discovery.json.tpl
index b068613852287d5c712033c65bd17b1725d12faf..120614c760605d3ed0ca632d24830e96ffd42f94 100644
--- a/tests/manual/receiver_debug_local/discovery.json.tpl
+++ b/tests/manual/receiver_debug_local/discovery.json.tpl
@@ -9,6 +9,9 @@
   "Broker": {
     "StaticEndpoint": "localhost:5005"
   },
+  "Mongo": {
+    "StaticEndpoint": "127.0.0.1:27017"
+  },
   "Port": {{ env "NOMAD_PORT_discovery" }},
   "LogLevel":"debug"
 }
diff --git a/tests/manual/receiver_debug_local/nginx_kill.nmd b/tests/manual/receiver_debug_local/nginx_kill.nmd
new file mode 100644
index 0000000000000000000000000000000000000000..cb3abbac259780ce7366042f24a19d635f032994
--- /dev/null
+++ b/tests/manual/receiver_debug_local/nginx_kill.nmd
@@ -0,0 +1,17 @@
+job "nginx_kill" {
+  datacenters = ["dc1"]
+
+  type = "batch"
+
+  group "nginx_kill" {
+    count = 1
+
+    task "nginx_kill" {
+      driver = "raw_exec"
+      config {
+        command = "killall",
+        args =  ["nginx"]
+      }
+   }
+  }
+}
diff --git a/tests/manual/receiver_debug_local/start_dummyproducer.sh b/tests/manual/receiver_debug_local/start_dummyproducer.sh
index 902e17cb1a8c94ebe8c0be2ddcff213653f10917..55640449ed847c43db3afb678d99a7e6faf07d1b 100755
--- a/tests/manual/receiver_debug_local/start_dummyproducer.sh
+++ b/tests/manual/receiver_debug_local/start_dummyproducer.sh
@@ -2,13 +2,13 @@
 
 beamtime_id=asapo_test
 
-nfiles=10
+nfiles=1000
 timeout=100
-fsize=100
-mode=0 #tcp
-nthreads=4
+fsize=10000
+mode=10 #tcp & no file write
+nthreads=32
 
 exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/producer/dummy-data-producer/dummy-data-producer
 
 
-$exec localhost:8400 ${beamtime_id} $fsize $nfiles  $nthreads $mode  $timeout
\ No newline at end of file
+$exec localhost:8400 ${beamtime_id} $fsize $nfiles  $nthreads $mode  $timeout
diff --git a/tests/manual/receiver_debug_local/start_receiver.sh b/tests/manual/receiver_debug_local/start_receiver.sh
new file mode 100755
index 0000000000000000000000000000000000000000..231051afff79b8daa029c44a33e54a33098b3949
--- /dev/null
+++ b/tests/manual/receiver_debug_local/start_receiver.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+
+exec=/home/yakubov/projects/asapo/cmake-build-debug/receiver/receiver
+
+
+$exec receiver.json