diff --git a/common/cpp/include/common/networking.h b/common/cpp/include/common/networking.h
index 2ea7c7c520501ededc8dd4a23226aa221c340ec9..d5a5edb5976179df45b8b82e23e77ff00d8737d5 100644
--- a/common/cpp/include/common/networking.h
+++ b/common/cpp/include/common/networking.h
@@ -6,6 +6,8 @@
 #include <string>
 #include <cstring>
 
+#include "data_structs.h"
+
 namespace asapo {
 
 typedef uint64_t NetworkRequestId;
@@ -20,6 +22,7 @@ enum Opcode : uint8_t {
     kOpcodeCount,
 };
 
+
 enum NetworkErrorCode : uint16_t {
     kNetErrorNoError,
     kNetErrorWrongRequest,
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index f3225f60e190a56e3d6ab05d5af8aa616901df60..9d8b919d4d6140072a0ff82ea8652e08169dfe9c 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -97,6 +97,7 @@ bson_p PrepareBsonDocument(const FileInfo& file, Error* err) {
     bson_error_t mongo_err;
     auto s = file.Json();
     auto json = reinterpret_cast<const uint8_t*>(s.c_str());
+
     auto bson = bson_new_from_json(json, -1, &mongo_err);
     if (!bson) {
         *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
@@ -109,6 +110,11 @@ bson_p PrepareBsonDocument(const FileInfo& file, Error* err) {
 
 bson_p PrepareBsonDocument(const uint8_t* json, ssize_t len, Error* err) {
     bson_error_t mongo_err;
+    if (json == nullptr) {
+        *err = TextError("empty metadata");
+        return nullptr;
+    }
+
     auto bson = bson_new_from_json(json, len, &mongo_err);
     if (!bson) {
         *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
diff --git a/examples/pipeline/in_to_out/check_linux.sh b/examples/pipeline/in_to_out/check_linux.sh
index 6f00ecbb7ab8c66b8e19a5f176a89d3a3eca3705..69cc6afeb950de05edc7ab77430d4819c61ac3ed 100644
--- a/examples/pipeline/in_to_out/check_linux.sh
+++ b/examples/pipeline/in_to_out/check_linux.sh
@@ -4,8 +4,12 @@ source_path=.
 beamtime_id=asapo_test
 stream_in=detector
 stream_out=stream
+stream_out2=stream2
+
 indatabase_name=${beamtime_id}_${stream_in}
 outdatabase_name=${beamtime_id}_${stream_out}
+outdatabase_name2=${beamtime_id}_${stream_out2}
+
 token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
 beamline=test
@@ -28,6 +32,8 @@ Cleanup() {
 	echo "db.dropDatabase()" | mongo ${outdatabase_name}
+	echo "db.dropDatabase()" | mongo ${outdatabase_name2}
 	rm -rf file1 file2 file3
     rm -rf ${receiver_root_folder}
+    rm -rf out out2
 
 }
 
@@ -50,11 +55,18 @@ done
 
 sleep 1
 
-$1 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out $token 2 1000 1 | tee /dev/stderr | grep "Processed 3 file(s)"
+$1 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out $token 2 1000 1  > out
+cat out
+cat out | grep "Processed 3 file(s)"
+cat out | grep "Sent 3 file(s)"
 
+echo "db.data.find({"_id":1})" | mongo ${outdatabase_name} | tee /dev/stderr | grep file1_${stream_out}
 
 cat ${receiver_folder}/file1_${stream_out} | grep hello1
-
 cat ${receiver_folder}/file2_${stream_out} | grep hello2
-
 cat ${receiver_folder}/file3_${stream_out} | grep hello3
+
+$1 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out2 $token 2 1000 0  > out2
+cat out2
+test ! -f ${receiver_folder}/file1_${stream_out2}
+echo "db.data.find({"_id":1})" | mongo ${outdatabase_name2} | tee /dev/stderr | grep ./file1
diff --git a/examples/pipeline/in_to_out/check_windows.bat b/examples/pipeline/in_to_out/check_windows.bat
index a63e3f77a32ed15d3668053768f64fa04db831c5..73825f8f91527ae79c6aca123d25ae89433a2d8e 100644
--- a/examples/pipeline/in_to_out/check_windows.bat
+++ b/examples/pipeline/in_to_out/check_windows.bat
@@ -1,22 +1,64 @@
 SET source_path=dummy
 
-SET beamtime_id=test_run
-SET stream=detector
-SET database_name=%beamtime_id%_%stream%
+SET source_path=.
+SET beamtime_id=asapo_test
+SET stream_in=detector
+SET stream_out=stream
+SET stream_out2=stream2
+
+SET indatabase_name=%beamtime_id%_%stream_in%
+SET outdatabase_name=%beamtime_id%_%stream_out%
+SET outdatabase_name2=%beamtime_id%_%stream_out2%
+
+SET token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
+
+SET beamline=test
+
+SET receiver_root_folder=c:\tmp\asapo\receiver\files
+SET receiver_folder="%receiver_root_folder%\%beamline%\%beamtime_id%"
+
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
-set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
+
 
 c:\opt\consul\nomad run discovery.nmd
 c:\opt\consul\nomad run broker.nmd
 c:\opt\consul\nomad run nginx.nmd
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run authorizer.nmd
 
 ping 1.0.0.0 -n 10 -w 100 > nul
 
-for /l %%x in (1, 1, 3) do echo db.data.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 3) do echo db.data.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
+
+mkdir %receiver_folder%
+
+echo hello1 > file1
+echo hello2 > file2
+echo hello3 > file3
+
+
+"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %stream_in% %stream_out% %token% 2 1000 1 > out
+type out
+findstr /I /L /C:"Processed 3 file(s)" out || goto :error
+findstr /I /L /C:"Sent 3 file(s)" out || goto :error
+
+echo db.data.find({"_id":1}) | %mongo_exe% %outdatabase_name% | findstr file1_%stream_out%  || goto :error
+
+findstr /I /L /C:"hello1" %receiver_folder%\file1_%stream_out% || goto :error
+findstr /I /L /C:"hello2" %receiver_folder%\file2_%stream_out% || goto :error
+findstr /I /L /C:"hello3" %receiver_folder%\file3_%stream_out% || goto :error
+
+
+"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %stream_in% %stream_out2% %token% 2 1000 0 > out2
+type out2
+findstr /I /L /C:"Processed 3 file(s)" out2 || goto :error
+findstr /I /L /C:"Sent 3 file(s)" out2 || goto :error
+
+
+echo db.data.find({"_id":1}) | %mongo_exe% %outdatabase_name2% | findstr ./file1 || goto :error
 
 
-"%1" 127.0.0.1:8400 %source_path% %beamtime_id% 1 %token_test_run% 1000 1 | findstr /c:"Processed 3 file" || goto :error
 goto :clean
 
 :error
@@ -27,4 +69,11 @@ exit /b 1
 c:\opt\consul\nomad stop discovery
 c:\opt\consul\nomad stop broker
 c:\opt\consul\nomad stop nginx
-echo db.dropDatabase() | %mongo_exe% %database_name%
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop authorizer
+
+echo db.dropDatabase() | %mongo_exe% %indatabase_name%
+echo db.dropDatabase() | %mongo_exe% %outdatabase_name%
+echo db.dropDatabase() | %mongo_exe% %outdatabase_name2%
+rmdir /S /Q %receiver_root_folder%
+del /Q file1 file2 file3 out out2
diff --git a/examples/pipeline/in_to_out/in_to_out.cpp b/examples/pipeline/in_to_out/in_to_out.cpp
index c1c39911c9b66b9ab07463b1a2c5cea210110e67..34963a00c84eefb9ee58f3a303e52226af18a58d 100644
--- a/examples/pipeline/in_to_out/in_to_out.cpp
+++ b/examples/pipeline/in_to_out/in_to_out.cpp
@@ -10,7 +10,7 @@
 #include <string>
 #include <sstream>
 
-
+#include <algorithm>
 #include "asapo_worker.h"
 #include "asapo_producer.h"
 
@@ -18,7 +17,12 @@ using std::chrono::system_clock;
 using asapo::Error;
 
 std::string group_id = "";
-std::mutex lock;
+std::mutex lock_in,lock_out;
+
+int files_sent;
+bool streamout_timer_started;
+system_clock::time_point streamout_start;
+system_clock::time_point streamout_finish;
 
 struct Args {
     std::string server;
@@ -37,6 +41,11 @@ void ProcessAfterSend(asapo::GenericRequestHeader header, asapo::Error err) {
         std::cerr << "Data was not successfully send: " << err << std::endl;
         return;
     }
+    lock_out.lock();
+    files_sent++;
+    streamout_finish = std::max(streamout_finish, system_clock::now());
+    lock_out.unlock();
+
 }
 
 
@@ -63,7 +72,7 @@ std::vector<std::thread> StartThreads(const Args& args, asapo::Producer* produce
 
         broker->SetTimeout((uint64_t) args.timeout_ms);
 
-        lock.lock();
+        lock_in.lock();
 
         if (group_id.empty()) {
             group_id = broker->GenerateNewGroupId(&err);
@@ -73,7 +82,7 @@ std::vector<std::thread> StartThreads(const Args& args, asapo::Producer* produce
             }
         }
 
-        lock.unlock();
+        lock_in.unlock();
 
         if (i == 0) {
             auto meta = broker->GetBeamtimeMeta(&err);
@@ -103,8 +112,14 @@ std::vector<std::thread> StartThreads(const Args& args, asapo::Producer* produce
                 header.file_name = args.file_path + "/" + header.file_name;
                 err_send = producer->SendData(header, nullptr, asapo::IngestModeFlags::kTransferMetaDataOnly, ProcessAfterSend);
                 std::cout << err_send << std::endl;
+            }
 
+            lock_out.lock();
+            if (!streamout_timer_started) {
+                streamout_timer_started = true;
+                streamout_start = system_clock::now();
             }
+            lock_out.unlock();
 
             if (err_send) {
                 std::cout << "Send error: " << err_send->Explain() << std::endl;
@@ -155,6 +170,25 @@
     return producer;
 }
 
+void WaitThreadsFinished(const Args& args, int nfiles) {
+    int elapsed_ms = 0;
+    while (true) {
+        lock_out.lock();
+        bool all_sent = (files_sent == nfiles);
+        lock_out.unlock();
+        if (all_sent) {
+            break;
+        }
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        elapsed_ms += 100;
+        if (elapsed_ms > args.timeout_ms) {
+            std::cerr << "Stream out exit on timeout " << std::endl;
+            break;
+        }
+    }
+}
+
+
 
 int main(int argc, char* argv[]) {
     asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
@@ -177,19 +208,32 @@ int main(int argc, char* argv[]) {
     args.transfer_data = atoi(argv[9]) == 1;
 
     auto producer = CreateProducer(args);
+    files_sent = 0;
+    streamout_timer_started = false;
 
     uint64_t duration_ms;
     int nerrors;
     auto nfiles = ProcessAllData(args, producer.get(), &duration_ms, &nerrors);
-    std::cout << "Processed " << nfiles << " file(s)" << std::endl;
-    std::cout << "Successfully: " << nfiles - nerrors << std::endl;
-    std::cout << "Errors : " << nerrors << std::endl;
-    std::cout << "Elapsed : " << duration_ms << "ms" << std::endl;
-    std::cout << "Rate : " << 1000.0f * nfiles / (duration_ms - args.timeout_ms) << std::endl;
+
+
+    WaitThreadsFinished(args,nfiles);
+    auto duration_streamout = std::chrono::duration_cast<std::chrono::milliseconds>(streamout_finish - streamout_start);
+
+    std::cout << "Stream in " << std::endl;
+    std::cout << "  Processed " << nfiles << " file(s)" << std::endl;
+    std::cout << "  Successfully: " << nfiles - nerrors << std::endl;
+    std::cout << "  Errors : " << nerrors << std::endl;
+    std::cout << "  Elapsed : " << duration_ms - args.timeout_ms << "ms" << std::endl;
+    std::cout << "  Rate : " << 1000.0f * nfiles / (duration_ms - args.timeout_ms) << std::endl;
+
+    std::cout << "Stream out " << std::endl;
+    std::cout << "  Sent " << files_sent << " file(s)" << std::endl;
+    std::cout << "  Elapsed : " << duration_streamout.count() << "ms" << std::endl;
+    std::cout << "  Rate : " << 1000.0f * files_sent / (duration_streamout.count()) << std::endl;
 
 
     std::this_thread::sleep_for(std::chrono::milliseconds(1000));
 
 
-    return nerrors == 0 ? 0 : 1;
+    return (nerrors == 0) && (files_sent==nfiles) ? 0 : 1;
 }
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index a7b6c2dab7478169727980fae3043bae4584453b..54d359a783781dd492330a7f078d43a674169fcb 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -1,4 +1,5 @@
 #include <iostream>
+#include <string>
 #include <chrono>
 #include <vector>
 #include <mutex>
@@ -186,7 +187,7 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
     }
 
     producer->EnableLocalLog(true);
-    producer->SetLogLevel(asapo::LogLevel::Info);
+    producer->SetLogLevel(asapo::LogLevel::Debug);
     return producer;
 }
 
diff --git a/producer/api/src/producer_impl.cpp b/producer/api/src/producer_impl.cpp
index 86dd8e7660e0eefde0691282bc552b16423cb2e8..ba82eb7b1fd34a33fb638ed8bdf68163f0728887 100644
--- a/producer/api/src/producer_impl.cpp
+++ b/producer/api/src/producer_impl.cpp
@@ -140,6 +140,7 @@ Error ProducerImpl::SetCredentials(SourceCredentials source_cred) {
 
 Error ProducerImpl::SendMetaData(const std::string& metadata, RequestCallback callback) {
     GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), 0, "beamtime_global.meta"};
+    request_header.custom_data[kPosInjestMode] = asapo::IngestModeFlags::kTransferData;
     FileData data{new uint8_t[metadata.size()]};
     strncpy((char*)data.get(), metadata.c_str(), metadata.size());
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(request_header),
diff --git a/producer/api/unittests/test_producer_impl.cpp b/producer/api/unittests/test_producer_impl.cpp
index 2a802c9a5cd0883430b3923722b4e2a7d8465060..e3f071ff696111b152ffdacd6527eb3e75415109 100644
--- a/producer/api/unittests/test_producer_impl.cpp
+++ b/producer/api/unittests/test_producer_impl.cpp
@@ -42,8 +42,7 @@ MATCHER_P9(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_i
                == uint64_t(subset_id) : true)
            && (op_code == asapo::kOpcodeTransferSubsetData ? ((asapo::GenericRequestHeader) (arg->header)).custom_data[2]
                == uint64_t(subset_size) : true)
-           && ((op_code == asapo::kOpcodeTransferSubsetData || op_code == asapo::kOpcodeTransferData) ?
-               ((asapo::GenericRequestHeader) (arg->header)).custom_data[asapo::kPosInjestMode] == uint64_t(injest_mode) : true)
+           && ((asapo::GenericRequestHeader) (arg->header)).custom_data[asapo::kPosInjestMode] == uint64_t(injest_mode)
            && strcmp(((asapo::GenericRequestHeader) (arg->header)).message, message) == 0;
 }
 
@@ -178,6 +177,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
     expected_id = 0;
     expected_metadata = "{\"meta\":10}";
     expected_size = expected_metadata.size();
+    expected_injest_mode = asapo::IngestModeFlags::kTransferData;
 
     producer.SetCredentials(expected_credentials);
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferMetaData,
@@ -186,7 +186,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
                                         expected_id,
                                         expected_size,
                                         "beamtime_global.meta",
-                                        0,
+                                        expected_injest_mode,
                                         10,
                                         10))).WillOnce(Return(
                                                     nullptr));
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index bfba8347033bf39a9b89714e951879e9e5d843e4..b38f7abe87575bc28c9836aba5e4b1f6a1a70ecb 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -41,11 +41,11 @@ sleep 1
 
 #producer
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 1000 4 0 100 &
+$1 localhost:8400 ${beamtime_id} 100 0 4 0 100
 #producerid=`echo $!`
 
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
-cat out
-cat out   | grep "Processed 1000 file(s)"
-cat out | grep "Cannot get metadata"
+#$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
+#cat out
+#cat out   | grep "Processed 1000 file(s)"
+#cat out | grep "Cannot get metadata"
diff --git a/tests/manual/receiver_debug_local/authorizer.json.tpl b/tests/manual/receiver_debug_local/authorizer.json.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..7c3a796d20b0bdb556fc1d88f82e13922b5d140d
--- /dev/null
+++ b/tests/manual/receiver_debug_local/authorizer.json.tpl
@@ -0,0 +1,10 @@
+{
+  "Port": {{ env "NOMAD_PORT_authorizer" }},
+  "LogLevel":"debug",
+  "AlwaysAllowedBeamtimes":[{"BeamtimeId":"asapo_test","Beamline":"test"},
+  {"BeamtimeId":"asapo_test1","Beamline":"test1"},
+  {"BeamtimeId":"asapo_test2","Beamline":"test2"}],
+  "SecretFile":"auth_secret.key"
+}
+
+
diff --git a/tests/manual/receiver_debug_local/authorizer.nmd b/tests/manual/receiver_debug_local/authorizer.nmd
new file mode 100644
index 0000000000000000000000000000000000000000..8b32105cf2e4644e97b6fda5e4aba06c6c269822
--- /dev/null
+++ b/tests/manual/receiver_debug_local/authorizer.nmd
@@ -0,0 +1,55 @@
+job "authorizer" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "authorizer" {
+      driver = "raw_exec"
+
+      config {
+        command = "/home/yakubov/projects/asapo/cmake-build-debug/authorizer/asapo-authorizer",
+        args =  ["-config","${NOMAD_TASK_DIR}/authorizer.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "authorizer" {
+            static = "5007"
+          }
+        }
+      }
+
+      service {
+        name = "authorizer"
+        port = "authorizer"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/health-check"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "/home/yakubov/projects/asapo/tests/manual/receiver_debug_local/authorizer.json.tpl"
+         destination   = "local/authorizer.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+      template {
+         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/full_chain/simple_chain/auth_secret.key"
+         destination   = "auth_secret.key"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+    }
+  }
+}
diff --git a/tests/manual/receiver_debug_local/clean_db.sh b/tests/manual/receiver_debug_local/clean_db.sh
new file mode 100755
index 0000000000000000000000000000000000000000..359d98df324b2135bc015a6c244ec3b63e79e47a
--- /dev/null
+++ b/tests/manual/receiver_debug_local/clean_db.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+beamtime_id=asapo_test
+
+
+echo "db.dropDatabase()" | mongo ${beamtime_id}_detector
diff --git a/tests/manual/receiver_debug_local/discovery.json.tpl b/tests/manual/receiver_debug_local/discovery.json.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..b068613852287d5c712033c65bd17b1725d12faf
--- /dev/null
+++ b/tests/manual/receiver_debug_local/discovery.json.tpl
@@ -0,0 +1,16 @@
+{
+  "Mode": "static",
+  "Receiver": {
+    "StaticEndpoints": [
+      "localhost:22001"
+    ],
+    "MaxConnections": 32
+  },
+  "Broker": {
+    "StaticEndpoint": "localhost:5005"
+  },
+  "Port": {{ env "NOMAD_PORT_discovery" }},
+  "LogLevel":"debug"
+}
+
+
diff --git a/tests/manual/receiver_debug_local/discovery.nmd b/tests/manual/receiver_debug_local/discovery.nmd
new file mode 100644
index 0000000000000000000000000000000000000000..4a10894aa5b00e2fa0483133ad96a4654a64daa6
--- /dev/null
+++ b/tests/manual/receiver_debug_local/discovery.nmd
@@ -0,0 +1,49 @@
+job "discovery" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "discovery" {
+      driver = "raw_exec"
+
+      config {
+        command = "/home/yakubov/projects/asapo/cmake-build-debug/discovery/asapo-discovery",
+        args =  ["-config","${NOMAD_TASK_DIR}/discovery.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "discovery" {
+            static = "5006"
+          }
+        }
+      }
+
+      service {
+        name = "discovery"
+        port = "discovery"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/receivers"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "/home/yakubov/projects/asapo/tests/manual/receiver_debug_local/discovery.json.tpl"
+         destination   = "local/discovery.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+    }
+  }
+}
diff --git a/tests/manual/receiver_debug_local/nginx.conf.tpl b/tests/manual/receiver_debug_local/nginx.conf.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..a545307b376e004d2a0d6e5cbad696996b6a4136
--- /dev/null
+++ b/tests/manual/receiver_debug_local/nginx.conf.tpl
@@ -0,0 +1,42 @@
+worker_processes  1;
+daemon off;
+
+events {
+    worker_connections  1024;
+}
+
+http {
+#    include       mime.types;
+#    default_type  application/octet-stream;
+
+#    sendfile        on;
+#    tcp_nopush     on;
+
+#    keepalive_timeout  0;
+#    keepalive_timeout  65;
+
+    resolver 127.0.0.1:8600 valid=1s;
+    server {
+        listen       {{ env "NOMAD_PORT_nginx" }};
+          set $discovery_endpoint discovery.service.asapo;
+          set $authorizer_endpoint authorizer.service.asapo;
+       #   set $fluentd_endpoint localhost;
+          location /discovery/ {
+            rewrite ^/discovery(/.*) $1 break;
+            proxy_pass http://$discovery_endpoint:5006$uri;
+          }
+          location /logs/ {
+             # rewrite ^/logs(/.*) $1 break;
+              proxy_pass http://localhost:9880/asapo;
+          }
+           location /authorizer/ {
+            rewrite ^/authorizer(/.*) $1 break;
+            proxy_pass http://$authorizer_endpoint:5007$uri;
+                    }
+
+
+	location /nginx-health {
+  	  return 200 "healthy\n";
+	}
+    }
+}
diff --git a/tests/manual/receiver_debug_local/nginx.nmd b/tests/manual/receiver_debug_local/nginx.nmd
new file mode 100644
index 0000000000000000000000000000000000000000..b424e53874d17c7b0612106225f0250fa274fac4
--- /dev/null
+++ b/tests/manual/receiver_debug_local/nginx.nmd
@@ -0,0 +1,63 @@
+job "nginx" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "nginx" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "30m"
+      delay = "15s"
+      mode = "fail"
+    }
+
+    task "nginx" {
+      driver = "raw_exec"
+
+      config {
+        command = "nginx",
+        args =  ["-c","${NOMAD_TASK_DIR}/nginx.conf"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          mbits = 10
+          port "nginx" {
+          static = 8400
+          }
+        }
+      }
+
+      service {
+        port = "nginx"
+        name = "nginx"
+        check {
+          name     = "alive"
+          type     = "http"
+	  path     = "/nginx-health"
+          timeout  = "2s"
+	  interval = "10s"
+        }
+      }
+
+      template {
+         source        = "/home/yakubov/projects/asapo/tests/manual/receiver_debug_local/nginx.conf.tpl"
+         destination   = "local/nginx.conf"
+         change_mode   = "restart"
+      }
+
+
+   }
+  }
+}
diff --git a/tests/manual/receiver_debug_local/receiver.json b/tests/manual/receiver_debug_local/receiver.json
new file mode 100644
index 0000000000000000000000000000000000000000..0f6bdad83dbb9875b58a0ca3990663ef9f62a9ff
--- /dev/null
+++ b/tests/manual/receiver_debug_local/receiver.json
@@ -0,0 +1,22 @@
+{
+  "MonitorDbAddress":"localhost:8086",
+  "MonitorDbName": "db_test",
+  "BrokerDbAddress":"localhost:27017",
+  "DataServer": {
+    "NThreads": 2,
+    "ListenPort": 22000
+  },
+  "DataCache": {
+    "Use": true,
+    "SizeGB": 1,
+    "ReservedShare": 10
+  },
+  "AuthorizationServer": "localhost:8400/authorizer",
+  "AuthorizationInterval": 10000,
+  "ListenPort": 22001,
+  "Tag": "22001",
+  "WriteToDisk": true,
+  "WriteToDb": true,
+  "LogLevel" : "debug",
+  "RootFolder" : "/tmp/asapo/receiver/files"
+}
diff --git a/tests/manual/receiver_debug_local/start_dummyproducer.sh b/tests/manual/receiver_debug_local/start_dummyproducer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..902e17cb1a8c94ebe8c0be2ddcff213653f10917
--- /dev/null
+++ b/tests/manual/receiver_debug_local/start_dummyproducer.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+beamtime_id=asapo_test
+
+nfiles=10
+timeout=100
+fsize=100
+mode=0 #tcp
+nthreads=4
+
+exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/producer/dummy-data-producer/dummy-data-producer
+
+
+$exec localhost:8400 ${beamtime_id} $fsize $nfiles  $nthreads $mode  $timeout
\ No newline at end of file
diff --git a/tests/manual/receiver_debug_local/start_services.sh b/tests/manual/receiver_debug_local/start_services.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1315ced6786be8643775b09d20e47dc3718d0218
--- /dev/null
+++ b/tests/manual/receiver_debug_local/start_services.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+nomad run authorizer.nmd
+nomad run discovery.nmd
+nomad run nginx.nmd
diff --git a/tests/manual/receiver_debug_local/stop_services.sh b/tests/manual/receiver_debug_local/stop_services.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c05b82e1ce76b1ba1499cf090e87bb150b5d17f3
--- /dev/null
+++ b/tests/manual/receiver_debug_local/stop_services.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+nomad stop authorizer
+nomad stop discovery
+nomad stop nginx