diff --git a/CMakeLists.txt b/CMakeLists.txt
index ba3108103365816686cf2007f3bb83f4c7fbf5ca..2285e204fa4a3a56877f8b4857266d59723c070b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,6 +3,7 @@ project(ASAPO)
 set(CMAKE_CXX_STANDARD 11)
 IF(WIN32)
     set(CMAKE_CXX_FLAGS_DEBUG "/MTd")
+    add_definitions(-DWIN32)
 ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "GNU")
     SET( CMAKE_EXE_LINKER_FLAGS  "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++")
     set(CMAKE_CXX_FLAGS  "${CMAKE_CXX_FLAGS} -Wall")
@@ -16,6 +17,11 @@ ELSEIF(UNIX)
     SET_PROPERTY(GLOBAL PROPERTY ASAPO_COMMON_IO_LIBRARIES Threads::Threads)
 ENDIF(WIN32)
 
+if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+    add_definitions(-DUNIT_TESTS)
+endif ()
+
+
 option(BUILD_TESTS "Uses googletest to build tests" OFF)
 option(BUILD_INTEGRATION_TESTS "Include integration tests (CMAKE >3.7 is needed)" OFF)
 option(BUILD_DOCS "Uses doxygen to build the documentaion" OFF)
@@ -41,6 +47,9 @@ include(astyle)
 
 include(testing_cpp)
 
+include(prepare_asapo)
+
+
 if(BUILD_WORKER_TOOLS)
     set (BUILD_MONGODB_CLIENTLIB ON)
 endif()
diff --git a/CMakeModules/prepare_asapo.cmake b/CMakeModules/prepare_asapo.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..53580a80b0f90d9b3c3546efab2ec5a460216922
--- /dev/null
+++ b/CMakeModules/prepare_asapo.cmake
@@ -0,0 +1,18 @@
+function(prepare_asapo)
+    get_target_property(RECEIVER_DIR receiver-bin BINARY_DIR)
+    get_target_property(RECEIVER_NAME receiver-bin OUTPUT_NAME)
+    get_target_property(DISCOVERY_FULLPATH asapo-discovery EXENAME)
+    get_target_property(BROKER_FULLPATH asapo-broker EXENAME)
+    set(WORK_DIR ${CMAKE_CURRENT_BINARY_DIR})
+    if (WIN32)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.win receiver.json.tpl COPYONLY)
+    else()
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.lin receiver.json.tpl COPYONLY)
+    endif()
+    configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver.nmd.in  receiver.nmd @ONLY)
+    configure_file(${CMAKE_SOURCE_DIR}/config/nomad/discovery.nmd.in  discovery.nmd @ONLY)
+    configure_file(${CMAKE_SOURCE_DIR}/config/nomad/broker.nmd.in  broker.nmd @ONLY)
+    configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/discovery_settings.json.tpl discovery.json.tpl COPYONLY)
+    configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/broker_settings.json.tpl broker.json.tpl COPYONLY)
+endfunction()
+
diff --git a/broker/src/asapo_broker/database/database.go b/broker/src/asapo_broker/database/database.go
index e530bf03e1587c224a9e13dc92437fa45c656d2a..3a0683bf301fd3bf4b41c4d54544a1a970c3b158 100644
--- a/broker/src/asapo_broker/database/database.go
+++ b/broker/src/asapo_broker/database/database.go
@@ -2,6 +2,7 @@ package database
 
 type Agent interface {
 	GetNextRecord(db_name string) ([]byte, error)
+	GetRecordByID(dbname string, id int) ([]byte, error)
 	Connect(string) error
 	Close()
 	Copy() Agent
diff --git a/broker/src/asapo_broker/database/database_test.go b/broker/src/asapo_broker/database/database_test.go
index 3073ed61b1a621ae47e3a41903e75f2a85836255..68b1eadb44e0e5a02a19c66994a0c4cfe9540fbb 100644
--- a/broker/src/asapo_broker/database/database_test.go
+++ b/broker/src/asapo_broker/database/database_test.go
@@ -12,6 +12,8 @@ func TestMockDataBase(t *testing.T) {
 	db.On("Close").Return()
 	db.On("Copy").Return(nil)
 	db.On("GetNextRecord", "").Return([]byte(""), nil)
+	db.On("GetRecordByID", "", 0).Return([]byte(""), nil)
 	db.Connect("")
 	db.GetNextRecord("")
+	db.GetRecordByID("", 0)
 	db.Close()
diff --git a/broker/src/asapo_broker/database/mock_database.go b/broker/src/asapo_broker/database/mock_database.go
index cf25a1eea71a73fa8973027b8a70cfc574643fda..7ac5c13188e4c498566665b57b4448963d3fbe05 100644
--- a/broker/src/asapo_broker/database/mock_database.go
+++ b/broker/src/asapo_broker/database/mock_database.go
@@ -28,3 +28,8 @@ func (db *MockedDatabase) GetNextRecord(db_name string) (answer []byte, err erro
 	args := db.Called(db_name)
 	return args.Get(0).([]byte), args.Error(1)
 }
+
+func (db *MockedDatabase) GetRecordByID(db_name string, id int) (answer []byte, err error) {
+	args := db.Called(db_name, id)
+	return args.Get(0).([]byte), args.Error(1)
+}
diff --git a/broker/src/asapo_broker/database/mongodb.go b/broker/src/asapo_broker/database/mongodb.go
index 43213baf03226baa05d3b933175c1de7139b7051..55d9b8371a85320c6754aad974f6af1d3b45346b 100644
--- a/broker/src/asapo_broker/database/mongodb.go
+++ b/broker/src/asapo_broker/database/mongodb.go
@@ -3,10 +3,11 @@
 package database
 
 import (
+	"asapo_broker/utils"
+	"encoding/json"
 	"errors"
 	"gopkg.in/mgo.v2"
 	"gopkg.in/mgo.v2/bson"
-	"asapo_broker/utils"
 	"sync"
 	"time"
 )
@@ -150,15 +151,23 @@ func (db *Mongodb) incrementField(dbname string, max_ind int, res interface{}) (
 	return err
 }
 
-func (db *Mongodb) getRecordByID(dbname string, id int) (interface{}, error) {
+func (db *Mongodb) GetRecordByID(dbname string, id int) ([]byte, error) {
 	var res map[string]interface{}
 	q := bson.M{"_id": id}
 	c := db.session.DB(dbname).C(data_collection_name)
 	err := c.Find(q).One(&res)
 	if err == mgo.ErrNotFound {
-		return nil, &DBError{utils.StatusNoData, err.Error()}
+		var r = struct {
+			Id int `json:"id"`
+		}{id}
+		res, _ := json.Marshal(&r)
+		return nil, &DBError{utils.StatusNoData, string(res)}
+	}
+	if err != nil {
+		return nil, err
 	}
-	return &res, err
+
+	return utils.MapToJson(&res)
 }
 
 func (db *Mongodb) needCreateLocationPointersInDb(db_name string) bool {
@@ -230,11 +239,6 @@ func (db *Mongodb) GetNextRecord(db_name string) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
+	return db.GetRecordByID(db_name, curPointer.Value)
 
-	res, err := db.getRecordByID(db_name, curPointer.Value)
-	if err != nil {
-		return nil, err
-	}
-
-	return utils.MapToJson(&res)
 }
diff --git a/broker/src/asapo_broker/database/mongodb_test.go b/broker/src/asapo_broker/database/mongodb_test.go
index a8b788ed2bbfa8c7921e7382e65fecd74a44121f..cf17a38123464f59ff19dfc63dfcc1dafaa73e45 100644
--- a/broker/src/asapo_broker/database/mongodb_test.go
+++ b/broker/src/asapo_broker/database/mongodb_test.go
@@ -3,9 +3,9 @@
 package database
 
 import (
+	"asapo_broker/utils"
 	"encoding/json"
 	"github.com/stretchr/testify/assert"
-	"asapo_broker/utils"
 	"sync"
 	"testing"
 )
@@ -66,6 +66,16 @@ func TestMongoDBGetNextErrorWhenEmptyCollection(t *testing.T) {
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 }
 
+func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
+	db.Connect(dbaddress)
+	db.databases = append(db.databases, dbname)
+	defer cleanup()
+	db.InsertRecord(dbname, &rec2)
+	_, err := db.GetNextRecord(dbname)
+	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
+	assert.Equal(t, "{\"id\":1}", err.Error())
+}
+
 func TestMongoDBGetNextOK(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
@@ -143,3 +153,21 @@ func TestMongoDBGetNextInParallel(t *testing.T) {
 
 	assert.Equal(t, n, getNOnes(results))
 }
+
+func TestMongoDBGetRecordByID(t *testing.T) {
+	db.Connect(dbaddress)
+	defer cleanup()
+	db.InsertRecord(dbname, &rec1)
+	res, err := db.GetRecordByID(dbname, 1)
+	assert.Nil(t, err)
+	assert.Equal(t, string(rec1_expect), string(res))
+}
+
+func TestMongoDBGetRecordByIDFails(t *testing.T) {
+	db.Connect(dbaddress)
+	defer cleanup()
+	db.InsertRecord(dbname, &rec1)
+	_, err := db.GetRecordByID(dbname, 2)
+	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
+	assert.Equal(t, "{\"id\":2}", err.Error())
+}
diff --git a/broker/src/asapo_broker/server/get_health.go b/broker/src/asapo_broker/server/get_health.go
new file mode 100644
index 0000000000000000000000000000000000000000..b7d9f2446fb62c2c3e7d353172978d4a9682e832
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_health.go
@@ -0,0 +1,11 @@
+package server
+
+import (
+	"net/http"
+)
+
+
+func routeGetHealth(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-type", "application/json")
+	w.WriteHeader(http.StatusNoContent)
+}
diff --git a/broker/src/asapo_broker/server/get_health_test.go b/broker/src/asapo_broker/server/get_health_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0efbdf70d17c57c0ebe81ea9a7cc8956c06f01ee
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_health_test.go
@@ -0,0 +1,13 @@
+package server
+
+import (
+	"github.com/stretchr/testify/assert"
+	"net/http"
+	"testing"
+)
+
+
+func TestGetHealth(t *testing.T) {
+	w := doRequest("/health")
+	assert.Equal(t, http.StatusNoContent, w.Code)
+}
diff --git a/broker/src/asapo_broker/server/get_id.go b/broker/src/asapo_broker/server/get_id.go
new file mode 100644
index 0000000000000000000000000000000000000000..50624ba0f442c00e0ea2d2937ee3aaf8234eb90c
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_id.go
@@ -0,0 +1,50 @@
+package server
+
+import (
+	"asapo_broker/logger"
+	"asapo_broker/utils"
+	"github.com/gorilla/mux"
+	"net/http"
+	"strconv"
+)
+
+func extractRequestParametersID(r *http.Request) (int, bool) {
+	vars := mux.Vars(r)
+	id_str, ok := vars["id"]
+	if !ok {
+		return 0, ok
+	}
+	id, err := strconv.Atoi(id_str)
+	return id, err == nil
+}
+
+func routeGetByID(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-type", "application/json")
+	db_name, ok := extractRequestParameters(r)
+	if !ok {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	id, ok := extractRequestParametersID(r)
+	if !ok {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	answer, code := getRecordByID(db_name, id)
+	w.WriteHeader(code)
+	w.Write(answer)
+}
+
+func getRecordByID(db_name string, id int) (answer []byte, code int) {
+	db_new := db.Copy()
+	defer db_new.Close()
+	statistics.IncreaseCounter()
+	answer, err := db_new.GetRecordByID(db_name, id)
+	log_str := "processing get id request in " + db_name + " at " + settings.BrokerDbAddress
+	if err != nil {
+		return returnError(err, log_str)
+	}
+	logger.Debug(log_str)
+	return answer, utils.StatusOK
+}
diff --git a/broker/src/asapo_broker/server/get_id_test.go b/broker/src/asapo_broker/server/get_id_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b85aa898956ea7e12c5fb45f41248fc1f7be2d85
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_id_test.go
@@ -0,0 +1,74 @@
+package server
+
+import (
+	"asapo_broker/database"
+	"asapo_broker/logger"
+	"asapo_broker/utils"
+	"errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+	"net/http"
+	"testing"
+)
+
+func TestGetIdWithoutDatabaseName(t *testing.T) {
+	w := doRequest("/database/123")
+	assert.Equal(t, http.StatusNotFound, w.Code, "no database name")
+}
+
+func ExpectCopyCloseOnID(mock_db *database.MockedDatabase) {
+	mock_db.On("Copy").Return(mock_db)
+	mock_db.On("Close").Return()
+}
+
+type GetIDTestSuite struct {
+	suite.Suite
+	mock_db *database.MockedDatabase
+}
+
+func (suite *GetIDTestSuite) SetupTest() {
+	statistics.Reset()
+	suite.mock_db = new(database.MockedDatabase)
+	db = suite.mock_db
+	logger.SetMockLog()
+	ExpectCopyCloseOnID(suite.mock_db)
+}
+
+func (suite *GetIDTestSuite) TearDownTest() {
+	assertExpectations(suite.T(), suite.mock_db)
+	logger.UnsetMockLog()
+	db = nil
+}
+
+func TestGetIDTestSuite(t *testing.T) {
+	suite.Run(t, new(GetIDTestSuite))
+}
+
+func (suite *GetIDTestSuite) TestGetIDWithWrongDatabaseName() {
+	suite.mock_db.On("GetRecordByID", "foo", 1).Return([]byte(""),
+		&database.DBError{utils.StatusWrongInput, ""})
+
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("get id request in foo")))
+
+	w := doRequest("/database/foo/1")
+
+	suite.Equal(http.StatusBadRequest, w.Code, "wrong database name")
+}
+
+func (suite *GetIDTestSuite) TestGetIDWithInternalDBError() {
+	suite.mock_db.On("GetRecordByID", "foo", 1).Return([]byte(""), errors.New(""))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("get id request in foo")))
+
+	w := doRequest("/database/foo/1")
+	suite.Equal(http.StatusInternalServerError, w.Code, "internal error")
+}
+
+func (suite *GetIDTestSuite) TestGetIDOK() {
+	suite.mock_db.On("GetRecordByID", "dbname", 1).Return([]byte("Hello"), nil)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("get id request in dbname")))
+
+	w := doRequest("/database/dbname/1")
+	suite.Equal(http.StatusOK, w.Code, "GetID OK")
+	suite.Equal("Hello", string(w.Body.Bytes()), "GetID sends data")
+}
diff --git a/broker/src/asapo_broker/server/get_next.go b/broker/src/asapo_broker/server/get_next.go
index 8e4eede203c96314b59df566eb1d335f4f126873..3cc2a826e4c4e649c5543042e9eba4ee609e9108 100644
--- a/broker/src/asapo_broker/server/get_next.go
+++ b/broker/src/asapo_broker/server/get_next.go
@@ -1,10 +1,10 @@
 package server
 
 import (
-	"github.com/gorilla/mux"
 	"asapo_broker/database"
 	"asapo_broker/logger"
 	"asapo_broker/utils"
+	"github.com/gorilla/mux"
 	"net/http"
 )
 
diff --git a/broker/src/asapo_broker/server/listroutes.go b/broker/src/asapo_broker/server/listroutes.go
index 36d8f80a1fc8f7c23230a58e788f02ea2dfba7c1..2a6f70ecc1de6e6dc6517d540d6609ca6e00d7ac 100644
--- a/broker/src/asapo_broker/server/listroutes.go
+++ b/broker/src/asapo_broker/server/listroutes.go
@@ -11,4 +11,17 @@ var listRoutes = utils.Routes{
 		"/database/{dbname}/next",
 		routeGetNext,
 	},
+	utils.Route{
+		"GetID",
+		"Get",
+		"/database/{dbname}/{id}",
+		routeGetByID,
+	},
+
+	utils.Route{
+		"Health",
+		"Get",
+		"/health",
+		routeGetHealth,
+	},
 }
diff --git a/broker/src/asapo_broker/utils/status_codes.go b/broker/src/asapo_broker/utils/status_codes.go
index 9549e555eeb771d129d93c0e6eea3e7cb5124626..70a2193004b0e34bd07f5f0804b50375fd8ba910 100644
--- a/broker/src/asapo_broker/utils/status_codes.go
+++ b/broker/src/asapo_broker/utils/status_codes.go
@@ -10,5 +10,5 @@ const (
 	//error codes
 	StatusError      = http.StatusInternalServerError
 	StatusWrongInput = http.StatusBadRequest
-	StatusNoData     = http.StatusNoContent
+	StatusNoData     = http.StatusNotFound
 )
diff --git a/common/cpp/include/common/error.h b/common/cpp/include/common/error.h
index efef43d16a6b7887e9347a98db407b5bb17f82d9..ee07360567adef4c64e4747b24e013509796bf59 100644
--- a/common/cpp/include/common/error.h
+++ b/common/cpp/include/common/error.h
@@ -17,7 +17,8 @@ enum class ErrorType {
     kProducerError,
 
     kMemoryAllocationError,
-    kEndOfFile
+    kEndOfFile,
+    kTimeOut
 };
 
 class ErrorInterface;
diff --git a/common/cpp/include/common/networking.h b/common/cpp/include/common/networking.h
index 6d73f9f6a7728e30c939bd7aba93650a2244e0ad..7f59eb7cfd74afba30a696ec53e78da272a61df5 100644
--- a/common/cpp/include/common/networking.h
+++ b/common/cpp/include/common/networking.h
@@ -2,15 +2,18 @@
 #define ASAPO_COMMON__NETWORKING_H
 
 #include <cstdint>
+#include <algorithm>
+#include <string>
+#include <cstring>
 
 namespace asapo {
 
 typedef uint64_t NetworkRequestId;
 
 enum Opcode : uint8_t {
-    kNetOpcodeUnknownOp,
-    kNetOpcodeSendData,
-    kNetOpcodeCount,
+    kOpcodeUnknownOp,
+    kOpcodeTransferData,
+    kOpcodeCount,
 };
 
 enum NetworkErrorCode : uint16_t {
@@ -27,11 +30,19 @@ enum NetworkErrorCode : uint16_t {
  * RPC always return a response to a corresponding request
  * @{
  */
-struct GenericNetworkRequestHeader {
-    Opcode              op_code;
-    NetworkRequestId    request_id;
+
+const std::size_t kMaxFileNameSize = 1024;
+struct GenericRequestHeader {
+    GenericRequestHeader(Opcode i_op_code = kOpcodeUnknownOp, uint64_t i_data_id = 0,
+                         uint64_t i_data_size = 0, const std::string& i_file_name = ""):
+        op_code{i_op_code}, data_id{i_data_id}, data_size{i_data_size} {
+        strncpy(file_name, i_file_name.c_str(), kMaxFileNameSize - 1);
+        file_name[kMaxFileNameSize - 1] = '\0';
+    }
+    Opcode      op_code;
     uint64_t    data_id;
     uint64_t    data_size;
+    char        file_name[kMaxFileNameSize];
 };
 
 struct GenericNetworkResponse {
diff --git a/common/cpp/include/io/io.h b/common/cpp/include/io/io.h
index 6e4e55f98262159c31cd0e18e72c0319d4e34a89..f94e977febcc76cd399bd9a3cc86142abec9b2d7 100644
--- a/common/cpp/include/io/io.h
+++ b/common/cpp/include/io/io.h
@@ -41,6 +41,7 @@ enum class SocketProtocols {
 
 using FileDescriptor = int;
 using SocketDescriptor = int;
+const SocketDescriptor kDisconnectedSocketDescriptor = -1;
 
 class IO {
   public:
@@ -91,6 +92,7 @@ class IO {
     virtual size_t          Write           (FileDescriptor fd, const void* buf, size_t length, Error* err) const = 0;
 
     virtual Error          WriteDataToFile  (const std::string& fname, const FileData& data, size_t length) const = 0;
+    virtual Error          WriteDataToFile  (const std::string& fname, const uint8_t* data, size_t length) const = 0;
 
     virtual void            CreateNewDirectory      (const std::string& directory_name, Error* err) const = 0;
     virtual FileData        GetDataFromFile         (const std::string& fname, uint64_t fsize, Error* err) const = 0;
diff --git a/common/cpp/include/preprocessor/definitions.h b/common/cpp/include/preprocessor/definitions.h
new file mode 100644
index 0000000000000000000000000000000000000000..385ffd37242d3b209a7f95cc0a7404c0d7cd4fe9
--- /dev/null
+++ b/common/cpp/include/preprocessor/definitions.h
@@ -0,0 +1,19 @@
+#ifndef ASAPO_DEFINITIONS_H
+#define ASAPO_DEFINITIONS_H
+
+#ifdef UNIT_TESTS
+#define VIRTUAL virtual
+#else
+#define VIRTUAL
+#endif
+
+namespace  asapo {
+const char kPathSeparator =
+#ifdef WIN32
+    '\\';
+#else
+    '/';
+#endif
+}
+
+#endif //ASAPO_DEFINITIONS_H
diff --git a/common/cpp/include/unittests/MockHttpClient.h b/common/cpp/include/unittests/MockHttpClient.h
index dbb346f22ec297e5aab7a5b24039949bb628fac3..d128e8a84fd669604c99afdda2272445e434f471 100644
--- a/common/cpp/include/unittests/MockHttpClient.h
+++ b/common/cpp/include/unittests/MockHttpClient.h
@@ -12,15 +12,15 @@ class MockHttpClient : public HttpClient {
   public:
     std::string Get(const std::string& uri, HttpCode* code, Error* err) const noexcept override {
         ErrorInterface* error = nullptr;
-        auto responce = Get_t(uri, code, &error);
+        auto response = Get_t(uri, code, &error);
         err->reset(error);
-        return responce;
+        return response;
     }
     std::string Post(const std::string& uri, const std::string& data, HttpCode* code, Error* err) const noexcept override {
         ErrorInterface* error = nullptr;
-        auto responce = Post_t(uri, data, code, &error);
+        auto response = Post_t(uri, data, code, &error);
         err->reset(error);
-        return responce;
+        return response;
     }
     MOCK_CONST_METHOD3(Get_t,
                        std::string(const std::string& uri, HttpCode* code, ErrorInterface** err));
diff --git a/common/cpp/include/unittests/MockIO.h b/common/cpp/include/unittests/MockIO.h
index 803ce86909b1996ae889089a1faf2a2083b77247..5287004bad08d98aa0dea31ba44e1ff129b94049 100644
--- a/common/cpp/include/unittests/MockIO.h
+++ b/common/cpp/include/unittests/MockIO.h
@@ -178,7 +178,12 @@ class MockIO : public IO {
 
     }
 
-    MOCK_CONST_METHOD3(WriteDataToFile_t, ErrorInterface * (const std::string& fname, uint8_t* data, size_t fsize));
+    Error WriteDataToFile(const std::string& fname, const uint8_t* data, size_t length) const override {
+        return Error{WriteDataToFile_t(fname, data, length)};
+    }
+
+
+    MOCK_CONST_METHOD3(WriteDataToFile_t, ErrorInterface * (const std::string& fname, const uint8_t* data, size_t fsize));
 
     void CollectFileInformationRecursively(const std::string& path, std::vector<FileInfo>* files,
                                            Error* err) const override {
diff --git a/common/cpp/src/http_client/CMakeLists.txt b/common/cpp/src/http_client/CMakeLists.txt
index bae54a5d735ceef0f4792df590ede2d9e94f2d34..58f8813c18de3f7f4e22a974349cdd6de5afcbf4 100644
--- a/common/cpp/src/http_client/CMakeLists.txt
+++ b/common/cpp/src/http_client/CMakeLists.txt
@@ -1,7 +1,7 @@
 set(TARGET_NAME curl_http_client)
 set(SOURCE_FILES
         curl_http_client.cpp
-        http_client_factory.cpp)
+        http_client_factory.cpp ../../include/preprocessor/definitions.h)
 
 
 ################################
diff --git a/common/cpp/src/json_parser/rapid_json.cpp b/common/cpp/src/json_parser/rapid_json.cpp
index cf47a597c5bd54015523a1d6ecdf13e1236bb1f4..31fcd58424b7eb29e80262398695d1431651ef13 100644
--- a/common/cpp/src/json_parser/rapid_json.cpp
+++ b/common/cpp/src/json_parser/rapid_json.cpp
@@ -36,7 +36,7 @@ Error RapidJson::LazyInitialize()const noexcept {
     return nullptr;
 }
 
-asapo::Error CheckValueType(const std::string& name, ValueType type, const Value* val) {
+asapo::Error RapidJson::CheckValueType(const std::string& name, ValueType type, const Value* val) const {
     bool res = false;
     switch (type) {
     case ValueType::kObject:
@@ -56,7 +56,7 @@ asapo::Error CheckValueType(const std::string& name, ValueType type, const Value
         break;
     }
     if (!res) {
-        return TextError("wrong type: " + name);
+        return TextError("wrong type: " + name + " in: " + json_);
     }
 
     return nullptr;
diff --git a/common/cpp/src/json_parser/rapid_json.h b/common/cpp/src/json_parser/rapid_json.h
index bb23bd730c9295c3b3ed55a282ebb0df3b4d11f3..ca0c0b054ae31b9d0444f0d20c29ceb3cbe743c9 100644
--- a/common/cpp/src/json_parser/rapid_json.h
+++ b/common/cpp/src/json_parser/rapid_json.h
@@ -33,6 +33,7 @@ class RapidJson {
     std::string json_;
     mutable bool initialized_ = false;
     Error LazyInitialize() const noexcept;
+    Error CheckValueType(const std::string& name, ValueType type, const rapidjson::Value* val) const;
     Error embedded_error_ = nullptr;
 
     asapo::Error GetValuePointer(const std::string& name, ValueType type, rapidjson::Value** val)const noexcept;
diff --git a/common/cpp/src/system_io/system_io.cpp b/common/cpp/src/system_io/system_io.cpp
index e8096c3256e23753588d629e0a545670535b3b56..f8dc19c3148cdb603fa584dd4d41e5bd62ae8266 100644
--- a/common/cpp/src/system_io/system_io.cpp
+++ b/common/cpp/src/system_io/system_io.cpp
@@ -122,20 +122,25 @@ void asapo::SystemIO::CreateNewDirectory(const std::string& directory_name, Erro
     }
 }
 
-Error SystemIO::WriteDataToFile(const std::string& fname, const FileData& data, size_t length) const {
+Error SystemIO::WriteDataToFile(const std::string& fname, const uint8_t* data, size_t length) const {
     Error err;
     auto fd = Open(fname, IO_OPEN_MODE_CREATE_AND_FAIL_IF_EXISTS | IO_OPEN_MODE_RW, &err);
     if (err) {
         return err;
     }
 
-    Write(fd, data.get(), length, &err);
+    Write(fd, data, length, &err);
     if (err) {
         return err;
     }
 
     Close(fd, &err);
     return err;
+
+}
+
+Error SystemIO::WriteDataToFile(const std::string& fname, const FileData& data, size_t length) const {
+    return WriteDataToFile(fname, data.get(), length);
 }
 
 
@@ -189,13 +194,13 @@ asapo::FileDescriptor asapo::SystemIO::CreateAndConnectIPTCPSocket(const std::st
 
     FileDescriptor fd = CreateSocket(AddressFamilies::INET, SocketTypes::STREAM, SocketProtocols::IP, err);
     if(*err != nullptr) {
-        return -1;
+        return kDisconnectedSocketDescriptor;
     }
 
     InetConnect(fd, address, err);
     if (*err != nullptr) {
         CloseSocket(fd, nullptr);
-        return -1;
+        return kDisconnectedSocketDescriptor;
     }
 
     return fd;
diff --git a/common/cpp/src/system_io/system_io.h b/common/cpp/src/system_io/system_io.h
index ce033ff31ac4abd79468652f21bf79f5b116e473..9b7c1feb73fa1ddc619e1967fc489d1482ee4d01 100644
--- a/common/cpp/src/system_io/system_io.h
+++ b/common/cpp/src/system_io/system_io.h
@@ -102,6 +102,7 @@ class SystemIO final : public IO {
     void            CreateNewDirectory(const std::string& directory_name, Error* err) const;
     FileData        GetDataFromFile(const std::string& fname, uint64_t fsize, Error* err) const;
     Error           WriteDataToFile  (const std::string& fname, const FileData& data, size_t length) const;
+    Error           WriteDataToFile(const std::string& fname, const uint8_t* data, size_t length) const;
     void            CollectFileInformationRecursively(const std::string& path, std::vector<FileInfo>* files,
                                                       Error* err) const;
     std::string     ReadFileToString(const std::string& fname, Error* err) const;
diff --git a/common/cpp/unittests/json_parser/test_json_parser.cpp b/common/cpp/unittests/json_parser/test_json_parser.cpp
index e884cb43c4563e3fe7fa4ca12bf73dce8b156581..863e56516d44a30e58b5055ce232666c7d6be695 100644
--- a/common/cpp/unittests/json_parser/test_json_parser.cpp
+++ b/common/cpp/unittests/json_parser/test_json_parser.cpp
@@ -30,7 +30,7 @@ using asapo::IO;
 namespace {
 
 TEST(ParseString, SimpleConvertToJson) {
-    std::string json = R"({"_id":2,"foo":"foo","bar":1,"flag":true})";
+    std::string json = R"({"_id":2,"foo":"foo:\\1","bar":1,"flag":true})";
 
     JsonStringParser parser{json};
 
@@ -49,7 +49,7 @@ TEST(ParseString, SimpleConvertToJson) {
 
 
     ASSERT_THAT(id, Eq(2));
-    ASSERT_THAT(foo, Eq("foo"));
+    ASSERT_THAT(foo, Eq("foo:\\1"));
     ASSERT_THAT(bar, Eq(1));
     ASSERT_THAT(flag, true);
 
diff --git a/config/grafana/ASAP__O.json b/config/grafana/ASAP__O.json
index 7ed15c7eeef11c48042a4040e9350637865b477f..8311cfdbc7471b7a48e8d075aecfa59b98486279 100644
--- a/config/grafana/ASAP__O.json
+++ b/config/grafana/ASAP__O.json
@@ -1,8 +1,8 @@
 {
   "__inputs": [
     {
-      "name": "DS_TEST",
-      "label": "test",
+      "name": "DS_ASAPO",
+      "label": "asapo",
       "description": "",
       "type": "datasource",
       "pluginId": "influxdb",
@@ -27,6 +27,12 @@
       "id": "influxdb",
       "name": "InfluxDB",
       "version": "5.0.0"
+    },
+    {
+      "type": "panel",
+      "id": "singlestat",
+      "name": "Singlestat",
+      "version": "5.0.0"
     }
   ],
   "annotations": {
@@ -48,20 +54,144 @@
   "id": null,
   "links": [],
   "panels": [
+    {
+      "cacheTimeout": null,
+      "colorBackground": false,
+      "colorValue": false,
+      "colors": [
+        "#299c46",
+        "rgba(237, 129, 40, 0.89)",
+        "#d44a3a"
+      ],
+      "datasource": "${DS_ASAPO}",
+      "format": "none",
+      "gauge": {
+        "maxValue": 100,
+        "minValue": 0,
+        "show": false,
+        "thresholdLabels": false,
+        "thresholdMarkers": true
+      },
+      "gridPos": {
+        "h": 3,
+        "w": 5,
+        "x": 0,
+        "y": 0
+      },
+      "id": 9,
+      "interval": null,
+      "links": [],
+      "mappingType": 1,
+      "mappingTypes": [
+        {
+          "name": "value to text",
+          "value": 1
+        },
+        {
+          "name": "range to text",
+          "value": 2
+        }
+      ],
+      "maxDataPoints": 100,
+      "nullPointMode": "connected",
+      "nullText": null,
+      "postfix": "",
+      "postfixFontSize": "50%",
+      "prefix": "",
+      "prefixFontSize": "50%",
+      "rangeMaps": [
+        {
+          "from": "null",
+          "text": "N/A",
+          "to": "null"
+        }
+      ],
+      "sparkline": {
+        "fillColor": "rgba(31, 118, 189, 0.18)",
+        "full": false,
+        "lineColor": "rgb(31, 120, 193)",
+        "show": false
+      },
+      "tableColumn": "sum",
+      "targets": [
+        {
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "query": "SHOW TAG VALUES CARDINALITY FROM \"statistics\" WITH KEY = \"connection_from\"\n",
+          "rawQuery": false,
+          "refId": "A",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "elapsed_ms"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [],
+                "type": "sum"
+              },
+              {
+                "params": [
+                  " / 1000/10"
+                ],
+                "type": "math"
+              }
+            ]
+          ],
+          "tags": [
+            {
+              "key": "receiver_tag",
+              "operator": "=",
+              "value": "test_receiver"
+            }
+          ]
+        }
+      ],
+      "thresholds": "",
+      "title": "Number of Connections",
+      "type": "singlestat",
+      "valueFontSize": "80%",
+      "valueMaps": [
+        {
+          "op": "=",
+          "text": "N/A",
+          "value": "null"
+        }
+      ],
+      "valueName": "avg"
+    },
     {
       "aliasColors": {},
       "bars": false,
       "dashLength": 10,
       "dashes": false,
-      "datasource": "${DS_TEST}",
-      "fill": 0,
+      "datasource": "${DS_ASAPO}",
+      "fill": 1,
       "gridPos": {
         "h": 9,
         "w": 12,
-        "x": 0,
+        "x": 12,
         "y": 0
       },
-      "id": 6,
+      "id": 7,
       "legend": {
         "avg": false,
         "current": false,
@@ -85,13 +215,24 @@
       "steppedLine": false,
       "targets": [
         {
-          "alias": "Database",
-          "groupBy": [],
+          "alias": "Database share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "connection_from"
+              ],
+              "type": "tag"
+            }
+          ],
           "measurement": "statistics",
           "orderByTime": "ASC",
           "policy": "default",
-          "query": "SELECT \"db_share\" FROM \"statistics\" WHERE $timeFilter",
-          "rawQuery": false,
           "refId": "A",
           "resultFormat": "time_series",
           "select": [
@@ -101,14 +242,37 @@
                   "db_share"
                 ],
                 "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
               }
             ]
           ],
           "tags": []
         },
         {
-          "alias": "Disk",
-          "groupBy": [],
+          "alias": "Disk share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "connection_from"
+              ],
+              "type": "tag"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
           "measurement": "statistics",
           "orderByTime": "ASC",
           "policy": "default",
@@ -121,14 +285,37 @@
                   "disk_share"
                 ],
                 "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
               }
             ]
           ],
           "tags": []
         },
         {
-          "alias": "Network",
-          "groupBy": [],
+          "alias": "Network share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "connection_from"
+              ],
+              "type": "tag"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
           "measurement": "statistics",
           "orderByTime": "ASC",
           "policy": "default",
@@ -141,6 +328,10 @@
                   "network_share"
                 ],
                 "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
               }
             ]
           ],
@@ -150,7 +341,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Shares",
+      "title": "Work shares",
       "tooltip": {
         "shared": true,
         "sort": 0,
@@ -188,15 +379,15 @@
       "bars": false,
       "dashLength": 10,
       "dashes": false,
-      "datasource": "${DS_TEST}",
-      "fill": 0,
+      "datasource": "${DS_ASAPO}",
+      "fill": 1,
       "gridPos": {
-        "h": 8,
-        "w": 11,
-        "x": 12,
-        "y": 0
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 3
       },
-      "id": 2,
+      "id": 6,
       "legend": {
         "avg": false,
         "current": false,
@@ -210,7 +401,7 @@
       "linewidth": 1,
       "links": [],
       "nullPointMode": "null",
-      "percentage": true,
+      "percentage": false,
       "pointradius": 5,
       "points": false,
       "renderer": "flot",
@@ -220,8 +411,15 @@
       "steppedLine": false,
       "targets": [
         {
-          "alias": "Total",
-          "groupBy": [],
+          "alias": "Database share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            }
+          ],
           "measurement": "statistics",
           "orderByTime": "ASC",
           "policy": "default",
@@ -231,15 +429,87 @@
             [
               {
                 "params": [
-                  "data_volume"
+                  "db_share"
                 ],
                 "type": "field"
               },
+              {
+                "params": [],
+                "type": "mean"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "alias": "Disk share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "B",
+          "resultFormat": "time_series",
+          "select": [
+            [
               {
                 "params": [
-                  " / elapsed_ms/1024/1024/1024*1000*8"
+                  "disk_share"
                 ],
-                "type": "math"
+                "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "alias": "Network share",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "C",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "network_share"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
               }
             ]
           ],
@@ -249,7 +519,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Bandwidth",
+      "title": "Work shares",
       "tooltip": {
         "shared": true,
         "sort": 0,
@@ -287,15 +557,15 @@
       "bars": false,
       "dashLength": 10,
       "dashes": false,
-      "datasource": "${DS_TEST}",
-      "fill": 0,
+      "datasource": "${DS_ASAPO}",
+      "fill": 1,
       "gridPos": {
-        "h": 8,
-        "w": 11,
+        "h": 9,
+        "w": 12,
         "x": 12,
-        "y": 8
+        "y": 9
       },
-      "id": 4,
+      "id": 2,
       "legend": {
         "avg": false,
         "current": false,
@@ -323,7 +593,7 @@
           "groupBy": [
             {
               "params": [
-                "$__interval"
+                "10s"
               ],
               "type": "time"
             },
@@ -334,11 +604,11 @@
               "type": "fill"
             }
           ],
-          "measurement": "RequestsRate",
+          "measurement": "statistics",
           "orderByTime": "ASC",
           "policy": "default",
-          "query": "SELECT \"n_requests\" / elapsed_ms*1000 FROM \"statistics\" WHERE $timeFilter",
-          "rawQuery": true,
+          "query": "select SUM(n_requests)/10  from statistics where time>1527594865972143840 and time<=now()  group by time(10s)\n",
+          "rawQuery": false,
           "refId": "A",
           "resultFormat": "time_series",
           "select": [
@@ -351,15 +621,28 @@
               },
               {
                 "params": [],
-                "type": "mean"
+                "type": "sum"
+              },
+              {
+                "params": [
+                  " / 10"
+                ],
+                "type": "math"
               }
             ]
           ],
           "tags": []
         },
         {
-          "alias": "Broker",
-          "groupBy": [],
+          "alias": "Worker",
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            }
+          ],
           "measurement": "RequestsRate",
           "orderByTime": "ASC",
           "policy": "default",
@@ -372,6 +655,151 @@
                   "rate"
                 ],
                 "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
+              }
+            ]
+          ],
+          "tags": []
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Receiver/Worker Rates",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_ASAPO}",
+      "fill": 1,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 12
+      },
+      "id": 4,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "groupBy": [
+            {
+              "params": [
+                "10s"
+              ],
+              "type": "time"
+            }
+          ],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "A",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "data_volume"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [],
+                "type": "sum"
+              },
+              {
+                "params": [
+                  " /10/1000/1000/1000*8"
+                ],
+                "type": "math"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "groupBy": [
+            {
+              "params": [
+                "connection_from"
+              ],
+              "type": "tag"
+            }
+          ],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "B",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "data_volume"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [
+                  " / elapsed_ms/1000/1000*8"
+                ],
+                "type": "math"
               }
             ]
           ],
@@ -381,13 +809,12 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Number of Requests",
+      "title": "Bandwidth Gbits/s",
       "tooltip": {
         "shared": true,
         "sort": 0,
         "value_type": "individual"
       },
-      "transparent": false,
       "type": "graph",
       "xaxis": {
         "buckets": null,
@@ -416,7 +843,7 @@
       ]
     }
   ],
-  "refresh": false,
+  "refresh": "10s",
   "schemaVersion": 16,
   "style": "dark",
   "tags": [],
@@ -424,8 +851,8 @@
     "list": []
   },
   "time": {
-    "from": "now/d",
-    "to": "now/d"
+    "from": "now-5m",
+    "to": "now"
   },
   "timepicker": {
     "refresh_intervals": [
@@ -453,7 +880,7 @@
     ]
   },
   "timezone": "",
-  "title": "ASAP::O",
-  "uid": "3JvTwliiz",
-  "version": 4
+  "title": "Asapo",
+  "uid": "jZtUsU4mz",
+  "version": 6
 }
\ No newline at end of file
diff --git a/config/nomad/broker.nmd.in b/config/nomad/broker.nmd.in
new file mode 100644
index 0000000000000000000000000000000000000000..1d968aa427fc8816fa75ba1d82ffb491870b3432
--- /dev/null
+++ b/config/nomad/broker.nmd.in
@@ -0,0 +1,49 @@
+job "broker" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "service" {
+      driver = "raw_exec"
+
+      config {
+        command = "@BROKER_FULLPATH@",
+        args =  ["-config","${NOMAD_TASK_DIR}/broker.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "broker" {
+            static = "5005"
+          }
+        }
+      }
+
+      service {
+        name = "broker"
+        port = "broker"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/health"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "@WORK_DIR@/broker.json.tpl"
+         destination   = "local/broker.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+    }
+  }
+}
diff --git a/config/nomad/discovery.nmd.in b/config/nomad/discovery.nmd.in
new file mode 100644
index 0000000000000000000000000000000000000000..60a8a174e1f5013ea963bd090684c6b9866cd504
--- /dev/null
+++ b/config/nomad/discovery.nmd.in
@@ -0,0 +1,49 @@
+job "discovery" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "service" {
+      driver = "raw_exec"
+
+      config {
+        command = "@DISCOVERY_FULLPATH@",
+        args =  ["-config","${NOMAD_TASK_DIR}/discovery.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "discovery" {
+            static = "5006"
+          }
+        }
+      }
+
+      service {
+        name = "discovery"
+        port = "discovery"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/receivers"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "@WORK_DIR@/discovery.json.tpl"
+         destination   = "local/discovery.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+    }
+  }
+}
diff --git a/config/nomad/receiver.nmd.in b/config/nomad/receiver.nmd.in
new file mode 100644
index 0000000000000000000000000000000000000000..1a9a6e893d1c6559a95ec3150e09509357c85e66
--- /dev/null
+++ b/config/nomad/receiver.nmd.in
@@ -0,0 +1,46 @@
+job "receiver" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "service" {
+      driver = "raw_exec"
+
+      config {
+        command = "@RECEIVER_DIR@/@RECEIVER_NAME@",
+        args =  ["${NOMAD_TASK_DIR}/receiver.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "recv" {}
+        }
+      }
+
+      service {
+        name = "receiver"
+        port = "recv"
+        check {
+          name     = "alive"
+          type     = "tcp"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "@WORK_DIR@/receiver.json.tpl"
+         destination   = "local/receiver.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+    }
+  }
+}
diff --git a/examples/producer/dummy-data-producer/CMakeLists.txt b/examples/producer/dummy-data-producer/CMakeLists.txt
index ec7207b6a94f7317c205c22eadbe497a7f837779..5fda57ae7724433dc92a24b6be41bdffd7ea40c5 100644
--- a/examples/producer/dummy-data-producer/CMakeLists.txt
+++ b/examples/producer/dummy-data-producer/CMakeLists.txt
@@ -13,6 +13,11 @@ target_link_libraries(${TARGET_NAME} ${ASAPO_COMMON_IO_LIBRARIES})
 target_link_libraries(${TARGET_NAME} producer-api)
 set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 
+set_target_properties(${TARGET_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY
+        ${CMAKE_CURRENT_BINARY_DIR}$<$<CONFIG:Debug>:>
+)
+
+
 if (CMAKE_COMPILER_IS_GNUCXX)
     set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS_DEBUG "--coverage")
 endif()
@@ -27,8 +32,5 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CMakeLists_separate.txt DESTINATION "$
 configure_file(Makefile.in Makefile_LINUX @ONLY)
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/Makefile_LINUX DESTINATION "${dir}")
 
-IF(WIN32)
-add_script_test("${TARGET_NAME}" "${CMAKE_CURRENT_BINARY_DIR}/Debug/${TARGET_NAME}")
-ELSE()
+
 add_script_test("${TARGET_NAME}" "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}")
-ENDIF()
diff --git a/examples/producer/dummy-data-producer/check_linux.sh b/examples/producer/dummy-data-producer/check_linux.sh
index 51a14177248525416ad5e995fcca02f755bddf75..53b37762008d7b426ccb52311c0a641e3656da47 100644
--- a/examples/producer/dummy-data-producer/check_linux.sh
+++ b/examples/producer/dummy-data-producer/check_linux.sh
@@ -2,9 +2,20 @@
 
 database_name=test_run
 
-#set -e
+set -e
 
+trap Cleanup EXIT
 
-#just test that it starts, no reciever is running
-$@ 0.0.0.0 1 1 2>&1 | grep "Failed to connect"
+Cleanup() {
+rm -rf files
+}
 
+mkdir files
+
+$@ files 11 4 4 1 2>&1 | grep Rate
+
+
+ls -ln files/0.bin | awk '{ print $5 }'| grep 11264
+ls -ln files/1.bin | awk '{ print $5 }'| grep 11264
+ls -ln files/2.bin | awk '{ print $5 }'| grep 11264
+ls -ln files/3.bin | awk '{ print $5 }'| grep 11264
diff --git a/examples/producer/dummy-data-producer/check_windows.bat b/examples/producer/dummy-data-producer/check_windows.bat
index 95652e449db3fa550ce10cac2b8040f74a4b54e0..da2b59829a4f1ca31fb394839fd93be41419425b 100644
--- a/examples/producer/dummy-data-producer/check_windows.bat
+++ b/examples/producer/dummy-data-producer/check_windows.bat
@@ -1,4 +1,21 @@
-"%1" 0.0.0.0 1 1 2>&1 | findstr "not valid" || goto :error
+SET folder=files
+
+mkdir %folder%
+
+"%1" %folder% 11 4 4 1 2>&1 | findstr "Rate" || goto :error
+
+FOR /F "usebackq" %%A IN ('%folder%\0.bin') DO set size=%%~zA
+if %size% NEQ 11264 goto :error
+
+FOR /F "usebackq" %%A IN ('%folder%\1.bin') DO set size=%%~zA
+if %size% NEQ 11264 goto :error
+
+FOR /F "usebackq" %%A IN ('%folder%\2.bin') DO set size=%%~zA
+if %size% NEQ 11264 goto :error
+
+FOR /F "usebackq" %%A IN ('%folder%\3.bin') DO set size=%%~zA
+if %size% NEQ 11264 goto :error
+
 goto :clean
 
 :error
@@ -6,4 +23,5 @@ call :clean
 exit /b 1
 
 :clean
+rmdir /S /Q %folder%
 
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index d73fa38c6c2eaea39e96632c14febad26185d0fe..3b4a76e0800068a081f8ad355a7b44d0420b0db5 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -1,22 +1,50 @@
 #include <iostream>
 #include <chrono>
 #include <vector>
-#include <tuple>
+#include <mutex>
+#include <thread>
 
 #include "asapo_producer.h"
 
+
 using std::chrono::high_resolution_clock;
 
-typedef std::tuple<std::string, size_t, uint64_t> ArgumentTuple;
-ArgumentTuple ProcessCommandArguments(int argc, char* argv[]) {
-    if (argc != 4) {
+std::mutex mutex;
+int iterations_remained;
+
+struct Args {
+    std::string receiver_address;
+    size_t number_of_bytes;
+    uint64_t iterations;
+    uint64_t nthreads;
+    uint64_t mode;
+};
+
+void PrintCommandArguments(const Args& args) {
+    std::cout << "receiver_address: " << args.receiver_address << std::endl
+              << "Package size: " << args.number_of_bytes / 1024 << "k" << std::endl
+              << "iterations: " << args.iterations << std::endl
+              << "nthreads: " << args.nthreads << std::endl
+              << "mode: " << args.mode << std::endl
+              << std::endl;
+}
+
+
+void ProcessCommandArguments(int argc, char* argv[], Args* args) {
+    if (argc != 6) {
         std::cout <<
-                  "Usage: " << argv[0] << " <receiver_address> <number_of_byte> <iterations>"
+                  "Usage: " << argv[0] << " <destination> <number_of_kbytes> <iterations> <nthreads> <mode 0 - tcp, 1 - filesystem>"
                   << std::endl;
         exit(EXIT_FAILURE);
     }
     try {
-        return ArgumentTuple(argv[1], std::stoull(argv[2]), std::stoull(argv[3]));
+        args->receiver_address = argv[1];
+        args->number_of_bytes = std::stoull(argv[2]) * 1024;
+        args->iterations = std::stoull(argv[3]);
+        args->nthreads = std::stoull(argv[4]);
+        args->mode = std::stoull(argv[5]);
+        PrintCommandArguments(*args);
+        return;
     } catch(std::exception& e) {
         std::cerr << "Fail to parse arguments" << std::endl;
         std::cerr << e.what() << std::endl;
@@ -24,54 +52,95 @@ ArgumentTuple ProcessCommandArguments(int argc, char* argv[]) {
     }
 }
 
-bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations) {
-    auto buffer = std::unique_ptr<uint8_t>(new uint8_t[number_of_byte]);
-
-    for(uint64_t i = 0; i < iterations; i++) {
-//        std::cerr << "Send file " << i + 1 << "/" << iterations << std::endl;
+void ProcessAfterSend(asapo::GenericRequestHeader header, asapo::Error err) {
+    mutex.lock();
+    iterations_remained--;
+    if (err) {
+        std::cerr << "File was not successfully sent: " << err << std::endl;
+        mutex.unlock();
+        return;
+    }
+    mutex.unlock();
+}
 
-        auto err = producer->Send(i + 1, buffer.get(), number_of_byte);
+bool SendDummyData(asapo::Producer* producer, uint8_t* data, size_t number_of_byte, uint64_t iterations) {
 
+    for(uint64_t i = 0; i < iterations; i++) {
+        auto err = producer->Send(i + 1, data, number_of_byte, std::to_string(i), &ProcessAfterSend);
         if (err) {
-            std::cerr << "File was not successfully send: " << err << std::endl;
+            std::cerr << "Cannot send file: " << err << std::endl;
             return false;
-        } else {
-//            std::cerr << "File was successfully send." << std::endl;
         }
     }
-
     return true;
 }
 
-int main (int argc, char* argv[]) {
-    std::string receiver_address;
-    size_t number_of_kbytes;
-    uint64_t iterations;
-    std::tie(receiver_address, number_of_kbytes, iterations) = ProcessCommandArguments(argc, argv);
-
-    std::cout << "receiver_address: " << receiver_address << std::endl
-              << "Package size: " << number_of_kbytes << "k" << std::endl
-              << "iterations: " << iterations << std::endl
-              << std::endl;
-
-    auto producer = asapo::Producer::Create();
-    auto err = producer->ConnectToReceiver(receiver_address);
+std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
+    asapo::Error err;
+    auto producer = asapo::Producer::Create(args.receiver_address, args.nthreads,
+                                            args.mode == 0 ? asapo::RequestHandlerType::kTcp : asapo::RequestHandlerType::kFilesystem, &err);
     if(err) {
-        std::cerr << "Failed to connect to receiver. ProducerError: " << err << std::endl;
-        return EXIT_FAILURE;
+        std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
+        exit(EXIT_FAILURE);
     }
-    std::cout << "Successfully connected" << std::endl;
 
-    high_resolution_clock::time_point t1 = high_resolution_clock::now();
-    if(!SendDummyData(producer.get(), number_of_kbytes * 1024, iterations)) {
-        return EXIT_FAILURE;
+    producer->EnableLocalLog(true);
+    producer->SetLogLevel(asapo::LogLevel::Debug);
+    return producer;
+}
+
+void WaitThreadsFinished(const Args& args) {
+    uint64_t elapsed_ms = 0;
+    uint64_t timeout_sec = 30;
+    while (true) {
+        mutex.lock();
+        if (iterations_remained <= 0) {
+            mutex.unlock();
+            break;
+        }
+        mutex.unlock();
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        elapsed_ms += 100;
+        if (elapsed_ms > timeout_sec * 1000) {
+            std::cerr << "Exit on timeout " << std::endl;
+            exit(EXIT_FAILURE);
+        }
     }
+
+}
+
+void PrintOutput(const Args& args, const high_resolution_clock::time_point& start) {
     high_resolution_clock::time_point t2 = high_resolution_clock::now();
-    double duration_sec = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count() / 1000.0;
-    double size_gb = double(number_of_kbytes) * iterations / 1024.0 / 1024.0 * 8.0;
-    double rate = iterations / duration_sec;
+    double duration_sec = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - start ).count() / 1000.0;
+    double size_gb = double(args.number_of_bytes) * args.iterations / 1024.0  / 1024.0 / 1024.0 * 8.0;
+    double rate = args.iterations / duration_sec;
     std::cout << "Rate: " << rate << " Hz" << std::endl;
     std::cout << "Bandwidth " << size_gb / duration_sec << " Gbit/s" << std::endl;
+}
+
+
+std::unique_ptr<uint8_t> CreateMemoryBuffer(const Args& args) {
+    return std::unique_ptr<uint8_t>(new uint8_t[args.number_of_bytes]);
+}
+
+int main (int argc, char* argv[]) {
+    Args args;
+    ProcessCommandArguments(argc, argv, &args);
+
+    auto producer = CreateProducer(args);
+
+    iterations_remained = args.iterations;
+
+    auto buffer = CreateMemoryBuffer(args);
+
+    high_resolution_clock::time_point start_time = high_resolution_clock::now();
+
+    if(!SendDummyData(producer.get(), buffer.get(), args.number_of_bytes, args.iterations)) {
+        return EXIT_FAILURE;
+    }
+
+    WaitThreadsFinished(args);
+    PrintOutput(args, start_time);
 
     return EXIT_SUCCESS;
 }
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index 2422d106797d30a31f8b70fc02d30ee37610cd9f..b8bcd17ed7b9f77704aab9d2fc61526d751df011 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -20,10 +20,11 @@ void WaitThreads(std::vector<std::thread>* threads) {
 
 int ProcessError(const Error& err) {
     if (err == nullptr) return 0;
-    if (err->GetErrorType() != asapo::ErrorType::kEndOfFile) {
+    if (err->GetErrorType() != asapo::ErrorType::kTimeOut) {
         std::cout << err->Explain() << std::endl;
         return 1;
     }
+    std::cout << err->Explain() << std::endl;
     return 0;
 }
 
@@ -33,7 +34,7 @@ std::vector<std::thread> StartThreads(const std::string& server, const std::stri
         asapo::FileInfo fi;
         Error err;
         auto broker = asapo::DataBrokerFactory::CreateServerBroker(server, run_name, &err);
-        broker->SetTimeout(1000);
+        broker->SetTimeout(10000);
         while ((err = broker->GetNext(&fi, nullptr)) == nullptr) {
             (*nfiles)[i] ++;
         }
diff --git a/producer/api/CMakeLists.txt b/producer/api/CMakeLists.txt
index 58b7b518268acc7f798eed765be6a908642fa4e5..e30da7fec7b71975b21b5cf774900ee60819eac2 100644
--- a/producer/api/CMakeLists.txt
+++ b/producer/api/CMakeLists.txt
@@ -1,15 +1,20 @@
 set(TARGET_NAME producer-api)
 set(SOURCE_FILES
         src/producer.cpp
-        src/producer_impl.h
         src/producer_impl.cpp
-        )
+        src/producer_logger.cpp
+        src/request_handler_tcp.cpp
+        src/request_handler_filesystem.cpp
+        src/request_pool.cpp
+        src/receiver_discovery_service.cpp
+        src/request_handler_factory.cpp
+        src/request.cpp include/producer/common.h)
 
 
 ################################
 # Library
 ################################
-add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io> $<TARGET_OBJECTS:logger>
+add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io> $<TARGET_OBJECTS:logger> $<TARGET_OBJECTS:json_parser>
         $<TARGET_OBJECTS:curl_http_client> )
 target_include_directories(${TARGET_NAME} PUBLIC include ${ASAPO_CXX_COMMON_INCLUDE_DIR})
 target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
@@ -20,6 +25,12 @@ target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}
 set(TEST_SOURCE_FILES
         unittests/test_producer_impl.cpp
         unittests/test_producer.cpp
+        unittests/test_request_handler_tcp.cpp
+        unittests/test_request_handler_filesystem.cpp
+        unittests/test_request_pool.cpp
+        unittests/test_receiver_discovery_service.cpp
+        unittests/test_request_handler_factory.cpp
+
         )
 set(TEST_LIBRARIES "${TARGET_NAME}")
 
diff --git a/producer/api/include/producer/common.h b/producer/api/include/producer/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c0cb77926621fcde292485504d40cbc7ccf3fc7
--- /dev/null
+++ b/producer/api/include/producer/common.h
@@ -0,0 +1,24 @@
+#ifndef ASAPO_PRODUCER_COMMON_H
+#define ASAPO_PRODUCER_COMMON_H
+
+#include <cstdint>
+#include <functional>
+
+#include "common/networking.h"
+#include "common/error.h"
+
+namespace asapo {
+
+const uint8_t kMaxProcessingThreads = 32;
+
+using RequestCallback =  std::function<void(GenericRequestHeader, Error)>;
+
+enum class RequestHandlerType {
+    kTcp,
+    kFilesystem
+};
+
+
+}
+
+#endif //ASAPO_PRODUCER_COMMON_H
diff --git a/producer/api/include/producer/producer.h b/producer/api/include/producer/producer.h
index 192e9f2f2f98a504022fdd43775558298ed101a6..303f79d295d519ae0a93aa75979da2beef3cec19 100644
--- a/producer/api/include/producer/producer.h
+++ b/producer/api/include/producer/producer.h
@@ -4,15 +4,11 @@
 #include <memory>
 #include <string>
 
-#include "producer_error.h"
 #include "logger/logger.h"
+#include "producer/common.h"
 
 namespace asapo {
 
-enum class ProducerStatus {
-    kDisconnected,
-    kConnected,
-};
 
 class Producer {
   public:
@@ -20,26 +16,12 @@ class Producer {
     /*!
      * @return A unique_ptr to a new producer instance
      */
-    static std::unique_ptr<Producer> Create();
+    static std::unique_ptr<Producer> Create(const std::string& endpoint, uint8_t n_processing_threads,
+                                            asapo::RequestHandlerType type,
+                                            Error* err);
 
     virtual ~Producer() = default;
 
-    /*!
-     * @return The version of the producer
-     */
-    virtual uint64_t GetVersion() const = 0;
-
-    /*!
-     * @return The current status of the producer
-     */
-    virtual ProducerStatus GetStatus() const = 0;
-
-    //! Connects to a receiver
-    /*!
-      \param receiver_address - The address of the receiver. E.g. 127.0.0.1:4200
-      \return Error - nullptr on success
-    */
-    virtual Error ConnectToReceiver(const std::string& receiver_address) = 0;
     //! Sends data to the receiver
     /*!
       \param file_id - The id of the file. An error will be returned if this file id already exists on the receiver.
@@ -47,7 +29,8 @@ class Producer {
       \param file_size - The size of the data.
       \return Error - Will be nullptr on success
     */
-    virtual Error Send(uint64_t file_id, const void* data, size_t file_size) = 0;
+    virtual Error Send(uint64_t file_id, const void* data, size_t file_size, std::string file_name,
+                       RequestCallback callback) = 0;
     //! Set internal log level
     virtual void SetLogLevel(LogLevel level) = 0;
     //! Enables/Disables logs output to stdout
diff --git a/producer/api/include/producer/producer_error.h b/producer/api/include/producer/producer_error.h
index 720b5cf4db9365a536259470fbb93ce4ec8a5ff0..5a0641b7dd8c7ad1ee1159fee85fee954ebbfa5e 100644
--- a/producer/api/include/producer/producer_error.h
+++ b/producer/api/include/producer/producer_error.h
@@ -10,7 +10,9 @@ enum class ProducerErrorType {
     kConnectionNotReady,
     kFileTooLarge,
     kFileIdAlreadyInUse,
-    kUnknownServerError
+    kInternalServerError,
+    kCannotSendDataToReceivers,
+    kRequestPoolIsFull
 };
 
 //TODO Make a marco to create error class and error template class
@@ -51,6 +53,11 @@ class ProducerErrorTemplate : public SimpleErrorTemplate {
     }
 };
 
+static inline std::ostream& operator<<(std::ostream& os, const ProducerErrorTemplate& err) {
+    return os << err.Text();
+}
+
+
 namespace ProducerErrorTemplates {
 auto const kAlreadyConnected = ProducerErrorTemplate {
     "Already connected", ProducerErrorType::kAlreadyConnected
@@ -67,10 +74,20 @@ auto const kFileIdAlreadyInUse = ProducerErrorTemplate {
     "File already in use", ProducerErrorType::kFileIdAlreadyInUse
 };
 
-auto const kUnknownServerError = ProducerErrorTemplate {
-    "Unknown server error", ProducerErrorType::kUnknownServerError
+auto const kInternalServerError = ProducerErrorTemplate {
+    "Internal server error", ProducerErrorType::kInternalServerError
+};
+
+auto const kCannotSendDataToReceivers = ProducerErrorTemplate {
+    "Cannot connect/send data to receivers", ProducerErrorType::kCannotSendDataToReceivers
 };
 
+auto const kRequestPoolIsFull = ProducerErrorTemplate {
+    "Cannot add request to pool - hit pool size limit", ProducerErrorType::kRequestPoolIsFull
+};
+
+
+
 
 };
 }
diff --git a/producer/api/src/producer.cpp b/producer/api/src/producer.cpp
index c69f21e149e01bc31ad4e755354d779c21008b1a..a8e3e38b4ab8d124dc84f17558f122ac079b54ac 100644
--- a/producer/api/src/producer.cpp
+++ b/producer/api/src/producer.cpp
@@ -1,6 +1,21 @@
 #include "producer/producer.h"
 #include "producer_impl.h"
 
-std::unique_ptr<asapo::Producer> asapo::Producer::Create() {
-    return std::unique_ptr<asapo::Producer>(new ProducerImpl());
+std::unique_ptr<asapo::Producer> asapo::Producer::Create(const std::string& endpoint, uint8_t n_processing_threads,
+        asapo::RequestHandlerType type, Error* err) {
+    if (n_processing_threads > kMaxProcessingThreads) {
+        *err = TextError("Too many processing threads: " + std::to_string(n_processing_threads));
+        return nullptr;
+    }
+
+    try {
+        *err = nullptr;
+        return std::unique_ptr<asapo::Producer>(new ProducerImpl(endpoint, n_processing_threads, type));
+    } catch (const std::exception& ex) {
+        *err = TextError(ex.what());
+        return nullptr;
+    } catch (...) {
+        *err = TextError("Unknown exception in producer_api ");
+        return nullptr;
+    }
 }
diff --git a/producer/api/src/producer_impl.cpp b/producer/api/src/producer_impl.cpp
index 6e8a5acb8827ebe8ec0a82a79425cd14c624f2f6..59ddc768640373c0c63d2ce6661724feff7ef2fc 100644
--- a/producer/api/src/producer_impl.cpp
+++ b/producer/api/src/producer_impl.cpp
@@ -2,127 +2,55 @@
 #include <cstring>
 
 #include "producer_impl.h"
+#include "producer_logger.h"
 #include "io/io_factory.h"
+#include "producer/producer_error.h"
 
 namespace  asapo {
 
-const uint32_t ProducerImpl::kVersion = 1;
 const size_t ProducerImpl::kMaxChunkSize = size_t(1024) * size_t(1024) * size_t(1024) * size_t(2); //2GiByte
+const size_t ProducerImpl::kDiscoveryServiceUpdateFrequencyMs = 10000; // 10s
 
-ProducerImpl::ProducerImpl(): io__{GenerateDefaultIO()} {
-    //todo get fluentd uri from service discovery
-    log__ = CreateDefaultLoggerApi("producer_api", "http://max-wgs.desy.de:9880/asapo");
-}
 
-uint64_t ProducerImpl::GetVersion() const {
-    return kVersion;
-}
+ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, asapo::RequestHandlerType type):
+    log__{GetDefaultProducerLogger()} {
+    switch (type) {
+    case RequestHandlerType::kTcp:
+        discovery_service_.reset(new ReceiverDiscoveryService{endpoint, ProducerImpl::kDiscoveryServiceUpdateFrequencyMs});
+        request_handler_factory_.reset(new RequestHandlerFactory{discovery_service_.get()});
+        break;
+    case RequestHandlerType::kFilesystem:
+        request_handler_factory_.reset(nullptr);
+        request_handler_factory_.reset(new RequestHandlerFactory{endpoint});
 
-ProducerStatus ProducerImpl::GetStatus() const {
-    return status_;
-}
-
-Error ProducerImpl::InitializeSocketToReceiver(const std::string& receiver_address) {
-    Error err;
-    FileDescriptor fd = io__->CreateAndConnectIPTCPSocket(receiver_address, &err);
-    if(err != nullptr) {
-        log__->Debug("cannot connect to receiver at " + receiver_address + " - " + err->Explain());
-        return err;
     }
-
-    receiver_uri_ = receiver_address;
-    client_fd_ = fd;
-    return nullptr;
+    request_pool__.reset(new RequestPool{n_processing_threads, request_handler_factory_.get()});
 }
 
-Error ProducerImpl::ConnectToReceiver(const std::string& receiver_address) {
-    if(status_ != ProducerStatus::kDisconnected) {
-        return ProducerErrorTemplates::kAlreadyConnected.Generate();
-    }
-
-    auto error = InitializeSocketToReceiver(receiver_address);
-    if(error) {
-        status_ = ProducerStatus::kDisconnected;
-        return error;
-    }
-
-    status_ = ProducerStatus::kConnected;
-    log__->Info("connected to receiver at " + receiver_address);
-    return nullptr;
-}
-
-GenericNetworkRequestHeader ProducerImpl::GenerateNextSendRequest(uint64_t file_id, size_t file_size) {
-    GenericNetworkRequestHeader request;
-    request.op_code = kNetOpcodeSendData;
-    request.request_id = request_id_++;
-    request.data_id = file_id;
-    request.data_size = file_size;
+GenericRequestHeader ProducerImpl::GenerateNextSendRequest(uint64_t file_id, size_t file_size, std::string file_name) {
+    GenericRequestHeader request{kOpcodeTransferData, file_id, file_size, std::move(file_name)};
     return request;
 }
 
-Error ProducerImpl::SendHeaderAndData(const GenericNetworkRequestHeader& header, const void* data, size_t file_size) {
-    Error io_error;
-    io__->Send(client_fd_, &header, sizeof(header), &io_error);
-    if(io_error) {
-// todo: add meaningful message to the io_error (here and below)
-//        std::cerr << "ProducerImpl::Send/DataRequest error" << io_error << std::endl;
-        return io_error;
-    }
-
-    io__->Send(client_fd_, data, file_size, &io_error);
-    if(io_error) {
-//        std::cerr << "ProducerImpl::Send/data error" << io_error << std::endl;
-        return io_error;
-    }
-
-    return nullptr;
-}
-
-Error ProducerImpl::ReceiveResponce() {
-    Error err;
-    SendDataResponse sendDataResponse;
-    io__->Receive(client_fd_, &sendDataResponse, sizeof(sendDataResponse), &err);
-    if(err != nullptr) {
-//        std::cerr << "ProducerImpl::Receive error: " << err << std::endl;
-        return err;
+Error CheckProducerRequest(const GenericRequestHeader header) {
+    if (header.data_size > ProducerImpl::kMaxChunkSize) {
+        return ProducerErrorTemplates::kFileTooLarge.Generate();
     }
 
-    if(sendDataResponse.error_code) {
-        if(sendDataResponse.error_code == kNetErrorFileIdAlreadyInUse) {
-            return ProducerErrorTemplates::kFileIdAlreadyInUse.Generate();
-        }
-//        std::cerr << "Server reported an error. NetErrorCode: " << int(sendDataResponse.error_code) << std::endl;
-        return ProducerErrorTemplates::kUnknownServerError.Generate();
-    }
     return nullptr;
 }
 
 
-Error ProducerImpl::Send(uint64_t file_id, const void* data, size_t file_size) {
-    if(status_ != ProducerStatus::kConnected) {
-        return ProducerErrorTemplates::kConnectionNotReady.Generate();
-    }
-    if(file_size > kMaxChunkSize) {
-        return ProducerErrorTemplates::kFileTooLarge.Generate();
-    }
-
-    auto send_data_request = GenerateNextSendRequest(file_id, file_size);
-
-    auto  error = SendHeaderAndData(send_data_request, data, file_size);
-    if(error) {
-        log__->Debug("error sending to " + receiver_uri_ + " - " + error->Explain());
-        return error;
-    }
+Error ProducerImpl::Send(uint64_t file_id, const void* data, size_t file_size, std::string file_name,
+                         RequestCallback callback) {
+    auto request_header = GenerateNextSendRequest(file_id, file_size, std::move(file_name));
 
-    error =  ReceiveResponce();
-    if(error) {
-        log__->Debug("error receiving response from " + receiver_uri_ + " - " + error->Explain());
-        return error;
+    auto err = CheckProducerRequest(request_header);
+    if (err) {
+        return err;
     }
 
-    log__->Debug("succesfully sent data to " + receiver_uri_);
-
-    return nullptr;
+    return request_pool__->AddRequest(std::unique_ptr<Request> {new Request{request_header, data, callback}});
 }
 
 void ProducerImpl::SetLogLevel(LogLevel level) {
diff --git a/producer/api/src/producer_impl.h b/producer/api/src/producer_impl.h
index 5710d8dd561b5688465b221d57fe8b88490c2348..b675c11adae980f08277a92511221370f9c56c89 100644
--- a/producer/api/src/producer_impl.h
+++ b/producer/api/src/producer_impl.h
@@ -6,40 +6,37 @@
 #include <io/io.h>
 #include "producer/producer.h"
 #include "logger/logger.h"
+#include "request_pool.h"
+#include "request_handler_factory.h"
+#include "receiver_discovery_service.h"
 
 namespace asapo {
+
 class ProducerImpl : public Producer {
   private:
-    static const uint32_t kVersion;
-
-    int         client_fd_ = -1;
-    std::string receiver_uri_;
-    uint64_t    request_id_ = 0;
-
-    ProducerStatus status_ = ProducerStatus::kDisconnected;
-
-    Error InitializeSocketToReceiver(const std::string& receiver_address);
-    GenericNetworkRequestHeader GenerateNextSendRequest(uint64_t file_id, size_t file_size);
-    Error SendHeaderAndData(const GenericNetworkRequestHeader& header, const void* data, size_t file_size);
-    Error ReceiveResponce();
-
+    // important to create it before request_pool__
+    std::unique_ptr<ReceiverDiscoveryService> discovery_service_;
+    std::unique_ptr<RequestHandlerFactory> request_handler_factory_;
   public:
     static const size_t kMaxChunkSize;
+    static const size_t kDiscoveryServiceUpdateFrequencyMs;
 
-    ProducerImpl();
+    explicit ProducerImpl(std::string endpoint, uint8_t n_processing_threads, asapo::RequestHandlerType type);
     ProducerImpl(const ProducerImpl&) = delete;
     ProducerImpl& operator=(const ProducerImpl&) = delete;
 
-    uint64_t GetVersion() const override;
-    ProducerStatus GetStatus() const override;
     void SetLogLevel(LogLevel level) override;
     void EnableLocalLog(bool enable) override;
     void EnableRemoteLog(bool enable) override;
-    Error ConnectToReceiver(const std::string& receiver_address) override;
-    Error Send(uint64_t file_id, const void* data, size_t file_size) override;
-    std::unique_ptr<IO> io__;
-    Logger log__;
+    Error Send(uint64_t file_id, const void* data, size_t file_size, std::string file_name,
+               RequestCallback callback) override;
+    AbstractLogger* log__;
+    std::unique_ptr<RequestPool> request_pool__;
+  private:
+    GenericRequestHeader GenerateNextSendRequest(uint64_t file_id, size_t file_size, std::string file_name);
 };
+
+Error CheckProducerRequest(const GenericRequestHeader header);
 }
 
 #endif //ASAPO_PRODUCER__PRODUCER_IMPL_H
diff --git a/producer/api/src/producer_logger.cpp b/producer/api/src/producer_logger.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1fb485bbec99f8ff6f779f499bedf3fc5838aa1a
--- /dev/null
+++ b/producer/api/src/producer_logger.cpp
@@ -0,0 +1,14 @@
+#include "producer_logger.h"
+
+namespace asapo {
+
+
+AbstractLogger* GetDefaultProducerLogger() {
+    //todo get fluentd uri from service discovery
+//   static Logger logger = CreateDefaultLoggerApi("producer_api", "http://max-wgs.desy.de:9880/asapo");
+    static Logger logger = CreateDefaultLoggerBin("producer_api");
+
+    return logger.get();
+}
+
+}
diff --git a/producer/api/src/producer_logger.h b/producer/api/src/producer_logger.h
new file mode 100644
index 0000000000000000000000000000000000000000..abaf35c2490a820f75f1531f5db8595537a289f3
--- /dev/null
+++ b/producer/api/src/producer_logger.h
@@ -0,0 +1,14 @@
+#ifndef ASAPO_PRODUCER_LOGGER_H
+#define ASAPO_PRODUCER_LOGGER_H
+
+#include "logger/logger.h"
+
+namespace asapo {
+
+
+AbstractLogger* GetDefaultProducerLogger();
+
+}
+
+
+#endif //ASAPO_PRODUCER_LOGGER_H
diff --git a/producer/api/src/receiver_discovery_service.cpp b/producer/api/src/receiver_discovery_service.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5ca5f7315bacd63c8e2de397ef4eeb1e453bcc4a
--- /dev/null
+++ b/producer/api/src/receiver_discovery_service.cpp
@@ -0,0 +1,110 @@
+#include "receiver_discovery_service.h"
+
+#include "producer_logger.h"
+#include "json_parser/json_parser.h"
+
+#include <iostream>
+#include <algorithm>
+#include <numeric>
+
+namespace  asapo {
+
+ReceiverDiscoveryService::ReceiverDiscoveryService(std::string endpoint, uint64_t update_frequency_ms): httpclient__{DefaultHttpClient()},
+    log__{GetDefaultProducerLogger()},
+    endpoint_{std::move(endpoint) + "/receivers"}, update_frequency_ms_{update_frequency_ms} {
+
+}
+
+void ReceiverDiscoveryService::StartCollectingData() {
+    if (thread_ .joinable()) return;
+    log__->Debug("starting receiver discovery service");
+    thread_ = std::thread(
+                  std::bind(&ReceiverDiscoveryService::ThreadHandler, this));
+}
+
+
+Error ReceiverDiscoveryService::ParseResponse(const std::string& responce, ReceiversList* list,
+                                              uint64_t* max_connections) {
+    auto parser = JsonStringParser(responce);
+    Error err;
+    (err = parser.GetArrayString("Uris", list)) ||
+    (err = parser.GetUInt64("MaxConnections", max_connections));
+    return err;
+}
+
+Error ReceiverDiscoveryService::UpdateFromEndpoint(ReceiversList* list, uint64_t* max_connections) {
+    Error err;
+    HttpCode code;
+
+    auto responce = httpclient__->Get(endpoint_, &code, &err);
+    if (err != nullptr) {
+        return err;
+    }
+    if (code != HttpCode::OK) {
+        return TextError(responce);
+    }
+    return ParseResponse(responce, list, max_connections);
+
+}
+
+void ReceiverDiscoveryService::LogUriList(const ReceiversList& uris) {
+    std::string s;
+    s = std::accumulate(std::begin(uris), std::end(uris), s);
+    log__->Debug("got receivers from " + endpoint_ + ":" + s);
+}
+
+
+void ReceiverDiscoveryService::ThreadHandler() {
+    std::unique_lock<std::mutex> lock(mutex_);
+    do {
+        lock.unlock();
+        ReceiversList uris;
+        uint64_t max_connections;
+        auto err = UpdateFromEndpoint(&uris, &max_connections);
+        if (err != nullptr) {
+            log__->Error("getting receivers from " + endpoint_ + " - " + err->Explain());
+            lock.lock();
+            continue;
+        }
+        LogUriList(uris);
+        lock.lock();
+        max_connections_ = max_connections;
+        uri_list_ = uris;
+    } while (!condition_.wait_for(lock, std::chrono::milliseconds(update_frequency_ms_), [this] {return (quit_);})) ;
+}
+
+ReceiverDiscoveryService::~ReceiverDiscoveryService() {
+    mutex_.lock();
+    quit_ = true;
+    mutex_.unlock();
+    condition_.notify_one();
+
+    if(thread_.joinable()) {
+        log__->Debug("finishing discovery service");
+        thread_.join();
+    }
+}
+
+uint64_t ReceiverDiscoveryService::MaxConnections() {
+    std::lock_guard<std::mutex> lock{mutex_};
+    return max_connections_;
+}
+
+ReceiversList ReceiverDiscoveryService::RotatedUriList(uint64_t nthread) {
+    std::unique_lock<std::mutex> lock(mutex_);
+    auto size = uri_list_.size();
+    if (size == 0) {
+        return {};
+    }
+    ReceiversList list{uri_list_};
+    lock.unlock();
+    auto shift = nthread % size;
+    std::rotate(list.begin(), list.begin() + shift, list.end());
+    return list;
+}
+
+uint64_t ReceiverDiscoveryService::UpdateFrequency() {
+    return update_frequency_ms_;
+}
+
+}
\ No newline at end of file
diff --git a/producer/api/src/receiver_discovery_service.h b/producer/api/src/receiver_discovery_service.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9893423e2fb1d2764f1411b26a49b92ac5b787a
--- /dev/null
+++ b/producer/api/src/receiver_discovery_service.h
@@ -0,0 +1,47 @@
+#ifndef ASAPO_RECEIVERS_STATUS_H
+#define ASAPO_RECEIVERS_STATUS_H
+
+#include <string>
+#include <vector>
+#include <mutex>
+#include <thread>
+#include <condition_variable>
+
+
+#include "http_client/http_client.h"
+#include "logger/logger.h"
+#include "preprocessor/definitions.h"
+
+namespace  asapo {
+
+using ReceiversList = std::vector<std::string>;
+
+class ReceiverDiscoveryService {
+  public:
+    explicit ReceiverDiscoveryService(std::string endpoint, uint64_t update_frequency_ms);
+    VIRTUAL void StartCollectingData();
+    ~ReceiverDiscoveryService();
+    VIRTUAL uint64_t MaxConnections();
+    VIRTUAL ReceiversList RotatedUriList(uint64_t nthread);
+    VIRTUAL uint64_t UpdateFrequency();
+  public:
+    std::unique_ptr<HttpClient> httpclient__;
+    AbstractLogger* log__;
+  private:
+    void ThreadHandler();
+    Error UpdateFromEndpoint(ReceiversList* list, uint64_t* max_connections);
+    Error ParseResponse(const std::string& responce, ReceiversList* list, uint64_t* max_connections);
+    void LogUriList(const ReceiversList& uris);
+    std::string endpoint_;
+    std::thread thread_;
+    std::condition_variable condition_;
+    std::mutex mutex_;
+    uint64_t max_connections_{0};
+    ReceiversList uri_list_;
+    bool quit_{false};
+    uint64_t update_frequency_ms_;
+};
+
+}
+
+#endif //ASAPO_RECEIVERS_STATUS_H
diff --git a/producer/api/src/request.cpp b/producer/api/src/request.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9f256f7803bd760910e05d068b5b21465f910981
--- /dev/null
+++ b/producer/api/src/request.cpp
@@ -0,0 +1,5 @@
+//
+// Translation unit for request.h; the Request struct itself is header-only.
+//
+
+#include "request.h"
diff --git a/producer/api/src/request.h b/producer/api/src/request.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c0a7ccdd4867bdf8d019e43634adaf5c69e01e1
--- /dev/null
+++ b/producer/api/src/request.h
@@ -0,0 +1,17 @@
+#ifndef ASAPO_PRODUCER_REQUEST_H
+#define ASAPO_PRODUCER_REQUEST_H
+
+#include "common/networking.h"
+#include "producer/common.h"
+
+namespace asapo {
+
+struct Request {
+    GenericRequestHeader header;
+    const void* data;
+    RequestCallback callback;
+};
+
+}
+
+#endif //ASAPO_PRODUCER_REQUEST_H
diff --git a/producer/api/src/request_handler.h b/producer/api/src/request_handler.h
new file mode 100644
index 0000000000000000000000000000000000000000..e99bbd284e57c5ff3f5dab18d49778cc79e05d92
--- /dev/null
+++ b/producer/api/src/request_handler.h
@@ -0,0 +1,22 @@
+#ifndef ASAPO_PRODUCER_REQUEST_HANDLER_H
+#define ASAPO_PRODUCER_REQUEST_HANDLER_H
+
+#include <memory>
+
+#include "common/error.h"
+#include "request.h"
+
+namespace  asapo {
+
+class RequestHandler {
+  public:
+    virtual void PrepareProcessingRequestLocked()  = 0;
+    virtual void TearDownProcessingRequestLocked(const Error& error_from_process)  = 0;
+    virtual Error ProcessRequestUnlocked(const Request* request)  = 0;
+    virtual bool ReadyProcessRequest() = 0;
+    virtual ~RequestHandler() = default;
+};
+
+
+}
+#endif //ASAPO_PRODUCER_REQUEST_HANDLER_H
diff --git a/producer/api/src/request_handler_factory.cpp b/producer/api/src/request_handler_factory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3fbd76d28b04ceb2463388b55d70214fb64ce60a
--- /dev/null
+++ b/producer/api/src/request_handler_factory.cpp
@@ -0,0 +1,32 @@
+#include "request_handler_factory.h"
+
+#include "request_handler_tcp.h"
+#include "request_handler_filesystem.h"
+
+
+namespace  asapo {
+
+std::unique_ptr<RequestHandler> RequestHandlerFactory::NewRequestHandler(uint64_t thread_id, uint64_t* shared_counter) {
+    switch (type_) {
+    case asapo::RequestHandlerType::kTcp:
+        return std::unique_ptr<RequestHandler> {new RequestHandlerTcp(discovery_service_, thread_id, shared_counter)};
+    case asapo::RequestHandlerType::kFilesystem:
+        return std::unique_ptr<RequestHandler> {new RequestHandlerFilesystem(destination_folder_, thread_id)};
+
+    }
+    return nullptr;
+}
+
+RequestHandlerFactory::RequestHandlerFactory(ReceiverDiscoveryService* discovery_service): type_{RequestHandlerType::kTcp},
+    discovery_service_{discovery_service} {
+    if (discovery_service_) {
+        discovery_service_->StartCollectingData();
+    }
+}
+
+RequestHandlerFactory::RequestHandlerFactory(std::string destination_folder): type_{RequestHandlerType::kFilesystem},
+    destination_folder_{std::move(destination_folder)} {
+}
+
+
+}
\ No newline at end of file
diff --git a/producer/api/src/request_handler_factory.h b/producer/api/src/request_handler_factory.h
new file mode 100644
index 0000000000000000000000000000000000000000..066f0d1811d8229466792576c4e63db310ee9cda
--- /dev/null
+++ b/producer/api/src/request_handler_factory.h
@@ -0,0 +1,25 @@
+#ifndef ASAPO_REQUEST_HANDLER_FACTORY_H
+#define ASAPO_REQUEST_HANDLER_FACTORY_H
+
+#include "request_handler.h"
+#include "receiver_discovery_service.h"
+
+#include "preprocessor/definitions.h"
+
+namespace  asapo {
+
+class RequestHandlerFactory {
+  public:
+    RequestHandlerFactory(ReceiverDiscoveryService* discovery_service);
+    RequestHandlerFactory(std::string destination_folder);
+    VIRTUAL std::unique_ptr<RequestHandler> NewRequestHandler(uint64_t thread_id, uint64_t* shared_counter);
+  private:
+    RequestHandlerType type_;
+    ReceiverDiscoveryService* discovery_service_{nullptr};
+    std::string destination_folder_;
+};
+
+
+}
+
+#endif //ASAPO_REQUEST_HANDLER_FACTORY_H
diff --git a/producer/api/src/request_handler_filesystem.cpp b/producer/api/src/request_handler_filesystem.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..93ac0b49945030e02dbee22feec49ccaebb3406c
--- /dev/null
+++ b/producer/api/src/request_handler_filesystem.cpp
@@ -0,0 +1,26 @@
+#include "producer/producer_error.h"
+#include "request_handler_filesystem.h"
+#include "producer_logger.h"
+#include "io/io_factory.h"
+
+#include <cstdint>
+
+namespace asapo {
+
+RequestHandlerFilesystem::RequestHandlerFilesystem(std::string destination_folder, uint64_t thread_id):
+    io__{GenerateDefaultIO()}, log__{GetDefaultProducerLogger()}, destination_folder_{std::move(destination_folder)},
+    thread_id_{thread_id} {
+
+}
+
+Error RequestHandlerFilesystem::ProcessRequestUnlocked(const Request* request) {
+    std::string fullpath = destination_folder_ + "/" + request->header.file_name + ".bin";
+    auto err = io__->WriteDataToFile(fullpath, (uint8_t*)request->data, request->header.data_size);
+    if (request->callback) {
+        request->callback(request->header, std::move(err));
+    }
+    return nullptr;
+}
+
+
+}
diff --git a/producer/api/src/request_handler_filesystem.h b/producer/api/src/request_handler_filesystem.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba29a407ed2ef2414ffb60990e14e079122e608f
--- /dev/null
+++ b/producer/api/src/request_handler_filesystem.h
@@ -0,0 +1,36 @@
+#ifndef ASAPO_REQUEST_HANDLER_FILESYSTEM_H
+#define ASAPO_REQUEST_HANDLER_FILESYSTEM_H
+
+#include <chrono>
+
+#include "io/io.h"
+#include "common/error.h"
+
+#include "producer/common.h"
+#include "request_handler.h"
+#include "logger/logger.h"
+
+using std::chrono::high_resolution_clock;
+
+namespace asapo {
+
+class RequestHandlerFilesystem: public RequestHandler {
+  public:
+    explicit RequestHandlerFilesystem(std::string destination_folder, uint64_t thread_id);
+    Error ProcessRequestUnlocked(const Request* request) override;
+    bool ReadyProcessRequest() override {
+        return true;
+    };
+    void PrepareProcessingRequestLocked()  override {};
+    void TearDownProcessingRequestLocked(const Error& error_from_process)  override {};
+
+    virtual ~RequestHandlerFilesystem() = default;
+    std::unique_ptr<IO> io__;
+    const AbstractLogger* log__;
+  private:
+    std::string destination_folder_;
+    uint64_t thread_id_;
+};
+}
+
+#endif //ASAPO_REQUEST_HANDLER_FILESYSTEM_H
diff --git a/producer/api/src/request_handler_tcp.cpp b/producer/api/src/request_handler_tcp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3b5972d8a965ff2ac024b9632b7946cd9a6e6148
--- /dev/null
+++ b/producer/api/src/request_handler_tcp.cpp
@@ -0,0 +1,182 @@
+#include "producer/producer_error.h"
+#include "request_handler_tcp.h"
+#include "producer_logger.h"
+#include "io/io_factory.h"
+
+
+namespace asapo {
+
+
+RequestHandlerTcp::RequestHandlerTcp(ReceiverDiscoveryService* discovery_service, uint64_t thread_id,
+                                     uint64_t* shared_counter):
+    io__{GenerateDefaultIO()}, log__{GetDefaultProducerLogger()}, discovery_service__{discovery_service}, thread_id_{thread_id},
+    ncurrent_connections_{shared_counter} {
+
+}
+
+Error RequestHandlerTcp::ConnectToReceiver(const std::string& receiver_address) {
+    Error err;
+    sd_ = io__->CreateAndConnectIPTCPSocket(receiver_address, &err);
+    if(err != nullptr) {
+        log__->Debug("cannot connect to receiver at " + receiver_address + " - " + err->Explain());
+        return err;
+    }
+    log__->Info("connected to receiver at " + receiver_address);
+    connected_receiver_uri_ = receiver_address;
+    return nullptr;
+}
+
+Error RequestHandlerTcp::SendHeaderAndData(const Request* request) {
+    Error io_error;
+    io__->Send(sd_, &(request->header), sizeof(request->header), &io_error);
+    if(io_error) {
+        return io_error;
+    }
+
+    io__->Send(sd_, request->data, request->header.data_size, &io_error);
+    if(io_error) {
+        return io_error;
+    }
+
+    return nullptr;
+}
+
+Error RequestHandlerTcp::ReceiveResponse() {
+    Error err;
+    SendDataResponse sendDataResponse;
+    io__->Receive(sd_, &sendDataResponse, sizeof(sendDataResponse), &err);
+    if(err != nullptr) {
+        return err;
+    }
+
+    if(sendDataResponse.error_code) {
+        if(sendDataResponse.error_code == kNetErrorFileIdAlreadyInUse) {
+            return ProducerErrorTemplates::kFileIdAlreadyInUse.Generate();
+        }
+        return ProducerErrorTemplates::kInternalServerError.Generate();
+    }
+    return nullptr;
+}
+
+Error RequestHandlerTcp::TrySendToReceiver(const Request* request) {
+    auto err = SendHeaderAndData(request);
+    if (err)  {
+        return err;
+    }
+
+    err = ReceiveResponse();
+    if (err)  {
+        return err;
+    }
+
+    log__->Debug(std::string("successfully sent data ") + " id: " + std::to_string(request->header.data_id) + " to " +
+        connected_receiver_uri_);
+    return nullptr;
+}
+
+
+void RequestHandlerTcp::UpdateIfNewConnection() {
+    if (Connected())
+        return;
+    UpdateReceiversList();
+    (*ncurrent_connections_)++;
+}
+
+bool RequestHandlerTcp::UpdateReceiversList() {
+    auto thread_receivers_new = discovery_service__->RotatedUriList(thread_id_);
+    last_receivers_uri_update_ = high_resolution_clock::now();
+    if (thread_receivers_new != receivers_list_) {
+        receivers_list_ = thread_receivers_new;
+        return true;
+    }
+    return false;
+}
+
+bool RequestHandlerTcp::TimeToUpdateReceiverList() {
+    uint64_t elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>( high_resolution_clock::now() -
+        last_receivers_uri_update_).count();
+    return elapsed_ms > discovery_service__->UpdateFrequency();
+}
+
+
+bool RequestHandlerTcp::Disconnected() {
+    return !Connected();
+}
+
+
+bool RequestHandlerTcp::NeedRebalance() {
+    if (Disconnected())
+        return false;
+
+    if (TimeToUpdateReceiverList()) {
+        return UpdateReceiversList();
+    }
+    return false;
+}
+
+void RequestHandlerTcp::CloseConnectionToPeformRebalance() {
+    io__->CloseSocket(sd_, nullptr);
+    log__->Info("rebalancing");
+    sd_ = kDisconnectedSocketDescriptor;
+}
+
+void RequestHandlerTcp::Disconnect() {
+    io__->CloseSocket(sd_, nullptr);
+    sd_ = kDisconnectedSocketDescriptor;
+    log__->Debug("disconnected from  " + connected_receiver_uri_);
+    connected_receiver_uri_.clear();
+}
+
+bool RequestHandlerTcp::ServerError(const Error& err) {
+    return err != nullptr && err != ProducerErrorTemplates::kFileIdAlreadyInUse;
+}
+
+Error RequestHandlerTcp::ProcessRequestUnlocked(const Request* request) {
+    if (NeedRebalance()) {
+        CloseConnectionToPeformRebalance();
+    }
+    for (auto receiver_uri : receivers_list_) {
+        if (Disconnected()) {
+            auto err = ConnectToReceiver(receiver_uri);
+            if (err != nullptr ) continue;
+        }
+
+        auto err = TrySendToReceiver(request);
+        if (ServerError(err))  {
+            Disconnect();
+            log__->Debug("cannot send data to " + receiver_uri + ": " + err->Explain());
+            continue;
+        }
+
+        if (request->callback) {
+            request->callback(request->header, std::move(err));
+        }
+        return nullptr;
+    }
+    return ProducerErrorTemplates::kCannotSendDataToReceivers.Generate();
+}
+
+bool RequestHandlerTcp::Connected() {
+    return sd_ != kDisconnectedSocketDescriptor;
+}
+
+bool RequestHandlerTcp::CanCreateNewConnections() {
+    return (*ncurrent_connections_) < discovery_service__->MaxConnections();
+}
+
+bool RequestHandlerTcp::ReadyProcessRequest() {
+    return Connected() || CanCreateNewConnections();
+}
+
+void RequestHandlerTcp::PrepareProcessingRequestLocked() {
+    UpdateIfNewConnection();
+}
+
+void RequestHandlerTcp::TearDownProcessingRequestLocked(const Error& error_from_process) {
+    if (error_from_process) {
+        (*ncurrent_connections_)--;
+    }
+
+}
+
+}
diff --git a/producer/api/src/request_handler_tcp.h b/producer/api/src/request_handler_tcp.h
new file mode 100644
index 0000000000000000000000000000000000000000..08a02132c6aae4ac2f7d38eee9abf8aef7646013
--- /dev/null
+++ b/producer/api/src/request_handler_tcp.h
@@ -0,0 +1,55 @@
+#ifndef ASAPO_REQUEST_H
+#define ASAPO_REQUEST_H
+
+#include <chrono>
+
+#include "io/io.h"
+#include "common/error.h"
+#include "receiver_discovery_service.h"
+#include "common/networking.h"
+
+#include "producer/common.h"
+#include "request_handler.h"
+
+
+using std::chrono::high_resolution_clock;
+
+namespace asapo {
+
+class RequestHandlerTcp: public RequestHandler {
+  public:
+    explicit RequestHandlerTcp(ReceiverDiscoveryService* discovery_service, uint64_t thread_id, uint64_t* shared_counter);
+    Error ProcessRequestUnlocked(const Request* request) override;
+    bool ReadyProcessRequest() override;
+    void PrepareProcessingRequestLocked()  override;
+    void TearDownProcessingRequestLocked(const Error& error_from_process)  override;
+
+    virtual ~RequestHandlerTcp() = default;
+    std::unique_ptr<IO> io__;
+    const AbstractLogger* log__;
+    ReceiverDiscoveryService* discovery_service__;
+  private:
+    Error ConnectToReceiver(const std::string& receiver_address);
+    Error SendHeaderAndData(const Request*);
+    Error ReceiveResponse();
+    Error TrySendToReceiver(const Request* request);
+    SocketDescriptor sd_{kDisconnectedSocketDescriptor};
+    void UpdateIfNewConnection();
+    bool UpdateReceiversList();
+    bool TimeToUpdateReceiverList();
+    bool NeedRebalance();
+    void CloseConnectionToPeformRebalance();
+    bool Disconnected();
+    void Disconnect();
+    bool ServerError(const Error& err);
+    ReceiversList receivers_list_;
+    high_resolution_clock::time_point last_receivers_uri_update_;
+    bool Connected();
+    bool CanCreateNewConnections();
+    uint64_t thread_id_;
+    uint64_t* ncurrent_connections_;
+    std::string connected_receiver_uri_;
+};
+}
+
+#endif //ASAPO_REQUEST_H
diff --git a/producer/api/src/request_pool.cpp b/producer/api/src/request_pool.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a5598feed7ae120b7a2be1edc632f90342db5079
--- /dev/null
+++ b/producer/api/src/request_pool.cpp
@@ -0,0 +1,91 @@
+#include "request_pool.h"
+#include "producer_logger.h"
+
+
+namespace asapo {
+
+// Starts n_threads worker threads; each worker builds its own RequestHandler
+// via the injected factory and loops in ThreadHandler until shutdown.
+// The factory pointer is borrowed and must outlive this pool.
+RequestPool:: RequestPool(uint8_t n_threads,
+                          RequestHandlerFactory* request_handler_factory): log__{GetDefaultProducerLogger()},
+    request_handler_factory__{request_handler_factory},
+    threads_{n_threads} {
+    for(size_t i = 0; i < threads_.size(); i++) {
+        log__->Debug("starting thread " + std::to_string(i));
+        threads_[i] = std::thread(
+                          [this, i] {ThreadHandler(i);});
+    }
+
+}
+
+// Enqueues a request for asynchronous processing by a worker thread.
+// Never blocks on queue capacity; currently always returns nullptr (success).
+Error RequestPool::AddRequest(std::unique_ptr<Request> request) {
+    std::unique_lock<std::mutex> lock(mutex_);
+    request_queue_.emplace_back(std::move(request));
+    lock.unlock();
+// notify_all rather than notify_one: a woken worker may not be ready
+// (ReadyProcessRequest can return false), and with notify_one the request
+// would then stall until another worker's 100ms wait_for timeout expires.
+    condition_.notify_all();
+
+    return nullptr;
+}
+
+// True when at least one request is queued AND the handler can accept work.
+// Used as the condition_ predicate; must be called with mutex_ held.
+bool RequestPool::CanProcessRequest(const std::unique_ptr<RequestHandler>& request_handler) {
+    return request_queue_.size() && request_handler->ReadyProcessRequest();
+}
+
+// Pops the oldest request. Caller must hold mutex_ and guarantee the queue
+// is non-empty (front() on an empty deque is undefined behaviour).
+std::unique_ptr<Request> RequestPool::GetRequestFromQueue() {
+    auto request = std::move(request_queue_.front());
+    request_queue_.pop_front();
+    return request;
+}
+
+// Returns a failed request to the FRONT of the queue so it is retried before
+// newer requests. Caller must hold mutex_.
+void RequestPool::PutRequestBackToQueue(std::unique_ptr<Request> request) {
+    request_queue_.emplace_front(std::move(request));
+}
+
+// Processes one queued request with the given handler. Precondition: the
+// caller (ThreadHandler) holds thread_info->lock; the handler-specific work
+// runs unlocked so other workers can keep queueing/processing in parallel.
+// On failure the request is re-queued at the front after a 1s back-off.
+void RequestPool::ProcessRequest(const std::unique_ptr<RequestHandler>& request_handler,
+                                 ThreadInformation* thread_info) {
+    request_handler->PrepareProcessingRequestLocked();
+    auto request = GetRequestFromQueue();
+    thread_info->lock.unlock();
+    auto err = request_handler->ProcessRequestUnlocked(request.get());
+    thread_info->lock.lock();
+    request_handler->TearDownProcessingRequestLocked(err);
+    if (err) {
+        // Fixed: release the pool mutex during the back-off sleep. The
+        // previous code slept for 1000ms while holding the lock, which
+        // blocked AddRequest() and stalled every other worker thread.
+        thread_info->lock.unlock();
+        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+        thread_info->lock.lock();
+        PutRequestBackToQueue(std::move(request));
+        condition_.notify_all();
+    }
+    // Postcondition: lock is held again on return (ThreadHandler relies on it).
+}
+
+// Worker loop. Waits on condition_ with a 100ms timeout so quit_ and
+// handler readiness are re-checked regularly even without notifications.
+// do_work is true only when the predicate held (request available or quit).
+void RequestPool::ThreadHandler(uint64_t id) {
+    ThreadInformation thread_info;
+    thread_info.lock =  std::unique_lock<std::mutex>(mutex_);
+    auto request_handler = request_handler_factory__->NewRequestHandler(id, &shared_counter_);
+    do {
+        auto do_work = condition_.wait_for(thread_info.lock, std::chrono::milliseconds(100), [this, &request_handler] {
+            return (CanProcessRequest(request_handler) || quit_);
+        });
+        //after wait, we own the lock
+        if (!quit_ && do_work) {
+            ProcessRequest(request_handler, &thread_info);
+        };
+    } while (!quit_);
+}
+
+// Shutdown: sets quit_ under the mutex (so workers blocked in wait_for see a
+// consistent flag), wakes everyone, then joins all worker threads.
+RequestPool::~RequestPool() {
+    mutex_.lock();
+    quit_ = true;
+    mutex_.unlock();
+    condition_.notify_all();
+
+    for(size_t i = 0; i < threads_.size(); i++) {
+        if(threads_[i].joinable()) {
+            log__->Debug("finishing thread " + std::to_string(i));
+            threads_[i].join();
+        }
+    }
+}
+// Thread-safe snapshot of the current queue length.
+uint64_t RequestPool::NRequestsInQueue() {
+    std::lock_guard<std::mutex> lock{mutex_};
+    return request_queue_.size();
+}
+
+}
diff --git a/producer/api/src/request_pool.h b/producer/api/src/request_pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fac37bbc12f5bc34f7746ff5784ea5a07784fae
--- /dev/null
+++ b/producer/api/src/request_pool.h
@@ -0,0 +1,49 @@
+#ifndef ASAPO_REQUEST_POOL_H
+#define ASAPO_REQUEST_POOL_H
+
+#include <string>
+#include <vector>
+#include <mutex>
+#include <thread>
+#include <condition_variable>
+#include <queue>
+
+
+#include "logger/logger.h"
+#include "request_handler_tcp.h"
+#include "request_handler_factory.h"
+
+#include "preprocessor/definitions.h"
+
+
+namespace asapo {
+
+// Thread pool owning a FIFO queue of producer Requests. Each worker thread
+// drives its own RequestHandler created by the injected factory; the pool is
+// thread-safe and joins all workers in its destructor.
+class RequestPool {
+    // Per-worker state passed into ProcessRequest; holds the pool mutex lock
+    // so the handler can be run with the lock temporarily released.
+    struct ThreadInformation {
+        std::unique_lock<std::mutex> lock;
+    };
+  public:
+    explicit RequestPool(uint8_t n_threads, RequestHandlerFactory* request_handler_factory);
+    // NOTE(review): VIRTUAL presumably expands to 'virtual' when UNIT_TESTS
+    // is defined (see preprocessor/definitions.h) so tests can mock this
+    // method - confirm against that header.
+    VIRTUAL Error AddRequest(std::unique_ptr<Request> request);
+    ~RequestPool();
+    // Public (trailing '__') for unit-test injection.
+    AbstractLogger* log__;
+    uint64_t NRequestsInQueue();
+  private:
+    RequestHandlerFactory* request_handler_factory__;
+    std::vector<std::thread> threads_;
+    void ThreadHandler(uint64_t id);
+    bool quit_{false};
+    std::condition_variable condition_;
+    std::mutex mutex_;
+    std::deque<std::unique_ptr<Request>> request_queue_;
+    bool CanProcessRequest(const std::unique_ptr<RequestHandler>& request_handler);
+    void ProcessRequest(const std::unique_ptr<RequestHandler>& request_handler, ThreadInformation* thread_info);
+    std::unique_ptr<Request> GetRequestFromQueue();
+    void PutRequestBackToQueue(std::unique_ptr<Request>request);
+    // Shared across all handlers of this pool (passed to NewRequestHandler);
+    // used by TCP handlers to count open connections for rebalancing.
+    uint64_t shared_counter_{0};
+
+};
+
+}
+
+#endif //ASAPO_REQUEST_POOL_H
diff --git a/producer/api/unittests/mocking.h b/producer/api/unittests/mocking.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d231cc2afa4b359d13f495f8b943071af1d2646
--- /dev/null
+++ b/producer/api/unittests/mocking.h
@@ -0,0 +1,67 @@
+#ifndef ASAPO_MOCKING_H
+#define ASAPO_MOCKING_H
+
+#include <gtest/gtest.h>
+
+#include "../src/request_pool.h"
+#include "../src/request_handler_factory.h"
+#include "../src/receiver_discovery_service.h"
+
+namespace asapo {
+
+const std::string expected_endpoint = "expected_endpont";
+
+// Discovery-service mock for producer unit tests; UpdateFrequency() returns 0
+// so polling loops in tests do not sleep.
+class MockDiscoveryService : public asapo::ReceiverDiscoveryService {
+  public:
+    MockDiscoveryService() : ReceiverDiscoveryService{expected_endpoint, 1} {};
+    MOCK_METHOD0(StartCollectingData, void());
+    MOCK_METHOD0(MaxConnections, uint64_t());
+    MOCK_METHOD1(RotatedUriList, ReceiversList(uint64_t));
+    uint64_t UpdateFrequency() override {
+        return 0;
+    }
+};
+
+
+// RequestPool mock. gMock cannot mock methods taking move-only arguments
+// directly, so AddRequest forwards the raw pointer to the mockable
+// AddRequest_t and wraps its result back into an Error.
+// NOTE(review): the override relies on RequestPool::AddRequest being declared
+// VIRTUAL - only effective in builds where that macro expands to 'virtual'.
+class MockRequestPull : public RequestPool {
+  public:
+    MockRequestPull(RequestHandlerFactory* request_handler_factory) :
+        RequestPool{1, request_handler_factory} {};
+    asapo::Error AddRequest(std::unique_ptr<asapo::Request> request) override {
+        if (request == nullptr) {
+            return asapo::Error{AddRequest_t(nullptr)};
+        }
+        return asapo::Error{AddRequest_t(request.get())};
+    }
+    MOCK_METHOD1(AddRequest_t, asapo::ErrorInterface * (Request*));
+};
+
+
+// RequestHandler mock; the *_t indirections exist for the same move-only
+// Error reason as in MockRequestPull.
+class MockRequestHandler : public RequestHandler {
+  public:
+
+    Error ProcessRequestUnlocked(const Request* request) override {
+        return Error{ProcessRequestUnlocked_t(request)};
+    }
+    void TearDownProcessingRequestLocked(const Error& error_from_process) override {
+        if (error_from_process) {
+            TearDownProcessingRequestLocked_t(error_from_process.get());
+        } else {
+            TearDownProcessingRequestLocked_t(nullptr);
+        }
+    }
+    MOCK_METHOD0(PrepareProcessingRequestLocked, void());
+    MOCK_METHOD0(ReadyProcessRequest, bool());
+    MOCK_METHOD1(TearDownProcessingRequestLocked_t, void(ErrorInterface* error_from_process));
+    MOCK_METHOD1(ProcessRequestUnlocked_t, ErrorInterface * (const Request*));
+};
+
+
+
+}
+
+using asapo::MockRequestHandler;
+using asapo::MockDiscoveryService;
+using asapo::MockRequestPull;
+
+#endif //ASAPO_MOCKING_H
diff --git a/producer/api/unittests/test_producer.cpp b/producer/api/unittests/test_producer.cpp
index 80072d7c1441f932deb8e12a1c280f0f7dca8c06..941456e79f54b9901e2a4f2b6280b255554bf205 100644
--- a/producer/api/unittests/test_producer.cpp
+++ b/producer/api/unittests/test_producer.cpp
@@ -3,14 +3,47 @@
 
 #include "producer/producer.h"
 #include "../src/producer_impl.h"
+
 using ::testing::Ne;
+using ::testing::Eq;
 
 namespace {
 
-TEST(CreateProducer, PointerIsNotNullptr) {
-    std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create();
+// Factory happy path: a TCP-backed producer is created and err stays null.
+TEST(CreateProducer, TcpProducer) {
+    asapo::Error err;
+    std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("endpoint", 4, asapo::RequestHandlerType::kTcp,
+                                                &err);
+    ASSERT_THAT(dynamic_cast<asapo::ProducerImpl*>(producer.get()), Ne(nullptr));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST(CreateProducer, FileSystemProducer) {
+    asapo::Error err;
+    std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("endpoint", 4,
+                                                asapo::RequestHandlerType::kFilesystem, &err);
     ASSERT_THAT(dynamic_cast<asapo::ProducerImpl*>(producer.get()), Ne(nullptr));
-    ASSERT_THAT(producer.get(), Ne(nullptr));
+    ASSERT_THAT(err, Eq(nullptr));
 }
 
+
+// Requesting more worker threads than kMaxProcessingThreads must fail with a
+// null producer and a non-null error.
+TEST(CreateProducer, TooManyThreads) {
+    asapo::Error err;
+    std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("", asapo::kMaxProcessingThreads + 1,
+                                                asapo::RequestHandlerType::kTcp, &err);
+    ASSERT_THAT(producer, Eq(nullptr));
+    ASSERT_THAT(err, Ne(nullptr));
+}
+
+// Smoke test: Send() only enqueues the request, so it succeeds even though no
+// receiver is reachable; the sleep lets the worker threads spin up before the
+// producer is destroyed.
+TEST(Producer, SimpleWorkflowWihoutConnection) {
+    asapo::Error err;
+    std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("hello", 5, asapo::RequestHandlerType::kTcp, &err);
+    auto err_send = producer->Send(1, nullptr, 1, "", nullptr);
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    ASSERT_THAT(producer, Ne(nullptr));
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(err_send, Eq(nullptr));
+}
+
+
+
 }
diff --git a/producer/api/unittests/test_producer_impl.cpp b/producer/api/unittests/test_producer_impl.cpp
index 0376a3f3d0f781db729986cbe4e8953be79cd971..084557fb3b41619621ead0daf8323e9905a6ad2f 100644
--- a/producer/api/unittests/test_producer_impl.cpp
+++ b/producer/api/unittests/test_producer_impl.cpp
@@ -1,12 +1,16 @@
 #include <gtest/gtest.h>
 #include <gmock/gmock.h>
 
-#include "unittests/MockIO.h"
 #include "unittests/MockLogger.h"
 #include "common/error.h"
-#include "io/io.h"
-#include "producer/producer.h"
+#include "producer/common.h"
 #include "../src/producer_impl.h"
+#include "producer/producer_error.h"
+
+#include "../src/request_pool.h"
+#include "../src/request_handler_tcp.h"
+
+#include "mocking.h"
 
 namespace {
 
@@ -21,303 +25,70 @@ using ::testing::Mock;
 using ::testing::InSequence;
 using ::testing::HasSubstr;
 
-TEST(get_version, VersionAboveZero) {
-    asapo::ProducerImpl producer;
-    EXPECT_GE(producer.GetVersion(), 0);
-}
 
+using asapo::RequestPool;
+using asapo::Request;
 
-TEST(Producer, Logger) {
-    asapo::ProducerImpl producer;
-    ASSERT_THAT(dynamic_cast<asapo::AbstractLogger*>(producer.log__.get()), Ne(nullptr));
+
+// gMock matcher: arg points to a GenericRequestHeader describing a
+// data-transfer request with the expected id, size and file name.
+MATCHER_P3(M_CheckSendDataRequest, file_id, file_size, file_name,
+           "Checks if a valid GenericRequestHeader was Send") {
+    return ((asapo::GenericRequestHeader*)arg)->op_code == asapo::kOpcodeTransferData
+           && ((asapo::GenericRequestHeader*)arg)->data_id == file_id
+           && std::string(((asapo::GenericRequestHeader*)arg)->file_name) == file_name
+           && ((asapo::GenericRequestHeader*)arg)->data_size == file_size;
 }
 
-/**
- * ConnectToReceiver
- */
 
-class ProducerImpl : public testing::Test {
+// Constructor wires up a default logger and a RequestPool.
+TEST(ProducerImpl, Constructor) {
+    asapo::ProducerImpl producer{"", 4, asapo::RequestHandlerType::kTcp};
+    ASSERT_THAT(dynamic_cast<asapo::AbstractLogger*>(producer.log__), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<asapo::RequestPool*>(producer.request_pool__.get()), Ne(nullptr));
+}
+
+class ProducerImplTests : public testing::Test {
   public:
-    asapo::ProducerImpl producer;
-    testing::NiceMock<asapo::MockIO> mock_io;
+    testing::NiceMock<MockDiscoveryService> service;
+    asapo::RequestHandlerFactory factory{&service};
     testing::NiceMock<asapo::MockLogger> mock_logger;
-
-    asapo::FileDescriptor expected_fd = 83942;
-    uint64_t expected_file_id = 4224;
-    std::string expected_address = "127.0.0.1:9090";
-    uint64_t expected_request_id = 0;
-    uint64_t expected_file_size = 1337;
-    void*    expected_file_pointer = (void*)0xC00FE;
-
+    testing::NiceMock<MockRequestPull> mock_pull{&factory};
+    asapo::ProducerImpl producer{"", 1, asapo::RequestHandlerType::kTcp};
     void SetUp() override {
-        producer.io__ = std::unique_ptr<asapo::IO> {&mock_io};
-        producer.log__ = asapo::Logger {&mock_logger};
+        producer.log__ = &mock_logger;
+        producer.request_pool__ = std::unique_ptr<RequestPool> {&mock_pull};
     }
     void TearDown() override {
-        producer.io__.release();
-        producer.log__.release();
-    }
-
-    void ConnectToReceiver_DONE(asapo::FileDescriptor expected_fd = 1) {
-        EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(expected_address, _))
-        .Times(1)
-        .WillOnce(
-            DoAll(
-                testing::SetArgPointee<1>(nullptr),
-                Return(expected_fd)
-            ));
-        producer.ConnectToReceiver(expected_address);
-    }
-    void Send_DONE(int times = 1) {
-        EXPECT_CALL(mock_io, Send_t(_, _, _, _))
-        .Times(times)
-        .WillRepeatedly(
-            DoAll(
-                testing::SetArgPointee<3>(nullptr),
-                testing::ReturnArg<2>()
-            ));
+        producer.request_pool__.release();
     }
 };
 
-TEST_F(ProducerImpl, get_status__disconnected) {
-    asapo::ProducerStatus status = producer.GetStatus();
-    ASSERT_THAT(status, Eq(asapo::ProducerStatus::kDisconnected));
-}
-
-
-TEST_F(ProducerImpl, ConnectToReceiver__CreateAndConnectIPTCPSocket_error) {
-    EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(expected_address, _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<1>(asapo::IOErrorTemplates::kInvalidAddressFormat.Generate().release()),
-            Return(-1)
-        ));
-
-    EXPECT_CALL(mock_logger, Debug(HasSubstr("cannot connect")));
-
-    auto error = producer.ConnectToReceiver(expected_address);
-    auto status = producer.GetStatus();
-
-    ASSERT_THAT(error, Eq(asapo::IOErrorTemplates::kInvalidAddressFormat));
-    ASSERT_THAT(status, Eq(asapo::ProducerStatus::kDisconnected));
-}
-
-TEST_F(ProducerImpl, ConnectToReceiver) {
-    EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(expected_address, _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<1>(nullptr),
-            Return(expected_fd)
-        ));
-
-    EXPECT_CALL(mock_logger, Info(HasSubstr("connected")));
-
-
-    auto error = producer.ConnectToReceiver(expected_address);
-    auto status = producer.GetStatus();
-
-    ASSERT_THAT(error, Eq(nullptr));
-    ASSERT_THAT(status, Eq(asapo::ProducerStatus::kConnected));
-}
-
-TEST_F(ProducerImpl, ConnectToReceiver__already_connected) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE();
-
-    auto error = producer.ConnectToReceiver(expected_address);
-
-    ASSERT_THAT(error, Eq(asapo::ProducerErrorTemplates::kAlreadyConnected));
-}
-
-/**
- * Send
- */
-
-MATCHER_P3(M_CheckSendDataRequest, request_id, file_id, file_size,
-           "Checks if a valid GenericNetworkRequestHeader was Send") {
-    return ((asapo::GenericNetworkRequestHeader*)arg)->op_code == asapo::kNetOpcodeSendData
-           && ((asapo::GenericNetworkRequestHeader*)arg)->request_id == request_id
-           && ((asapo::GenericNetworkRequestHeader*)arg)->data_id == file_id
-           && ((asapo::GenericNetworkRequestHeader*)arg)->data_size == file_size;
-}
-
-ACTION_P2(A_WriteSendDataResponse, error_code, request_id) {
-    ((asapo::SendDataResponse*)arg1)->op_code = asapo::kNetOpcodeSendData;
-    ((asapo::SendDataResponse*)arg1)->error_code = error_code;
-    ((asapo::SendDataResponse*)arg1)->request_id = request_id;
-}
-
-TEST_F(ProducerImpl, Send__connection_not_ready) {
-
-    auto error = producer.Send(expected_file_id, nullptr, 1);
-
-    ASSERT_THAT(error, Eq(asapo::ProducerErrorTemplates::kConnectionNotReady));
-}
-
-TEST_F(ProducerImpl, Send__file_too_large) {
-
-    ConnectToReceiver_DONE(expected_fd);
-
-    auto error = producer.Send(expected_file_id, nullptr,
-                               size_t(1024) * size_t(1024) * size_t(1024) * size_t(3));
-
-    ASSERT_THAT(error, Eq(asapo::ProducerErrorTemplates::kFileTooLarge));
-}
-
-TEST_F(ProducerImpl, Send__sendDataRequest_error) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE(expected_fd);
-
-    EXPECT_CALL(mock_io, Send_t(expected_fd, M_CheckSendDataRequest(expected_request_id, expected_file_id,
-                                expected_file_size),
-                                sizeof(asapo::GenericNetworkRequestHeader), _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
-            Return(-1)
-        ));
-
-    auto error = producer.Send(expected_file_id, nullptr, expected_file_size);
-
-    ASSERT_THAT(error, Eq(asapo::IOErrorTemplates::kBadFileNumber));
+// An error from the request pool is propagated unchanged to the caller.
+TEST_F(ProducerImplTests, SendReturnsError) {
+    EXPECT_CALL(mock_pull, AddRequest_t(_)).WillOnce(Return(
+            asapo::ProducerErrorTemplates::kRequestPoolIsFull.Generate().release()));
+    auto err = producer.Send(1, nullptr, 1, "", nullptr);
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kRequestPoolIsFull));
 }
 
-TEST_F(ProducerImpl, Send__sendData_error) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE(expected_fd);
-    Send_DONE();
-
-    EXPECT_CALL(mock_io, Send_t(expected_fd, expected_file_pointer, expected_file_size, _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
-            Return(-1)
-        ));
-
-    EXPECT_CALL(mock_logger, Debug(HasSubstr("error sending to " + expected_address)));
-
-    auto error = producer.Send(expected_file_id, expected_file_pointer, expected_file_size);
-
-    ASSERT_THAT(error, Eq(asapo::IOErrorTemplates::kBadFileNumber));
+// Payloads above kMaxChunkSize are rejected before reaching the pool.
+TEST_F(ProducerImplTests, ErrorIfSizeTooLarge) {
+    auto err = producer.Send(1, nullptr, asapo::ProducerImpl::kMaxChunkSize + 1, "", nullptr);
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kFileTooLarge));
 }
 
 
-TEST_F(ProducerImpl, Send__Receive_error) {
-    InSequence sequence;
+TEST_F(ProducerImplTests, OKSendingRequest) {
+    uint64_t expected_size = 100;
+    uint64_t expected_id = 10;
+    std::string expected_name = "test_name";
 
-    ConnectToReceiver_DONE(expected_fd);
-    Send_DONE(2);
 
-    EXPECT_CALL(mock_io, Receive_t(expected_fd, _, sizeof(asapo::SendDataResponse), _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
-            testing::Return(-1)
-        ));
+    Request request{asapo::GenericRequestHeader{asapo::kOpcodeTransferData, expected_id, expected_size, expected_name}, nullptr, nullptr};
 
-    EXPECT_CALL(mock_logger, Debug(HasSubstr("error receiving response from " + expected_address)));
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(expected_id, expected_size, expected_name))).WillOnce(Return(
+                nullptr));
 
-    auto error = producer.Send(expected_file_id, expected_file_pointer, expected_file_size);
+    auto err = producer.Send(expected_id, nullptr, expected_size, expected_name, nullptr);
 
-    ASSERT_THAT(error, Eq(asapo::IOErrorTemplates::kBadFileNumber));
+    ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImpl, Send__Receive_server_error) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE(expected_fd);
-    Send_DONE(2);
-
-
-    EXPECT_CALL(mock_io, Receive_t(_, _, sizeof(asapo::SendDataResponse), _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(nullptr),
-            A_WriteSendDataResponse(asapo::kNetErrorAllocateStorageFailed, expected_request_id),
-            testing::ReturnArg<2>()
-        ));
-
-    auto error = producer.Send(expected_file_id, expected_file_pointer, expected_file_size);
-
-    ASSERT_THAT(error, Eq(asapo::ProducerErrorTemplates::kUnknownServerError));
-}
-
-TEST_F(ProducerImpl, Send__Receive_server_error_id_already_in_use) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE(expected_fd);
-    Send_DONE(2);
-
-
-    EXPECT_CALL(mock_io, Receive_t(_, _, sizeof(asapo::SendDataResponse), _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(nullptr),
-            A_WriteSendDataResponse(asapo::kNetErrorFileIdAlreadyInUse, expected_request_id),
-            testing::ReturnArg<2>()
-        ));
-
-    auto error = producer.Send(expected_file_id, expected_file_pointer, expected_file_size);
-
-    ASSERT_THAT(error, Eq(asapo::ProducerErrorTemplates::kFileIdAlreadyInUse));
-}
-
-TEST_F(ProducerImpl, Send) {
-    InSequence sequence;
-
-    ConnectToReceiver_DONE(expected_fd);
-    Send_DONE(2);
-
-
-    EXPECT_CALL(mock_io, Receive_t(_, _, sizeof(asapo::SendDataResponse), _))
-    .Times(1)
-    .WillOnce(
-        DoAll(
-            testing::SetArgPointee<3>(nullptr),
-            A_WriteSendDataResponse(asapo::kNetErrorNoError, expected_request_id),
-            testing::ReturnArg<2>()
-        ));
-
-    EXPECT_CALL(mock_logger, Debug(HasSubstr("succesfully sent data to " + expected_address)));
-
-    auto error = producer.Send(expected_file_id, expected_file_pointer, expected_file_size);
-
-    ASSERT_THAT(error, Eq(nullptr));
-}
-
-TEST_F(ProducerImpl, EnableLocalLog) {
-
-    EXPECT_CALL(mock_logger, EnableLocalLog(true));
-
-    producer.EnableLocalLog(true);
-
-}
-
-TEST_F(ProducerImpl, EnableRemoteLog) {
-
-    EXPECT_CALL(mock_logger, EnableRemoteLog(false));
-
-    producer.EnableRemoteLog(false);
-
-}
-
-
-TEST_F(ProducerImpl, SetLogLevel) {
-
-    EXPECT_CALL(mock_logger, SetLogLevel(asapo::LogLevel::Warning));
-
-    producer.SetLogLevel(asapo::LogLevel::Warning);
-
-}
 
 }
diff --git a/producer/api/unittests/test_receiver_discovery_service.cpp b/producer/api/unittests/test_receiver_discovery_service.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c69af5fdd533f2373d8fde6af12682c9e49caef6
--- /dev/null
+++ b/producer/api/unittests/test_receiver_discovery_service.cpp
@@ -0,0 +1,162 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <chrono>
+
+#include "unittests/MockLogger.h"
+#include "common/error.h"
+#include "common/io_error.h"
+
+#include "../src/receiver_discovery_service.h"
+#include "unittests/MockHttpClient.h"
+
+namespace {
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::AllOf;
+
+using ::testing::Test;
+
+using ::testing::InSequence;
+using ::testing::HasSubstr;
+using testing::SetArgPointee;
+using testing::ElementsAre;
+
+using asapo::Error;
+using asapo::MockHttpClient;
+using asapo::ReceiverDiscoveryService;
+
+std::mutex mutex;
+
+// Constructor wires up a default logger and an HTTP client.
+TEST(ReceiversStatus, Constructor) {
+    ReceiverDiscoveryService status{"endpoint", 1000};
+    ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(status.log__), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<const asapo::HttpClient*>(status.httpclient__.get()), Ne(nullptr));
+}
+
+
+class ReceiversStatusTests : public Test {
+  public:
+    // important to create logger before status, otherwise checks in destructor won't work
+    NiceMock<asapo::MockLogger> mock_logger;
+    // Raw pointer: ownership is transferred to status.httpclient__ (a
+    // unique_ptr) in SetUp, so status's destructor frees the mock.
+    NiceMock<MockHttpClient>* mock_http_client;
+
+    std::string expected_endpoint{"endpoint/receivers"};
+    // 20ms update period keeps the background polling tests fast.
+    ReceiverDiscoveryService status{"endpoint", 20};
+
+    void SetUp() override {
+        mock_http_client = new NiceMock<MockHttpClient>;
+        status.httpclient__.reset(mock_http_client);
+        status.log__ = &mock_logger;
+    }
+    void TearDown() override {
+    }
+};
+
+// An HTTP transport error is reported via the error log, mentioning the
+// queried endpoint.
+TEST_F(ReceiversStatusTests, LogWhenHttpError) {
+    EXPECT_CALL(*mock_http_client, Get_t(expected_endpoint, _, _))
+    .Times(1)
+    .WillOnce(
+        DoAll(SetArgPointee<2>(new asapo::IOError("Test Read Error", asapo::IOErrorType::kReadError)),
+              Return("")
+             ));
+
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("getting receivers"), HasSubstr(expected_endpoint))));
+    status.StartCollectingData();
+
+}
+
+// A non-OK HTTP status is logged (repeatedly, since the service keeps
+// polling in the background during the 30ms sleep).
+TEST_F(ReceiversStatusTests, LogWhenWhenWrongHttpCode) {
+    EXPECT_CALL(*mock_http_client, Get_t(expected_endpoint, _, _))
+    .Times(testing::AnyNumber())
+    .WillRepeatedly(
+        DoAll(SetArgPointee<2>(nullptr),
+              SetArgPointee<1>(asapo::HttpCode::BadRequest),
+              Return("bad request")
+             ));
+
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("getting receivers"), HasSubstr(expected_endpoint),
+                                         HasSubstr("bad request")))).Times(testing::AtLeast(1));
+    status.StartCollectingData();
+    std::this_thread::sleep_for(std::chrono::milliseconds(30));
+
+}
+
+// An HTTP 200 with a body that is not valid JSON is logged as a parse error.
+TEST_F(ReceiversStatusTests, LogWhenWhenCannotReadResponce) {
+    EXPECT_CALL(*mock_http_client, Get_t(expected_endpoint, _, _))
+    .WillOnce(
+        DoAll(SetArgPointee<2>(nullptr),
+              SetArgPointee<1>(asapo::HttpCode::OK),
+              Return("wrong response")
+             ));
+
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("getting receivers"), HasSubstr(expected_endpoint),
+                                         HasSubstr("parse"))));
+    status.StartCollectingData();
+}
+
+
+// A well-formed discovery response populates MaxConnections and the receiver
+// list; RotatedUriList(n) rotates the list by n modulo its length.
+TEST_F(ReceiversStatusTests, GetsReqestedInformation) {
+    std::string json = R"({"Uris":["s1","s2","s3"], "MaxConnections":8})";
+
+    EXPECT_CALL(*mock_http_client, Get_t(expected_endpoint, _, _))
+    .Times(testing::AtLeast(1))
+    .WillRepeatedly(
+        DoAll(SetArgPointee<2>(nullptr),
+              SetArgPointee<1>(asapo::HttpCode::OK),
+              Return(json)
+             ));
+
+    status.StartCollectingData();
+    // give the background polling thread time to fetch at least once
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+    auto nc = status.MaxConnections();
+    ASSERT_THAT(nc, Eq(8));
+    auto list = status.RotatedUriList(0);
+    ASSERT_THAT(list, ElementsAre("s1", "s2", "s3"));
+
+    list = status.RotatedUriList(1);
+    ASSERT_THAT(list, ElementsAre("s2", "s3", "s1"));
+
+    list = status.RotatedUriList(2);
+    ASSERT_THAT(list, ElementsAre("s3", "s1", "s2"));
+
+    list = status.RotatedUriList(3);
+    ASSERT_THAT(list, ElementsAre("s1", "s2", "s3"));
+
+}
+
+// Verifies start/finish log lines, i.e. the polling thread is joined when the
+// fixture's service is destroyed.
+// NOTE(review): this JSON uses keys "uri_list"/"max_connections" while
+// GetsReqestedInformation uses "Uris"/"MaxConnections" - only one schema can
+// match the parser; confirm whether the mismatch here is intentional (the
+// test only checks logging, not parsed values).
+TEST_F(ReceiversStatusTests, JoinThreadAtTheEnd) {
+    std::string json = R"({"uri_list":["s1","s2","s3"], "max_connections":8})";
+    EXPECT_CALL(*mock_http_client, Get_t(expected_endpoint, _, _))
+    .Times(testing::AtLeast(1))
+    .WillRepeatedly(
+        DoAll(SetArgPointee<2>(nullptr),
+              SetArgPointee<1>(asapo::HttpCode::OK),
+              Return(json)
+             ));
+
+    EXPECT_CALL(mock_logger, Debug(HasSubstr("starting receiver discovery")));
+    EXPECT_CALL(mock_logger, Debug(HasSubstr("finishing")));
+    status.StartCollectingData();
+}
+
+// Before any successful poll, MaxConnections is 0.
+TEST_F(ReceiversStatusTests, InitialMaxConnection) {
+    auto nc = status.MaxConnections();
+    ASSERT_THAT(nc, Eq(0));
+}
+
+// Before any successful poll, the receiver list is empty.
+TEST_F(ReceiversStatusTests, InitialUriList) {
+    auto list = status.RotatedUriList(0);
+    ASSERT_THAT(list.size(), Eq(0));
+}
+
+
+}
diff --git a/producer/api/unittests/test_request_handler_factory.cpp b/producer/api/unittests/test_request_handler_factory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cbfc57d80376a357e6faefbfb39aa42bd2885f04
--- /dev/null
+++ b/producer/api/unittests/test_request_handler_factory.cpp
@@ -0,0 +1,40 @@
+#include <gtest/gtest.h>
+#include <unittests/MockIO.h>
+
+#include "../src/request_handler_factory.h"
+#include "../src/receiver_discovery_service.h"
+#include "../src/request_handler_tcp.h"
+#include "mocking.h"
+#include "../src/request_handler_filesystem.h"
+
+using ::testing::Ne;
+using ::testing::Eq;
+
+using asapo:: RequestHandlerFactory;
+
+
+namespace {
+
+// A factory built with a discovery service produces TCP handlers and starts
+// discovery data collection on handler creation.
+TEST(CreateFactory, Tcp) {
+    MockDiscoveryService mock_discovery;
+    EXPECT_CALL(mock_discovery, StartCollectingData());
+
+    RequestHandlerFactory factory{&mock_discovery};
+
+    auto handler = factory.NewRequestHandler(1, nullptr);
+
+    ASSERT_THAT(dynamic_cast<asapo::RequestHandlerTcp*>(handler.get()), Ne(nullptr));
+
+}
+
+// A factory built with a destination folder produces filesystem handlers.
+TEST(CreateFactory, Filesystem) {
+    RequestHandlerFactory factory{""};
+
+    auto handler = factory.NewRequestHandler(1, nullptr);
+
+    ASSERT_THAT(dynamic_cast<asapo::RequestHandlerFilesystem*>(handler.get()), Ne(nullptr));
+
+}
+
+
+}
diff --git a/producer/api/unittests/test_request_handler_filesystem.cpp b/producer/api/unittests/test_request_handler_filesystem.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7cee0ad2410d6dcb5ee78b9783c9aee690433d0e
--- /dev/null
+++ b/producer/api/unittests/test_request_handler_filesystem.cpp
@@ -0,0 +1,138 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "unittests/MockIO.h"
+#include "unittests/MockLogger.h"
+#include "common/error.h"
+#include "io/io.h"
+
+#include "producer/common.h"
+#include "producer/producer_error.h"
+
+#include "../src/request_handler_filesystem.h"
+#include "io/io_factory.h"
+
+namespace {
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::AllOf;
+using testing::NiceMock;
+
+using ::testing::InSequence;
+using ::testing::HasSubstr;
+
+
+// Constructor wires up a real IO implementation and a default logger.
+TEST(RequestHandlerFileSystem, Constructor) {
+    asapo::RequestHandlerFilesystem request_handler{"destination", 1};
+
+    ASSERT_THAT(dynamic_cast<const asapo::IO*>(request_handler.io__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(request_handler.log__), Ne(nullptr));
+}
+
+class RequestHandlerFilesystemTests : public testing::Test {
+  public:
+    NiceMock<asapo::MockIO> mock_io;
+
+    uint64_t expected_file_id = 42;
+    uint64_t expected_file_size = 1337;
+    std::string  expected_file_name = "test_name";
+    uint64_t expected_thread_id = 2;
+    std::string  expected_destination = "destination";
+    // the handler is expected to write to <destination>/<file_name>.bin
+    std::string expected_fullpath = expected_destination + "/" + expected_file_name + ".bin";
+    asapo::Opcode expected_op_code = asapo::kOpcodeTransferData;
+    uint8_t*    expected_data_pointer = (uint8_t*)0xC00FE;
+    asapo::Error callback_err;
+    asapo::GenericRequestHeader header{expected_op_code, expected_file_id, expected_file_size, expected_file_name};
+    bool called = false;
+    asapo::GenericRequestHeader callback_header;
+    // request with a callback that records the header/error it receives
+    asapo::Request request{header, expected_data_pointer, [this](asapo::GenericRequestHeader header, asapo::Error err) {
+        called = true;
+        callback_err = std::move(err);
+        callback_header = header;
+    }};
+
+    asapo::Request request_nocallback{header, expected_data_pointer, nullptr};
+    testing::NiceMock<asapo::MockLogger> mock_logger;
+
+    asapo::RequestHandlerFilesystem request_handler{expected_destination, expected_thread_id};
+
+    void SetUp() override {
+        request_handler.log__ = &mock_logger;
+        // inject the mock; release() in TearDown because mock_io is stack-owned
+        request_handler.io__.reset(&mock_io);
+    }
+    void TearDown() override {
+        request_handler.io__.release();
+    }
+};
+
+// NOTE(review): this action and the matcher below appear unused by the tests
+// visible in this file - confirm they are needed or remove them.
+ACTION_P(A_WriteSendDataResponse, error_code) {
+    ((asapo::SendDataResponse*)arg1)->op_code = asapo::kOpcodeTransferData;
+    ((asapo::SendDataResponse*)arg1)->error_code = error_code;
+}
+
+MATCHER_P2(M_CheckSendDataRequest, file_id, file_size,
+           "Checks if a valid GenericRequestHeader was Send") {
+    return ((asapo::GenericRequestHeader*)arg)->op_code == asapo::kOpcodeTransferData
+           && ((asapo::GenericRequestHeader*)arg)->data_id == file_id
+           && ((asapo::GenericRequestHeader*)arg)->data_size == file_size;
+}
+
+// A write failure is reported to the callback; ProcessRequestUnlocked itself
+// still returns nullptr (errors are delivered via the callback, not thrown up).
+TEST_F(RequestHandlerFilesystemTests, CallBackErrorIfCannotSaveFile) {
+    EXPECT_CALL(mock_io, WriteDataToFile_t(expected_fullpath, expected_data_pointer, expected_file_size))
+    .WillOnce(
+        Return(
+            asapo::IOErrorTemplates::kUnknownIOError.Generate().release())
+    );
+
+
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(callback_err, Eq(asapo::IOErrorTemplates::kUnknownIOError));
+    ASSERT_THAT(called, Eq(true));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+// A null callback must be tolerated (no crash, no callback invocation).
+TEST_F(RequestHandlerFilesystemTests, WorksWithemptyCallback) {
+    EXPECT_CALL(mock_io, WriteDataToFile_t(expected_fullpath, expected_data_pointer, expected_file_size))
+    .WillOnce(
+        Return(
+            asapo::IOErrorTemplates::kUnknownIOError.Generate().release())
+    );
+
+
+    auto err = request_handler.ProcessRequestUnlocked(&request_nocallback);
+
+    ASSERT_THAT(called, Eq(false));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+
+
+// Happy path: the file is written and the callback receives the original
+// header unchanged with a null error.
+TEST_F(RequestHandlerFilesystemTests, TransferOK) {
+    EXPECT_CALL(mock_io, WriteDataToFile_t(expected_fullpath, expected_data_pointer, expected_file_size))
+    .WillOnce(
+        Return(
+            nullptr)
+    );
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(callback_err, Eq(nullptr));
+    ASSERT_THAT(called, Eq(true));
+    ASSERT_THAT(callback_header.data_size, Eq(header.data_size));
+    ASSERT_THAT(callback_header.op_code, Eq(header.op_code));
+    ASSERT_THAT(callback_header.data_id, Eq(header.data_id));
+    ASSERT_THAT(std::string{callback_header.file_name}, Eq(std::string{header.file_name}));
+}
+
+
+}
diff --git a/producer/api/unittests/test_request_handler_tcp.cpp b/producer/api/unittests/test_request_handler_tcp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7e8b3acb693525d21ee6b80724f1606218ec2bca
--- /dev/null
+++ b/producer/api/unittests/test_request_handler_tcp.cpp
@@ -0,0 +1,509 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "unittests/MockIO.h"
+#include "unittests/MockLogger.h"
+#include "common/error.h"
+#include "io/io.h"
+
+#include "producer/common.h"
+#include "producer/producer_error.h"
+
+#include "../src/request_handler_tcp.h"
+#include <common/networking.h>
+#include "io/io_factory.h"
+
+#include "mocking.h"
+
+namespace {
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::AllOf;
+using testing::NiceMock;
+
+using ::testing::InSequence;
+using ::testing::HasSubstr;
+
+
+TEST(RequestHandlerTcp, Constructor) {
+    MockDiscoveryService ds;
+    asapo::RequestHandlerTcp request{&ds, 1, nullptr};
+
+    ASSERT_THAT(dynamic_cast<const asapo::IO*>(request.io__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(request.log__), Ne(nullptr));
+    ASSERT_THAT(request.discovery_service__, Eq(&ds));
+
+}
+
+class RequestHandlerTcpTests : public testing::Test {
+  public:
+    NiceMock<asapo::MockIO> mock_io;
+    NiceMock<MockDiscoveryService> mock_discovery_service;
+
+    uint64_t expected_file_id = 42;
+    uint64_t expected_file_size = 1337;
+    std::string  expected_file_name = "test_name";
+    uint64_t expected_thread_id = 2;
+
+    asapo::Opcode expected_op_code = asapo::kOpcodeTransferData;
+    void*    expected_file_pointer = (void*)0xC00FE;
+    asapo::Error callback_err;
+    asapo::GenericRequestHeader header{expected_op_code, expected_file_id, expected_file_size, expected_file_name};
+    bool called = false;
+    asapo::GenericRequestHeader callback_header;
+    asapo::Request request{header, expected_file_pointer, [this](asapo::GenericRequestHeader header, asapo::Error err) {
+        called = true;
+        callback_err = std::move(err);
+        callback_header = header;
+    }};
+
+    asapo::Request request_nocallback{header, expected_file_pointer, nullptr};
+    testing::NiceMock<asapo::MockLogger> mock_logger;
+    uint64_t n_connections{0};
+    asapo::RequestHandlerTcp request_handler{&mock_discovery_service, expected_thread_id, &n_connections};
+
+    std::string expected_address1 = {"127.0.0.1:9090"};
+    std::string expected_address2 = {"127.0.0.1:9091"};
+    asapo::ReceiversList receivers_list{expected_address1, expected_address2};
+    asapo::ReceiversList receivers_list2{expected_address2, expected_address1};
+
+    asapo::ReceiversList receivers_list_single{expected_address1};
+
+    std::vector<asapo::SocketDescriptor> expected_sds{83942, 83943};
+
+    void ExpectFailConnect(bool only_once = false);
+    void ExpectFailSendHeader(bool only_once = false);
+    void ExpectFailSendData(bool only_once = false);
+    void ExpectOKConnect(bool only_once = false);
+    void ExpectOKSendHeader(bool only_once = false);
+    void ExpectOKSendData(bool only_once = false);
+    void ExpectFailReceive(bool only_once = false);
+    void ExpectOKReceive(bool only_once = true);
+    void DoSingleSend(bool connect = true, bool success = true);
+
+    void SetUp() override {
+        request_handler.log__ = &mock_logger;
+        request_handler.io__.reset(&mock_io);
+        ON_CALL(mock_discovery_service, RotatedUriList(_)).
+        WillByDefault(Return(receivers_list));
+
+    }
+    void TearDown() override {
+        request_handler.io__.release();
+    }
+};
+
+ACTION_P(A_WriteSendDataResponse, error_code) {
+    ((asapo::SendDataResponse*)arg1)->op_code = asapo::kOpcodeTransferData;
+    ((asapo::SendDataResponse*)arg1)->error_code = error_code;
+}
+
+MATCHER_P2(M_CheckSendDataRequest, file_id, file_size,
+           "Checks if a valid GenericRequestHeader was Send") {
+    return ((asapo::GenericRequestHeader*)arg)->op_code == asapo::kOpcodeTransferData
+           && ((asapo::GenericRequestHeader*)arg)->data_id == file_id
+           && ((asapo::GenericRequestHeader*)arg)->data_size == file_size;
+}
+
+
+void RequestHandlerTcpTests::ExpectFailConnect(bool only_once) {
+    for (auto expected_address : receivers_list) {
+        EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(expected_address, _))
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<1>(asapo::IOErrorTemplates::kInvalidAddressFormat.Generate().release()),
+                Return(asapo::kDisconnectedSocketDescriptor)
+            ));
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("cannot connect"),
+                                           HasSubstr(expected_address)
+                                       )
+                                      ));
+
+        if (only_once) break;
+    }
+
+}
+
+void RequestHandlerTcpTests::ExpectFailSendHeader(bool only_once) {
+    int i = 0;
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendDataRequest(expected_file_id,
+                                    expected_file_size),
+                                    sizeof(asapo::GenericRequestHeader), _))
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
+                Return(-1)
+            ));
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("disconnected"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("cannot send"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+        EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
+        if (only_once) break;
+        i++;
+    }
+
+}
+
+void RequestHandlerTcpTests::ExpectFailSendData(bool only_once) {
+    int i = 0;
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Send_t(expected_sd, expected_file_pointer, expected_file_size, _))
+        .Times(1)
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
+                Return(-1)
+            ));
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("disconnected"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("cannot send"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+        EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
+        if (only_once) break;
+        i++;
+    }
+
+}
+
+
+void RequestHandlerTcpTests::ExpectFailReceive(bool only_once) {
+    int i = 0;
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        .Times(1)
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(asapo::IOErrorTemplates::kBadFileNumber.Generate().release()),
+                testing::Return(-1)
+            ));
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("disconnected"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+
+
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("cannot send"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+        EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
+        if (only_once) break;
+        i++;
+    }
+
+}
+
+
+void RequestHandlerTcpTests::ExpectOKSendData(bool only_once) {
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Send_t(expected_sd, expected_file_pointer, expected_file_size, _))
+        .Times(1)
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(nullptr),
+                Return(expected_file_size)
+            ));
+        if (only_once) break;
+    }
+
+}
+
+
+
+void RequestHandlerTcpTests::ExpectOKSendHeader(bool only_once) {
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendDataRequest(expected_file_id,
+                                    expected_file_size),
+                                    sizeof(asapo::GenericRequestHeader), _))
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(nullptr),
+                Return(sizeof(asapo::GenericRequestHeader))
+            ));
+        if (only_once) break;
+    }
+
+}
+
+
+void RequestHandlerTcpTests::ExpectOKConnect(bool only_once) {
+    int i = 0;
+    for (auto expected_address : receivers_list) {
+        EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(expected_address, _))
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<1>(nullptr),
+                Return(expected_sds[i])
+            ));
+        EXPECT_CALL(mock_logger, Info(AllOf(
+                                          HasSubstr("connected"),
+                                          HasSubstr(expected_address)
+                                      )
+                                     ));
+        if (only_once) break;
+        i++;
+    }
+}
+
+
+void RequestHandlerTcpTests::ExpectOKReceive(bool only_once) {
+    int i = 0;
+    for (auto expected_sd : expected_sds) {
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        .WillOnce(
+            DoAll(
+                testing::SetArgPointee<3>(nullptr),
+                A_WriteSendDataResponse(asapo::kNetErrorNoError),
+                testing::ReturnArg<2>()
+            ));
+        EXPECT_CALL(mock_logger, Debug(AllOf(
+                                           HasSubstr("sent data"),
+                                           HasSubstr(receivers_list[i])
+                                       )
+                                      ));
+        if (only_once) break;
+        i++;
+    }
+}
+
+void RequestHandlerTcpTests::DoSingleSend(bool connect, bool success) {
+    if (connect) ExpectOKConnect(true);
+    ExpectOKSendHeader(true);
+    ExpectOKSendData(true);
+    if (success) {
+        ExpectOKReceive(true);
+    } else {
+        ExpectFailReceive(true);
+    }
+
+    if (connect) {
+        EXPECT_CALL(mock_discovery_service, RotatedUriList(_)).
+        WillOnce(Return(receivers_list_single));
+    }
+
+    request_handler.PrepareProcessingRequestLocked();
+    request_handler.ProcessRequestUnlocked(&request);
+
+    Mock::VerifyAndClearExpectations(&mock_io);
+    Mock::VerifyAndClearExpectations(&mock_logger);
+    Mock::VerifyAndClearExpectations(&mock_discovery_service);
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+}
+
+TEST_F(RequestHandlerTcpTests, CannotProcessRequestIfNotEnoughConnections) {
+    EXPECT_CALL(mock_discovery_service, MaxConnections()).WillOnce(Return(0));
+    auto res = request_handler.ReadyProcessRequest();
+    ASSERT_THAT(res, Eq(false));
+}
+
+TEST_F(RequestHandlerTcpTests, CanProcessRequestIfAlreadyConnected) {
+    DoSingleSend();
+    EXPECT_CALL(mock_discovery_service, MaxConnections()).Times(0);
+
+    auto res = request_handler.ReadyProcessRequest();
+
+    ASSERT_THAT(res, Eq(true));
+}
+
+TEST_F(RequestHandlerTcpTests, GetsUriListINotConnected) {
+    EXPECT_CALL(mock_discovery_service, RotatedUriList(_));
+    request_handler.PrepareProcessingRequestLocked();
+}
+
+TEST_F(RequestHandlerTcpTests, DoesNotGetsUriIfAlreadyConnected) {
+    DoSingleSend();
+    EXPECT_CALL(mock_discovery_service, RotatedUriList(_)).Times(0);
+    request_handler.PrepareProcessingRequestLocked();
+}
+
+TEST_F(RequestHandlerTcpTests, ReduceConnectionNumberAtTearDownIfError) {
+    auto err = asapo::TextError("error");
+    n_connections = 1;
+
+    request_handler.TearDownProcessingRequestLocked(err);
+
+    ASSERT_THAT(n_connections, Eq(0));
+
+}
+
+TEST_F(RequestHandlerTcpTests, DoNotReduceConnectionNumberAtTearDownIfNoError) {
+    n_connections = 1;
+
+    request_handler.TearDownProcessingRequestLocked(nullptr);
+
+    ASSERT_THAT(n_connections, Eq(1));
+}
+
+
+TEST_F(RequestHandlerTcpTests, TriesConnectWhenNotConnected) {
+    ExpectFailConnect();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+TEST_F(RequestHandlerTcpTests, DoesNotTryConnectWhenConnected) {
+    DoSingleSend();
+
+    EXPECT_CALL(mock_discovery_service, RotatedUriList(_)).
+    WillOnce(Return(receivers_list_single));
+
+
+    EXPECT_CALL(mock_io, CreateAndConnectIPTCPSocket_t(_, _))
+    .Times(0);
+
+    ExpectFailSendHeader(true);
+
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+
+
+TEST_F(RequestHandlerTcpTests, DoNotCloseWhenNotConnected) {
+    EXPECT_CALL(mock_io, CloseSocket_t(_, _)).Times(0);
+    ExpectOKConnect();
+    ExpectFailSendHeader();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+
+TEST_F(RequestHandlerTcpTests, CloseConnectionWhenRebalance) {
+    DoSingleSend();
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+    EXPECT_CALL(mock_discovery_service, RotatedUriList(_)).
+    WillOnce(Return(asapo::ReceiversList{}));
+
+    EXPECT_CALL(mock_io, CloseSocket_t(_, _));
+
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+
+
+TEST_F(RequestHandlerTcpTests, ErrorWhenCannotSendHeader) {
+    ExpectOKConnect();
+    ExpectFailSendHeader();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+
+TEST_F(RequestHandlerTcpTests, ErrorWhenCannotSendData) {
+    ExpectOKConnect();
+    ExpectOKSendHeader();
+    ExpectFailSendData();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+TEST_F(RequestHandlerTcpTests, ErrorWhenCannotReceiveData) {
+    ExpectOKConnect();
+    ExpectOKSendHeader();
+    ExpectOKSendData();
+    ExpectFailReceive();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kCannotSendDataToReceivers));
+}
+
+TEST_F(RequestHandlerTcpTests, ImmediatelyCallBackErrorIfFileAlreadyInUse) {
+    ExpectOKConnect(true);
+    ExpectOKSendHeader(true);
+    ExpectOKSendData(true);
+
+    EXPECT_CALL(mock_io, Receive_t(expected_sds[0], _, sizeof(asapo::SendDataResponse), _))
+    .WillOnce(
+        DoAll(
+            testing::SetArgPointee<3>(nullptr),
+            A_WriteSendDataResponse(asapo::kNetErrorFileIdAlreadyInUse),
+            testing::ReturnArg<2>()
+        ));
+
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(callback_err, Eq(asapo::ProducerErrorTemplates::kFileIdAlreadyInUse));
+    ASSERT_THAT(called, Eq(true));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+
+TEST_F(RequestHandlerTcpTests, SendEmptyCallBack) {
+    ExpectOKConnect(true);
+    ExpectOKSendHeader(true);
+    ExpectOKSendData(true);
+    ExpectOKReceive();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request_nocallback);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(called, Eq(false));
+}
+
+TEST_F(RequestHandlerTcpTests, SendOK) {
+    ExpectOKConnect(true);
+    ExpectOKSendHeader(true);
+    ExpectOKSendData(true);
+    ExpectOKReceive();
+
+    request_handler.PrepareProcessingRequestLocked();
+    auto err = request_handler.ProcessRequestUnlocked(&request);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(callback_err, Eq(nullptr));
+    ASSERT_THAT(called, Eq(true));
+    ASSERT_THAT(callback_header.data_size, Eq(header.data_size));
+    ASSERT_THAT(callback_header.op_code, Eq(header.op_code));
+    ASSERT_THAT(callback_header.data_id, Eq(header.data_id));
+    ASSERT_THAT(std::string{callback_header.file_name}, Eq(std::string{header.file_name}));
+}
+
+
+}
diff --git a/producer/api/unittests/test_request_pool.cpp b/producer/api/unittests/test_request_pool.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..64a0b70e6d0b1066e01e586ba58ccefb5fc97535
--- /dev/null
+++ b/producer/api/unittests/test_request_pool.cpp
@@ -0,0 +1,143 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <chrono>
+
+#include "unittests/MockLogger.h"
+#include "common/error.h"
+
+#include "../src/request_handler_tcp.h"
+#include "../src/request_pool.h"
+#include "../src/receiver_discovery_service.h"
+#include "../src/request_handler_factory.h"
+#include "mocking.h"
+
+#include "io/io_factory.h"
+
+namespace {
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::AllOf;
+using testing::DoAll;
+using testing::NiceMock;
+using ::testing::InSequence;
+using ::testing::HasSubstr;
+using testing::AtLeast;
+using testing::Ref;
+
+using asapo::RequestHandler;
+using asapo::RequestPool;
+using asapo::Error;
+using asapo::ErrorInterface;
+using asapo::Request;
+using asapo::GenericRequestHeader;
+
+
+
+class MockRequestHandlerFactory : public asapo::RequestHandlerFactory {
+  public:
+    MockRequestHandlerFactory(RequestHandler* request_handler):
+        RequestHandlerFactory(nullptr) {
+        request_handler_ = request_handler;
+    }
+    std::unique_ptr<RequestHandler> NewRequestHandler(uint64_t thread_id, uint64_t* shared_counter) override {
+        return std::unique_ptr<RequestHandler> {request_handler_};
+    }
+  private:
+    RequestHandler* request_handler_;
+};
+
+
+
+class RequestPoolTests : public testing::Test {
+  public:
+    NiceMock<MockRequestHandler>* mock_request_handler = new testing::NiceMock<MockRequestHandler>;
+    NiceMock<asapo::MockLogger> mock_logger;
+    MockRequestHandlerFactory request_handler_factory{mock_request_handler};
+    const uint8_t nthreads = 1;
+    asapo::RequestPool pool {nthreads, &request_handler_factory};
+    std::unique_ptr<Request> request{new Request{GenericRequestHeader{}, nullptr, nullptr}};
+    void SetUp() override {
+        pool.log__ = &mock_logger;
+    }
+    void TearDown() override {
+    }
+};
+
+
+TEST(RequestPool, Constructor) {
+    NiceMock<MockDiscoveryService> ds;
+    NiceMock<asapo::RequestHandlerFactory> request_handler_factory{&ds};
+
+    asapo::RequestPool pool{4, &request_handler_factory};
+
+    ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(pool.log__), Ne(nullptr));
+}
+
+TEST_F(RequestPoolTests, AddRequestDoesNotGoFurtherWhenNotReady) {
+
+    EXPECT_CALL(*mock_request_handler, ReadyProcessRequest()).Times(AtLeast(1)).WillRepeatedly(Return(false));
+    EXPECT_CALL(*mock_request_handler, PrepareProcessingRequestLocked()).Times(0);
+
+    auto err = pool.AddRequest(std::move(request));
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST_F(RequestPoolTests, NRequestsInQueue) {
+    auto nreq = pool.NRequestsInQueue();
+    ASSERT_THAT(nreq, Eq(0));
+}
+
+void ExpectSend(MockRequestHandler* mock_handler, int ntimes = 1) {
+    EXPECT_CALL(*mock_handler, ReadyProcessRequest()).Times(ntimes).WillRepeatedly(Return(true));
+    EXPECT_CALL(*mock_handler, PrepareProcessingRequestLocked()).Times(ntimes);
+    EXPECT_CALL(*mock_handler, ProcessRequestUnlocked_t(_)).Times(ntimes).WillRepeatedly(Return(nullptr));
+    EXPECT_CALL(*mock_handler, TearDownProcessingRequestLocked_t(nullptr)).Times(ntimes);
+}
+
+
+
+TEST_F(RequestPoolTests, AddRequestCallsSend) {
+
+    ExpectSend(mock_request_handler);
+
+    auto err = pool.AddRequest(std::move(request));
+    std::this_thread::sleep_for(std::chrono::milliseconds(20));
+
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+
+TEST_F(RequestPoolTests, AddRequestCallsSendTwoRequests) {
+
+    Request* request2 = new Request{GenericRequestHeader{}, nullptr, nullptr};
+
+    ExpectSend(mock_request_handler, 2);
+
+
+
+    auto err1 = pool.AddRequest(std::move(request));
+    request.reset(request2);
+    auto err2 = pool.AddRequest(std::move(request));
+
+    std::this_thread::sleep_for(std::chrono::milliseconds(30));
+    ASSERT_THAT(err1, Eq(nullptr));
+    ASSERT_THAT(err2, Eq(nullptr));
+}
+
+
+
+TEST_F(RequestPoolTests, FinishProcessingThreads) {
+    EXPECT_CALL(mock_logger, Debug(HasSubstr("finishing thread"))).Times(nthreads);
+}
+
+
+}
diff --git a/receiver/CMakeLists.txt b/receiver/CMakeLists.txt
index 33b5e7bf4381ac4203da297f05f442db845df661..b21db56d503bb1e3e3c1f2acaeee21256d88eab4 100644
--- a/receiver/CMakeLists.txt
+++ b/receiver/CMakeLists.txt
@@ -8,7 +8,7 @@ set(SOURCE_FILES
         src/statistics.cpp
         src/statistics_sender_influx_db.cpp
         src/receiver_config.cpp
-        src/receiver_logger.cpp
+        src/producer_logger.cpp
         src/request_handler_db_write.cpp)
 
 
@@ -28,6 +28,10 @@ add_executable(${TARGET_NAME}-bin src/main.cpp)
 set_target_properties(${TARGET_NAME}-bin PROPERTIES OUTPUT_NAME ${TARGET_NAME})
 target_link_libraries(${TARGET_NAME}-bin ${TARGET_NAME})
 
+set_target_properties(${TARGET_NAME}-bin PROPERTIES RUNTIME_OUTPUT_DIRECTORY
+        ${CMAKE_CURRENT_BINARY_DIR}$<$<CONFIG:Debug>:>
+        )
+
 ################################
 # Testing
 ################################
diff --git a/receiver/src/connection.cpp b/receiver/src/connection.cpp
index b86a1f0ef061eff086234090366cf776f7e050ab..4c40b1742051f7be157c0997845441d8fa4156ea 100644
--- a/receiver/src/connection.cpp
+++ b/receiver/src/connection.cpp
@@ -11,11 +11,15 @@ namespace asapo {
 size_t Connection::kRequestHandlerMaxBufferSize;
 std::atomic<uint32_t> Connection::kNetworkProducerPeerImplGlobalCounter(0);
 
-Connection::Connection(SocketDescriptor socket_fd, const std::string& address): request_factory__{new RequestFactory},
+Connection::Connection(SocketDescriptor socket_fd, const std::string& address,
+                       std::string receiver_tag): request_factory__{new RequestFactory},
 io__{GenerateDefaultIO()}, statistics__{new Statistics}, log__{GetDefaultReceiverLogger()} {
     socket_fd_ = socket_fd;
     connection_id_ = kNetworkProducerPeerImplGlobalCounter++;
     address_ = address;
+    statistics__->AddTag("connection_from", address);
+    statistics__->AddTag("receiver_tag", std::move(receiver_tag));
+
 }
 
 uint64_t Connection::GetId() const noexcept {
@@ -51,7 +55,7 @@ Error Connection::ProcessRequest(const std::unique_ptr<Request>& request) const
 
 void Connection::ProcessStatisticsAfterRequest(const std::unique_ptr<Request>& request) const noexcept {
     statistics__->IncreaseRequestCounter();
-    statistics__->IncreaseRequestDataVolume(request->GetDataSize() + sizeof(GenericNetworkRequestHeader) +
+    statistics__->IncreaseRequestDataVolume(request->GetDataSize() + sizeof(GenericRequestHeader) +
                                             sizeof(GenericNetworkResponse));
     statistics__->SendIfNeeded();
 }
@@ -82,9 +86,9 @@ void Connection::Listen() const noexcept {
 
 std::unique_ptr<Request> Connection::WaitForNewRequest(Error* err) const noexcept {
     //TODO: to be overwritten with MessagePack (or similar)
-    GenericNetworkRequestHeader generic_request_header;
+    GenericRequestHeader generic_request_header;
     statistics__->StartTimer(StatisticEntity::kNetwork);
-    io__->ReceiveWithTimeout(socket_fd_, &generic_request_header, sizeof(GenericNetworkRequestHeader), 50, err);
+    io__->ReceiveWithTimeout(socket_fd_, &generic_request_header, sizeof(GenericRequestHeader), 50, err);
     if(*err) {
         if(*err == IOErrorTemplates::kTimeout) {
             *err = nullptr;//Not an error in this case
diff --git a/receiver/src/connection.h b/receiver/src/connection.h
index 11fdf1eff9fa62bb754f97df9851bd41e14a2d1e..3cb73a789bec8055e997028fe37d18e384bd3efe 100644
--- a/receiver/src/connection.h
+++ b/receiver/src/connection.h
@@ -29,7 +29,7 @@ class Connection {
     static size_t kRequestHandlerMaxBufferSize;
     static std::atomic<uint32_t> kNetworkProducerPeerImplGlobalCounter;
 
-    Connection(SocketDescriptor socket_fd, const std::string& address);
+    Connection(SocketDescriptor socket_fd, const std::string& address, std::string receiver_tag);
     ~Connection() = default;
 
     void Listen() const noexcept;
diff --git a/receiver/src/receiver_logger.cpp b/receiver/src/producer_logger.cpp
similarity index 100%
rename from receiver/src/receiver_logger.cpp
rename to receiver/src/producer_logger.cpp
diff --git a/receiver/src/receiver.cpp b/receiver/src/receiver.cpp
index 8d2c1b1d0ddd1f8d67cbadff66603e5a08ddae79..9a94afe45df43c47522ce68d850883976a6ad4ec 100644
--- a/receiver/src/receiver.cpp
+++ b/receiver/src/receiver.cpp
@@ -5,6 +5,8 @@
 #include "connection.h"
 #include <io/io_factory.h>
 
+#include "receiver_config.h"
+
 namespace asapo {
 
 
@@ -37,11 +39,9 @@ void Receiver::Listen(std::string listener_address, Error* err, bool exit_after_
     }
 }
 
-//TODO: remove error since it is not used
 void Receiver::ProcessConnections(Error* err) {
     std::string address;
     FileDescriptor connection_socket_fd;
-
     //TODO: Use InetAcceptConnectionWithTimeout
     auto client_info_tuple = io__->InetAcceptConnection(listener_fd_, err);
     if(*err) {
@@ -56,7 +56,7 @@ void Receiver::ProcessConnections(Error* err) {
 void Receiver::StartNewConnectionInSeparateThread(int connection_socket_fd, const std::string& address)  {
     log__->Info("new connection from " + address);
     auto thread = io__->NewThread([connection_socket_fd, address] {
-        auto connection = std::unique_ptr<Connection>(new Connection(connection_socket_fd, address));
+        auto connection = std::unique_ptr<Connection>(new Connection(connection_socket_fd, address, GetReceiverConfig()->tag));
         connection->Listen();
     });
 
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index 4dbae3f095e1a335239c6dfcd9e4632c3c99c935..98c5700c3b22b2abaaf68f5aab4687213ec3c6d7 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -21,6 +21,8 @@ Error ReceiverConfigFactory::SetConfigFromFile(std::string file_name) {
     (err = parser.GetBool("WriteToDb", &config.write_to_db)) ||
     (err = parser.GetString("BrokerDbAddress", &config.broker_db_uri)) ||
     (err = parser.GetString("BrokerDbName", &config.broker_db_name)) ||
+    (err = parser.GetString("Tag", &config.tag)) ||
+    (err = parser.GetString("RootFolder", &config.root_folder)) ||
     (err = parser.GetString("MonitorDbName", &config.monitor_db_name));
     (err = parser.GetString("LogLevel", &log_level));
     if (err) {
diff --git a/receiver/src/receiver_config.h b/receiver/src/receiver_config.h
index a4f8b44403d7179d671d17f3d55defa4bd5a8674..bb5ed448e5fa0d7d3aa4a4d3e701267c5c2879af 100644
--- a/receiver/src/receiver_config.h
+++ b/receiver/src/receiver_config.h
@@ -12,10 +12,12 @@ struct ReceiverConfig {
     std::string monitor_db_name;
     std::string broker_db_uri;
     std::string broker_db_name;
+    std::string root_folder;
     uint64_t listen_port = 0;
     bool write_to_disk = false;
     bool write_to_db = false;
     LogLevel log_level = LogLevel::Info;
+    std::string tag;
 };
 
 const ReceiverConfig* GetReceiverConfig();
diff --git a/receiver/src/receiver_error.h b/receiver/src/receiver_error.h
index a83f2bf090a7ae22b14f0f9dad54e92f9ed49bca..6af4287ebd29b981de49d42df436fa1ea7c95f6f 100644
--- a/receiver/src/receiver_error.h
+++ b/receiver/src/receiver_error.h
@@ -48,6 +48,11 @@ class ReceiverErrorTemplate : public SimpleErrorTemplate {
     }
 };
 
+static inline std::ostream& operator<<(std::ostream& os, const ReceiverErrorTemplate& err) {
+    return os << err.Text();
+}
+
+
 namespace ReceiverErrorTemplates {
 auto const kInvalidOpCode = ReceiverErrorTemplate {
     "Invalid Opcode", ReceiverErrorType::kInvalidOpCode
diff --git a/receiver/src/request.cpp b/receiver/src/request.cpp
index 3db277350cea069989a54cf768931c348a503d72..1a836e9f67e2f2907a0b194f30eb693d81900e78 100644
--- a/receiver/src/request.cpp
+++ b/receiver/src/request.cpp
@@ -4,7 +4,7 @@
 #include "receiver_config.h"
 namespace asapo {
 
-Request::Request(const GenericNetworkRequestHeader& header,
+Request::Request(const GenericRequestHeader& header,
                  SocketDescriptor socket_fd) : io__{GenerateDefaultIO()}, request_header_(header), socket_fd_{socket_fd} {
 }
 
@@ -77,12 +77,12 @@ std::string Request::GetFileName() const {
     return std::to_string(request_header_.data_id) + ".bin";
 }
 
-std::unique_ptr<Request> RequestFactory::GenerateRequest(const GenericNetworkRequestHeader&
+std::unique_ptr<Request> RequestFactory::GenerateRequest(const GenericRequestHeader&
         request_header, SocketDescriptor socket_fd,
         Error* err) const noexcept {
     *err = nullptr;
     switch (request_header.op_code) {
-    case Opcode::kNetOpcodeSendData: {
+    case Opcode::kOpcodeTransferData: {
         auto request = std::unique_ptr<Request> {new Request{request_header, socket_fd}};
 
         if (GetReceiverConfig()->write_to_disk) {
diff --git a/receiver/src/request.h b/receiver/src/request.h
index a8fca67689ff37590fa52d0a7b6b831e4dffbf80..3b2f05698d8d7482e5f4c5127165ebd267a6edd9 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -17,7 +17,7 @@ class Request {
   public:
     virtual Error Handle(std::unique_ptr<Statistics>*);
     virtual ~Request() = default;
-    Request(const GenericNetworkRequestHeader& request_header, SocketDescriptor socket_fd);
+    Request(const GenericRequestHeader& request_header, SocketDescriptor socket_fd);
     void AddHandler(const RequestHandler*);
     const RequestHandlerList& GetListHandlers() const;
     virtual uint64_t GetDataSize() const;
@@ -29,7 +29,7 @@ class Request {
   private:
     Error AllocateDataBuffer();
     Error ReceiveData();
-    const GenericNetworkRequestHeader request_header_;
+    const GenericRequestHeader request_header_;
     const SocketDescriptor socket_fd_;
     FileData data_buffer_;
     RequestHandlerList handlers_;
@@ -37,7 +37,7 @@ class Request {
 
 class RequestFactory {
   public:
-    virtual std::unique_ptr<Request> GenerateRequest(const GenericNetworkRequestHeader& request_header,
+    virtual std::unique_ptr<Request> GenerateRequest(const GenericRequestHeader& request_header,
                                                      SocketDescriptor socket_fd, Error* err) const noexcept;
   private:
     RequestHandlerFileWrite request_handler_filewrite_;
diff --git a/receiver/src/request_handler_file_write.cpp b/receiver/src/request_handler_file_write.cpp
index f27a62d0336623d2ca476f038d5c11cddab3eb64..62f0c6395885b17a812892e80a126abbe1ed7400 100644
--- a/receiver/src/request_handler_file_write.cpp
+++ b/receiver/src/request_handler_file_write.cpp
@@ -2,6 +2,8 @@
 #include "io/io_factory.h"
 #include "request.h"
 #include "receiver_logger.h"
+#include "receiver_config.h"
+#include "preprocessor/definitions.h"
 
 namespace asapo {
 
@@ -14,10 +16,10 @@ Error RequestHandlerFileWrite::ProcessRequest(const Request& request) const {
     const FileData& data = request.GetData();
 
     auto fname = request.GetFileName();
-//TODO: folder to write in config file
-    auto err =  io__->WriteDataToFile("files/" + fname, data, fsize);
+    auto root_folder = GetReceiverConfig()->root_folder + kPathSeparator;
+    auto err =  io__->WriteDataToFile(root_folder + fname, data, fsize);
     if (!err) {
-        log__->Debug("saved file of size " + std::to_string(fsize) + " to files/" + fname);
+        log__->Debug("saved file of size " + std::to_string(fsize) + " to " + root_folder + fname);
     }
     return err;
 
diff --git a/receiver/src/statistics.cpp b/receiver/src/statistics.cpp
index 3f072331578c3ed093eaf37d8c920c3a0f6e3df8..f2c431e81a4425c99e54f3d766c1d95a04db9af0 100644
--- a/receiver/src/statistics.cpp
+++ b/receiver/src/statistics.cpp
@@ -23,6 +23,7 @@ StatisticsToSend Statistics::PrepareStatisticsToSend() const noexcept {
     stat.n_requests = nrequests_;
     stat.data_volume = volume_counter_;
     stat.elapsed_ms = std::max(uint64_t{1}, GetTotalElapsedMs());
+    stat.tags = tag_;
     for (auto i = 0; i < kNStatisticEntities; i++) {
         stat.entity_shares[i] =  double(GetElapsedMs(StatisticEntity(i))) / stat.elapsed_ms;
     }
@@ -76,5 +77,11 @@ void Statistics::StopTimer() noexcept {
     time_counters_[current_statistic_entity_] += elapsed;
 }
 
+void Statistics::AddTag(const std::string& name, const std::string& value) noexcept {
+    if (!tag_.empty()) {
+        tag_ += ",";
+    }
+    tag_ += name + "=" + value;
+}
 
 }
\ No newline at end of file
diff --git a/receiver/src/statistics.h b/receiver/src/statistics.h
index 1f78754460e7f4fc78be920ca479166706332bba..bbe409dff521dbbc167c5185cb222af03df64ebb 100644
--- a/receiver/src/statistics.h
+++ b/receiver/src/statistics.h
@@ -3,8 +3,11 @@
 
 #include <chrono>
 #include <memory>
+#include <string>
+
 
 #include "statistics_sender.h"
+#include "preprocessor/definitions.h"
 
 namespace asapo {
 
@@ -20,18 +23,20 @@ struct StatisticsToSend {
     uint64_t elapsed_ms;
     uint64_t data_volume;
     uint64_t n_requests;
+    std::string tags;
 };
 
 class Statistics {
   public:
-// virtual needed for unittests, could be replaced with #define VIRTUAL ... in case of performance issues
-    virtual void SendIfNeeded() noexcept;
-    virtual void Send() noexcept;
+    VIRTUAL void SendIfNeeded() noexcept;
+    VIRTUAL void Send() noexcept;
     explicit Statistics(unsigned int write_interval = kDefaultStatisticWriteIntervalMs);
-    virtual void IncreaseRequestCounter() noexcept;
-    virtual void StartTimer(const StatisticEntity& entity) noexcept;
-    virtual void IncreaseRequestDataVolume(uint64_t transferred_data_volume) noexcept;
-    virtual void StopTimer() noexcept;
+    VIRTUAL void IncreaseRequestCounter() noexcept;
+    VIRTUAL void StartTimer(const StatisticEntity& entity) noexcept;
+    VIRTUAL void IncreaseRequestDataVolume(uint64_t transferred_data_volume) noexcept;
+    VIRTUAL void StopTimer() noexcept;
+    VIRTUAL void AddTag(const std::string& name, const std::string& value) noexcept;
+
 
     void SetWriteInterval(uint64_t interval_ms);
     std::unique_ptr<StatisticsSender> statistics_sender__;
@@ -48,6 +53,7 @@ class Statistics {
     std::chrono::nanoseconds time_counters_[kNStatisticEntities];
     uint64_t volume_counter_;
     unsigned int write_interval_;
+    std::string tag_;
 
 };
 
diff --git a/receiver/src/statistics_sender_influx_db.cpp b/receiver/src/statistics_sender_influx_db.cpp
index 476729904cefd41cf73702edb6e98e9dfb780ee4..642f2aea9ae38126d85fe77b8b951330ab9346dc 100644
--- a/receiver/src/statistics_sender_influx_db.cpp
+++ b/receiver/src/statistics_sender_influx_db.cpp
@@ -21,7 +21,7 @@ void StatisticsSenderInfluxDb::SendStatistics(const StatisticsToSend& statistic)
     //todo: send statistics async
     HttpCode code;
     Error err;
-    auto responce = httpclient__->Post(GetReceiverConfig()->monitor_db_uri + "/write?db=" +
+    auto response = httpclient__->Post(GetReceiverConfig()->monitor_db_uri + "/write?db=" +
                                        GetReceiverConfig()->monitor_db_name, StatisticsToString(statistic),
                                        &code, &err);
     std::string msg = "sending statistics to " + GetReceiverConfig()->monitor_db_name + " at " +
@@ -32,7 +32,7 @@ void StatisticsSenderInfluxDb::SendStatistics(const StatisticsToSend& statistic)
     }
 
     if (code != HttpCode::OK && code != HttpCode::NoContent) {
-        log__->Error(msg + " - " + responce);
+        log__->Error(msg + " - " + response);
         return;
     }
 
@@ -41,8 +41,7 @@ void StatisticsSenderInfluxDb::SendStatistics(const StatisticsToSend& statistic)
 
 std::string StatisticsSenderInfluxDb::StatisticsToString(const StatisticsToSend& statistic) const noexcept {
     std::string str;
-    std::string tags = "receiver=1,connection=1";
-    str = "statistics," + tags + " elapsed_ms=" + string_format("%ld", statistic.elapsed_ms);
+    str = "statistics," + statistic.tags + " elapsed_ms=" + string_format("%ld", statistic.elapsed_ms);
     str += ",data_volume=" + string_format("%ld", statistic.data_volume);
     str += ",n_requests=" + string_format("%ld", statistic.n_requests);
     str += ",db_share=" + string_format("%.4f", statistic.entity_shares[StatisticEntity::kDatabase]);
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 45b0185962979d1c6edf4a4d83c7dd6ad60827d7..6b945bdd5d833f46d9e5c245b4b5d264e70aca6b 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -42,6 +42,9 @@ Error SetReceiverConfig (const ReceiverConfig& config) {
     config_string += "," + std::string("\"WriteToDisk\":") + (config.write_to_disk ? "true" : "false");
     config_string += "," + std::string("\"WriteToDb\":") + (config.write_to_db ? "true" : "false");
     config_string += "," + std::string("\"LogLevel\":") + "\"" + log_level + "\"";
+    config_string += "," + std::string("\"Tag\":") + "\"" + config.tag + "\"";
+    config_string += "," + std::string("\"RootFolder\":") + "\"" + config.root_folder + "\"";
+
 
     config_string += "}";
 
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index f44fe078932b3df317b5851a8b5ca18d5524e6a8..bc3487bad984a869e406d49c24833379f231b890 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -50,6 +50,7 @@ TEST_F(ConfigTests, ReadSettings) {
 
     asapo::ReceiverConfig test_config;
     test_config.listen_port = 4200;
+    test_config.tag = "receiver1";
     test_config.monitor_db_name = "db_test";
     test_config.monitor_db_uri = "localhost:8086";
     test_config.write_to_disk = true;
@@ -57,6 +58,7 @@ TEST_F(ConfigTests, ReadSettings) {
     test_config.broker_db_uri = "localhost:27017";
     test_config.broker_db_name = "test";
     test_config.log_level = asapo::LogLevel::Error;
+    test_config.root_folder = "test_folder";
 
     auto err = asapo::SetReceiverConfig(test_config);
 
@@ -71,6 +73,8 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->write_to_disk, Eq(true));
     ASSERT_THAT(config->write_to_db, Eq(true));
     ASSERT_THAT(config->log_level, Eq(asapo::LogLevel::Error));
+    ASSERT_THAT(config->tag, Eq("receiver1"));
+    ASSERT_THAT(config->root_folder, Eq("test_folder"));
 
 }
 
diff --git a/receiver/unittests/test_connection.cpp b/receiver/unittests/test_connection.cpp
index 630ca07b3407af4c23ed07652076b989e0e9ca14..af7d83535f771910921fdb508841a6ff6af86894 100644
--- a/receiver/unittests/test_connection.cpp
+++ b/receiver/unittests/test_connection.cpp
@@ -30,9 +30,9 @@ using asapo::Error;
 using asapo::ErrorInterface;
 using asapo::FileDescriptor;
 using asapo::SocketDescriptor;
-using asapo::GenericNetworkRequestHeader;
+using asapo::GenericRequestHeader;
 using asapo::SendDataResponse;
-using asapo::GenericNetworkRequestHeader;
+using asapo::GenericRequestHeader;
 using asapo::GenericNetworkResponse;
 using asapo::Opcode;
 using asapo::Connection;
@@ -46,17 +46,17 @@ using asapo::MockStatistics;
 namespace {
 
 TEST(Connection, Constructor) {
-    Connection connection{0, "some_address"};
+    Connection connection{0, "some_address", "some_tag"};
     ASSERT_THAT(dynamic_cast<asapo::Statistics*>(connection.statistics__.get()), Ne(nullptr));
+
     ASSERT_THAT(dynamic_cast<asapo::IO*>(connection.io__.get()), Ne(nullptr));
     ASSERT_THAT(dynamic_cast<asapo::RequestFactory*>(connection.request_factory__.get()), Ne(nullptr));
     ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(connection.log__), Ne(nullptr));
-
 }
 
-class MockRequest: public Request {
+class MockRequestHandler: public Request {
   public:
-    MockRequest(const GenericNetworkRequestHeader& request_header, SocketDescriptor socket_fd):
+    MockRequestHandler(const GenericRequestHeader& request_header, SocketDescriptor socket_fd):
         Request(request_header, socket_fd) {};
     Error Handle(std::unique_ptr<Statistics>* statistics) override {
         return Error{Handle_t()};
@@ -67,7 +67,7 @@ class MockRequest: public Request {
 
 class MockRequestFactory: public asapo::RequestFactory {
   public:
-    std::unique_ptr<Request> GenerateRequest(const GenericNetworkRequestHeader& request_header,
+    std::unique_ptr<Request> GenerateRequest(const GenericRequestHeader& request_header,
                                              SocketDescriptor socket_fd,
                                              Error* err) const noexcept override {
         ErrorInterface* error = nullptr;
@@ -76,7 +76,7 @@ class MockRequestFactory: public asapo::RequestFactory {
         return std::unique_ptr<Request> {res};
     }
 
-    MOCK_CONST_METHOD3(GenerateRequest_t, Request * (const GenericNetworkRequestHeader&,
+    MOCK_CONST_METHOD3(GenerateRequest_t, Request * (const GenericRequestHeader&,
                                                      SocketDescriptor socket_fd,
                                                      ErrorInterface**));
 
@@ -85,7 +85,7 @@ class MockRequestFactory: public asapo::RequestFactory {
 class ConnectionTests : public Test {
   public:
     std::string connected_uri{"some_address"};
-    Connection connection{0, connected_uri};
+    Connection connection{0, connected_uri, "some_tag"};
     MockIO mock_io;
     MockRequestFactory mock_factory;
     NiceMock<MockStatistics> mock_statictics;
@@ -138,8 +138,8 @@ ACTION_P(SaveArg1ToGenericNetworkResponse, value) {
 
 TEST_F(ConnectionTests, CallsHandleRequest) {
 
-    GenericNetworkRequestHeader header;
-    auto request = new MockRequest{header, 1};
+    GenericRequestHeader header;
+    auto request = new MockRequestHandler{header, 1};
 
     EXPECT_CALL(mock_io, ReceiveWithTimeout_t(_, _, _, _, _));
 
@@ -171,8 +171,8 @@ TEST_F(ConnectionTests, CallsHandleRequest) {
 
 TEST_F(ConnectionTests, SendsErrorToProducer) {
 
-    GenericNetworkRequestHeader header;
-    auto request = new MockRequest{header, 1};
+    GenericRequestHeader header;
+    auto request = new MockRequestHandler{header, 1};
 
     EXPECT_CALL(mock_io, ReceiveWithTimeout_t(_, _, _, _, _));
 
@@ -207,10 +207,10 @@ void MockExitCycle(const MockIO& mock_io, MockStatistics& mock_statictics) {
     );
 }
 
-MockRequest* MockWaitRequest(const MockRequestFactory& mock_factory) {
-    GenericNetworkRequestHeader header;
+MockRequestHandler* MockWaitRequest(const MockRequestFactory& mock_factory) {
+    GenericRequestHeader header;
     header.data_size = 1;
-    auto request = new MockRequest{header, 1};
+    auto request = new MockRequestHandler{header, 1};
     EXPECT_CALL(mock_factory, GenerateRequest_t(_, _, _)).WillOnce(
         Return(request)
     );
@@ -240,7 +240,7 @@ TEST_F(ConnectionTests, FillsStatistics) {
 
     EXPECT_CALL(mock_statictics, IncreaseRequestCounter_t());
 
-    EXPECT_CALL(mock_statictics, IncreaseRequestDataVolume_t(1 + sizeof(asapo::GenericNetworkRequestHeader) +
+    EXPECT_CALL(mock_statictics, IncreaseRequestDataVolume_t(1 + sizeof(asapo::GenericRequestHeader) +
                 sizeof(asapo::GenericNetworkResponse)));
 
 
diff --git a/receiver/unittests/test_request.cpp b/receiver/unittests/test_request.cpp
index a22fec1ed520b2b4d15c616616b47e78deb7589e..f238e9891b126f63b805d23c7268cb2971deb12f 100644
--- a/receiver/unittests/test_request.cpp
+++ b/receiver/unittests/test_request.cpp
@@ -28,9 +28,9 @@ using ::asapo::Error;
 using ::asapo::ErrorInterface;
 using ::asapo::FileDescriptor;
 using ::asapo::SocketDescriptor;
-using ::asapo::GenericNetworkRequestHeader;
+using ::asapo::GenericRequestHeader;
 using ::asapo::SendDataResponse;
-using ::asapo::GenericNetworkRequestHeader;
+using ::asapo::GenericRequestHeader;
 using ::asapo::GenericNetworkResponse;
 using ::asapo::Opcode;
 using ::asapo::Connection;
@@ -63,7 +63,7 @@ class MockReqestHandler : public asapo::RequestHandler {
 
 class RequestTests : public Test {
   public:
-    GenericNetworkRequestHeader generic_request_header;
+    GenericRequestHeader generic_request_header;
     asapo::SocketDescriptor socket_fd_{1};
     uint64_t data_size_ {100};
     uint64_t data_id_{15};
diff --git a/receiver/unittests/test_request_factory.cpp b/receiver/unittests/test_request_factory.cpp
index 5b2bc53bd919ec110287d36d13998784adea5f12..f3a34e8401303efae1dfb1592e0682ac00c7a2cb 100644
--- a/receiver/unittests/test_request_factory.cpp
+++ b/receiver/unittests/test_request_factory.cpp
@@ -29,7 +29,7 @@ using ::testing::InSequence;
 using ::testing::SetArgPointee;
 using ::asapo::Error;
 using ::asapo::ErrorInterface;
-using ::asapo::GenericNetworkRequestHeader;
+using ::asapo::GenericRequestHeader;
 using ::asapo::GenericNetworkResponse;
 using ::asapo::Opcode;
 using ::asapo::Connection;
@@ -50,11 +50,11 @@ class FactoryTests : public Test {
   public:
     RequestFactory factory;
     Error err{nullptr};
-    GenericNetworkRequestHeader generic_request_header;
+    GenericRequestHeader generic_request_header;
     ReceiverConfig config;
 
     void SetUp() override {
-        generic_request_header.op_code = asapo::Opcode::kNetOpcodeSendData;
+        generic_request_header.op_code = asapo::Opcode::kOpcodeTransferData;
         config.write_to_disk = true;
         config.write_to_db = true;
         SetReceiverConfig(config);
@@ -64,14 +64,14 @@ class FactoryTests : public Test {
 };
 
 TEST_F(FactoryTests, ErrorOnWrongCode) {
-    generic_request_header.op_code = asapo::Opcode::kNetOpcodeUnknownOp;
+    generic_request_header.op_code = asapo::Opcode::kOpcodeUnknownOp;
     auto request = factory.GenerateRequest(generic_request_header, 1, &err);
 
     ASSERT_THAT(err, Ne(nullptr));
 }
 
 TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCode) {
-    generic_request_header.op_code = asapo::Opcode::kNetOpcodeSendData;
+    generic_request_header.op_code = asapo::Opcode::kOpcodeTransferData;
     auto request = factory.GenerateRequest(generic_request_header, 1, &err);
 
     ASSERT_THAT(err, Eq(nullptr));
diff --git a/receiver/unittests/test_request_handler_db_writer.cpp b/receiver/unittests/test_request_handler_db_writer.cpp
index c4fce648c7e8fc5d185911fbc8e65bf6b6303d39..465b1efbbd9029c027e912b39efb601d7fd45d1f 100644
--- a/receiver/unittests/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/test_request_handler_db_writer.cpp
@@ -40,7 +40,7 @@ using ::asapo::SocketDescriptor;
 using ::asapo::MockIO;
 using asapo::Request;
 using asapo::RequestHandlerDbWrite;
-using ::asapo::GenericNetworkRequestHeader;
+using ::asapo::GenericRequestHeader;
 
 using asapo::MockDatabase;
 using asapo::RequestFactory;
@@ -50,9 +50,9 @@ using asapo::ReceiverConfig;
 
 namespace {
 
-class MockRequest: public Request {
+class MockRequestHandler: public Request {
   public:
-    MockRequest(const GenericNetworkRequestHeader& request_header, SocketDescriptor socket_fd):
+    MockRequestHandler(const GenericRequestHeader& request_header, SocketDescriptor socket_fd):
         Request(request_header, socket_fd) {};
 
     MOCK_CONST_METHOD0(GetFileName, std::string());
@@ -65,16 +65,16 @@ class DbWriterHandlerTests : public Test {
   public:
     RequestHandlerDbWrite handler;
     NiceMock<MockIO> mock_io;
-    std::unique_ptr<NiceMock<MockRequest>> mock_request;
+    std::unique_ptr<NiceMock<MockRequestHandler>> mock_request;
     NiceMock<MockDatabase> mock_db;
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     void SetUp() override {
-        GenericNetworkRequestHeader request_header;
+        GenericRequestHeader request_header;
         request_header.data_id = 2;
         handler.db_client__ = std::unique_ptr<asapo::Database> {&mock_db};
         handler.log__ = &mock_logger;
-        mock_request.reset(new NiceMock<MockRequest> {request_header, 1});
+        mock_request.reset(new NiceMock<MockRequestHandler> {request_header, 1});
     }
     void TearDown() override {
         handler.db_client__.release();
diff --git a/receiver/unittests/test_request_handler_file_write.cpp b/receiver/unittests/test_request_handler_file_write.cpp
index f9747ffeee3169a3fe25cda45e68feaa4b3e29dc..07a5edac2ccecadfe3c1fbb31c26fc8de4adb0c9 100644
--- a/receiver/unittests/test_request_handler_file_write.cpp
+++ b/receiver/unittests/test_request_handler_file_write.cpp
@@ -9,7 +9,8 @@
 #include "../src/request_handler.h"
 #include "../src/request_handler_file_write.h"
 #include "common/networking.h"
-
+#include "mock_receiver_config.h"
+#include "preprocessor/definitions.h"
 
 using ::testing::Test;
 using ::testing::Return;
@@ -35,7 +36,7 @@ using ::asapo::SocketDescriptor;
 using ::asapo::MockIO;
 using asapo::Request;
 using asapo::RequestHandlerFileWrite;
-using ::asapo::GenericNetworkRequestHeader;
+using ::asapo::GenericRequestHeader;
 
 namespace {
 
@@ -46,9 +47,9 @@ TEST(FileWrite, Constructor) {
 }
 
 
-class MockRequest: public Request {
+class MockRequestHandler: public Request {
   public:
-    MockRequest(const GenericNetworkRequestHeader& request_header, SocketDescriptor socket_fd):
+    MockRequestHandler(const GenericRequestHeader& request_header, SocketDescriptor socket_fd):
         Request(request_header, socket_fd) {};
 
     MOCK_CONST_METHOD0(GetFileName, std::string());
@@ -60,15 +61,15 @@ class FileWriteHandlerTests : public Test {
   public:
     RequestHandlerFileWrite handler;
     NiceMock<MockIO> mock_io;
-    std::unique_ptr<MockRequest> mock_request;
+    std::unique_ptr<MockRequestHandler> mock_request;
     NiceMock<asapo::MockLogger> mock_logger;
     std::string expected_file_name = "2.bin";
     uint64_t expected_file_size = 10;
     void MockRequestData();
     void SetUp() override {
-        GenericNetworkRequestHeader request_header;
+        GenericRequestHeader request_header;
         request_header.data_id = 2;
-        mock_request.reset(new MockRequest{request_header, 1});
+        mock_request.reset(new MockRequestHandler{request_header, 1});
         handler.io__ = std::unique_ptr<asapo::IO> {&mock_io};
         handler.log__ = &mock_logger;
     }
@@ -120,10 +121,16 @@ void FileWriteHandlerTests::MockRequestData() {
 }
 
 TEST_F(FileWriteHandlerTests, CallsWriteFile) {
+    asapo::ReceiverConfig test_config;
+    test_config.root_folder = "test_folder";
+
+    asapo::SetReceiverConfig(test_config);
 
     MockRequestData();
 
-    EXPECT_CALL(mock_io, WriteDataToFile_t("files/" + expected_file_name, _, expected_file_size))
+    std::string expected_path = std::string("test_folder") + asapo::kPathSeparator + expected_file_name;
+
+    EXPECT_CALL(mock_io, WriteDataToFile_t(expected_path.c_str(), _, expected_file_size))
     .WillOnce(
         Return(asapo::IOErrorTemplates::kUnknownIOError.Generate().release())
     );
diff --git a/receiver/unittests/test_statistics.cpp b/receiver/unittests/test_statistics.cpp
index 01959339eef6dc832e477f1bf9afb65db4e21b21..59cace0fe901f15abaf4a45681614aa2e6883ca4 100644
--- a/receiver/unittests/test_statistics.cpp
+++ b/receiver/unittests/test_statistics.cpp
@@ -60,6 +60,7 @@ ACTION_P(SaveArg1ToSendStat, value) {
     value->n_requests = resp.n_requests;
     value->data_volume = resp.data_volume;
     value->elapsed_ms = resp.elapsed_ms;
+    value->tags = resp.tags;
     for (int i = 0; i < asapo::kNStatisticEntities; i++) {
         value->entity_shares[i] = resp.entity_shares[i];
     }
@@ -92,6 +93,26 @@ TEST_F(StatisticTests, IncreaseRequestCounter) {
     ASSERT_THAT(stat.n_requests, Eq(1));
 }
 
+TEST_F(StatisticTests, AddTag) {
+    statistics.AddTag("name", "value");
+
+    auto stat = ExtractStat();
+
+    ASSERT_THAT(stat.tags, Eq("name=value"));
+}
+
+TEST_F(StatisticTests, AddTagTwice) {
+    statistics.AddTag("name1", "value1");
+    statistics.AddTag("name2", "value2");
+
+    auto stat = ExtractStat();
+
+    ASSERT_THAT(stat.tags, Eq("name1=value1,name2=value2"));
+}
+
+
+
+
 TEST_F(StatisticTests, StatisticsResetAfterSend) {
     statistics.IncreaseRequestCounter();
 
diff --git a/receiver/unittests/test_statistics_sender_influx_db.cpp b/receiver/unittests/test_statistics_sender_influx_db.cpp
index 6408099ceb964e362f4c9d9c8d3f7db029d0a8ca..48f7f704c54916a69230e4b82dcc1950791b6b8f 100644
--- a/receiver/unittests/test_statistics_sender_influx_db.cpp
+++ b/receiver/unittests/test_statistics_sender_influx_db.cpp
@@ -61,6 +61,7 @@ class SenderInfluxDbTests : public Test {
         statistics.entity_shares[asapo::StatisticEntity::kDatabase] = 0.1;
         statistics.elapsed_ms = 100;
         statistics.data_volume = 1000;
+        statistics.tags = "name1=value1,name2=value2";
 
         config.monitor_db_uri = "test_uri";
         config.monitor_db_name = "test_name";
@@ -76,7 +77,7 @@ class SenderInfluxDbTests : public Test {
 
 
 TEST_F(SenderInfluxDbTests, SendStatisticsCallsPost) {
-    std::string expect_string = "statistics,receiver=1,connection=1 elapsed_ms=100,data_volume=1000,"
+    std::string expect_string = "statistics,name1=value1,name2=value2 elapsed_ms=100,data_volume=1000,"
                                 "n_requests=4,db_share=0.1000,network_share=0.3000,disk_share=0.6000";
     EXPECT_CALL(mock_http_client, Post_t("test_uri/write?db=test_name", expect_string, _, _)).
     WillOnce(
diff --git a/tests/automatic/broker/get_next/check_linux.sh b/tests/automatic/broker/get_next/check_linux.sh
index e23ec0fbe01dba7db0c7b5362029abcc8bd7fb3c..8c2da0557ee06fc479e37bb9d5c4498a5212640a 100644
--- a/tests/automatic/broker/get_next/check_linux.sh
+++ b/tests/automatic/broker/get_next/check_linux.sh
@@ -23,4 +23,4 @@ brokerid=`echo $!`
 curl -v  --silent 127.0.0.1:5005/database/data/next --stderr - | grep '"_id":1'
 curl -v  --silent 127.0.0.1:5005/database/data/next --stderr - | grep '"_id":2'
 
-curl -v  --silent 127.0.0.1:5005/database/data/next --stderr - | grep "No Content"
+curl -v  --silent 127.0.0.1:5005/database/data/next --stderr - | grep "Not Found"
diff --git a/tests/automatic/broker/get_next/check_windows.bat b/tests/automatic/broker/get_next/check_windows.bat
index 9ba1a0810a2682abe9c575313993df5ca794dac3..026563bea3cc4fea8d233d23ca70cf80c7280aca 100644
--- a/tests/automatic/broker/get_next/check_windows.bat
+++ b/tests/automatic/broker/get_next/check_windows.bat
@@ -13,7 +13,7 @@ ping 1.0.0.0 -n 1 -w 100 > nul
 
 C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/next --stderr - | findstr \"_id\":1  || goto :error
 C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/next --stderr - | findstr \"_id\":2  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/next --stderr - | findstr  "No Content"  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/next --stderr - | findstr  "Not Found"  || goto :error
 
 goto :clean
 
diff --git a/tests/automatic/full_chain/simple_chain/CMakeLists.txt b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
index 0fb21c69ec31121c8dd570340c3ef690487cc73f..a63811d4f4a90e4230614139ea1f403c28add8a7 100644
--- a/tests/automatic/full_chain/simple_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
@@ -3,7 +3,5 @@ set(TARGET_NAME full_chain_simple_chain)
 ################################
 # Testing
 ################################
-configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json receiver.json COPYONLY)
-configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/broker_settings.json broker.json COPYONLY)
-
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:receiver-bin> $<TARGET_PROPERTY:asapo-broker,EXENAME> $<TARGET_FILE:getnext_broker>" nomem)
+prepare_asapo()
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index a85cbe6c4e84d0197c3df9518e549ad393973de6..f384a4b70254a617746823ea3c5f84e69ed619de 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -8,34 +8,32 @@ broker_database_name=test_run
 monitor_database_name=db_test
 broker_address=127.0.0.1:5005
 
+receiver_folder=/tmp/asapo/receiver/files
+
 Cleanup() {
-	echo cleanup
-	rm -rf files
-    kill -9 $receiverid
-    kill -9 $brokerid
-    #kill -9 $producerrid
+    echo cleanup
+    rm -rf ${receiver_folder}
+    nomad stop receiver
+    nomad stop discovery
+    nomad stop broker
+#    kill $producerid
     echo "db.dropDatabase()" | mongo ${broker_database_name}
     influx -execute "drop database ${monitor_database_name}"
 }
 
 influx -execute "create database ${monitor_database_name}"
+echo "db.${broker_database_name}.insert({dummy:1})" | mongo ${broker_database_name}
 
+nomad run receiver.nmd
+nomad run discovery.nmd
+nomad run broker.nmd
 
-#receiver
-$2 receiver.json &
-sleep 0.3
-receiverid=`echo $!`
-
-#broker
-$3 -config broker.json &
-sleep 0.3
-brokerid=`echo $!`
-
+sleep 1
 
 #producer
-mkdir files
-$1 localhost:4200 100 100 &
-#producerrid=`echo $!`
-sleep 0.1
+mkdir -p ${receiver_folder}
+$1 localhost:5006 100 1000 4 0 &
+#producerid=`echo $!`
+
 
-$4 ${broker_address} ${broker_database_name} 2 | grep "Processed 100 file(s)"
+$2 ${broker_address} ${broker_database_name} 2 | grep "Processed 1000 file(s)"
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
index 7c3fc7a03a897f6a2acb4e0b7ec4a815c4d1fdf6..bd7115b3858e0283c548d108f8143d80e17f7c2c 100644
--- a/tests/automatic/full_chain/simple_chain/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -1,24 +1,23 @@
-REM receiver
-set full_recv_name="%2"
-set short_recv_name="%~nx2"
-start /B "" "%full_recv_name%" receiver.json
-ping 1.0.0.0 -n 1 -w 100 > nul
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+set broker_database_name=test_run
+SET receiver_folder="c:\tmp\asapo\receiver\files"
 
-REM broker
-set full_broker_name="%3"
-set short_broker_name="%~nx3"
-start /B "" "%full_broker_name%" -config broker.json
-ping 1.0.0.0 -n 1 -w 100 > nul
+echo db.%broker_database_name%.insert({dummy:1}) | %mongo_exe% %broker_database_name%
+
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run discovery.nmd
+c:\opt\consul\nomad run broker.nmd
+
+ping 1.0.0.0 -n 10 -w 100 > nul
 
 REM producer
-mkdir files
-start /B "" "%1" localhost:4200 100 100
+mkdir %receiver_folder%
+start /B "" "%1" localhost:5006 100 1000 4 0
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM worker
 set broker_address="127.0.0.1:5005"
-set broker_database_name="test_run"
-"%4" %broker_address% %broker_database_name% 2 | findstr "Processed 100 file(s)"  || goto :error
+"%2" %broker_address% %broker_database_name% 2 | findstr "Processed 1000 file(s)"  || goto :error
 
 
 goto :clean
@@ -28,10 +27,10 @@ call :clean
 exit /b 1
 
 :clean
-Taskkill /IM "%short_recv_name%" /F
-Taskkill /IM "%short_broker_name%" /F
-rmdir /S /Q files
-SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop discovery
+c:\opt\consul\nomad stop broker
+rmdir /S /Q %receiver_folder%
 echo db.dropDatabase() | %mongo_exe% %broker_database_name%
 
 
diff --git a/tests/automatic/producer_receiver/check_monitoring/CMakeLists.txt b/tests/automatic/producer_receiver/check_monitoring/CMakeLists.txt
index 298b81ed25e3e9ef14c159b37e04c9a8a000d29f..80b82ae8e85c275dbd132c72e2838c1cbc7f8545 100644
--- a/tests/automatic/producer_receiver/check_monitoring/CMakeLists.txt
+++ b/tests/automatic/producer_receiver/check_monitoring/CMakeLists.txt
@@ -3,6 +3,6 @@ set(TARGET_NAME receiver)
 ################################
 # Testing
 ################################
-configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json receiver.json COPYONLY)
-add_script_test("${TARGET_NAME}-monitoring" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:receiver-bin>" nomem
-        )
+prepare_asapo()
+
+add_script_test("${TARGET_NAME}-monitoring" "$<TARGET_FILE:dummy-data-producer>" nomem)
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index 5eeb022b926ff0efa59dbc3496a5af90c2749ef2..817e76757865684c21985e90c262eb2a056a2b33 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -2,7 +2,7 @@
 
 database_name=db_test
 mongo_database_name=test_run
-
+receiver_folder=/tmp/asapo/receiver/files
 set -e
 
 trap Cleanup EXIT
@@ -10,20 +10,22 @@ trap Cleanup EXIT
 Cleanup() {
 	echo cleanup
 	influx -execute "drop database ${database_name}"
-    kill $receiverid
-	rm -rf files
+    nomad stop receiver
+    nomad stop discovery
     echo "db.dropDatabase()" | mongo ${mongo_database_name}
+    rm -rf ${receiver_folder}
 }
 
+mkdir -p ${receiver_folder}
+
 influx -execute "create database ${database_name}"
 
-nohup $2 receiver.json &>/dev/null &
-sleep 0.3
-receiverid=`echo $!`
+nomad run receiver.nmd
+nomad run discovery.nmd
 
-mkdir files
+sleep 1
 
-$1 localhost:4200 100 112
+$1 localhost:5006 100 112 4  0
 
 sleep 1
 
diff --git a/tests/automatic/producer_receiver/transfer_single_file/CMakeLists.txt b/tests/automatic/producer_receiver/transfer_single_file/CMakeLists.txt
index f745213b85ef22c898bce3130b1ae24dcfa33a13..7b299ca60c8ad5d3df328007c44f62a58069de24 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/CMakeLists.txt
+++ b/tests/automatic/producer_receiver/transfer_single_file/CMakeLists.txt
@@ -3,5 +3,5 @@ set(TARGET_NAME transfer-single-file)
 ################################
 # Testing
 ################################
-configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json receiver.json COPYONLY)
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:receiver-bin>" nomem)
+prepare_asapo()
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer>" nomem)
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
index 48d4ff3dc0a304c83e88d3fc659acb39bcfa2014..d904c84f601fb0dee70caedc51fe94f5b0ae091f 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
@@ -4,21 +4,28 @@ set -e
 
 trap Cleanup EXIT
 
-database_name=test_run
+database_name=db_test
+mongo_database_name=test_run
+receiver_folder=/tmp/asapo/receiver/files
 
 Cleanup() {
 	echo cleanup
-	rm -rf files
-    kill $receiverid
-    echo "db.dropDatabase()" | mongo ${database_name}
+	rm -rf ${receiver_folder}
+    nomad stop receiver
+    nomad stop discovery
+    echo "db.dropDatabase()" | mongo ${mongo_database_name}
+    influx -execute "drop database ${database_name}"
 }
 
-nohup $2 receiver.json &>/dev/null &
-sleep 0.3
-receiverid=`echo $!`
+influx -execute "create database ${database_name}"
+echo "db.${mongo_database_name}.insert({dummy:1})" | mongo ${mongo_database_name}
 
-mkdir files
+nomad run receiver.nmd
+nomad run discovery.nmd
 
-$1 localhost:4200 100 1
+mkdir -p ${receiver_folder}
 
-ls -ln files/1.bin | awk '{ print $5 }'| grep 102400
+$1 localhost:5006 100 1 1  0
+
+
+ls -ln ${receiver_folder}/1.bin | awk '{ print $5 }'| grep 102400
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
index 8a470b403ca5365ccd54e6fd4be67f649e15a922..af71a9d81b50613e19968b03b0f19aff90adc46c 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
@@ -1,18 +1,22 @@
-set full_recv_name="%2"
-set short_recv_name="%~nx2"
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+SET database_name=test_run
+SET receiver_folder=c:\tmp\asapo\receiver\files
+
+echo db.%database_name%.insert({dummy:1}) | %mongo_exe% %database_name%
+
 
-start /B "" "%full_recv_name%" receiver.json
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run discovery.nmd
 
 ping 1.0.0.0 -n 1 -w 100 > nul
 
-mkdir files
+mkdir %receiver_folder%
 
-%1 localhost:4200 100 1
+%1 localhost:5006 100 1 1 0
 
 ping 1.0.0.0 -n 1 -w 100 > nul
 
-FOR /F "usebackq" %%A IN ('files\1.bin') DO set size=%%~zA
-
+FOR /F "usebackq" %%A IN ('%receiver_folder%\1.bin') DO set size=%%~zA
 if %size% NEQ 102400 goto :error
 
 goto :clean
@@ -22,10 +26,9 @@ call :clean
 exit /b 1
 
 :clean
-Taskkill /IM "%short_recv_name%" /F
-rmdir /S /Q files
-SET database_name=test_run
-SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop discovery
+rmdir /S /Q %receiver_folder%
 echo db.dropDatabase() | %mongo_exe% %database_name%
 
 
diff --git a/tests/automatic/settings/broker_settings.json.tpl b/tests/automatic/settings/broker_settings.json.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..af6d1dcb2492b7cacf45268b761a92e20ab0521e
--- /dev/null
+++ b/tests/automatic/settings/broker_settings.json.tpl
@@ -0,0 +1,7 @@
+{
+  "BrokerDbAddress":"127.0.0.1:27017",
+  "MonitorDbAddress": "localhost:8086",
+  "MonitorDbName": "db_test",
+  "port":{{ env "NOMAD_PORT_broker" }},
+  "LogLevel":"info"
+}
\ No newline at end of file
diff --git a/tests/automatic/settings/discovery_settings.json.tpl b/tests/automatic/settings/discovery_settings.json.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..62cb9864b6b7cf0c4a738ca8f1a2b254ff4400fc
--- /dev/null
+++ b/tests/automatic/settings/discovery_settings.json.tpl
@@ -0,0 +1,8 @@
+{
+  "MaxConnections": 32,
+  "Mode": "consul",
+  "Port": {{ env "NOMAD_PORT_discovery" }},
+  "LogLevel":"debug"
+}
+
+
diff --git a/tests/automatic/settings/receiver.json b/tests/automatic/settings/receiver.json.tpl.lin
similarity index 53%
rename from tests/automatic/settings/receiver.json
rename to tests/automatic/settings/receiver.json.tpl.lin
index 709682ff6c1c9c8ffa90d5bb07f37c2d8bdf9bb3..8d98fd1abee361de21a2b4de1d87bd2fa20f277b 100644
--- a/tests/automatic/settings/receiver.json
+++ b/tests/automatic/settings/receiver.json.tpl.lin
@@ -3,8 +3,10 @@
   "MonitorDbName": "db_test",
   "BrokerDbAddress":"localhost:27017",
   "BrokerDbName": "test_run",
-  "ListenPort":4200,
+  "ListenPort": {{ env "NOMAD_PORT_recv" }},
+  "Tag": "{{ env "NOMAD_ADDR_recv" }}",
   "WriteToDisk":true,
   "WriteToDb":true,
-  "LogLevel" : "info"
-}
\ No newline at end of file
+  "LogLevel" : "debug",
+  "RootFolder" : "/tmp/asapo/receiver/files"
+}
diff --git a/tests/automatic/settings/receiver.json.tpl.win b/tests/automatic/settings/receiver.json.tpl.win
new file mode 100644
index 0000000000000000000000000000000000000000..d4cb5e03853d46bdce0e0fcb19dd7eb016c2ce28
--- /dev/null
+++ b/tests/automatic/settings/receiver.json.tpl.win
@@ -0,0 +1,12 @@
+{
+  "MonitorDbAddress":"localhost:8086",
+  "MonitorDbName": "db_test",
+  "BrokerDbAddress":"localhost:27017",
+  "BrokerDbName": "test_run",
+  "ListenPort": {{ env "NOMAD_PORT_recv" }},
+  "Tag": "{{ env "NOMAD_ADDR_recv" }}",
+  "WriteToDisk":true,
+  "WriteToDb":true,
+  "LogLevel" : "debug",
+  "RootFolder" : "c:\\tmp\\asapo\\receiver\\files"
+}
diff --git a/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp b/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
index 09b866ca5d980a9dd3dca3cf096116f42c424736..add455cd3eb1a6ba79a7b6bf0e8ee5c8214119c3 100644
--- a/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
+++ b/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
@@ -51,7 +51,11 @@ int main(int argc, char* argv[]) {
     auto params = GetParams(argc, argv);
 
     auto io = std::unique_ptr<asapo::IO> {asapo::GenerateDefaultIO()};
-    FileData data{new uint8_t[params.length]{'1', '2', '3'}};
+    auto array = new uint8_t[params.length];
+    array[0] = '1';
+    array[1] = '2';
+    array[2] = '3';
+    FileData data{array};
 
     auto err = io->WriteDataToFile(params.fname, data, params.length);
 
diff --git a/tests/manual/performance_full_chain_simple/broker.json b/tests/manual/performance_full_chain_simple/broker.json
index 31aef140d3ed64a122684062b159008e1b52a372..a2c1a4a5ab7238e14c26667e5bfc7335e935d96d 100644
--- a/tests/manual/performance_full_chain_simple/broker.json
+++ b/tests/manual/performance_full_chain_simple/broker.json
@@ -3,5 +3,5 @@
   "MonitorDbAddress": "localhost:8086",
   "MonitorDbName": "db_test",
   "port":5005,
-  "LogLevel":"debug"
+  "LogLevel":"info"
 }
\ No newline at end of file
diff --git a/tests/automatic/settings/discovery_settings.json b/tests/manual/performance_full_chain_simple/discovery.json
similarity index 52%
rename from tests/automatic/settings/discovery_settings.json
rename to tests/manual/performance_full_chain_simple/discovery.json
index 642dcfffb79eaecb41b920ece18f86983b888c12..476f732bbedad31adc0e4ce4fbbee1ca081cc025 100644
--- a/tests/automatic/settings/discovery_settings.json
+++ b/tests/manual/performance_full_chain_simple/discovery.json
@@ -1,7 +1,7 @@
 {
   "MaxConnections": 32,
-  "Endpoints": ["localhost:8086"],
   "Mode": "static",
+  "Endpoints":["localhost:4200"],
   "Port":5006,
-  "LogLevel":"debug"
+  "LogLevel":"info"
 }
\ No newline at end of file
diff --git a/tests/manual/performance_full_chain_simple/receiver.json b/tests/manual/performance_full_chain_simple/receiver.json
index 5330d75a7380acc0c09b321e87bb6bf1253d962e..7cf0d85c122d91a081050a4c7fe648618e84f841 100644
--- a/tests/manual/performance_full_chain_simple/receiver.json
+++ b/tests/manual/performance_full_chain_simple/receiver.json
@@ -6,5 +6,6 @@
   "ListenPort":4200,
   "WriteToDisk":true,
   "WriteToDb":true,
-  "LogLevel":"debug"
-}
\ No newline at end of file
+  "LogLevel":"info",
+  "Tag": "test_receiver"
+}
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index 4cece2b86f0e04b0cd4a0191eb8f5e8f53ccf68f..880e038853ebcc12ff5604d03743a150bac3a707 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -9,6 +9,7 @@ Cleanup() {
 set +e
 ssh ${receiver_node} rm -f ${receiver_dir}/files/*
 ssh ${receiver_node} killall receiver
+ssh ${receiver_node} killall asapo-discovery
 ssh ${broker_node} killall asapo-broker
 ssh ${broker_node} docker rm -f -v mongo
 }
@@ -24,16 +25,14 @@ log_dir=~/fullchain_tests/logs
 # starts receiver on $receiver_node
 # runs producer with various file sizes from $producer_node and measures performance
 
-file_size=1000
-file_num=$((10000000 / $file_size))
+file_size=10000
+file_num=$((100000000 / $file_size))
 echo filesize: ${file_size}K, filenum: $file_num
 
 # receiver_setup
 receiver_node=max-wgs
-receiver_ip=`resolveip -s ${receiver_node}`
 receiver_port=4201
 receiver_dir=/gpfs/petra3/scratch/yakubov/receiver_tests
-ssh ${receiver_node} mkdir -p ${receiver_dir}/logs
 ssh ${receiver_node} mkdir -p ${receiver_dir}/files
 scp ../../../cmake-build-release/receiver/receiver ${receiver_node}:${receiver_dir}
 cat receiver.json |
@@ -45,14 +44,36 @@ cat receiver.json |
           else .
           end
          ) |
-      from_entries" > settings_tmp.json
-scp settings_tmp.json ${receiver_node}:${receiver_dir}/settings.json
+      from_entries" > receiver_tmp.json
+
+scp receiver_tmp.json ${receiver_node}:${receiver_dir}/receiver.json
+rm receiver_tmp.json
+
+
+# discovery_setup
+discovery_port=5006
+cat discovery.json |
+  jq "to_entries |
+       map(if .key == \"Port\"
+          then . + {value:${discovery_port}}
+          elif .key == \"Endpoints\"
+          then . + {value:[\"${receiver_node}:${receiver_port}\"]}
+          else .
+          end
+         ) |
+      from_entries" > discovery_tmp.json
+scp ../../../cmake-build-release/discovery/asapo-discovery ${receiver_node}:${receiver_dir}
+scp discovery_tmp.json ${receiver_node}:${receiver_dir}/discovery.json
+discovery_ip=`resolveip -s ${receiver_node}`
+rm discovery_tmp.json
 
 #producer_setup
 producer_node=max-display001
 #producer_node=max-wgs
 producer_dir=~/fullchain_tests
 scp ../../../cmake-build-release/examples/producer/dummy-data-producer/dummy-data-producer ${producer_node}:${producer_dir}
+producer_nthreads=16
+
 
 #broker_setup
 broker_node=max-wgs
@@ -84,8 +105,12 @@ ssh ${monitor_node} influx -execute \"create database db_test\"
 #mongo_start
 ssh ${broker_node} docker run -d -p 27017:27017 --name mongo mongo
 
+#discovery_start
+ssh ${receiver_node} "bash -c 'cd ${receiver_dir}; nohup ./asapo-discovery -config discovery.json &> ${log_dir}/discovery.log &'"
+sleep 0.3
+
 #receiver_start
-ssh ${receiver_node} "bash -c 'cd ${receiver_dir}; nohup ./receiver settings.json &> ${log_dir}/log.receiver &'"
+ssh ${receiver_node} "bash -c 'cd ${receiver_dir}; nohup ./receiver receiver.json &> ${log_dir}/log.receiver &'"
 sleep 0.3
 
 #broker_start
@@ -93,8 +118,9 @@ ssh ${broker_node} "bash -c 'cd ${broker_dir}; nohup ./asapo-broker -config brok
 sleep 0.3
 
 #producer_start
-ssh ${producer_node} "bash -c 'cd ${producer_dir}; nohup ./dummy-data-producer ${receiver_ip}:${receiver_port} ${file_size} ${file_num} &> ${producer_dir}/producer.log &'"
-sleep 0.3
+ssh ${producer_node} "bash -c 'cd ${producer_dir}; nohup ./dummy-data-producer ${discovery_ip}:${discovery_port} ${file_size} ${file_num} ${producer_nthreads} 0 &> ${log_dir}/producer.log &'"
+
+sleep 1
 
 #worker_start
 ssh ${worker_node} ${worker_dir}/getnext_broker ${broker_node}:5005 test_run ${nthreads}
diff --git a/tests/manual/performance_producer_receiver/discovery.json b/tests/manual/performance_producer_receiver/discovery.json
new file mode 100644
index 0000000000000000000000000000000000000000..476f732bbedad31adc0e4ce4fbbee1ca081cc025
--- /dev/null
+++ b/tests/manual/performance_producer_receiver/discovery.json
@@ -0,0 +1,7 @@
+{
+  "MaxConnections": 32,
+  "Mode": "static",
+  "Endpoints":["localhost:4200"],
+  "Port":5006,
+  "LogLevel":"info"
+}
\ No newline at end of file
diff --git a/tests/manual/performance_producer_receiver/receiver.json b/tests/manual/performance_producer_receiver/receiver.json
index dcb3879550833d7552374b0cc6ddf4f53fe166a2..7cf0d85c122d91a081050a4c7fe648618e84f841 100644
--- a/tests/manual/performance_producer_receiver/receiver.json
+++ b/tests/manual/performance_producer_receiver/receiver.json
@@ -6,5 +6,6 @@
   "ListenPort":4200,
   "WriteToDisk":true,
   "WriteToDb":true,
-  "LogLevel":"info"
+  "LogLevel":"info",
+  "Tag": "test_receiver"
 }
diff --git a/tests/manual/performance_producer_receiver/settings_tmp.json b/tests/manual/performance_producer_receiver/settings_tmp.json
deleted file mode 100644
index 7b5460157dced99ec9b24104f48b482173cc6d47..0000000000000000000000000000000000000000
--- a/tests/manual/performance_producer_receiver/settings_tmp.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "MonitorDbAddress": "zitpcx27016:8086",
-  "MonitorDbName": "db_test",
-  "BrokerDbAddress": "localhost:27017",
-  "BrokerDbName": "test_run",
-  "ListenPort": 4201,
-  "WriteToDisk": true,
-  "WriteToDb": true
-}
diff --git a/tests/manual/performance_producer_receiver/test.sh b/tests/manual/performance_producer_receiver/test.sh
index 862ae2ec85bafd6df51f610336c10aaa2855e7bc..83a52730d310e50cd533cb3fe85475cef4a8a391 100755
--- a/tests/manual/performance_producer_receiver/test.sh
+++ b/tests/manual/performance_producer_receiver/test.sh
@@ -2,13 +2,17 @@
 
 set -e
 
+trap Cleanup EXIT
+
+
 # starts receiver on $service_node
 # runs producer with various file sizes from $worker_node and measures performance
 
 # a working directory
 service_node=max-wgs
 service_ip=`resolveip -s ${service_node}`
-service_port=4201
+discovery_port=5006
+receiver_port=4201
 
 monitor_node=zitpcx27016
 monitor_port=8086
@@ -28,6 +32,7 @@ ssh ${service_node} mkdir -p ${service_dir}/files
 ssh ${worker_node} mkdir -p ${worker_dir}
 
 scp ../../../cmake-build-release/receiver/receiver ${service_node}:${service_dir}
+scp ../../../cmake-build-release/discovery/asapo-discovery ${service_node}:${service_dir}
 scp ../../../cmake-build-release/examples/producer/dummy-data-producer/dummy-data-producer ${worker_node}:${worker_dir}
 
 function do_work {
@@ -36,25 +41,41 @@ cat receiver.json |
        map(if .key == \"MonitorDbAddress\"
           then . + {value:\"${monitor_node}:${monitor_port}\"}
           elif .key == \"ListenPort\"
-          then . + {value:${service_port}}
+          then . + {value:${receiver_port}}
           elif .key == \"WriteToDisk\"
           then . + {value:$1}
           else .
           end
          ) |
-      from_entries" > settings_tmp.json
-scp settings_tmp.json ${service_node}:${service_dir}/settings.json
-ssh ${service_node} "bash -c 'cd ${service_dir}; nohup ./receiver settings.json &> ${service_dir}/receiver.log &'"
+      from_entries" > receiver_tmp.json
+cat discovery.json |
+  jq "to_entries |
+       map(if .key == \"Port\"
+          then . + {value:${discovery_port}}
+          elif .key == \"Endpoints\"
+          then . + {value:[\"${service_node}:${receiver_port}\"]}
+          else .
+          end
+         ) |
+      from_entries" > discovery_tmp.json
+
+scp discovery_tmp.json ${service_node}:${service_dir}/discovery.json
+scp receiver_tmp.json ${service_node}:${service_dir}/receiver.json
+rm discovery_tmp.json receiver_tmp.json
+ssh ${service_node} "bash -c 'cd ${service_dir}; nohup ./receiver receiver.json &> ${service_dir}/receiver.log &'"
+ssh ${service_node} "bash -c 'cd ${service_dir}; nohup ./asapo-discovery -config discovery.json &> ${service_dir}/discovery.log &'"
+
 sleep 0.3
 for size  in 100 1000 10000
 do
 ssh ${service_node} docker run -d -p 27017:27017 --name mongo mongo
 echo ===================================================================
-ssh ${worker_node} ${worker_dir}/dummy-data-producer ${service_ip}:${service_port} ${size} 1000
+ssh ${worker_node} ${worker_dir}/dummy-data-producer ${service_ip}:${discovery_port} ${size} 1000 8 0
 ssh ${service_node} rm -f ${service_dir}/files/*
 ssh ${service_node} docker rm -f -v mongo
 done
 ssh ${service_node} killall receiver
+ssh ${service_node} killall asapo-discovery
 }
 
 echo
diff --git a/worker/api/cpp/src/server_data_broker.cpp b/worker/api/cpp/src/server_data_broker.cpp
index e00a2daa8a2e7650b701fa0b1c23d1c66a7dc8ef..887c9bfc77d97156c77cdbf1bb07a629eff37233 100644
--- a/worker/api/cpp/src/server_data_broker.cpp
+++ b/worker/api/cpp/src/server_data_broker.cpp
@@ -2,13 +2,14 @@
 
 #include <chrono>
 
+#include <json_parser/json_parser.h>
+
 #include "io/io_factory.h"
 
 #include "http_client/http_error.h"
 
 using std::chrono::high_resolution_clock;
 
-
 namespace asapo {
 
 Error HttpCodeToWorkerError(const HttpCode& code) {
@@ -22,12 +23,9 @@ Error HttpCodeToWorkerError(const HttpCode& code) {
     case HttpCode::InternalServerError:
         message = WorkerErrorMessage::kErrorReadingSource;
         break;
-    case HttpCode::NoContent:
+    case HttpCode::NotFound:
         message = WorkerErrorMessage::kNoData;
         return TextErrorWithType(message, ErrorType::kEndOfFile);
-    case HttpCode::NotFound:
-        message = WorkerErrorMessage::kSourceNotFound;
-        break;
     default:
         message = WorkerErrorMessage::kErrorReadingSource;
         break;
@@ -35,10 +33,8 @@ Error HttpCodeToWorkerError(const HttpCode& code) {
     return Error{new HttpError(message, code)};
 }
 
-
-
 ServerDataBroker::ServerDataBroker(const std::string& server_uri,
-                                   const std::string& source_name):
+                                   const std::string& source_name) :
     io__{GenerateDefaultIO()}, httpclient__{DefaultHttpClient()},
     server_uri_{server_uri}, source_name_{source_name} {
 }
@@ -51,28 +47,59 @@ void ServerDataBroker::SetTimeout(uint64_t timeout_ms) {
     timeout_ms_ = timeout_ms;
 }
 
-Error ServerDataBroker::GetFileInfoFromServer(FileInfo* info, const std::string& operation) {
-    std::string full_uri = server_uri_ + "/database/" + source_name_ + "/" + operation;
+std::string GetIDFromJson(const std::string& json_string, Error* err) {
+    JsonStringParser parser(json_string);
+    uint64_t id;
+    if ((*err = parser.GetUInt64("id", &id)) != nullptr) {
+        return "";
+    }
+    return std::to_string(id);
+}
+
+void ServerDataBroker::ProcessServerError(Error* err,const std::string& response,std::string* redirect_uri) {
+    if ((*err)->GetErrorType() != asapo::ErrorType::kEndOfFile) {
+        (*err)->Append(response);
+        return;
+    } else {
+        if (response.find("id") != std::string::npos) {
+            auto id = GetIDFromJson(response, err);
+            if (*err) {
+                return;
+            }
+            *redirect_uri = server_uri_ + "/database/" + source_name_ + "/" + id;
+        }
+    }
+    *err=nullptr;
+    return;
+}
+
+Error ServerDataBroker::ProcessRequest(std::string* response,std::string request_uri) {
     Error err;
     HttpCode code;
+    *response = httpclient__->Get(request_uri, &code, &err);
+    if (err != nullptr) {
+        return err;
+    }
+    return HttpCodeToWorkerError(code);
+}
 
-    std::string response;
+Error ServerDataBroker::GetFileInfoFromServer(FileInfo* info, const std::string& operation) {
+    std::string request_uri = server_uri_ + "/database/" + source_name_ + "/" + operation;
     uint64_t elapsed_ms = 0;
+    std::string response;
     while (true) {
-        response = httpclient__->Get(full_uri, &code, &err);
-        if (err != nullptr) {
-            return err;
+        auto err = ProcessRequest(&response,request_uri);
+        if (err == nullptr) {
+            break;
         }
 
-        err = HttpCodeToWorkerError(code);
-        if (err == nullptr) break;
-        if (err->GetErrorType() != asapo::ErrorType::kEndOfFile) {
-            err->Append(response);
-//            return err;
+        ProcessServerError(&err,response,&request_uri);
+        if (err != nullptr) {
+            return err;
         }
 
         if (elapsed_ms >= timeout_ms_) {
-            err->Append("exit on timeout");
+            err = TextErrorWithType("no more data found, exit on timeout", asapo::ErrorType::kTimeOut);
             return err;
         }
         std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -90,7 +117,7 @@ Error ServerDataBroker::GetNext(FileInfo* info, FileData* data) {
         return TextError(WorkerErrorMessage::kWrongInput);
     }
 
-    auto  err = GetFileInfoFromServer(info, "next");
+    auto err = GetFileInfoFromServer(info, "next");
     if (err != nullptr) {
         return err;
     }
diff --git a/worker/api/cpp/src/server_data_broker.h b/worker/api/cpp/src/server_data_broker.h
index 36d277deb0cc7660b9ef1d259da4626223b8ec28..f17ce25c30bff2362c8c84f25094e63b5c566312 100644
--- a/worker/api/cpp/src/server_data_broker.h
+++ b/worker/api/cpp/src/server_data_broker.h
@@ -20,6 +20,8 @@ class ServerDataBroker final : public asapo::DataBroker {
     std::unique_ptr<HttpClient> httpclient__;
   private:
     Error GetFileInfoFromServer(FileInfo* info, const std::string& operation);
+    void ProcessServerError(Error* err,const std::string& response,std::string* redirect_uri);
+    Error ProcessRequest(std::string* response,std::string request_uri);
     std::string server_uri_;
     std::string source_name_;
     uint64_t timeout_ms_ = 0;
diff --git a/worker/api/cpp/unittests/test_server_broker.cpp b/worker/api/cpp/unittests/test_server_broker.cpp
index 2424eb9cd9ad48f5d85a2557413cef1b6966ceab..28e2caef439d0a720983785b64dba9575e5db300 100644
--- a/worker/api/cpp/unittests/test_server_broker.cpp
+++ b/worker/api/cpp/unittests/test_server_broker.cpp
@@ -92,42 +92,46 @@ TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUri) {
     data_broker->GetNext(&info, nullptr);
 }
 
-TEST_F(ServerDataBrokerTests, GetNextReturnsErrorFromHttpClient) {
-    EXPECT_CALL(mock_http_client, Get_t(_, _, _)).WillOnce(DoAll(
+
+TEST_F(ServerDataBrokerTests, GetNextReturnsEOFFromHttpClient) {
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
                 SetArgPointee<1>(HttpCode::NotFound),
                 SetArgPointee<2>(nullptr),
-                Return("")));
+                Return("{\"id\":1}")));
 
     auto err = data_broker->GetNext(&info, nullptr);
 
-    ASSERT_THAT(err->Explain(), HasSubstr(asapo::WorkerErrorMessage::kSourceNotFound));
-    ASSERT_THAT(err->GetErrorType(), asapo::ErrorType::kHttpError);
-    ASSERT_THAT(dynamic_cast<HttpError*>(err.get())->GetCode(), Eq(HttpCode::NotFound));
+    ASSERT_THAT(err->Explain(), HasSubstr("timeout"));
 }
 
-TEST_F(ServerDataBrokerTests, GetNextReturnsEOFFromHttpClient) {
-    EXPECT_CALL(mock_http_client, Get_t(_, _, _)).WillOnce(DoAll(
-                SetArgPointee<1>(HttpCode::NoContent),
+TEST_F(ServerDataBrokerTests, GetNextReturnsWrongResponseFromHttpClient) {
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
+                SetArgPointee<1>(HttpCode::NotFound),
                 SetArgPointee<2>(nullptr),
-                Return("")));
+                Return("id")));
 
     auto err = data_broker->GetNext(&info, nullptr);
 
-    ASSERT_THAT(err->Explain(), HasSubstr(asapo::WorkerErrorMessage::kNoData));
-    ASSERT_THAT(err->GetErrorType(), asapo::ErrorType::kEndOfFile);
+    ASSERT_THAT(err->Explain(), HasSubstr("Cannot parse"));
 }
 
+
 TEST_F(ServerDataBrokerTests, GetNextReturnsEOFFromHttpClientUntilTimeout) {
-    EXPECT_CALL(mock_http_client, Get_t(_, _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
-                SetArgPointee<1>(HttpCode::NoContent),
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
+                SetArgPointee<1>(HttpCode::NotFound),
                 SetArgPointee<2>(nullptr),
-                Return("")));
+                Return("{\"id\":1}")));
+
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr("1"), _, _)).Times(AtLeast(1)).WillRepeatedly(DoAll(
+                SetArgPointee<1>(HttpCode::NotFound),
+                SetArgPointee<2>(nullptr),
+                Return("{\"id\":1}")));
+
 
     data_broker->SetTimeout(100);
     auto err = data_broker->GetNext(&info, nullptr);
 
-    ASSERT_THAT(err->Explain(), HasSubstr(asapo::WorkerErrorMessage::kNoData));
-    ASSERT_THAT(err->GetErrorType(), asapo::ErrorType::kEndOfFile);
+    ASSERT_THAT(err->Explain(), HasSubstr("timeout"));
 }