diff --git a/CMakeLists.txt b/CMakeLists.txt
index f444089d0209112a31fa486ea7da8003692bac36..0277a02318b5fd146bc98fb27e7c353951712de9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,6 +43,7 @@ option(BUILD_CONSUMER_TOOLS "Build consumer tools" OFF)
 option(BUILD_EXAMPLES "Build examples" OFF)
 
 option(ENABLE_LIBFABRIC "Enables LibFabric support for RDMA transfers" OFF)
+option(ENABLE_LIBFABRIC_LOCALHOST "Emulates LibFabric stack over TCP. Only for localhost and testing purposes." OFF)
 
 set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/CMakeModules/)
 
@@ -74,7 +75,11 @@ IF(ENABLE_LIBFABRIC)
     message(STATUS "LibFabric support enabled")
     message(STATUS "LIB_FABRIC: Path: ${LIBFABRIC_LIBRARY} Include: ${LIBFABRIC_INCLUDE_DIR}")
     add_definitions(-DLIBFABRIC_ENABLED)
-    SET(ASAPO_COMMON_FABRIC_LIBRARIES ${ASAPO_COMMON_FABRIC_LIBRARIES} fabric)
+    SET(ASAPO_COMMON_FABRIC_LIBRARIES ${ASAPO_COMMON_FABRIC_LIBRARIES} ${CMAKE_DL_LIBS})
+    IF(ENABLE_LIBFABRIC_LOCALHOST)
+        message(STATUS "LIB_FABRIC: Enabled emulated RDMA when localhost is used. Should only be used for tests.")
+        add_definitions(-DLIBFARBIC_ALLOW_LOCALHOST)
+    ENDIF()
 ENDIF()
 SET_PROPERTY(GLOBAL PROPERTY ASAPO_COMMON_FABRIC_LIBRARIES ${ASAPO_COMMON_FABRIC_LIBRARIES})
 
@@ -123,4 +128,4 @@ endif()
 
 include(prepare_version)
 
-add_subdirectory(docs)
\ No newline at end of file
+add_subdirectory(docs)
diff --git a/CMakeModules/FindLibFabric.cmake b/CMakeModules/FindLibFabric.cmake
index 24b54d5b1991820ae24ae3777d05c5ba5e98c41f..d3639409ad3d569444126695eab11e08734b9fb0 100644
--- a/CMakeModules/FindLibFabric.cmake
+++ b/CMakeModules/FindLibFabric.cmake
@@ -9,7 +9,9 @@
 
 cmake_minimum_required(VERSION 2.6)
 
-find_path(LIBFABRIC_INCLUDE_DIR fabric.h)
+find_path(LIBFABRIC_INCLUDE_DIR fabric.h HINTS ENV LIBFABRIC_INCLUDE_DIR)
+get_filename_component(LIBFABRIC_INCLUDE_DIR ${LIBFABRIC_INCLUDE_DIR} DIRECTORY)
+
 find_library(LIBFABRIC_LIBRARY fabric)
 
 mark_as_advanced(LIBFABRIC_INCLUDE_DIR LIBFABRIC_LIBRARY)
diff --git a/CMakeModules/prepare_asapo.cmake b/CMakeModules/prepare_asapo.cmake
index ce5c33d92ff39fb8ee26332194512844b64c79e7..981593512e060dcb2ebbf1e2d4cf940b5ddc2909 100644
--- a/CMakeModules/prepare_asapo.cmake
+++ b/CMakeModules/prepare_asapo.cmake
@@ -27,22 +27,25 @@ function(prepare_asapo)
     endif()
 
     if (WIN32)
-        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.win.in receiver.json.tpl @ONLY)
-        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_win.nmd nginx_kill.nmd @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver_tcp.json.tpl.win.in receiver_tcp.json.tpl @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/authorizer_settings.json.tpl.win authorizer.json.tpl COPYONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/start_services.bat start_services.bat COPYONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/stop_services.bat stop_services.bat COPYONLY)
 
+        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver_tcp.nmd.in  receiver_tcp.nmd @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_win.nmd nginx_kill.nmd @ONLY)
     else()
-        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.lin.in receiver.json.tpl @ONLY)
-        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_lin.nmd nginx_kill.nmd @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver_tcp.json.tpl.lin.in receiver_tcp.json.tpl @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver_fabric.json.tpl.lin.in receiver_fabric.json.tpl @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/authorizer_settings.json.tpl.lin authorizer.json.tpl COPYONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/start_services.sh start_services.sh COPYONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/stop_services.sh stop_services.sh COPYONLY)
 
+        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver_tcp.nmd.in  receiver_tcp.nmd @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver_fabric.nmd.in  receiver_fabric.nmd @ONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_lin.nmd nginx_kill.nmd @ONLY)
     endif()
 
-    configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver.nmd.in  receiver.nmd @ONLY)
     configure_file(${CMAKE_SOURCE_DIR}/config/nomad/discovery.nmd.in  discovery.nmd @ONLY)
     configure_file(${CMAKE_SOURCE_DIR}/config/nomad/authorizer.nmd.in  authorizer.nmd @ONLY)
     configure_file(${CMAKE_SOURCE_DIR}/config/nomad/file_transfer.nmd.in  file_transfer.nmd @ONLY)
@@ -54,7 +57,6 @@ function(prepare_asapo)
     configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/nginx.conf.tpl nginx.conf.tpl COPYONLY)
     configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx.nmd.in nginx.nmd @ONLY)
 
-
 endfunction()
 
 macro(configure_files srcDir destDir)
diff --git a/common/cpp/include/asapo_fabric/asapo_fabric.h b/common/cpp/include/asapo_fabric/asapo_fabric.h
index a9b9f8da1e9729d4b636dcc92ec804ce4f1ba926..51466e8834c17f4596db9bb087168baa7f82541e 100644
--- a/common/cpp/include/asapo_fabric/asapo_fabric.h
+++ b/common/cpp/include/asapo_fabric/asapo_fabric.h
@@ -78,7 +78,7 @@ class FabricFactory {
     virtual std::unique_ptr<FabricClient> CreateClient(Error* error) const = 0;
 };
 
-std::unique_ptr<FabricFactory> GenerateDefaultFabricFactory();
+std::unique_ptr<FabricFactory> GenerateDefaultFabricFactory(); // <- will try to load the library with dlopen first
 }
 }
 
diff --git a/common/cpp/include/asapo_fabric/fabric_error.h b/common/cpp/include/asapo_fabric/fabric_error.h
index 854d677f4b4d0773038eb183fead83323148f350..1a8ca01061397a9d59384fe11e92505692d0c1d8 100644
--- a/common/cpp/include/asapo_fabric/fabric_error.h
+++ b/common/cpp/include/asapo_fabric/fabric_error.h
@@ -5,13 +5,14 @@ namespace asapo {
 namespace fabric {
 enum class FabricErrorType {
     kNotSupported,
-    kOutdatedLibrary,
+    kLibraryNotFound,
+    kLibraryCompatibilityError,
+    kLibraryOutdated,
     kInternalError, // An error that was produced by LibFabric
     kInternalOperationCanceled, // An error that was produced by LibFabric
     kInternalConnectionError, // This might occur when the connection is unexpectedly closed
     kNoDeviceFound,
     kClientNotInitialized,
-    kTimeout,
     kConnectionRefused,
 };
 
@@ -23,8 +24,14 @@ namespace FabricErrorTemplates {
 auto const kNotSupportedOnBuildError = FabricErrorTemplate {
     "This build of ASAPO does not support LibFabric", FabricErrorType::kNotSupported
 };
+auto const kLibraryNotFoundError = FabricErrorTemplate {
+    "LibFabric or dependencies were not found", FabricErrorType::kLibraryNotFound
+};
+auto const kLibraryCompatibilityError = FabricErrorTemplate {
+    "LibFabric was found but a required function is missing", FabricErrorType::kLibraryCompatibilityError
+};
 auto const kOutdatedLibraryError = FabricErrorTemplate {
-    "LibFabric outdated", FabricErrorType::kOutdatedLibrary
+    "LibFabric outdated", FabricErrorType::kLibraryOutdated
 };
 auto const kInternalError = FabricErrorTemplate {
     "Internal LibFabric error", FabricErrorType::kInternalError
@@ -39,10 +46,6 @@ auto const kClientNotInitializedError = FabricErrorTemplate {
     "The client was not initialized. Add server address first!",
     FabricErrorType::kClientNotInitialized
 };
-auto const kTimeout = FabricErrorTemplate {
-    "Timeout",
-    FabricErrorType::kTimeout
-};
 auto const kConnectionRefusedError = FabricErrorTemplate {
     "Connection refused",
     FabricErrorType::kConnectionRefused
diff --git a/common/cpp/include/common/data_structs.h b/common/cpp/include/common/data_structs.h
index ad749ede8580daafc9efbbb4202241775aceeb35..0195cd61b28acf38d3428f506576510767fe8690 100644
--- a/common/cpp/include/common/data_structs.h
+++ b/common/cpp/include/common/data_structs.h
@@ -30,9 +30,9 @@ class FileInfo {
 };
 
 struct StreamInfo {
-  uint64_t last_id{0};
-  std::string Json() const;
-  bool SetFromJson(const std::string& json_string);
+    uint64_t last_id{0};
+    std::string Json() const;
+    bool SetFromJson(const std::string& json_string);
 };
 
 inline bool operator==(const FileInfo& lhs, const FileInfo& rhs) {
diff --git a/common/cpp/include/common/networking.h b/common/cpp/include/common/networking.h
index c37c5a15cf33a591543a12106b037cd124ec76fb..1f7959723e082384115f0080157a5d16238e1235 100644
--- a/common/cpp/include/common/networking.h
+++ b/common/cpp/include/common/networking.h
@@ -12,8 +12,10 @@ namespace asapo {
 
 typedef uint64_t NetworkRequestId;
 
-enum class NetworkConnectionType {
+enum class NetworkConnectionType : uint32_t {
+    kUndefined,
     kAsapoTcp, // ASAPOs TCP (Multiple connections for parallel data transfers)
+    kFabric, // Fabric connection (Primarily used for InfiniBand verbs)
 };
 
 enum Opcode : uint8_t {
@@ -48,27 +50,29 @@ const std::size_t kPosDataSetId = 1;
 const std::size_t kPosDataSetSize = 2;
 
 struct GenericRequestHeader {
+    GenericRequestHeader(const GenericRequestHeader& header) {
+        op_code = header.op_code, data_id = header.data_id, data_size = header.data_size, meta_size = header.meta_size,
+        memcpy(custom_data, header.custom_data, kNCustomParams * sizeof(uint64_t)),
+        memcpy(message, header.message, kMaxMessageSize);
+        strncpy(substream, header.substream, kMaxMessageSize);
+    }
+
+    /* Keep in mind that the message here is just strncpy'ed, you can change the message later */
     GenericRequestHeader(Opcode i_op_code = kOpcodeUnknownOp, uint64_t i_data_id = 0,
                          uint64_t i_data_size = 0, uint64_t i_meta_size = 0, const std::string& i_message = "",
                          const std::string& i_substream = ""):
         op_code{i_op_code}, data_id{i_data_id}, data_size{i_data_size}, meta_size{i_meta_size} {
-        strncpy(message, i_message.c_str(), kMaxMessageSize); // TODO must be memcpy in order to send raw MemoryDetails
+        strncpy(message, i_message.c_str(), kMaxMessageSize);
         strncpy(substream, i_substream.c_str(), kMaxMessageSize);
     }
-    GenericRequestHeader(const GenericRequestHeader& header) {
-        op_code = header.op_code, data_id = header.data_id, data_size = header.data_size, meta_size = header.meta_size,
-        memcpy(custom_data, header.custom_data, kNCustomParams * sizeof(uint64_t)),
-        strncpy(message, header.message, kMaxMessageSize); // TODO must be memcpy in order to send raw MemoryDetails
-        strncpy(substream, header.substream, kMaxMessageSize);
-    }
 
     Opcode      op_code;
     uint64_t    data_id;
     uint64_t    data_size;
     uint64_t    meta_size;
     CustomRequestData    custom_data;
-    char        message[kMaxMessageSize];
-    char        substream[kMaxMessageSize];
+    char        message[kMaxMessageSize]; /* Can also be a binary message (e.g. MemoryRegionDetails) */
+    char        substream[kMaxMessageSize]; /* Must be a string (strcpy is used) */
     std::string Json() {
         std::string s = "{\"id\":" + std::to_string(data_id) + ","
                         "\"buffer\":\"" + std::string(message) + "\"" + ","
diff --git a/common/cpp/include/unittests/MockDatabase.h b/common/cpp/include/unittests/MockDatabase.h
index a06d0446a553e5b112f71fe90517cc0116110e43..eca63353034e5ae1c8203072416d17d865af67b0 100644
--- a/common/cpp/include/unittests/MockDatabase.h
+++ b/common/cpp/include/unittests/MockDatabase.h
@@ -53,13 +53,13 @@ class MockDatabase : public Database {
 
 
     Error GetStreamInfo(const std::string& collection, StreamInfo* info) const override {
-      return Error{GetStreamInfo_t(collection, info)};
+        return Error{GetStreamInfo_t(collection, info)};
     }
 
     MOCK_CONST_METHOD2(GetStreamInfo_t, ErrorInterface * (const std::string&, StreamInfo*));
 
 
-  // stuff to test db destructor is called and avoid "uninteresting call" messages
+    // stuff to test db destructor is called and avoid "uninteresting call" messages
     MOCK_METHOD0(Die, void());
     virtual ~MockDatabase() override {
         if (check_destructor)
diff --git a/common/cpp/include/unittests/MockFabric.h b/common/cpp/include/unittests/MockFabric.h
index f433814860e1eaaa1f566a3413242db0d9762a3b..9e393161c38a0edca4e4b78921ddff40704f9570 100644
--- a/common/cpp/include/unittests/MockFabric.h
+++ b/common/cpp/include/unittests/MockFabric.h
@@ -2,15 +2,23 @@
 #define ASAPO_MOCKFABRIC_H
 
 #include <asapo_fabric/asapo_fabric.h>
+#include <gmock/gmock.h>
 
 namespace asapo {
 namespace fabric {
 
 class MockFabricMemoryRegion : public FabricMemoryRegion {
+  public:
+    MockFabricMemoryRegion() = default;
+    ~MockFabricMemoryRegion() override {
+        Destructor();
+    }
+    MOCK_METHOD0(Destructor, void());
     MOCK_CONST_METHOD0(GetDetails, const MemoryRegionDetails * ());
 };
 
 class MockFabricContext : public FabricContext {
+  public:
     MOCK_CONST_METHOD0(GetAddress, std::string());
 
     std::unique_ptr<FabricMemoryRegion> ShareMemoryRegion(void* src, size_t size, Error* error) override {
@@ -51,6 +59,7 @@ class MockFabricContext : public FabricContext {
 };
 
 class MockFabricClient : public MockFabricContext, public FabricClient {
+  public:
     FabricAddress AddServerAddress(const std::string& serverAddress, Error* error) override {
         ErrorInterface* err = nullptr;
         auto data = AddServerAddress_t(serverAddress, &err);
@@ -58,9 +67,34 @@ class MockFabricClient : public MockFabricContext, public FabricClient {
         return data;
     }
     MOCK_METHOD2(AddServerAddress_t, FabricAddress (const std::string& serverAddress, ErrorInterface** err));
+  public: // Link to FabricContext
+    std::string GetAddress() const override {
+        return MockFabricContext::GetAddress();
+    }
+
+    std::unique_ptr<FabricMemoryRegion> ShareMemoryRegion(void* src, size_t size, Error* error) override {
+        return MockFabricContext::ShareMemoryRegion(src, size, error);
+    }
+
+    void Send(FabricAddress dstAddress, FabricMessageId messageId,
+              const void* src, size_t size, Error* error) override {
+        MockFabricContext::Send(dstAddress, messageId, src, size, error);
+    }
+
+    void Recv(FabricAddress srcAddress, FabricMessageId messageId,
+              void* dst, size_t size, Error* error) override {
+        MockFabricContext::Recv(srcAddress, messageId, dst, size, error);
+    }
+
+    void RdmaWrite(FabricAddress dstAddress,
+                   const MemoryRegionDetails* details, const void* buffer, size_t size,
+                   Error* error) override {
+        MockFabricContext::RdmaWrite(dstAddress, details, buffer, size, error);
+    }
 };
 
 class MockFabricServer : public MockFabricContext, public FabricServer {
+  public:
     void RecvAny(FabricAddress* srcAddress, FabricMessageId* messageId, void* dst, size_t size, Error* error) override {
         ErrorInterface* err = nullptr;
         RecvAny_t(srcAddress, messageId, dst, size, &err);
@@ -68,6 +102,30 @@ class MockFabricServer : public MockFabricContext, public FabricServer {
     }
     MOCK_METHOD5(RecvAny_t, void(FabricAddress* srcAddress, FabricMessageId* messageId,
                                  void* dst, size_t size, ErrorInterface** err));
+  public: // Link to FabricContext
+    std::string GetAddress() const override {
+        return MockFabricContext::GetAddress();
+    }
+
+    std::unique_ptr<FabricMemoryRegion> ShareMemoryRegion(void* src, size_t size, Error* error) override {
+        return MockFabricContext::ShareMemoryRegion(src, size, error);
+    }
+
+    void Send(FabricAddress dstAddress, FabricMessageId messageId,
+              const void* src, size_t size, Error* error) override {
+        MockFabricContext::Send(dstAddress, messageId, src, size, error);
+    }
+
+    void Recv(FabricAddress srcAddress, FabricMessageId messageId,
+              void* dst, size_t size, Error* error) override {
+        MockFabricContext::Recv(srcAddress, messageId, dst, size, error);
+    }
+
+    void RdmaWrite(FabricAddress dstAddress,
+                   const MemoryRegionDetails* details, const void* buffer, size_t size,
+                   Error* error) override {
+        MockFabricContext::RdmaWrite(dstAddress, details, buffer, size, error);
+    }
 };
 
 class MockFabricFactory : public FabricFactory {
diff --git a/common/cpp/include/unittests/MockHttpClient.h b/common/cpp/include/unittests/MockHttpClient.h
index 8399c03a4b5a5d43909f00f43e74957f421c7c8f..572ecd08858cd58e01049e9d857a758e1fcfea22 100644
--- a/common/cpp/include/unittests/MockHttpClient.h
+++ b/common/cpp/include/unittests/MockHttpClient.h
@@ -19,7 +19,7 @@ class MockHttpClient : public HttpClient {
     std::string Post(const std::string& uri, const std::string& cookie, const std::string& data, HttpCode* code,
                      Error* err) const noexcept override {
         ErrorInterface* error = nullptr;
-        auto response = Post_t(uri, cookie,data, code, &error);
+        auto response = Post_t(uri, cookie, data, code, &error);
         err->reset(error);
         return response;
     }
@@ -40,7 +40,8 @@ class MockHttpClient : public HttpClient {
     MOCK_CONST_METHOD3(Get_t,
                        std::string(const std::string& uri, HttpCode* code, ErrorInterface** err));
     MOCK_CONST_METHOD5(Post_t,
-                       std::string(const std::string& uri,const std::string& cookie, const std::string& data, HttpCode* code, ErrorInterface** err));
+                       std::string(const std::string& uri, const std::string& cookie, const std::string& data, HttpCode* code,
+                                   ErrorInterface** err));
     MOCK_CONST_METHOD6(PostReturnArray_t,
                        ErrorInterface * (const std::string& uri, const std::string& cookie, const std::string& input_data,
                                          FileData* ouput_data, uint64_t output_data_size, HttpCode* code));
diff --git a/common/cpp/include/unittests/MockIO.h b/common/cpp/include/unittests/MockIO.h
index 3eb3e0c8b2d59c45b8a55871dcdbb8232da1cfe4..880f1b054eb4bcc6d72450b894793433aec6afc9 100644
--- a/common/cpp/include/unittests/MockIO.h
+++ b/common/cpp/include/unittests/MockIO.h
@@ -145,9 +145,11 @@ class MockIO : public IO {
     }
     MOCK_CONST_METHOD4(Send_t, size_t(SocketDescriptor socket_fd, const void* buf, size_t length, ErrorInterface** err));
 
-
-    MOCK_CONST_METHOD1(SplitAddressToHostnameAndPort,
-                       std::unique_ptr<std::tuple<std::string, uint16_t>>(const std::string& address));
+    std::unique_ptr<std::tuple<std::string, uint16_t>> SplitAddressToHostnameAndPort(const std::string& address) const
+    override {
+        return std::unique_ptr<std::tuple<std::string, uint16_t>>(SplitAddressToHostnameAndPort_t(address));
+    }
+    MOCK_CONST_METHOD1(SplitAddressToHostnameAndPort_t, std::tuple<std::string, uint16_t>* (const std::string& address));
 
     void Skip(SocketDescriptor socket_fd, size_t length, Error* err) const override {
         ErrorInterface* error = nullptr;
diff --git a/common/cpp/src/asapo_fabric/CMakeLists.txt b/common/cpp/src/asapo_fabric/CMakeLists.txt
index fefef76399ebc6e0d9cc04e2335d7fb8e84663de..89e0591ee0cdc34f7784e8c7564d6b84e25f921c 100644
--- a/common/cpp/src/asapo_fabric/CMakeLists.txt
+++ b/common/cpp/src/asapo_fabric/CMakeLists.txt
@@ -2,7 +2,10 @@ set(TARGET_NAME asapo-fabric)
 
 include_directories(include)
 
-set(SOURCE_FILES asapo_fabric.cpp)
+set(SOURCE_FILES
+		asapo_fabric.cpp
+		fabric_factory_not_supported.cpp
+	)
 
 IF(ENABLE_LIBFABRIC)
 	set(SOURCE_FILES ${SOURCE_FILES}
@@ -19,10 +22,6 @@ IF(ENABLE_LIBFABRIC)
             server/task/fabric_recv_any_task.cpp
 			server/task/fabric_handshake_accepting_task.cpp
 		)
-ELSE()
-	set(SOURCE_FILES ${SOURCE_FILES}
-			fabric_factory_not_supported.cpp
-		)
 ENDIF()
 
 ################################
@@ -31,4 +30,4 @@ ENDIF()
 
 add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io>)
 
-target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR})
+target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR} ${LIBFABRIC_INCLUDE_DIR})
diff --git a/common/cpp/src/asapo_fabric/asapo_fabric.cpp b/common/cpp/src/asapo_fabric/asapo_fabric.cpp
index 96aa4fd5215db008e4ec0a9548b8db7bba9e31a9..a74a25c93e0945e2e17f864c5385c4df4125a395 100644
--- a/common/cpp/src/asapo_fabric/asapo_fabric.cpp
+++ b/common/cpp/src/asapo_fabric/asapo_fabric.cpp
@@ -1,17 +1,52 @@
 #include <asapo_fabric/asapo_fabric.h>
+#include "fabric_factory_not_supported.h"
 
 #ifdef LIBFABRIC_ENABLED
+#include <dlfcn.h>
+#include <mutex>
 #include "fabric_factory_impl.h"
-#else
-#include "fabric_factory_not_supported.h"
+#include "fabric_function_map.h"
 #endif
 
 using namespace asapo::fabric;
 
 std::unique_ptr<FabricFactory> asapo::fabric::GenerateDefaultFabricFactory() {
 #ifdef LIBFABRIC_ENABLED
-    return std::unique_ptr<FabricFactory>(new FabricFactoryImpl());
-#else
-    return std::unique_ptr<FabricFactory>(new FabricFactoryNotSupported());
+    static std::mutex lock;
+    std::unique_lock<std::mutex> local_lock (lock);
+
+    if (gffm().is_init_) {
+        return std::unique_ptr<FabricFactory>(new FabricFactoryImpl());
+    }
+
+    void* handle = dlopen("libfabric.so.1", RTLD_LAZY);
+    if (handle) {
+#define ADD_FABRIC_CALL(fName) do { if (!(*((void**)&gffm().fName) = dlsym(handle, #fName))) goto functionNotFoundError; } while(0)
+        ADD_FABRIC_CALL(fi_version);
+        ADD_FABRIC_CALL(fi_dupinfo);
+        ADD_FABRIC_CALL(fi_freeinfo);
+        ADD_FABRIC_CALL(fi_getinfo);
+        ADD_FABRIC_CALL(fi_fabric);
+        ADD_FABRIC_CALL(fi_strerror);
+#undef ADD_FABRIC_CALL
+
+        gffm().is_init_ = true;
+
+        return std::unique_ptr<FabricFactory>(new FabricFactoryImpl());
+functionNotFoundError:
+        dlclose(handle);
+        return std::unique_ptr<FabricFactory>(new FabricFactoryNotSupported(FabricErrorTemplates::kLibraryCompatibilityError));
+    } else {
+        return std::unique_ptr<FabricFactory>(new FabricFactoryNotSupported(FabricErrorTemplates::kLibraryNotFoundError));
+    }
 #endif
+    return std::unique_ptr<FabricFactory>(new FabricFactoryNotSupported(FabricErrorTemplates::kNotSupportedOnBuildError));
 }
+
+#ifdef LIBFABRIC_ENABLED
+// Global fabric function map
+extern FabricFunctionMap& gffm() {
+    static FabricFunctionMap gffm_ {};
+    return gffm_;
+}
+#endif
diff --git a/common/cpp/src/asapo_fabric/common/fabric_context_impl.cpp b/common/cpp/src/asapo_fabric/common/fabric_context_impl.cpp
index b4cac75c1e61600bd5013fd0c6c2c1bf19ff132f..3d46fe0425591868257d9216dc47ff0c6e3f2d15 100644
--- a/common/cpp/src/asapo_fabric/common/fabric_context_impl.cpp
+++ b/common/cpp/src/asapo_fabric/common/fabric_context_impl.cpp
@@ -7,6 +7,7 @@
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <rdma/fi_tagged.h>
+#include <iostream>
 #include "fabric_context_impl.h"
 #include "fabric_memory_region_impl.h"
 
@@ -31,8 +32,7 @@ std::string __PRETTY_FUNCTION_TO_NAMESPACE__(const std::string& prettyFunction)
         }                                                       \
     } while(0) // Enforce ';'
 
-// TODO: It is super important that version 1.10 is installed, but since its not released yet we go with 1.9
-const uint32_t FabricContextImpl::kMinExpectedLibFabricVersion = FI_VERSION(1, 9);
+const uint32_t FabricContextImpl::kMinExpectedLibFabricVersion = FI_VERSION(1, 11);
 
 FabricContextImpl::FabricContextImpl() : io__{ GenerateDefaultIO() }, alive_check_response_task_(this) {
 }
@@ -56,7 +56,7 @@ FabricContextImpl::~FabricContextImpl() {
         fi_close(&fabric_->fid);
 
     if (fabric_info_)
-        fi_freeinfo(fabric_info_);
+        gffm().fi_freeinfo(fabric_info_);
 }
 
 void FabricContextImpl::InitCommon(const std::string& networkIpHint, uint16_t serverListenPort, Error* error) {
@@ -65,9 +65,16 @@ void FabricContextImpl::InitCommon(const std::string& networkIpHint, uint16_t se
     // The server must know where the packages are coming from, FI_SOURCE allows this.
     uint64_t additionalFlags = isServer ? FI_SOURCE : 0;
 
-    fi_info* hints = fi_allocinfo();
-    if (networkIpHint == "127.0.0.1") {
+    fi_info* hints = gffm().fi_dupinfo(nullptr);
+
+#ifdef LIBFARBIC_ALLOW_LOCALHOST
+    constexpr bool allowLocalhost = true;
+#else
+    constexpr bool allowLocalhost = false;
+#endif
+    if (networkIpHint == "127.0.0.1" && allowLocalhost) {
         // sockets mode
+        printf("WARN: Using sockets to emulate RDMA, this should only be used for tests.\n");
         hints->fabric_attr->prov_name = strdup("sockets");
         hotfix_using_sockets_ = true;
     } else {
@@ -89,7 +96,7 @@ void FabricContextImpl::InitCommon(const std::string& networkIpHint, uint16_t se
     hints->domain_attr->mr_mode = FI_MR_ALLOCATED | FI_MR_VIRT_ADDR | FI_MR_PROV_KEY;// | FI_MR_LOCAL;
     hints->addr_format = FI_SOCKADDR_IN;
 
-    int ret = fi_getinfo(
+    int ret = gffm().fi_getinfo(
                   kMinExpectedLibFabricVersion, networkIpHint.c_str(), isServer ? std::to_string(serverListenPort).c_str() : nullptr,
                   additionalFlags, hints, &fabric_info_);
 
@@ -99,7 +106,7 @@ void FabricContextImpl::InitCommon(const std::string& networkIpHint, uint16_t se
         } else {
             *error = ErrorFromFabricInternal("fi_getinfo", ret);
         }
-        fi_freeinfo(hints);
+        gffm().fi_freeinfo(hints);
         return;
     }
     // fprintf(stderr, fi_tostr(fabric_info_, FI_TYPE_INFO)); // Print the found fabric details
@@ -112,9 +119,9 @@ void FabricContextImpl::InitCommon(const std::string& networkIpHint, uint16_t se
     // fabric_info_->rx_attr->total_buffered_recv = 0;
     // If something strange is happening with receive requests, we should set this to 0.
 
-    fi_freeinfo(hints);
+    gffm().fi_freeinfo(hints);
 
-    FI_OK(fi_fabric(fabric_info_->fabric_attr, &fabric_, nullptr));
+    FI_OK(gffm().fi_fabric(fabric_info_->fabric_attr, &fabric_, nullptr));
     FI_OK(fi_domain(fabric_, fabric_info_, &domain_, nullptr));
 
     fi_av_attr av_attr{};
@@ -281,16 +288,19 @@ bool FabricContextImpl::TargetIsAliveCheck(FabricAddress address) {
 
 void FabricContextImpl::InternalWait(FabricAddress targetAddress, FabricWaitableTask* task, Error* error) {
 
+    // Try to fail fast when no target is set (used by e.g. RecvAny)
+    auto timeoutMs = targetAddress == FI_ASAPO_ADDR_NO_ALIVE_CHECK ? requestFastTimeoutMs_ : requestTimeoutMs_;
+
     // Check if we simply can wait for our task
-    task->Wait(requestTimeoutMs_, error);
+    task->Wait(timeoutMs, error);
 
-    if (*error == FabricErrorTemplates::kTimeout) {
+    if (*error == IOErrorTemplates::kTimeout) {
         if (targetAddress == FI_ASAPO_ADDR_NO_ALIVE_CHECK) {
             CancelTask(task, error);
             // We expect the task to fail with 'Operation canceled'
             if (*error == FabricErrorTemplates::kInternalOperationCanceledError) {
                 // Switch it to a timeout so its more clearly what happened
-                *error = FabricErrorTemplates::kTimeout.Generate();
+                *error = IOErrorTemplates::kTimeout.Generate();
             }
         } else {
             InternalWaitWithAliveCheck(targetAddress, task, error);
@@ -301,9 +311,8 @@ void FabricContextImpl::InternalWait(FabricAddress targetAddress, FabricWaitable
 void FabricContextImpl::InternalWaitWithAliveCheck(FabricAddress targetAddress, FabricWaitableTask* task,
         Error* error) {// Handle advanced alive check
     bool aliveCheckFailed = false;
-    for (uint32_t i = 0; i < maxTimeoutRetires_ && *error == FabricErrorTemplates::kTimeout; i++) {
+    for (uint32_t i = 0; i < maxTimeoutRetires_ && *error == IOErrorTemplates::kTimeout; i++) {
         *error = nullptr;
-        printf("HandleFiCommandAndWait - Tries: %d\n", i);
         if (!TargetIsAliveCheck(targetAddress)) {
             aliveCheckFailed = true;
             break;
@@ -316,12 +325,12 @@ void FabricContextImpl::InternalWaitWithAliveCheck(FabricAddress targetAddress,
     if (aliveCheckFailed) {
         *error = FabricErrorTemplates::kInternalConnectionError.Generate();
     } else if(*error == FabricErrorTemplates::kInternalOperationCanceledError) {
-        *error = FabricErrorTemplates::kTimeout.Generate();
+        *error = IOErrorTemplates::kTimeout.Generate();
     }
 }
 
 void FabricContextImpl::CancelTask(FabricWaitableTask* task, Error* error) {
     *error = nullptr;
     fi_cancel(&endpoint_->fid, task);
-    task->Wait(0, error); // You can probably expect a kInternalOperationCanceledError
+    task->Wait(taskCancelTimeout_, error); // You can probably expect a kInternalOperationCanceledError
 }
diff --git a/common/cpp/src/asapo_fabric/common/fabric_context_impl.h b/common/cpp/src/asapo_fabric/common/fabric_context_impl.h
index 8d51c4cb18a57f62509faff4a19d9538de508ed3..c4c66a1b7319628b8cddc8ac6e7befcd807e37f8 100644
--- a/common/cpp/src/asapo_fabric/common/fabric_context_impl.h
+++ b/common/cpp/src/asapo_fabric/common/fabric_context_impl.h
@@ -10,6 +10,7 @@
 #include "task/fabric_waitable_task.h"
 #include "../fabric_internal_error.h"
 #include "task/fabric_alive_check_response_task.h"
+#include "../fabric_function_map.h"
 
 namespace asapo {
 namespace fabric {
@@ -60,6 +61,8 @@ class FabricContextImpl : public FabricContext {
 
     uint64_t requestEnqueueTimeoutMs_ = 10000; // 10 sec for queuing a task
     uint64_t requestTimeoutMs_ = 20000; // 20 sec to complete a task, otherwise a ping will be send
+    uint64_t requestFastTimeoutMs_ = 7000; // Tasks that don't use the keepalive check will try to fail fast
+    uint64_t taskCancelTimeout_ = 5000; // The time it takes to timeout a cancel request
     uint32_t maxTimeoutRetires_ = 5; // Timeout retires, if one of them fails, the task will fail with a timeout
 
     std::unique_ptr<std::thread> completion_thread_;
@@ -141,7 +144,7 @@ class FabricContextImpl : public FabricContext {
             // Success
             break;
         case FI_EAGAIN: // We felt trough our own timeout loop
-            *error = FabricErrorTemplates::kTimeout.Generate();
+            *error = IOErrorTemplates::kTimeout.Generate();
             break;
         case FI_ENOENT:
             *error = FabricErrorTemplates::kConnectionRefusedError.Generate();
diff --git a/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.cpp b/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.cpp
index 75b18f1c440b5fc7afe5c868ff6789d1e9a5d088..53c48236f25b24b2c9d5ee74cff3cd37780accfd 100644
--- a/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.cpp
+++ b/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.cpp
@@ -49,6 +49,6 @@ void FabricSelfRequeuingTask::AfterCompletion() {
     }
 }
 
-FabricContextImpl* FabricSelfRequeuingTask::ParentContext() {
+FabricContextImpl* FabricSelfRequeuingTask::ParentContext() const {
     return parent_context_;
 }
diff --git a/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.h b/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.h
index 905b6f1dbb00a9669722711d898c70d970702424..0c23c3e447616d881503a3a3f4fa9e54c226a2a2 100644
--- a/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.h
+++ b/common/cpp/src/asapo_fabric/common/task/fabric_self_requeuing_task.h
@@ -25,7 +25,7 @@ class FabricSelfRequeuingTask : public FabricTask {
     void HandleCompletion(const fi_cq_tagged_entry* entry, FabricAddress source) final;
     void HandleErrorCompletion(const fi_cq_err_entry* errEntry) final;
   protected:
-    FabricContextImpl* ParentContext();
+    FabricContextImpl* ParentContext() const;
 
     virtual void RequeueSelf() = 0;
     virtual void OnCompletion(const fi_cq_tagged_entry* entry, FabricAddress source) = 0;
diff --git a/common/cpp/src/asapo_fabric/common/task/fabric_waitable_task.cpp b/common/cpp/src/asapo_fabric/common/task/fabric_waitable_task.cpp
index 47efa2fe8f558d934cbdf7969d44ad40710548fb..e4a13b07f910e7586410ba6fc557f73e977a3618 100644
--- a/common/cpp/src/asapo_fabric/common/task/fabric_waitable_task.cpp
+++ b/common/cpp/src/asapo_fabric/common/task/fabric_waitable_task.cpp
@@ -1,3 +1,4 @@
+#include <common/io_error.h>
 #include "fabric_waitable_task.h"
 #include "../../fabric_internal_error.h"
 
@@ -21,7 +22,7 @@ void FabricWaitableTask::HandleErrorCompletion(const fi_cq_err_entry* errEntry)
 void FabricWaitableTask::Wait(uint32_t sleepInMs, Error* error) {
     if (sleepInMs) {
         if (future_.wait_for(std::chrono::milliseconds(sleepInMs)) == std::future_status::timeout) {
-            *error = FabricErrorTemplates::kTimeout.Generate();
+            *error = IOErrorTemplates::kTimeout.Generate();
             return;
         }
     } else {
diff --git a/common/cpp/src/asapo_fabric/fabric_factory_impl.cpp b/common/cpp/src/asapo_fabric/fabric_factory_impl.cpp
index 3ae10ed3bb131a3cefb198341dd1d11f52fe6ab1..d2c2be821b12637117030bb809cd830a04bdf6f4 100644
--- a/common/cpp/src/asapo_fabric/fabric_factory_impl.cpp
+++ b/common/cpp/src/asapo_fabric/fabric_factory_impl.cpp
@@ -11,7 +11,7 @@ std::string fi_version_string(uint32_t version) {
 }
 
 bool FabricFactoryImpl::HasValidVersion(Error* error) const {
-    auto current_version = fi_version();
+    auto current_version = gffm().fi_version();
 
     if (FI_VERSION_LT(current_version, FabricContextImpl::kMinExpectedLibFabricVersion)) {
         std::string found_version_str = fi_version_string(current_version);
diff --git a/common/cpp/src/asapo_fabric/fabric_factory_impl.h b/common/cpp/src/asapo_fabric/fabric_factory_impl.h
index ce0ec84eeb0d4e5a61a0af811c71ba8f50846c6c..2df0f8049d96ed94f9da7415517ecc2a443aae85 100644
--- a/common/cpp/src/asapo_fabric/fabric_factory_impl.h
+++ b/common/cpp/src/asapo_fabric/fabric_factory_impl.h
@@ -1,4 +1,5 @@
 #include <asapo_fabric/asapo_fabric.h>
+#include "fabric_function_map.h"
 
 #ifndef ASAPO_FABRIC_FACTORY_IMPL_H
 #define ASAPO_FABRIC_FACTORY_IMPL_H
diff --git a/common/cpp/src/asapo_fabric/fabric_factory_not_supported.cpp b/common/cpp/src/asapo_fabric/fabric_factory_not_supported.cpp
index 09e33cd8cb71bee6c1740a6c53b97e375ac641d9..1e3be7d22aa463c9eb7c01d5e012ee73663555f0 100644
--- a/common/cpp/src/asapo_fabric/fabric_factory_not_supported.cpp
+++ b/common/cpp/src/asapo_fabric/fabric_factory_not_supported.cpp
@@ -1,16 +1,22 @@
 #include "fabric_factory_not_supported.h"
+
+#include <utility>
 #include "fabric_internal_error.h"
 
 using namespace asapo::fabric;
 
+FabricFactoryNotSupported::FabricFactoryNotSupported(FabricErrorTemplate reason) : reason_(std::move(reason)) {
+}
+
+
 std::unique_ptr<FabricServer> asapo::fabric::FabricFactoryNotSupported::CreateAndBindServer(
     const AbstractLogger* logger, const std::string& host, uint16_t port,
     Error* error) const {
-    *error = FabricErrorTemplates::kNotSupportedOnBuildError.Generate();
+    *error = reason_.Generate();
     return nullptr;
 }
 
 std::unique_ptr<FabricClient> asapo::fabric::FabricFactoryNotSupported::CreateClient(Error* error) const {
-    *error = FabricErrorTemplates::kNotSupportedOnBuildError.Generate();
+    *error = reason_.Generate();
     return nullptr;
 }
diff --git a/common/cpp/src/asapo_fabric/fabric_factory_not_supported.h b/common/cpp/src/asapo_fabric/fabric_factory_not_supported.h
index 789fe4e031eda5d096deeb300db654019d9b4400..2ca12aa63082d16170f9dd7baa2f3edd6a5bbf92 100644
--- a/common/cpp/src/asapo_fabric/fabric_factory_not_supported.h
+++ b/common/cpp/src/asapo_fabric/fabric_factory_not_supported.h
@@ -6,6 +6,11 @@
 namespace asapo {
 namespace fabric {
 class FabricFactoryNotSupported : public FabricFactory {
+  private:
+    FabricErrorTemplate reason_;
+  public:
+    explicit FabricFactoryNotSupported(FabricErrorTemplate reason);
+
     std::unique_ptr<FabricServer> CreateAndBindServer(
         const AbstractLogger* logger, const std::string& host, uint16_t port, Error* error) const override;
 
diff --git a/common/cpp/src/asapo_fabric/fabric_function_map.h b/common/cpp/src/asapo_fabric/fabric_function_map.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d5b3f4e67d3a5a281c222aa760ace93babcf594
--- /dev/null
+++ b/common/cpp/src/asapo_fabric/fabric_function_map.h
@@ -0,0 +1,20 @@
+#ifndef ASAPO_FABRIC_DYNAMIC_CALLS_H
+#define ASAPO_FABRIC_DYNAMIC_CALLS_H
+
+#include <rdma/fabric.h>
+
+struct FabricFunctionMap {
+    bool is_init_ = false;
+
+    uint32_t(*fi_version)();
+    fi_info* (*fi_dupinfo)(const fi_info* info);
+    void(*fi_freeinfo)(fi_info* info);
+    int(*fi_getinfo)(uint32_t version, const char* node, const char* service,
+                     uint64_t flags, const fi_info* hints, fi_info** info);
+    int(*fi_fabric)(fi_fabric_attr* attr, fid_fabric** fabric, void* context);
+    const char* (*fi_strerror)(int errnum);
+};
+
+FabricFunctionMap& gffm();
+
+#endif //ASAPO_FABRIC_DYNAMIC_CALLS_H
diff --git a/common/cpp/src/asapo_fabric/fabric_internal_error.cpp b/common/cpp/src/asapo_fabric/fabric_internal_error.cpp
index fb8629e09f447a836f28496a09b2dfd8f8dfeb4b..cde5ddacd517469e97451d1429e47569a5a68534 100644
--- a/common/cpp/src/asapo_fabric/fabric_internal_error.cpp
+++ b/common/cpp/src/asapo_fabric/fabric_internal_error.cpp
@@ -1,9 +1,10 @@
 #include "fabric_internal_error.h"
+#include "fabric_function_map.h"
 #include <rdma/fi_errno.h>
 #include <asapo_fabric/fabric_error.h>
 
 asapo::Error asapo::fabric::ErrorFromFabricInternal(const std::string& where, int internalStatusCode) {
-    std::string errText = where + ": " + fi_strerror(-internalStatusCode);
+    std::string errText = where + ": " + gffm().fi_strerror(-internalStatusCode);
     switch (-internalStatusCode) {
     case FI_ECANCELED:
         return FabricErrorTemplates::kInternalOperationCanceledError.Generate(errText);
diff --git a/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.cpp b/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.cpp
index 44ed14d06149e743b8c8723fdbb29362539ac364..253d778b4975f56858098943c21649bcfd51e042 100644
--- a/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.cpp
+++ b/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.cpp
@@ -10,7 +10,7 @@ FabricHandshakeAcceptingTask::FabricHandshakeAcceptingTask(FabricServerImpl* par
     : FabricSelfRequeuingTask(parentServerContext) {
 }
 
-FabricServerImpl* FabricHandshakeAcceptingTask::ServerContext() {
+FabricServerImpl* FabricHandshakeAcceptingTask::ServerContext() const {
     return dynamic_cast<FabricServerImpl*>(ParentContext());
 }
 
diff --git a/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.h b/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.h
index 74ffd3742fdc8435ae927da7f953ffde8eec3832..dedf61822b10e97cd65d1035cd7fcda9728749f4 100644
--- a/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.h
+++ b/common/cpp/src/asapo_fabric/server/task/fabric_handshake_accepting_task.h
@@ -18,7 +18,7 @@ class FabricHandshakeAcceptingTask : public FabricSelfRequeuingTask {
     explicit FabricHandshakeAcceptingTask(FabricServerImpl* server);
 
   private:
-    FabricServerImpl* ServerContext();
+    FabricServerImpl* ServerContext() const;
 
   protected: // override FabricSelfRequeuingTask
     void RequeueSelf() override;
diff --git a/common/cpp/src/data_structs/data_structs.cpp b/common/cpp/src/data_structs/data_structs.cpp
index 31b82a2bee76df3b13b7f723649d99315aab4bd4..9d43c3e66609dcd245bf789bad6be0a99b5f2362 100644
--- a/common/cpp/src/data_structs/data_structs.cpp
+++ b/common/cpp/src/data_structs/data_structs.cpp
@@ -189,7 +189,7 @@ std::string StreamInfo::Json() const {
     return s;
 }
 
-bool StreamInfo::SetFromJson(const std::string &json_string) {
+bool StreamInfo::SetFromJson(const std::string& json_string) {
     JsonStringParser parser(json_string);
     uint64_t id;
     if (parser.GetUInt64("lastId", &id)) {
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index 7399f31ee154fe6c4e01ec96101a5799704faae7..26bc13c0b8ee42c0689e292e0283e901227fc86e 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -272,7 +272,8 @@ Error MongoDBClient::InsertAsSubset(const std::string& collection, const FileInf
     return err;
 }
 
-Error MongoDBClient::GetRecordFromDb(const std::string& collection, uint64_t id, bool ignore_id_return_last, std::string* res) const {
+Error MongoDBClient::GetRecordFromDb(const std::string& collection, uint64_t id, bool ignore_id_return_last,
+                                     std::string* res) const {
     if (!connected_) {
         return DBErrorTemplates::kNotConnected.Generate();
     }
@@ -359,7 +360,7 @@ Error MongoDBClient::GetDataSetById(const std::string& collection, uint64_t set_
 
 }
 
-Error StreamInfoFromDbResponse(std::string record_str,StreamInfo* info) {
+Error StreamInfoFromDbResponse(std::string record_str, StreamInfo* info) {
     auto parser = JsonStringParser(std::move(record_str));
     Error parse_err = parser.GetUInt64("_id", &(info->last_id));
     if (parse_err) {
@@ -369,9 +370,9 @@ Error StreamInfoFromDbResponse(std::string record_str,StreamInfo* info) {
     return nullptr;
 }
 
-Error MongoDBClient::GetStreamInfo(const std::string &collection, StreamInfo* info) const {
+Error MongoDBClient::GetStreamInfo(const std::string& collection, StreamInfo* info) const {
     std::string record_str;
-    auto err = GetRecordFromDb(collection, 0,true, &record_str);
+    auto err = GetRecordFromDb(collection, 0, true, &record_str);
     if (err) {
         info->last_id = 0;
         if (err == DBErrorTemplates::kNoRecord) {
@@ -379,7 +380,7 @@ Error MongoDBClient::GetStreamInfo(const std::string &collection, StreamInfo* in
         }
         return err;
     }
-    return StreamInfoFromDbResponse(std::move(record_str),info);
+    return StreamInfoFromDbResponse(std::move(record_str), info);
 }
 
 }
diff --git a/common/cpp/src/http_client/curl_http_client.cpp b/common/cpp/src/http_client/curl_http_client.cpp
index 829c6a7f104b49c426a5871efe49237fa486b3f4..2e3211d7b1099205a1b2adf8def2a539253561eb 100644
--- a/common/cpp/src/http_client/curl_http_client.cpp
+++ b/common/cpp/src/http_client/curl_http_client.cpp
@@ -33,7 +33,7 @@ size_t curl_write( void* ptr, size_t size, size_t nmemb, void* data_container) {
         if (container->bytes_received + nbytes > container->array_size) {
             return -1;
         }
-        memcpy(container->p_array->get()+container->bytes_received, ptr, nbytes);
+        memcpy(container->p_array->get() + container->bytes_received, ptr, nbytes);
         container->bytes_received += nbytes;
         break;
     case CurlDataMode::file:
diff --git a/common/cpp/src/request/request_pool.cpp b/common/cpp/src/request/request_pool.cpp
index c1454e39e8c1998cefc309d5dc8f59a0ca6144d5..562476cb72442ef06fa6e8b7083975fce02700c0 100644
--- a/common/cpp/src/request/request_pool.cpp
+++ b/common/cpp/src/request/request_pool.cpp
@@ -14,7 +14,7 @@ RequestPool:: RequestPool(uint8_t n_threads,
 
 }
 
-Error RequestPool::AddRequest(GenericRequestPtr request,bool top_priority) {
+Error RequestPool::AddRequest(GenericRequestPtr request, bool top_priority) {
     std::unique_lock<std::mutex> lock(mutex_);
     if (top_priority) {
         request_queue_.emplace_front(std::move(request));
diff --git a/common/cpp/src/system_io/system_io.cpp b/common/cpp/src/system_io/system_io.cpp
index 29c2130170aa3a17b521caabd88dc1dde4969fee..51ac70d758e2dd537dbb7b1feac3255c7cb9c03d 100644
--- a/common/cpp/src/system_io/system_io.cpp
+++ b/common/cpp/src/system_io/system_io.cpp
@@ -5,7 +5,7 @@
 #include <cerrno>
 #include <cstring>
 #include <algorithm>
-
+#include <mutex>
 
 #if defined(__linux__) || defined (__APPLE__)
 #include <sys/socket.h>
@@ -290,8 +290,11 @@ int SystemIO::FileOpenModeToPosixFileOpenMode(int open_flags) const {
 }
 
 std::string SystemIO::ResolveHostnameToIp(const std::string& hostname, Error* err) const {
-    hostent* record = gethostbyname(hostname.c_str());
-    if (record == nullptr) {
+    static std::mutex lock;
+    std::unique_lock<std::mutex> local_lock(lock);
+
+    const hostent* record = gethostbyname(hostname.c_str()); // gethostbyname seems not to be thread safe!
+    if (record == nullptr || record->h_addr == nullptr) {
         *err = IOErrorTemplates::kUnableToResolveHostname.Generate();
         return "";
     }
@@ -312,12 +315,10 @@ std::unique_ptr<sockaddr_in> SystemIO::BuildSockaddrIn(const std::string& addres
     uint16_t port = 0;
     std::tie(host, port) = *hostname_port_tuple;
 
-// this is not thread safe call we should not resolve hostname here - we actually already have ip in address.
-// todo: remove this
-//    host = ResolveHostnameToIp(host, err);
-//    if (*err != nullptr) {
-//        return nullptr;
-//    }
+    host = ResolveHostnameToIp(host, err);
+    if (*err != nullptr) {
+        return nullptr;
+    }
 
     short family = AddressFamilyToPosixFamily(AddressFamilies::INET);
     if (family == -1) {
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index aa8d193ae60102b55081cd37c278e94bf12b0063..43137367a4300f26c0d55cb8b203559cba32ed38 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -31,6 +31,8 @@ Error GetLastErrorFromErrno() {
     switch (errno) {
     case 0:
         return nullptr;
+    case EINTR:
+        return TextError("Interrupt occurred, is a debugger attached?");
     case EBADF:
         return IOErrorTemplates::kBadFileNumber.Generate();
     case EAGAIN:
diff --git a/common/cpp/unittests/logger/test_fluentd_sink.cpp b/common/cpp/unittests/logger/test_fluentd_sink.cpp
index bb41891d592774e2a11e6c41299f67ee2638569d..53c9180d30ffd810956762493ac019c852a06d0e 100644
--- a/common/cpp/unittests/logger/test_fluentd_sink.cpp
+++ b/common/cpp/unittests/logger/test_fluentd_sink.cpp
@@ -41,7 +41,7 @@ class FluentdSinkTests : public Test {
 };
 
 TEST_F(FluentdSinkTests, SendPost) {
-    EXPECT_CALL(mock_http_client, Post_t("test_url",_, HasSubstr("hello"),  _, _));
+    EXPECT_CALL(mock_http_client, Post_t("test_url", _, HasSubstr("hello"),  _, _));
     logger->info("hello");
 }
 
diff --git a/config/nomad/receiver_fabric.nmd.in b/config/nomad/receiver_fabric.nmd.in
new file mode 100644
index 0000000000000000000000000000000000000000..0c4d2174727eb6bfca65167b4925001188601a81
--- /dev/null
+++ b/config/nomad/receiver_fabric.nmd.in
@@ -0,0 +1,47 @@
+job "receiver" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "group" {
+    count = 1
+
+    task "receiver" {
+      driver = "raw_exec"
+
+      config {
+        command = "@RECEIVER_DIR@/@RECEIVER_NAME@",
+        args =  ["${NOMAD_TASK_DIR}/receiver.json"]
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "recv" {}
+          port "recv_ds" {}
+        }
+      }
+
+      service {
+        name = "asapo-receiver"
+        port = "recv"
+        check {
+          name     = "alive"
+          type     = "tcp"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+      }
+
+      template {
+         source        = "@WORK_DIR@/receiver_fabric.json.tpl"
+         destination   = "local/receiver.json"
+         change_mode   = "signal"
+         change_signal = "SIGHUP"
+      }
+
+    }
+  }
+}
diff --git a/config/nomad/receiver.nmd.in b/config/nomad/receiver_tcp.nmd.in
similarity index 93%
rename from config/nomad/receiver.nmd.in
rename to config/nomad/receiver_tcp.nmd.in
index 282208f29ed6b52887a9628d69ab86d90d45c04b..0d563934e9741538930dd51b7f90907ece08bb59 100644
--- a/config/nomad/receiver.nmd.in
+++ b/config/nomad/receiver_tcp.nmd.in
@@ -36,7 +36,7 @@ job "receiver" {
       }
 
       template {
-         source        = "@WORK_DIR@/receiver.json.tpl"
+         source        = "@WORK_DIR@/receiver_tcp.json.tpl"
          destination   = "local/receiver.json"
          change_mode   = "signal"
          change_signal = "SIGHUP"
diff --git a/consumer/api/cpp/CMakeLists.txt b/consumer/api/cpp/CMakeLists.txt
index 0a0564fbc76958091e6e5a4cf484b8d6a0a7ef32..ae5ce7be29c02946bca3c43445319356322d7a00 100644
--- a/consumer/api/cpp/CMakeLists.txt
+++ b/consumer/api/cpp/CMakeLists.txt
@@ -4,7 +4,8 @@ set(SOURCE_FILES
         src/data_broker.cpp
         src/server_data_broker.cpp
         src/tcp_client.cpp
-        src/tcp_connection_pool.cpp)
+        src/tcp_connection_pool.cpp
+        src/fabric_consumer_client.cpp)
 
 
 ################################
@@ -13,24 +14,29 @@ set(SOURCE_FILES
 add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io>
             $<TARGET_OBJECTS:json_parser> $<TARGET_OBJECTS:data_structs> $<TARGET_OBJECTS:curl_http_client> )
 
-target_include_directories(${TARGET_NAME} PUBLIC include ${ASAPO_CXX_COMMON_INCLUDE_DIR}  ${CURL_INCLUDE_DIRS})
+target_include_directories(${TARGET_NAME} PUBLIC include ${ASAPO_CXX_COMMON_INCLUDE_DIR}  ${LIBFABRIC_INCLUDE_DIR} ${CURL_INCLUDE_DIRS})
 
 IF(CMAKE_C_COMPILER_ID STREQUAL "GNU")
     SET( CMAKE_EXE_LINKER_FLAGS  "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++")
 ENDIF()
 
 
-target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+GET_PROPERTY(ASAPO_COMMON_FABRIC_LIBRARIES GLOBAL PROPERTY ASAPO_COMMON_FABRIC_LIBRARIES)
+target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}
+        asapo-fabric ${ASAPO_COMMON_FABRIC_LIBRARIES})
 
 
 ################################
 # Testing
 ################################
-set(TEST_SOURCE_FILES unittests/test_consumer_api.cpp
-                      unittests/test_server_broker.cpp
-                      unittests/test_tcp_client.cpp
-            unittests/test_tcp_connection_pool.cpp
-        )
+set(TEST_SOURCE_FILES
+        unittests/test_consumer_api.cpp
+        unittests/test_server_broker.cpp
+        unittests/test_tcp_client.cpp
+        unittests/test_tcp_connection_pool.cpp
+        unittests/test_fabric_consumer_client.cpp
+        unittests/test_rds_error_mapper.cpp
+    )
 set(TEST_LIBRARIES "${TARGET_NAME}")
 
 
diff --git a/consumer/api/cpp/include/consumer/data_broker.h b/consumer/api/cpp/include/consumer/data_broker.h
index eb8eac957124c369462eedadd83925f2443d3583..48787f00cac2b33bfd9c7f180c4aad530d35be0a 100644
--- a/consumer/api/cpp/include/consumer/data_broker.h
+++ b/consumer/api/cpp/include/consumer/data_broker.h
@@ -7,6 +7,7 @@
 
 #include "common/data_structs.h"
 #include "common/error.h"
+#include "common/networking.h"
 
 namespace asapo {
 
@@ -32,33 +33,46 @@ class DataBroker {
     */
     virtual Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) = 0;
 
-  //! Negative acknowledge data tuple for specific group id and substream.
-  /*!
-      \param group_id - group id to use.
-      \param id - data tuple id
-      \param delay_sec - data tuple will be redelivered after delay, 0 to redeliver immediately
-      \param substream (optional) - substream
-      \return nullptr of command was successful, otherwise error.
-  */
-  virtual Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec, std::string substream = kDefaultSubstream) = 0;
+    //! Negative acknowledge data tuple for specific group id and substream.
+    /*!
+        \param group_id - group id to use.
+        \param id - data tuple id
+        \param delay_sec - data tuple will be redelivered after delay, 0 to redeliver immediately
+        \param substream (optional) - substream
+        \return nullptr if the command was successful, otherwise error.
+    */
+    virtual Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
+                                      std::string substream = kDefaultSubstream) = 0;
 
 
-  //! Get unacknowledged tuple for specific group id and substream.
-  /*!
-      \param group_id - group id to use.
-      \param substream (optional) - substream
-      \param from_id - return tuples with ids greater or equal to from (use 0 disable limit)
-      \param to_id - return tuples with ids less or equal to to (use 0 to disable limit)
-      \param in (optional) - substream
-      \param err - set to nullptr of operation succeed, error otherwise.
-      \return vector of ids, might be empty
-  */
-    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, std::string substream, uint64_t from_id, uint64_t to_id, Error* error) = 0;
+    //! Get unacknowledged tuple for specific group id and substream.
+    /*!
+        \param group_id - group id to use.
+        \param substream (optional) - substream
+        \param from_id - return tuples with ids greater or equal to from (use 0 disable limit)
+        \param to_id - return tuples with ids less or equal to to (use 0 to disable limit)
+        \param in (optional) - substream
+        \param err - set to nullptr if the operation succeeds, error otherwise.
+        \return vector of ids, might be empty
+    */
+    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, std::string substream, uint64_t from_id, uint64_t to_id,
+                                             Error* error) = 0;
     virtual IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) = 0;
 
     //! Set timeout for broker operations. Default - no timeout
     virtual void SetTimeout(uint64_t timeout_ms) = 0;
 
+    //! Will disable RDMA.
+    //! If RDMA is disabled, unavailable, or the first connection attempt fails, it will automatically fall back to TCP.
+    //! This will only have an effect if no previous connection attempt was made on this DataBroker.
+    virtual void ForceNoRdma() = 0;
+
+    //! Returns the current network connection type
+    /*!
+     * \return current network connection type. If no connection was made, the result is NetworkConnectionType::kUndefined
+     */
+    virtual NetworkConnectionType CurrentConnectionType() const = 0;
+
     //! Set list of substreams
     virtual std::vector<std::string> GetSubstreamList(Error* err) = 0;
 
@@ -141,13 +155,13 @@ class DataBroker {
     virtual Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) = 0;
     virtual Error GetById(uint64_t id, FileInfo* info, std::string group_id, std::string substream, FileData* data) = 0;
 
-  //! Receive id of last acknowledged data tuple
-  /*!
-    \param group_id - group id to use.
-    \param substream (optional) - substream
-    \param err -  will be set in case of error, nullptr otherwise.
-    \return id of the last acknowledged image, 0 if error
-  */
+    //! Receive id of last acknowledged data tuple
+    /*!
+      \param group_id - group id to use.
+      \param substream (optional) - substream
+      \param err -  will be set in case of error, nullptr otherwise.
+      \return id of the last acknowledged image, 0 if error
+    */
     virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) = 0;
     virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) = 0;
 
@@ -170,25 +184,23 @@ class DataBroker {
     virtual FileInfos QueryImages(std::string query, Error* err) = 0;
     virtual FileInfos QueryImages(std::string query, std::string substream, Error* err) = 0;
 
-  //! Configure resending nonacknowledged data
-  /*!
-    \param resend -  where to resend
-    \param delay_sec - how many seconds to wait before resending
-    \param resend_attempts - how many resend attempts to make
-  */
-  virtual void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) = 0;
+    //! Configure resending nonacknowledged data
+    /*!
+      \param resend -  where to resend
+      \param delay_sec - how many seconds to wait before resending
+      \param resend_attempts - how many resend attempts to make
+    */
+    virtual void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) = 0;
 
 
     virtual ~DataBroker() = default; // needed for unique_ptr to delete itself
 };
 
-/*! A class to create a data broker instance. The class's only function Create is used for this*/
+/*! A class to create a data broker instance. The class's only function Create is used for this */
 class DataBrokerFactory {
   public:
     static std::unique_ptr<DataBroker> CreateServerBroker(std::string server_name, std::string source_path,
-            bool has_filesystem,
-            SourceCredentials source,
-            Error* error) noexcept;
+            bool has_filesystem, SourceCredentials source, Error* error) noexcept;
 
 };
 
diff --git a/consumer/api/cpp/src/data_broker.cpp b/consumer/api/cpp/src/data_broker.cpp
index 1f3b1db63f68a21992a76d3283271c5c96e13007..7ab6f2c04d49b218a940654c2ab811970fa79b67 100644
--- a/consumer/api/cpp/src/data_broker.cpp
+++ b/consumer/api/cpp/src/data_broker.cpp
@@ -1,3 +1,4 @@
+#include <common/networking.h>
 #include "consumer/data_broker.h"
 #include "server_data_broker.h"
 
@@ -26,8 +27,7 @@ std::unique_ptr<DataBroker> Create(const std::string& source_name,
 }
 
 std::unique_ptr<DataBroker> DataBrokerFactory::CreateServerBroker(std::string server_name, std::string source_path,
-        bool has_filesystem, SourceCredentials source,
-        Error* error) noexcept {
+        bool has_filesystem, SourceCredentials source, Error* error) noexcept {
     return Create<ServerDataBroker>(std::move(server_name), error, std::move(source_path), has_filesystem,
                                     std::move(source));
 }
diff --git a/consumer/api/cpp/src/fabric_consumer_client.cpp b/consumer/api/cpp/src/fabric_consumer_client.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..14c75775e2650cc583bf2d79b2afd4cf1924bc30
--- /dev/null
+++ b/consumer/api/cpp/src/fabric_consumer_client.cpp
@@ -0,0 +1,82 @@
+#include <common/networking.h>
+#include <io/io_factory.h>
+#include <iostream>
+#include "fabric_consumer_client.h"
+#include "rds_response_error.h"
+
+using namespace asapo;
+
+FabricConsumerClient::FabricConsumerClient(): factory__(fabric::GenerateDefaultFabricFactory()) {
+
+}
+
+Error FabricConsumerClient::GetData(const FileInfo* info, FileData* data) {
+    Error err;
+    if (!client__) {
+        client__ = factory__->CreateClient(&err);
+        if (err) {
+            return err;
+        }
+    }
+
+    fabric::FabricAddress address = GetAddressOrConnect(info, &err);
+    if (err) {
+        return err;
+    }
+
+    FileData tempData{new uint8_t[info->size]};
+
+    /* MemoryRegion will be released when out of scope */
+    auto mr = client__->ShareMemoryRegion(tempData.get(), info->size, &err);
+    if (err) {
+        return err;
+    }
+
+    GenericRequestHeader request_header{kOpcodeGetBufferData, info->buf_id, info->size};
+    memcpy(request_header.message, mr->GetDetails(), sizeof(fabric::MemoryRegionDetails));
+    GenericNetworkResponse response{};
+
+    PerformNetworkTransfer(address, &request_header, &response, &err);
+    if (err) {
+        return err;
+    }
+
+    if (response.error_code) {
+        return ConvertRdsResponseToError(response.error_code);
+    }
+
+    data->swap(tempData);
+
+    return nullptr;
+}
+
+fabric::FabricAddress FabricConsumerClient::GetAddressOrConnect(const FileInfo* info, Error* error) {
+    std::lock_guard<std::mutex> lock(mutex_);
+    auto tableEntry = known_addresses_.find(info->source);
+
+    /* Check if we need to build up a connection */
+    if (tableEntry == known_addresses_.end()) {
+        fabric::FabricAddress address = client__->AddServerAddress(info->source, error);
+        if (*error) {
+            return -1;
+        }
+        return known_addresses_[info->source] = address;
+    } else {
+        return tableEntry->second;
+    }
+}
+
+void FabricConsumerClient::PerformNetworkTransfer(fabric::FabricAddress address,
+                                                  const GenericRequestHeader* request_header,
+                                                  GenericNetworkResponse* response, Error* err) {
+    auto currentMessageId = global_message_id_++;
+    client__->Send(address, currentMessageId, request_header, sizeof(*request_header), err);
+    if (*err) {
+        return;
+    }
+
+    /* The server is _now_ sending us the data over RDMA, and then sending us a confirmation */
+
+    client__->Recv(address, currentMessageId, response, sizeof(*response), err);
+    // if (*err) ...
+}
diff --git a/consumer/api/cpp/src/fabric_consumer_client.h b/consumer/api/cpp/src/fabric_consumer_client.h
new file mode 100644
index 0000000000000000000000000000000000000000..a852b1ba33c93a0756d1f2e343008ef73f7d1616
--- /dev/null
+++ b/consumer/api/cpp/src/fabric_consumer_client.h
@@ -0,0 +1,37 @@
+#ifndef ASAPO_CONSUMER_FABRIC_CLIENT_H
+#define ASAPO_CONSUMER_FABRIC_CLIENT_H
+
+#include <map>
+#include <io/io.h>
+#include <atomic>
+#include <mutex>
+#include "asapo_fabric/asapo_fabric.h"
+#include "net_client.h"
+#include "common/networking.h"
+
+namespace asapo {
+
+class FabricConsumerClient : public NetClient {
+  public:
+    explicit FabricConsumerClient();
+
+    // modified in testings to mock system calls, otherwise do not touch
+    std::unique_ptr<asapo::fabric::FabricFactory> factory__;
+    std::unique_ptr<fabric::FabricClient> client__;
+
+  private:
+    std::mutex mutex_;
+    std::map<std::string /* server_uri */, fabric::FabricAddress> known_addresses_;
+    std::atomic<fabric::FabricMessageId> global_message_id_{0};
+
+  public:
+    Error GetData(const FileInfo* info, FileData* data) override;
+  private:
+    fabric::FabricAddress GetAddressOrConnect(const FileInfo* info, Error* error);
+
+    void PerformNetworkTransfer(fabric::FabricAddress address, const GenericRequestHeader* request_header,
+                                GenericNetworkResponse* response, Error* err);
+};
+}
+
+#endif //ASAPO_CONSUMER_FABRIC_CLIENT_H
diff --git a/consumer/api/cpp/src/net_client.h b/consumer/api/cpp/src/net_client.h
index ee2e92cd69c2546ca39e37cd537803a86ae25c54..df67f31cd777308a960b45ae12d22e93cde16ffa 100644
--- a/consumer/api/cpp/src/net_client.h
+++ b/consumer/api/cpp/src/net_client.h
@@ -8,7 +8,7 @@ namespace asapo {
 
 class NetClient {
   public:
-    virtual Error GetData(const FileInfo* info, FileData* data) const noexcept = 0;
+    virtual Error GetData(const FileInfo* info, FileData* data) = 0;
     virtual ~NetClient() = default;
 
 };
diff --git a/consumer/api/cpp/src/rds_response_error.h b/consumer/api/cpp/src/rds_response_error.h
new file mode 100644
index 0000000000000000000000000000000000000000..36b417c6931d79aaae12e600ebc12a2422067a65
--- /dev/null
+++ b/consumer/api/cpp/src/rds_response_error.h
@@ -0,0 +1,59 @@
+#ifndef ASAPO_RDS_RESPONSE_ERROR_H
+#define ASAPO_RDS_RESPONSE_ERROR_H
+
+#include <common/networking.h>
+#include <string>
+
+namespace asapo {
+
+// Error types for responses coming from the receiver data server (RDS).
+using RdsResponseError = ServiceError<NetworkErrorCode, ErrorType::kFabricError>;
+using RdsResponseErrorTemplate = ServiceErrorTemplate<NetworkErrorCode, ErrorType::kFabricError>;
+
+// One error template per NetworkErrorCode the RDS can report back.
+namespace RdsResponseErrorTemplates {
+auto const kNetErrorReauthorize = RdsResponseErrorTemplate {
+    "RDS response Reauthorize", NetworkErrorCode::kNetErrorReauthorize
+};
+auto const kNetErrorWarning = RdsResponseErrorTemplate {
+    "RDS response Warning", NetworkErrorCode::kNetErrorWarning
+};
+auto const kNetErrorWrongRequest = RdsResponseErrorTemplate {
+    "RDS response WrongRequest", NetworkErrorCode::kNetErrorWrongRequest
+};
+auto const kNetErrorNoData = RdsResponseErrorTemplate {
+    "RDS response NoData", NetworkErrorCode::kNetErrorNoData
+};
+auto const kNetAuthorizationError = RdsResponseErrorTemplate {
+    "RDS response AuthorizationError", NetworkErrorCode::kNetAuthorizationError
+};
+auto const kNetErrorInternalServerError = RdsResponseErrorTemplate {
+    "RDS response InternalServerError", NetworkErrorCode::kNetErrorInternalServerError
+};
+}
+
+// Maps the NetworkErrorCode of an RDS response to an asapo Error.
+// Returns nullptr (no error) for kNetErrorNoError.
+inline Error ConvertRdsResponseToError(NetworkErrorCode error_code) {
+    switch (error_code) {
+    case kNetErrorNoError:
+        return nullptr;
+    case kNetErrorReauthorize:
+        return RdsResponseErrorTemplates::kNetErrorReauthorize.Generate();
+    case kNetErrorWarning:
+        return RdsResponseErrorTemplates::kNetErrorWarning.Generate();
+    case kNetErrorWrongRequest:
+        return RdsResponseErrorTemplates::kNetErrorWrongRequest.Generate();
+    case kNetErrorNoData:
+        return RdsResponseErrorTemplates::kNetErrorNoData.Generate();
+    case kNetAuthorizationError:
+        return RdsResponseErrorTemplates::kNetAuthorizationError.Generate();
+    case kNetErrorInternalServerError:
+        return RdsResponseErrorTemplates::kNetErrorInternalServerError.Generate();
+    default:
+        return TextError("Unknown RDS response code " + std::to_string(error_code));
+    }
+}
+}
+
+#endif //ASAPO_RDS_RESPONSE_ERROR_H
diff --git a/consumer/api/cpp/src/server_data_broker.cpp b/consumer/api/cpp/src/server_data_broker.cpp
index b8d9679b758ff54399152560bdbbbb07b53a6401..3fccd4f6eab9711bf4e7a923ff0f14ad443e9d0e 100644
--- a/consumer/api/cpp/src/server_data_broker.cpp
+++ b/consumer/api/cpp/src/server_data_broker.cpp
@@ -9,6 +9,8 @@
 #include "tcp_client.h"
 
 #include "asapo_consumer.h"
+#include "fabric_consumer_client.h"
+#include "rds_response_error.h"
 
 using std::chrono::system_clock;
 
@@ -17,17 +19,17 @@ namespace asapo {
 const std::string ServerDataBroker::kBrokerServiceName = "asapo-broker";
 const std::string ServerDataBroker::kFileTransferServiceName = "asapo-file-transfer";
 
-Error GetNoDataResponseFromJson(const std::string &json_string, ConsumerErrorData* data) {
+Error GetNoDataResponseFromJson(const std::string& json_string, ConsumerErrorData* data) {
     JsonStringParser parser(json_string);
     Error err;
     if ((err = parser.GetUInt64("id", &data->id)) || (err = parser.GetUInt64("id_max", &data->id_max))
-        || (err = parser.GetString("next_substream", &data->next_substream))) {
+            || (err = parser.GetString("next_substream", &data->next_substream))) {
         return err;
     }
     return nullptr;
 }
 
-Error ConsumerErrorFromNoDataResponse(const std::string &response) {
+Error ConsumerErrorFromNoDataResponse(const std::string& response) {
     if (response.find("get_record_by_id") != std::string::npos) {
         ConsumerErrorData data;
         auto parse_error = GetNoDataResponseFromJson(response, &data);
@@ -42,34 +44,41 @@ Error ConsumerErrorFromNoDataResponse(const std::string &response) {
             err = ConsumerErrorTemplates::kNoData.Generate();
         }
         ConsumerErrorData* error_data = new ConsumerErrorData{data};
-        err->SetCustomData(std::unique_ptr<CustomErrorData>{error_data});
+        err->SetCustomData(std::unique_ptr<CustomErrorData> {error_data});
         return err;
     }
     return ConsumerErrorTemplates::kNoData.Generate();
 }
 
-Error ConsumerErrorFromHttpCode(const RequestOutput* response, const HttpCode &code) {
+Error ConsumerErrorFromHttpCode(const RequestOutput* response, const HttpCode& code) {
     switch (code) {
-        case HttpCode::OK:return nullptr;
-        case HttpCode::BadRequest:return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string());
-        case HttpCode::Unauthorized:return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string());
-        case HttpCode::InternalServerError:return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string());
-        case HttpCode::NotFound:return ConsumerErrorTemplates::kUnavailableService.Generate(response->to_string());
-        case HttpCode::Conflict:return ConsumerErrorFromNoDataResponse(response->to_string());
-        default:return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string());
-    }
-}
-Error ConsumerErrorFromServerError(const Error &server_err) {
+    case HttpCode::OK:
+        return nullptr;
+    case HttpCode::BadRequest:
+        return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string());
+    case HttpCode::Unauthorized:
+        return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string());
+    case HttpCode::InternalServerError:
+        return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string());
+    case HttpCode::NotFound:
+        return ConsumerErrorTemplates::kUnavailableService.Generate(response->to_string());
+    case HttpCode::Conflict:
+        return ConsumerErrorFromNoDataResponse(response->to_string());
+    default:
+        return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string());
+    }
+}
+Error ConsumerErrorFromServerError(const Error& server_err) {
     if (server_err == HttpErrorTemplates::kTransferError) {
         return ConsumerErrorTemplates::kInterruptedTransaction.Generate(
-            "error processing request: " + server_err->Explain());
+                   "error processing request: " + server_err->Explain());
     } else {
         return ConsumerErrorTemplates::kUnavailableService.Generate(
-            "error processing request: " + server_err->Explain());
+                   "error processing request: " + server_err->Explain());
     }
 }
 
-Error ProcessRequestResponce(const Error &server_err, const RequestOutput* response, const HttpCode &code) {
+Error ProcessRequestResponce(const Error& server_err, const RequestOutput* response, const HttpCode& code) {
     if (server_err != nullptr) {
         return ConsumerErrorFromServerError(server_err);
     }
@@ -81,10 +90,11 @@ ServerDataBroker::ServerDataBroker(std::string server_uri,
                                    bool has_filesystem,
                                    SourceCredentials source) :
     io__{GenerateDefaultIO()}, httpclient__{DefaultHttpClient()},
-    net_client__{new TcpClient()},
     endpoint_{std::move(server_uri)}, source_path_{std::move(source_path)}, has_filesystem_{has_filesystem},
     source_credentials_(std::move(source)) {
 
+    // net_client__ is initialized lazily, on the first data request (see CreateNetClientAndTryToGetFile)
+
     if (source_credentials_.stream.empty()) {
         source_credentials_.stream = SourceCredentials::kDefaultStream;
     }
@@ -95,38 +105,49 @@ void ServerDataBroker::SetTimeout(uint64_t timeout_ms) {
     timeout_ms_ = timeout_ms;
 }
 
+void ServerDataBroker::ForceNoRdma() { // disable the RDMA attempt; data transfers will use TCP only
+    should_try_rdma_first_ = false;
+}
+
+NetworkConnectionType ServerDataBroker::CurrentConnectionType() const { // transport selected when the net client was created (fabric or TCP)
+    return current_connection_type_;
+}
+
+
 std::string ServerDataBroker::RequestWithToken(std::string uri) {
     return std::move(uri) + "?token=" + source_credentials_.user_token;
 }
 
-Error ServerDataBroker::ProcessPostRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
+Error ServerDataBroker::ProcessPostRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code) {
     Error err;
     switch (request.output_mode) {
-        case OutputDataMode::string:
-            response->string_output =
-                httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params,
-                                   request.cookie,
-                                   request.body,
-                                   code,
-                                   &err);
-            break;
-        case OutputDataMode::array:
-            err =
-                httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params, request.cookie,
-                                   request.body, &response->data_output, response->data_output_size, code);
-            break;
+    case OutputDataMode::string:
+        response->string_output =
+            httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params,
+                               request.cookie,
+                               request.body,
+                               code,
+                               &err);
+        break;
+    case OutputDataMode::array:
+        err =
+            httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params, request.cookie,
+                               request.body, &response->data_output, response->data_output_size, code);
+        break;
+    default:
+        break;
     }
     return err;
 }
 
-Error ServerDataBroker::ProcessGetRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
+Error ServerDataBroker::ProcessGetRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code) {
     Error err;
     response->string_output =
         httpclient__->Get(RequestWithToken(request.host + request.api) + request.extra_params, code, &err);
     return err;
 }
 
-Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInfo &request, std::string* service_uri) {
+Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri) {
     Error err;
     HttpCode code;
     if (request.post) {
@@ -140,7 +161,7 @@ Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInf
     return ProcessRequestResponce(err, response, code);
 }
 
-Error ServerDataBroker::DiscoverService(const std::string &service_name, std::string* uri_to_set) {
+Error ServerDataBroker::DiscoverService(const std::string& service_name, std::string* uri_to_set) {
     if (!uri_to_set->empty()) {
         return nullptr;
     }
@@ -154,13 +175,13 @@ Error ServerDataBroker::DiscoverService(const std::string &service_name, std::st
     if (err != nullptr || uri_to_set->empty()) {
         uri_to_set->clear();
         return ConsumerErrorTemplates::kUnavailableService.Generate(" on " + endpoint_
-                                                                        + (err != nullptr ? ": " + err->Explain()
-                                                                                          : ""));
+                + (err != nullptr ? ": " + err->Explain()
+                   : ""));
     }
     return nullptr;
 }
 
-bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string &response, std::string* redirect_uri) {
+bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* redirect_uri) {
     if (*err == ConsumerErrorTemplates::kNoData) {
         auto error_data = static_cast<const ConsumerErrorData*>((*err)->GetCustomData());
         if (error_data == nullptr) {
@@ -188,8 +209,8 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g
                                             bool dataset) {
     std::string request_suffix = OpToUriCmd(op);
     std::string request_api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream
-        + "/" + std::move(substream) +
-        +"/" + std::move(group_id) + "/";
+                              + "/" + std::move(substream) +
+                              +"/" + std::move(group_id) + "/";
     uint64_t elapsed_ms = 0;
     Error no_data_error;
     while (true) {
@@ -199,7 +220,7 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g
             auto ri = PrepareRequestInfo(request_api + request_suffix, dataset);
             if (request_suffix == "next" && resend_) {
                 ri.extra_params = ri.extra_params + "&resend_nacks=true" + "&delay_sec=" +
-                    std::to_string(delay_sec_) + "&resend_attempts=" + std::to_string(resend_attempts_);
+                                  std::to_string(delay_sec_) + "&resend_attempts=" + std::to_string(resend_attempts_);
             }
             RequestOutput output;
             err = ProcessRequest(&output, ri, &current_broker_uri_);
@@ -259,9 +280,12 @@ Error ServerDataBroker::GetLast(FileInfo* info, std::string group_id, std::strin
 
 std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) {
     switch (op) {
-        case GetImageServerOperation::GetNext:return "next";
-        case GetImageServerOperation::GetLast:return "last";
-        default:return "last";
+    case GetImageServerOperation::GetNext:
+        return "next";
+    case GetImageServerOperation::GetLast:
+        return "last";
+    default:
+        return "last";
     }
 }
 
@@ -332,7 +356,46 @@ bool ServerDataBroker::DataCanBeInBuffer(const FileInfo* info) {
     return info->buf_id > 0;
 }
 
+Error ServerDataBroker::CreateNetClientAndTryToGetFile(const FileInfo* info, FileData* data) {
+    const std::lock_guard<std::mutex> lock(net_client_mutex__);
+    if (net_client__) {
+        return net_client__->GetData(info, data); // another thread won the init race; still fetch the requested data
+    }
+
+    if (should_try_rdma_first_) { // This will check if a rdma connection can be made and will return early if so
+        auto fabricClient = std::unique_ptr<NetClient>(new FabricConsumerClient());
+
+        Error error = fabricClient->GetData(info, data);
+
+        // Check if the error comes from the receiver data server (so a connection was made)
+        if (!error || error == RdsResponseErrorTemplates::kNetErrorNoData) {
+            net_client__.swap(fabricClient);
+            current_connection_type_ = NetworkConnectionType::kFabric;
+            return error; // Successfully received data and is now using a fabric client
+        }
+
+        // An error occurred, fabric is unusable for this transfer
+
+        if (std::getenv("ASAPO_PRINT_FALLBACK_REASON")) {
+            std::cout << "Fallback to TCP because error: " << error->Explain() << std::endl;
+        }
+
+        // Retry with TCP
+        should_try_rdma_first_ = false;
+    }
+
+    // Create regular tcp client
+    net_client__.reset(new TcpClient());
+    current_connection_type_ = NetworkConnectionType::kAsapoTcp;
+
+    return net_client__->GetData(info, data);
+}
+
 Error ServerDataBroker::TryGetDataFromBuffer(const FileInfo* info, FileData* data) {
+    if (!net_client__) { // unlocked fast path, re-checked under net_client_mutex__ inside the call; NOTE(review): plain (non-atomic) read of net_client__ — confirm concurrent first calls are acceptable
+        return CreateNetClientAndTryToGetFile(info, data);
+    }
+
     return net_client__->GetData(info, data);
 }
 
@@ -343,7 +406,7 @@ std::string ServerDataBroker::GenerateNewGroupId(Error* err) {
     return BrokerRequestWithTimeout(ri, err);
 }
 
-Error ServerDataBroker::ServiceRequestWithTimeout(const std::string &service_name,
+Error ServerDataBroker::ServiceRequestWithTimeout(const std::string& service_name,
                                                   std::string* service_uri,
                                                   RequestInfo request,
                                                   RequestOutput* response) {
@@ -426,7 +489,7 @@ Error ServerDataBroker::ResetLastReadMarker(std::string group_id, std::string su
 Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/"
-        + std::move(substream) + "/" + std::move(group_id) + "/resetcounter";
+             + std::move(substream) + "/" + std::move(group_id) + "/resetcounter";
     ri.extra_params = "&value=" + std::to_string(value);
     ri.post = true;
 
@@ -438,7 +501,7 @@ Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id,
 uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) + "/size";
+             +"/" + std::move(substream) + "/size";
     auto responce = BrokerRequestWithTimeout(ri, err);
     if (*err) {
         return 0;
@@ -476,9 +539,9 @@ Error ServerDataBroker::GetRecordFromServerById(uint64_t id, std::string* respon
                                                 bool dataset) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
-        "/" + std::move(
-        group_id) + "/" + std::to_string(id);
+             +"/" + std::move(substream) +
+             "/" + std::move(
+                 group_id) + "/" + std::to_string(id);
     if (dataset) {
         ri.extra_params += "&dataset=true";
     }
@@ -509,7 +572,7 @@ DataSet ServerDataBroker::DecodeDatasetFromResponse(std::string response, Error*
 FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream, Error* err) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        "/" + std::move(substream) + "/0/queryimages";
+             "/" + std::move(substream) + "/0/queryimages";
     ri.post = true;
     ri.body = std::move(query);
 
@@ -572,7 +635,7 @@ std::vector<std::string> ParseSubstreamsFromResponse(std::string response, Error
     std::vector<std::string> substreams;
     *err = parser.GetArrayString("substreams", &substreams);
     if (*err) {
-        return std::vector<std::string>{};
+        return std::vector<std::string> {};
     }
     return substreams;
 }
@@ -585,7 +648,7 @@ std::vector<std::string> ServerDataBroker::GetSubstreamList(Error* err) {
 
     auto response = BrokerRequestWithTimeout(ri, err);
     if (*err) {
-        return std::vector<std::string>{};
+        return std::vector<std::string> {};
     }
 
     return ParseSubstreamsFromResponse(std::move(response), err);
@@ -614,13 +677,13 @@ RequestInfo ServerDataBroker::CreateFolderTokenRequest() const {
     ri.post = true;
     ri.body =
         "{\"Folder\":\"" + source_path_ + "\",\"BeamtimeId\":\"" + source_credentials_.beamtime_id + "\",\"Token\":\""
-            +
-                source_credentials_.user_token + "\"}";
+        +
+        source_credentials_.user_token + "\"}";
     return ri;
 }
 
 Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* data,
-                                                       bool retry_with_new_token) {
+        bool retry_with_new_token) {
     auto err = UpdateFolderTokenIfNeeded(retry_with_new_token);
     if (err) {
         return err;
@@ -629,7 +692,7 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData*
     if (info->size == 0) {
         err = FtsSizeRequestWithTimeout(info);
         if (err == ConsumerErrorTemplates::kWrongInput
-            && !retry_with_new_token) { // token expired? Refresh token and try again.
+                && !retry_with_new_token) { // token expired? Refresh token and try again.
             return GetDataFromFileTransferService(info, data, true);
         }
         if (err) {
@@ -639,7 +702,7 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData*
 
     err = FtsRequestWithTimeout(info, data);
     if (err == ConsumerErrorTemplates::kWrongInput
-        && !retry_with_new_token) { // token expired? Refresh token and try again.
+            && !retry_with_new_token) { // token expired? Refresh token and try again.
         return GetDataFromFileTransferService(info, data, true);
     }
     return err;
@@ -648,8 +711,8 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData*
 Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::string substream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
-        "/" + std::move(group_id) + "/" + std::to_string(id);
+             +"/" + std::move(substream) +
+             "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
     ri.body = "{\"Op\":\"ackimage\"}";
 
@@ -659,14 +722,14 @@ Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::stri
 }
 
 IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
-                                                   std::string substream,
-                                                   uint64_t from_id,
-                                                   uint64_t to_id,
-                                                   Error* error) {
+        std::string substream,
+        uint64_t from_id,
+        uint64_t to_id,
+        Error* error) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
-        "/" + std::move(group_id) + "/nacks";
+             +"/" + std::move(substream) +
+             "/" + std::move(group_id) + "/nacks";
     ri.extra_params = "&from=" + std::to_string(from_id) + "&to=" + std::to_string(to_id);
 
     auto json_string = BrokerRequestWithTimeout(ri, error);
@@ -684,17 +747,17 @@ IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
 }
 
 IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
-                                                   uint64_t from_id,
-                                                   uint64_t to_id,
-                                                   Error* error) {
+        uint64_t from_id,
+        uint64_t to_id,
+        Error* error) {
     return GetUnacknowledgedTupleIds(std::move(group_id), kDefaultSubstream, from_id, to_id, error);
 }
 
 uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
-        "/" + std::move(group_id) + "/lastack";
+             +"/" + std::move(substream) +
+             "/" + std::move(group_id) + "/lastack";
 
     auto json_string = BrokerRequestWithTimeout(ri, error);
     if (*error) {
@@ -729,10 +792,10 @@ Error ServerDataBroker::NegativeAcknowledge(std::string group_id,
                                             std::string substream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
-        "/" + std::move(group_id) + "/" + std::to_string(id);
+             +"/" + std::move(substream) +
+             "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
-    ri.body = R"({"Op":"negackimage","Params":{"DelaySec":)"+std::to_string(delay_sec)+"}}";
+    ri.body = R"({"Op":"negackimage","Params":{"DelaySec":)" + std::to_string(delay_sec) + "}}";
 
     Error err;
     BrokerRequestWithTimeout(ri, &err);
diff --git a/consumer/api/cpp/src/server_data_broker.h b/consumer/api/cpp/src/server_data_broker.h
index 00b640475ff98d6626cb70e89ecf04238b5eb8c6..5994b51aa50fcebe730503cbb953ca1e13cf4e9e 100644
--- a/consumer/api/cpp/src/server_data_broker.h
+++ b/consumer/api/cpp/src/server_data_broker.h
@@ -1,6 +1,8 @@
 #ifndef ASAPO_SERVER_DATA_BROKER_H
 #define ASAPO_SERVER_DATA_BROKER_H
 
+#include <common/networking.h>
+#include <mutex>
 #include "consumer/data_broker.h"
 #include "io/io.h"
 #include "http_client/http_client.h"
@@ -9,149 +11,161 @@
 namespace asapo {
 
 enum class GetImageServerOperation {
-  GetNext,
-  GetLast,
-  GetID
+    GetNext,
+    GetLast,
+    GetID
 };
 
 enum class OutputDataMode {
-  string,
-  array,
-  file
+    string,
+    array,
+    file
 };
 
 struct RequestInfo {
-  std::string host;
-  std::string api;
-  std::string extra_params;
-  std::string body;
-  std::string cookie;
-  OutputDataMode output_mode = OutputDataMode::string;
-  bool post = false;
+    std::string host;
+    std::string api;
+    std::string extra_params;
+    std::string body;
+    std::string cookie;
+    OutputDataMode output_mode = OutputDataMode::string;
+    bool post = false;
 };
 
 struct RequestOutput {
-  std::string string_output;
-  FileData data_output;
-  uint64_t data_output_size;
-  const char* to_string() const {
-      if (!data_output) {
-          return string_output.c_str();
-      } else {
-          return reinterpret_cast<char const*>(data_output.get());
-      }
-  }
+    std::string string_output;
+    FileData data_output;
+    uint64_t data_output_size;
+    const char* to_string() const {
+        if (!data_output) {
+            return string_output.c_str();
+        } else {
+            return reinterpret_cast<char const*>(data_output.get()) ;
+        }
+    }
 };
 
-Error ProcessRequestResponce(const Error &server_err, const RequestOutput* response, const HttpCode &code);
-Error ConsumerErrorFromNoDataResponse(const std::string &response);
+Error ProcessRequestResponce(const Error& server_err, const RequestOutput* response, const HttpCode& code);
+Error ConsumerErrorFromNoDataResponse(const std::string& response);
+
 
 class ServerDataBroker final : public asapo::DataBroker {
- public:
-  explicit ServerDataBroker(std::string server_uri, std::string source_path, bool has_filesystem,
-                            SourceCredentials source);
-
-  Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) override;
-  Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec, std::string substream = kDefaultSubstream) override;
-
-  IdList GetUnacknowledgedTupleIds(std::string group_id,
-                                   std::string substream,
-                                   uint64_t from_id,
-                                   uint64_t to_id,
-                                   Error* error) override;
-  IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) override;
-
-  uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) override;
-  uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) override;
-
-  Error ResetLastReadMarker(std::string group_id) override;
-  Error ResetLastReadMarker(std::string group_id, std::string substream) override;
-
-  Error SetLastReadMarker(uint64_t value, std::string group_id) override;
-  Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) override;
-
-  Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
-  Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
-
-  Error GetLast(FileInfo* info, std::string group_id, FileData* data) override;
-  Error GetLast(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
-
-  std::string GenerateNewGroupId(Error* err) override;
-  std::string GetBeamtimeMeta(Error* err) override;
-
-  uint64_t GetCurrentSize(Error* err) override;
-  uint64_t GetCurrentSize(std::string substream, Error* err) override;
-
-  Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
-  Error GetById(uint64_t id, FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
-
-  void SetTimeout(uint64_t timeout_ms) override;
-  FileInfos QueryImages(std::string query, Error* err) override;
-  FileInfos QueryImages(std::string query, std::string substream, Error* err) override;
-
-  DataSet GetNextDataset(std::string group_id, Error* err) override;
-  DataSet GetNextDataset(std::string group_id, std::string substream, Error* err) override;
-
-  DataSet GetLastDataset(std::string group_id, Error* err) override;
-  DataSet GetLastDataset(std::string group_id, std::string substream, Error* err) override;
-
-  DataSet GetDatasetById(uint64_t id, std::string group_id, Error* err) override;
-  DataSet GetDatasetById(uint64_t id, std::string group_id, std::string substream, Error* err) override;
-
-  Error RetrieveData(FileInfo* info, FileData* data) override;
-
-  std::vector<std::string> GetSubstreamList(Error* err) override;
-  void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) override;
-
-  std::unique_ptr<IO> io__; // modified in testings to mock system calls,otherwise do not touch
-  std::unique_ptr<HttpClient> httpclient__;
-  std::unique_ptr<NetClient> net_client__;
- private:
-  Error GetDataFromFileTransferService(FileInfo* info, FileData* data, bool retry_with_new_token);
-  Error GetDataFromFile(FileInfo* info, FileData* data);
-  static const std::string kBrokerServiceName;
-  static const std::string kFileTransferServiceName;
-  std::string RequestWithToken(std::string uri);
-  Error GetRecordFromServer(std::string* info, std::string group_id, std::string substream, GetImageServerOperation op,
-                            bool dataset = false);
-  Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string substream,
-                                bool dataset = false);
-  Error GetDataIfNeeded(FileInfo* info, FileData* data);
-  Error DiscoverService(const std::string &service_name, std::string* uri_to_set);
-  bool SwitchToGetByIdIfNoData(Error* err, const std::string &response, std::string* redirect_uri);
-  Error ProcessRequest(RequestOutput* response, const RequestInfo &request, std::string* service_uri);
-  Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
-                           FileInfo* info, FileData* data);
-  DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
-                               Error* err);
-  bool DataCanBeInBuffer(const FileInfo* info);
-  Error TryGetDataFromBuffer(const FileInfo* info, FileData* data);
-  Error ServiceRequestWithTimeout(const std::string &service_name, std::string* service_uri, RequestInfo request,
-                                  RequestOutput* response);
-  std::string BrokerRequestWithTimeout(RequestInfo request, Error* err);
-  Error FtsRequestWithTimeout(FileInfo* info, FileData* data);
-  Error FtsSizeRequestWithTimeout(FileInfo* info);
-  Error ProcessPostRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code);
-  Error ProcessGetRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code);
-
-  DataSet DecodeDatasetFromResponse(std::string response, Error* err);
-  RequestInfo PrepareRequestInfo(std::string api_url, bool dataset);
-  std::string OpToUriCmd(GetImageServerOperation op);
-  Error UpdateFolderTokenIfNeeded(bool ignore_existing);
-  std::string endpoint_;
-  std::string current_broker_uri_;
-  std::string current_fts_uri_;
-  std::string source_path_;
-  bool has_filesystem_;
-  SourceCredentials source_credentials_;
-  uint64_t timeout_ms_ = 0;
-  std::string folder_token_;
-  RequestInfo CreateFolderTokenRequest() const;
-  RequestInfo CreateFileTransferRequest(const FileInfo* info) const;
-  uint64_t resend_timout_ = 0;
-  bool resend_ = false;
-  uint64_t delay_sec_;
-  uint64_t resend_attempts_;
+  public:
+    explicit ServerDataBroker(std::string server_uri, std::string source_path, bool has_filesystem,
+                              SourceCredentials source);
+
+    Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) override;
+    Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
+                              std::string substream = kDefaultSubstream) override;
+
+    IdList GetUnacknowledgedTupleIds(std::string group_id,
+                                     std::string substream,
+                                     uint64_t from_id,
+                                     uint64_t to_id,
+                                     Error* error) override;
+    IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) override;
+
+    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) override;
+    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) override;
+
+    Error ResetLastReadMarker(std::string group_id) override;
+    Error ResetLastReadMarker(std::string group_id, std::string substream) override;
+
+    Error SetLastReadMarker(uint64_t value, std::string group_id) override;
+    Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) override;
+
+    Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
+    Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
+
+    Error GetLast(FileInfo* info, std::string group_id, FileData* data) override;
+    Error GetLast(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
+
+    std::string GenerateNewGroupId(Error* err) override;
+    std::string GetBeamtimeMeta(Error* err) override;
+
+    uint64_t GetCurrentSize(Error* err) override;
+    uint64_t GetCurrentSize(std::string substream, Error* err) override;
+
+    Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
+    Error GetById(uint64_t id, FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
+
+
+    void SetTimeout(uint64_t timeout_ms) override;
+    void ForceNoRdma() override;
+
+    NetworkConnectionType CurrentConnectionType() const override;
+
+    FileInfos QueryImages(std::string query, Error* err) override;
+    FileInfos QueryImages(std::string query, std::string substream, Error* err) override;
+
+    DataSet GetNextDataset(std::string group_id, Error* err) override;
+    DataSet GetNextDataset(std::string group_id, std::string substream, Error* err) override;
+
+    DataSet GetLastDataset(std::string group_id, Error* err) override;
+    DataSet GetLastDataset(std::string group_id, std::string substream, Error* err) override;
+
+    DataSet GetDatasetById(uint64_t id, std::string group_id, Error* err) override;
+    DataSet GetDatasetById(uint64_t id, std::string group_id, std::string substream, Error* err) override;
+
+    Error RetrieveData(FileInfo* info, FileData* data) override;
+
+    std::vector<std::string> GetSubstreamList(Error* err) override;
+    void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) override;
+
+    std::unique_ptr<IO> io__; // modified in testings to mock system calls,otherwise do not touch
+    std::unique_ptr<HttpClient> httpclient__;
+    std::unique_ptr<NetClient> net_client__;
+
+    std::mutex net_client_mutex__; // Required for the lazy initialization of net_client
+  private:
+    Error GetDataFromFileTransferService(FileInfo* info, FileData* data, bool retry_with_new_token);
+    Error GetDataFromFile(FileInfo* info, FileData* data);
+    static const std::string kBrokerServiceName;
+    static const std::string kFileTransferServiceName;
+    std::string RequestWithToken(std::string uri);
+    Error GetRecordFromServer(std::string* info, std::string group_id, std::string substream, GetImageServerOperation op,
+                              bool dataset = false);
+    Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string substream,
+                                  bool dataset = false);
+    Error GetDataIfNeeded(FileInfo* info, FileData* data);
+    Error DiscoverService(const std::string& service_name, std::string* uri_to_set);
+    bool SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* redirect_uri);
+    Error ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri);
+    Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
+                             FileInfo* info, FileData* data);
+    DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
+                                 Error* err);
+    bool DataCanBeInBuffer(const FileInfo* info);
+    Error TryGetDataFromBuffer(const FileInfo* info, FileData* data);
+    Error CreateNetClientAndTryToGetFile(const FileInfo* info, FileData* data);
+    Error ServiceRequestWithTimeout(const std::string& service_name, std::string* service_uri, RequestInfo request,
+                                    RequestOutput* response);
+    std::string BrokerRequestWithTimeout(RequestInfo request, Error* err);
+    Error FtsRequestWithTimeout(FileInfo* info, FileData* data);
+    Error FtsSizeRequestWithTimeout(FileInfo* info);
+    Error ProcessPostRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code);
+    Error ProcessGetRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code);
+
+    DataSet DecodeDatasetFromResponse(std::string response, Error* err);
+    RequestInfo PrepareRequestInfo(std::string api_url, bool dataset);
+    std::string OpToUriCmd(GetImageServerOperation op);
+    Error UpdateFolderTokenIfNeeded(bool ignore_existing);
+    std::string endpoint_;
+    std::string current_broker_uri_;
+    std::string current_fts_uri_;
+    std::string source_path_;
+    bool has_filesystem_;
+    SourceCredentials source_credentials_;
+    uint64_t timeout_ms_ = 0;
+    bool should_try_rdma_first_ = true;
+    NetworkConnectionType current_connection_type_ = NetworkConnectionType::kUndefined;
+    std::string folder_token_;
+    RequestInfo CreateFolderTokenRequest() const;
+    RequestInfo CreateFileTransferRequest(const FileInfo* info) const;
+    uint64_t resend_timout_ = 0;
+    bool resend_ = false;
+    uint64_t delay_sec_;
+    uint64_t resend_attempts_;
 };
 
 }
diff --git a/consumer/api/cpp/src/tcp_client.cpp b/consumer/api/cpp/src/tcp_client.cpp
index 61ed3580cce0b70df72bc1c9341139d55a8881c4..c6bf9b5ae914b3c394d17a9cbfa20112b13fc110 100644
--- a/consumer/api/cpp/src/tcp_client.cpp
+++ b/consumer/api/cpp/src/tcp_client.cpp
@@ -1,6 +1,8 @@
 #include "tcp_client.h"
 #include "io/io_factory.h"
 #include "common/networking.h"
+#include "rds_response_error.h"
+
 namespace asapo {
 
 TcpClient::TcpClient() : io__{GenerateDefaultIO()}, connection_pool__{new TcpConnectionPool()} {
@@ -32,23 +34,25 @@ Error TcpClient::ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const Fi
 Error TcpClient::ReceiveResponce(SocketDescriptor sd) const noexcept {
     Error err;
 
-    GenericNetworkResponse Response;
-    io__->Receive(sd, &Response, sizeof(Response), &err);
+    GenericNetworkResponse response;
+    io__->Receive(sd, &response, sizeof(response), &err);
     if(err != nullptr) {
         io__->CloseSocket(sd, nullptr);
         connection_pool__->ReleaseConnection(sd);
         return err;
     }
-    switch (Response.error_code) {
-    case kNetErrorWrongRequest :
-        io__->CloseSocket(sd, nullptr);
-        return Error{new SimpleError("internal server error: wrong request")};
-    case kNetErrorNoData :
-        connection_pool__->ReleaseConnection(sd);
-        return Error{new SimpleError("no data")};
-    default:
-        return nullptr;
+    if (response.error_code) {
+        switch (response.error_code) {
+        case kNetErrorWrongRequest:
+            io__->CloseSocket(sd, nullptr);
+            break;
+        case kNetErrorNoData:
+            connection_pool__->ReleaseConnection(sd);
+            break;
+        }
+        return ConvertRdsResponseToError(response.error_code);
     }
+    return nullptr;
 }
 
 Error TcpClient::QueryCacheHasData(SocketDescriptor* sd, const FileInfo* info, bool try_reconnect) const noexcept {
@@ -84,8 +88,7 @@ Error TcpClient::ReceiveData(SocketDescriptor sd, const FileInfo* info, FileData
     return err;
 }
 
-
-Error TcpClient::GetData(const FileInfo* info, FileData* data) const noexcept {
+Error TcpClient::GetData(const FileInfo* info, FileData* data) {
     Error err;
     bool reused;
     auto sd = connection_pool__->GetFreeConnection(info->source, &reused, &err);
@@ -101,4 +104,4 @@ Error TcpClient::GetData(const FileInfo* info, FileData* data) const noexcept {
     return ReceiveData(sd, info, data);
 }
 
-}
\ No newline at end of file
+}
diff --git a/consumer/api/cpp/src/tcp_client.h b/consumer/api/cpp/src/tcp_client.h
index 6b0c64f78200e03b671ab2124335192766913c88..7be85152d1380cb028b3ff0f907d8b8915f86343 100644
--- a/consumer/api/cpp/src/tcp_client.h
+++ b/consumer/api/cpp/src/tcp_client.h
@@ -1,5 +1,5 @@
-#ifndef ASAPO_TCP_CLIENT_H
-#define ASAPO_TCP_CLIENT_H
+#ifndef ASAPO_CONSUMER_TCP_CLIENT_H
+#define ASAPO_CONSUMER_TCP_CLIENT_H
 
 #include "net_client.h"
 #include "io/io.h"
@@ -10,7 +10,7 @@ namespace asapo {
 class TcpClient : public NetClient {
   public:
     explicit TcpClient();
-    Error GetData(const FileInfo* info, FileData* data) const noexcept override;
+    Error GetData(const FileInfo* info, FileData* data) override;
     std::unique_ptr<IO> io__;
     std::unique_ptr<TcpConnectionPool> connection_pool__;
   private:
@@ -23,4 +23,4 @@ class TcpClient : public NetClient {
 
 }
 
-#endif //ASAPO_TCP_CLIENT_H
+#endif //ASAPO_CONSUMER_TCP_CLIENT_H
diff --git a/consumer/api/cpp/unittests/mocking.h b/consumer/api/cpp/unittests/mocking.h
index be0623021ed4c08aca9f4a128232420f0198e9e5..06a1326a43594a9124b36a1043bf6a55cb7c3321 100644
--- a/consumer/api/cpp/unittests/mocking.h
+++ b/consumer/api/cpp/unittests/mocking.h
@@ -12,7 +12,7 @@ namespace asapo {
 class MockNetClient : public asapo::NetClient {
   public:
 
-    Error GetData(const FileInfo* info, FileData* data) const noexcept override {
+    Error GetData(const FileInfo* info, FileData* data) override {
         return Error(GetData_t(info, data));
     }
 
diff --git a/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp b/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0624c879749bed3830cb69d4277cfc661e1958ec
--- /dev/null
+++ b/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp
@@ -0,0 +1,299 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <unittests/MockFabric.h>
+#include <common/networking.h>
+#include "../src/fabric_consumer_client.h"
+#include "../../../../common/cpp/src/system_io/system_io.h"
+
+using namespace asapo;
+
+using ::testing::Test;
+using ::testing::Ne;
+using ::testing::Eq;
+using ::testing::_;
+using ::testing::SetArgPointee;
+using ::testing::Return;
+using ::testing::DoAll;
+using ::testing::SaveArg;
+using ::testing::SaveArgPointee;
+using ::testing::StrictMock;
+using ::testing::Expectation;
+
+TEST(FabricConsumerClient, Constructor) {
+    FabricConsumerClient client;
+    ASSERT_THAT(dynamic_cast<fabric::FabricFactory*>(client.factory__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<fabric::FabricClient*>(client.client__.get()), Eq(nullptr));
+}
+
+MATCHER_P6(M_CheckSendDataRequest, op_code, buf_id, data_size, mr_addr, mr_length, mr_key,
+           "Checks if a valid GenericRequestHeader was Send") {
+    auto data = (GenericRequestHeader*) arg;
+    auto mr = (fabric::MemoryRegionDetails*) &data->message;
+    return data->op_code == op_code
+           && data->data_id == uint64_t(buf_id)
+           && data->data_size == uint64_t(data_size)
+           && mr->addr == uint64_t(mr_addr)
+           && mr->length == uint64_t(mr_length)
+           && mr->key == uint64_t(mr_key);
+}
+
+ACTION_P(A_WriteSendDataResponse, error_code) {
+    ((asapo::SendDataResponse*)arg2)->op_code = asapo::kOpcodeGetBufferData;
+    ((asapo::SendDataResponse*)arg2)->error_code = error_code;
+}
+
+class FabricConsumerClientTests : public Test {
+  public:
+    FabricConsumerClient client;
+    StrictMock<fabric::MockFabricFactory> mock_fabric_factory;
+    StrictMock<fabric::MockFabricClient> mock_fabric_client;
+
+    void SetUp() override {
+        client.factory__ = std::unique_ptr<fabric::FabricFactory> {&mock_fabric_factory};
+    }
+    void TearDown() override {
+        client.factory__.release();
+        client.client__.release();
+    }
+
+  public:
+    void ExpectInit(bool ok);
+    void ExpectAddedConnection(const std::string& address, bool ok, fabric::FabricAddress result);
+    void ExpectTransfer(void** outputData, fabric::FabricAddress serverAddr,
+                        fabric::FabricMessageId messageId, bool sendOk, bool recvOk,
+                        NetworkErrorCode serverResponse);
+};
+
+void FabricConsumerClientTests::ExpectInit(bool ok) {
+    EXPECT_CALL(mock_fabric_factory, CreateClient_t(_/*err*/))
+    .WillOnce(DoAll(
+                  SetArgPointee<0>(ok ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                  Return(&mock_fabric_client)
+              ));
+}
+
+void FabricConsumerClientTests::ExpectAddedConnection(const std::string& address, bool ok,
+        fabric::FabricAddress result) {
+    EXPECT_CALL(mock_fabric_client, AddServerAddress_t(address, _/*err*/))
+    .WillOnce(DoAll(
+                  SetArgPointee<1>(ok ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                  Return(result)
+              ));
+}
+
+void FabricConsumerClientTests::ExpectTransfer(void** outputData, fabric::FabricAddress serverAddr,
+                                               fabric::FabricMessageId messageId, bool sendOk, bool recvOk,
+                                               NetworkErrorCode serverResponse) {
+    static fabric::MemoryRegionDetails mrDetails{};
+    mrDetails.addr = 0x124;
+    mrDetails.length = 4123;
+    mrDetails.key = 20;
+
+    auto mr = new StrictMock<fabric::MockFabricMemoryRegion>();
+    EXPECT_CALL(mock_fabric_client, ShareMemoryRegion_t(_, 4123, _/*err*/)).WillOnce(DoAll(
+                SaveArg<0>(outputData),
+                Return(mr)
+            ));
+    Expectation getDetailsCall = EXPECT_CALL(*mr, GetDetails()).WillOnce(Return(&mrDetails));
+
+
+    Expectation sendCall = EXPECT_CALL(mock_fabric_client, Send_t(serverAddr, messageId,
+                                       M_CheckSendDataRequest(kOpcodeGetBufferData, 78954, 4123, 0x124, 4123, 20),
+                                       sizeof(GenericRequestHeader), _)).After(getDetailsCall)
+                           .WillOnce(SetArgPointee<4>(sendOk ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()));
+
+    if (sendOk) {
+        Expectation recvCall = EXPECT_CALL(mock_fabric_client, Recv_t(serverAddr, messageId, _,
+                                           sizeof(GenericNetworkResponse), _))
+                               .After(sendCall)
+                               .WillOnce(DoAll(
+                                             SetArgPointee<4>(recvOk ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                                             A_WriteSendDataResponse(serverResponse)
+                                         ));
+        EXPECT_CALL(*mr, Destructor()).After(recvCall);
+    } else {
+        EXPECT_CALL(*mr, Destructor()).After(sendCall);
+    }
+
+}
+
+TEST_F(FabricConsumerClientTests, GetData_Error_Init) {
+    ExpectInit(false);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_Error_AddConnection) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", false, -1);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+
+    // Make sure that the connection was not saved
+    ExpectAddedConnection("host:1234", false, -1);
+    err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_ShareMemoryRegion_Error) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+
+    EXPECT_CALL(mock_fabric_client, ShareMemoryRegion_t(_, 4123, _/*err*/))
+    .WillOnce(DoAll(
+                  SetArgPointee<2>(fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                  Return(nullptr)
+              ));
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_SendFailed) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, false, false, kNetErrorNoError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Ne(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_RecvFailed) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, true, false, kNetErrorNoError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Ne(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_ServerError) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, true, true, kNetErrorInternalServerError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Ne(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_Ok) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_Ok_UsedCahedConnection) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+
+    outData = nullptr;
+    ExpectTransfer(&outData, 0, 1, true, true, kNetErrorNoError);
+
+    err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+}
+
+TEST_F(FabricConsumerClientTests, GetData_Ok_SecondConnection) {
+    ExpectInit(true);
+    ExpectAddedConnection("host:1234", true, 0);
+
+    FileData expectedFileData;
+    FileInfo expectedInfo{};
+    expectedInfo.source = "host:1234";
+    expectedInfo.size = 4123;
+    expectedInfo.buf_id = 78954;
+
+    void* outData = nullptr;
+    ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
+
+    Error err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+
+    ExpectAddedConnection("host:1235", true, 54);
+    expectedInfo.source = "host:1235";
+
+    outData = nullptr;
+    ExpectTransfer(&outData, 54, 1, true, true, kNetErrorNoError);
+
+    err = client.GetData(&expectedInfo, &expectedFileData);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+}
diff --git a/consumer/api/cpp/unittests/test_rds_error_mapper.cpp b/consumer/api/cpp/unittests/test_rds_error_mapper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f19dc56022525914a17fe0b04fee1340571c4216
--- /dev/null
+++ b/consumer/api/cpp/unittests/test_rds_error_mapper.cpp
@@ -0,0 +1,22 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include "../src/rds_response_error.h"
+
+using namespace asapo;
+using ::testing::Eq;
+
+TEST(ConvertRdsResponseToError, TestAllCases) {
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorNoError /* 0 */), Eq(nullptr));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorReauthorize),
+                Eq(RdsResponseErrorTemplates::kNetErrorReauthorize));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorWarning),
+                Eq(RdsResponseErrorTemplates::kNetErrorWarning));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorWrongRequest),
+                Eq(RdsResponseErrorTemplates::kNetErrorWrongRequest));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorNoData),
+                Eq(RdsResponseErrorTemplates::kNetErrorNoData));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetAuthorizationError),
+                Eq(RdsResponseErrorTemplates::kNetAuthorizationError));
+    ASSERT_THAT(ConvertRdsResponseToError(NetworkErrorCode::kNetErrorInternalServerError),
+                Eq(RdsResponseErrorTemplates::kNetErrorInternalServerError));
+}
diff --git a/consumer/api/cpp/unittests/test_server_broker.cpp b/consumer/api/cpp/unittests/test_server_broker.cpp
index b19620c467582a64b400469adf053b09879f4911..c1125ccae8330ff5c9c2e515a6f54a561a916a25 100644
--- a/consumer/api/cpp/unittests/test_server_broker.cpp
+++ b/consumer/api/cpp/unittests/test_server_broker.cpp
@@ -50,7 +50,7 @@ TEST(FolderDataBroker, Constructor) {
     };
     ASSERT_THAT(dynamic_cast<asapo::SystemIO*>(data_broker->io__.get()), Ne(nullptr));
     ASSERT_THAT(dynamic_cast<asapo::CurlHttpClient*>(data_broker->httpclient__.get()), Ne(nullptr));
-    ASSERT_THAT(dynamic_cast<asapo::TcpClient*>(data_broker->net_client__.get()), Ne(nullptr));
+    ASSERT_THAT(data_broker->net_client__.get(), Eq(nullptr));
 }
 
 const uint8_t expected_value = 1;
@@ -125,10 +125,11 @@ class ServerDataBrokerTests : public Test {
                 ));
     }
     void MockGetServiceUri(std::string service, std::string result) {
-        EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/" + service), _, _)).WillOnce(DoAll(
-                    SetArgPointee<1>(HttpCode::OK),
-                    SetArgPointee<2>(nullptr),
-                    Return(result)));
+        EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/" + service), _,
+                                            _)).WillOnce(DoAll(
+                                                    SetArgPointee<1>(HttpCode::OK),
+                                                    SetArgPointee<2>(nullptr),
+                                                    Return(result)));
     }
 
     void MockBeforeFTS(FileData* data);
@@ -554,7 +555,7 @@ TEST_F(ServerDataBrokerTests, GetImageCallsReadFromFileIfZeroBufId) {
 TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("creategroup"), _,"", _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("creategroup"), _, "", _, _)).WillOnce(DoAll(
                 SetArgPointee<3>(HttpCode::BadRequest),
                 SetArgPointee<4>(nullptr),
                 Return("")));
@@ -570,7 +571,7 @@ TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) {
 TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsGroupID) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/creategroup?token=" + expected_token,_,"", _,
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/creategroup?token=" + expected_token, _, "", _,
                                          _)).WillOnce(DoAll(
                                                  SetArgPointee<3>(HttpCode::OK),
                                                  SetArgPointee<4>(nullptr),
@@ -589,7 +590,7 @@ TEST_F(ServerDataBrokerTests, ResetCounterByDefaultUsesCorrectUri) {
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
                                          expected_group_id +
-                                         "/resetcounter?token=" + expected_token + "&value=0",_, _, _, _)).WillOnce(DoAll(
+                                         "/resetcounter?token=" + expected_token + "&value=0", _, _, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
                                                      Return("")));
@@ -603,7 +604,7 @@ TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUri) {
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
                                          expected_group_id +
-                                         "/resetcounter?token=" + expected_token + "&value=10", _,_, _, _)).WillOnce(DoAll(
+                                         "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
                                                      Return("")));
@@ -619,7 +620,7 @@ TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUriWithSubstream) {
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
                                          expected_substream + "/" +
                                          expected_group_id +
-                                         "/resetcounter?token=" + expected_token + "&value=10", _,_, _, _)).WillOnce(DoAll(
+                                         "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
                                                      Return("")));
@@ -792,7 +793,7 @@ TEST_F(ServerDataBrokerTests, GetMetaDataOK) {
 TEST_F(ServerDataBrokerTests, QueryImagesReturnError) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"),_, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
                 SetArgPointee<3>(HttpCode::BadRequest),
                 SetArgPointee<4>(nullptr),
                 Return("error in query")));
@@ -810,7 +811,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnError) {
 TEST_F(ServerDataBrokerTests, QueryImagesReturnEmptyResults) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"),_, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
                 SetArgPointee<3>(HttpCode::OK),
                 SetArgPointee<4>(nullptr),
                 Return("[]")));
@@ -834,7 +835,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseArray) {
     auto responce_string = json1 + "," + json2 + "]"; // no [ at the beginning
 
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"),_, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
                 SetArgPointee<3>(HttpCode::OK),
                 SetArgPointee<4>(nullptr),
                 Return(responce_string)));
@@ -855,7 +856,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseRecorsd) {
     auto responce_string = R"([{"bla":1},{"err":}])";
 
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"),_, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
                 SetArgPointee<3>(HttpCode::OK),
                 SetArgPointee<4>(nullptr),
                 Return(responce_string)));
@@ -884,7 +885,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) {
 
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0" +
-                                         "/queryimages?token=" + expected_token, _,expected_query_string, _, _)).WillOnce(DoAll(
+                                         "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
                                                      Return(responce_string)));
@@ -906,7 +907,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithSubstream) {
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
                                          expected_substream + "/0" +
-                                         "/queryimages?token=" + expected_token,_, expected_query_string, _, _)).WillOnce(DoAll(
+                                         "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
                                                      Return("[]")));
@@ -1056,7 +1057,7 @@ void ServerDataBrokerTests::ExpectFolderToken() {
                                                expected_beamtime_id
                                                + "\",\"Token\":\"" + expected_token + "\"}";
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr(expected_server_uri + "/asapo-authorizer/folder"),_,
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr(expected_server_uri + "/asapo-authorizer/folder"), _,
                                          expected_folder_query_string, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<3>(HttpCode::OK),
                                                      SetArgPointee<4>(nullptr),
@@ -1118,19 +1119,19 @@ TEST_F(ServerDataBrokerTests, GetImageUsesFileTransferServiceIfCannotReadFromCac
 TEST_F(ServerDataBrokerTests, FileTransferReadsFileSize) {
     AssertSingleFileTransfer();
     EXPECT_CALL(mock_http_client, Post_t(HasSubstr("sizeonly=true"),
-                                                    expected_cookie, expected_fts_query_string, _, _)).WillOnce(DoAll(
+                                         expected_cookie, expected_fts_query_string, _, _)).WillOnce(DoAll(
 
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("{\"file_size\":5}")
-    ));
+                                                     SetArgPointee<3>(HttpCode::OK),
+                                                     SetArgPointee<4>(nullptr),
+                                                     Return("{\"file_size\":5}")
+                                                 ));
 
     EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"),
                                                     expected_cookie, expected_fts_query_string, _, 5, _)).WillOnce(DoAll(
-        SetArgPointee<5>(HttpCode::OK),
-        AssignArg3(nullptr),
-        Return(nullptr)
-    ));
+                                                            SetArgPointee<5>(HttpCode::OK),
+                                                            AssignArg3(nullptr),
+                                                            Return(nullptr)
+                                                            ));
 
     FileData data;
     info.size = 0;
@@ -1162,13 +1163,14 @@ TEST_F(ServerDataBrokerTests, GetImageTriesToGetTokenAgainIfTransferFailed) {
 TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
     auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/"+expected_substream+"/"  +
-                                            expected_group_id
-                                            + "/" + std::to_string(expected_dataset_id) + "?token="
-                                            + expected_token,_,expected_acknowledge_command, _,_)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("")));
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
+                                         expected_substream + "/"  +
+                                         expected_group_id
+                                         + "/" + std::to_string(expected_dataset_id) + "?token="
+                                         + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll(
+                                                     SetArgPointee<3>(HttpCode::OK),
+                                                     SetArgPointee<4>(nullptr),
+                                                     Return("")));
 
     auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id, expected_substream);
 
@@ -1180,12 +1182,12 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) {
     MockGetBrokerUri();
     auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/"  +
-        expected_group_id
-                                             + "/" + std::to_string(expected_dataset_id) + "?token="
-                                             + expected_token,_,expected_acknowledge_command, _,_)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("")));
+                                         expected_group_id
+                                         + "/" + std::to_string(expected_dataset_id) + "?token="
+                                         + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll(
+                                                     SetArgPointee<3>(HttpCode::OK),
+                                                     SetArgPointee<4>(nullptr),
+                                                     Return("")));
 
     auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id);
 
@@ -1194,11 +1196,12 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) {
 
 void ServerDataBrokerTests::ExpectIdList(bool error) {
     MockGetBrokerUri();
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/"+expected_substream+"/"  +
-        expected_group_id + "/nacks?token=" + expected_token+"&from=1&to=0",_,_)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return(error?"":"{\"unacknowledged\":[1,2,3]}")));
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
+                                        expected_substream + "/"  +
+                                        expected_group_id + "/nacks?token=" + expected_token + "&from=1&to=0", _, _)).WillOnce(DoAll(
+                                                    SetArgPointee<1>(HttpCode::OK),
+                                                    SetArgPointee<2>(nullptr),
+                                                    Return(error ? "" : "{\"unacknowledged\":[1,2,3]}")));
 }
 
 TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) {
@@ -1206,17 +1209,18 @@ TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) {
     asapo::Error err;
     auto list = data_broker->GetUnacknowledgedTupleIds(expected_group_id, expected_substream, 1, 0, &err);
 
-    ASSERT_THAT(list, ElementsAre(1,2,3));
+    ASSERT_THAT(list, ElementsAre(1, 2, 3));
     ASSERT_THAT(err, Eq(nullptr));
 }
 
 
 void ServerDataBrokerTests::ExpectLastAckId(bool empty_response) {
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/"+expected_substream+"/"  +
-        expected_group_id + "/lastack?token=" + expected_token,_,_)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return(empty_response?"{\"lastAckId\":0}":"{\"lastAckId\":1}")));
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
+                                        expected_substream + "/"  +
+                                        expected_group_id + "/lastack?token=" + expected_token, _, _)).WillOnce(DoAll(
+                                                    SetArgPointee<1>(HttpCode::OK),
+                                                    SetArgPointee<2>(nullptr),
+                                                    Return(empty_response ? "{\"lastAckId\":0}" : "{\"lastAckId\":1}")));
 }
 
 
@@ -1251,14 +1255,14 @@ TEST_F(ServerDataBrokerTests, ResendNacks) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/"
-                                            + expected_group_id + "/next?token="
-                                            + expected_token+"&resend_nacks=true&delay_sec=10&resend_attempts=3", _,
+                                        + expected_group_id + "/next?token="
+                                        + expected_token + "&resend_nacks=true&delay_sec=10&resend_attempts=3", _,
                                         _)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return("")));
+                                                SetArgPointee<1>(HttpCode::OK),
+                                                SetArgPointee<2>(nullptr),
+                                                Return("")));
 
-    data_broker->SetResendNacs(true,10,3);
+    data_broker->SetResendNacs(true, 10, 3);
     data_broker->GetNext(&info, expected_group_id, nullptr);
 }
 
@@ -1266,15 +1270,16 @@ TEST_F(ServerDataBrokerTests, ResendNacks) {
 TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
     auto expected_neg_acknowledge_command = R"({"Op":"negackimage","Params":{"DelaySec":10}})";
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/"+expected_substream+"/"  +
-        expected_group_id
-                                             + "/" + std::to_string(expected_dataset_id) + "?token="
-                                             + expected_token,_,expected_neg_acknowledge_command, _,_)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("")));
-
-    auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id,10, expected_substream);
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
+                                         expected_substream + "/"  +
+                                         expected_group_id
+                                         + "/" + std::to_string(expected_dataset_id) + "?token="
+                                         + expected_token, _, expected_neg_acknowledge_command, _, _)).WillOnce(DoAll(
+                                                     SetArgPointee<3>(HttpCode::OK),
+                                                     SetArgPointee<4>(nullptr),
+                                                     Return("")));
+
+    auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10, expected_substream);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
diff --git a/consumer/api/python/CMakeLists_Linux.cmake b/consumer/api/python/CMakeLists_Linux.cmake
index f29e2096677ddab9cc77b62bdfa6ade15db06c47..5c6be372a27df3d93b7dabfd119496fe57ad04ab 100644
--- a/consumer/api/python/CMakeLists_Linux.cmake
+++ b/consumer/api/python/CMakeLists_Linux.cmake
@@ -1,12 +1,19 @@
+
+if (ENABLE_LIBFABRIC)
+    set (TEMP_ADDITIONAL_LINK_ARGS_PART "'-lfabric'")
+else()
+    set (TEMP_ADDITIONAL_LINK_ARGS_PART "")
+endif()
+
 if ((CMAKE_BUILD_TYPE STREQUAL "Debug") AND (CMAKE_C_COMPILER_ID STREQUAL "GNU"))
     set (EXTRA_COMPILE_ARGS "['--std=c++11']")
-    set (EXTRA_LINK_ARGS "['--coverage','-fprofile-arcs','-ftest-coverage','-static-libgcc','-static-libstdc++']")
+    set (EXTRA_LINK_ARGS "['--coverage','-fprofile-arcs','-ftest-coverage','-static-libgcc','-static-libstdc++',${TEMP_ADDITIONAL_LINK_ARGS_PART}]")
 ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "GNU")
     set (EXTRA_COMPILE_ARGS "['--std=c++11']")
-    set (EXTRA_LINK_ARGS "['-static-libgcc','-static-libstdc++','-Wl,--exclude-libs,ALL']")
+    set (EXTRA_LINK_ARGS "['-static-libgcc','-static-libstdc++','-Wl,--exclude-libs,ALL',${TEMP_ADDITIONAL_LINK_ARGS_PART}]")
 else()
     set (EXTRA_COMPILE_ARGS "['-std=c++11']")
-    set (EXTRA_LINK_ARGS "[]")
+    set (EXTRA_LINK_ARGS "[${TEMP_ADDITIONAL_LINK_ARGS_PART}]")
 ENDIF()
 
 configure_files(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} @ONLY)
diff --git a/consumer/api/python/asapo_consumer.pxd b/consumer/api/python/asapo_consumer.pxd
index afe1db63f8ed665c99a75e84d6012939fd02961c..6c57b0f33aff209a837e6c186e0945c32f038f78 100644
--- a/consumer/api/python/asapo_consumer.pxd
+++ b/consumer/api/python/asapo_consumer.pxd
@@ -45,26 +45,35 @@ cdef extern from "asapo_consumer.h" namespace "asapo":
     string stream
     string user_token
 
+cdef extern from "asapo_consumer.h" namespace "asapo":
+  cppclass NetworkConnectionType:
+    pass
+  NetworkConnectionType NetworkConnectionType_kUndefined "asapo::NetworkConnectionType::kUndefined"
+  NetworkConnectionType NetworkConnectionType_kAsapoTcp "asapo::NetworkConnectionType::kAsapoTcp"
+  NetworkConnectionType NetworkConnectionType_kFabric "asapo::NetworkConnectionType::kFabric"
+
 cdef extern from "asapo_consumer.h" namespace "asapo" nogil:
     cdef cppclass DataBroker:
         DataBroker() except +
         void SetTimeout(uint64_t timeout_ms)
+        void ForceNoRdma()
+        NetworkConnectionType CurrentConnectionType()
         Error GetNext(FileInfo* info, string group_id,string substream, FileData* data)
         Error GetLast(FileInfo* info, string group_id,string substream, FileData* data)
-        Error GetById(uint64_t id, FileInfo* info, string group_id,string substream, FileData* data)
+        Error GetById(uint64_t id, FileInfo* info, string group_id, string substream, FileData* data)
         uint64_t GetCurrentSize(string substream, Error* err)
-        Error SetLastReadMarker(uint64_t value, string group_id,string substream)
-        Error ResetLastReadMarker(string group_id,string substream)
+        Error SetLastReadMarker(uint64_t value, string group_id, string substream)
+        Error ResetLastReadMarker(string group_id, string substream)
         Error Acknowledge(string group_id, uint64_t id, string substream)
         Error NegativeAcknowledge(string group_id, uint64_t id, uint64_t delay_sec, string substream)
         uint64_t GetLastAcknowledgedTulpeId(string group_id, string substream, Error* error)
         IdList GetUnacknowledgedTupleIds(string group_id, string substream, uint64_t from_id, uint64_t to_id, Error* error)
         string GenerateNewGroupId(Error* err)
         string GetBeamtimeMeta(Error* err)
-        FileInfos QueryImages(string query,string substream, Error* err)
-        DataSet GetNextDataset(string group_id,string substream, Error* err)
-        DataSet GetLastDataset(string group_id,string substream, Error* err)
-        DataSet GetDatasetById(uint64_t id,string group_id,string substream, Error* err)
+        FileInfos QueryImages(string query, string substream, Error* err)
+        DataSet GetNextDataset(string group_id, string substream, Error* err)
+        DataSet GetLastDataset(string group_id, string substream, Error* err)
+        DataSet GetDatasetById(uint64_t id, string group_id, string substream, Error* err)
         Error RetrieveData(FileInfo* info, FileData* data)
         vector[string] GetSubstreamList(Error* err)
         void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts)
@@ -88,4 +97,3 @@ cdef extern from "asapo_consumer.h" namespace "asapo":
     uint64_t id
     uint64_t id_max
     string next_substream
-
diff --git a/consumer/api/python/asapo_consumer.pyx.in b/consumer/api/python/asapo_consumer.pyx.in
index 3b3ac2efb227c99fcac32e7f9e9fe6e302688cd8..84307fe9ab64cc69c2b0c209907ec966c8e633c7 100644
--- a/consumer/api/python/asapo_consumer.pyx.in
+++ b/consumer/api/python/asapo_consumer.pyx.in
@@ -163,6 +163,19 @@ cdef class PyDataBroker:
         return size
     def set_timeout(self,timeout):
         self.c_broker.SetTimeout(timeout)
+    def force_no_rdma(self):
+        self.c_broker.ForceNoRdma()
+    def current_connection_type(self):
+        cdef NetworkConnectionType connection_type = self.c_broker.CurrentConnectionType()
+        cdef int cased = <int>connection_type
+        cdef string result = "Unknown"
+        if cased == <int>NetworkConnectionType_kUndefined:
+            result = "No connection"
+        elif cased == <int>NetworkConnectionType_kAsapoTcp:
+            result = "TCP"
+        elif cased == <int>NetworkConnectionType_kFabric:
+            result = "Fabric"
+        return result.decode('utf-8')
     def set_lastread_marker(self,value,group_id, substream = "default"):
         cdef string b_group_id = _bytes(group_id)
         cdef string b_substream = _bytes(substream)
diff --git a/consumer/api/python/setup.py.in b/consumer/api/python/setup.py.in
index 0de6e24cd6f79c81acc3a76cfce082e534f522db..655dd52cb18c5630a51cd97a4c1426aaabc7c146 100644
--- a/consumer/api/python/setup.py.in
+++ b/consumer/api/python/setup.py.in
@@ -6,7 +6,7 @@ from Cython.Build import cythonize
 import numpy
 
 module = Extension("asapo_consumer", ["asapo_consumer.pyx"],
-                       extra_objects=['$<TARGET_FILE:asapo-consumer>',
+                       extra_objects=['$<TARGET_FILE:asapo-consumer>','$<TARGET_FILE:asapo-fabric>',
                                       '@CURL_LIBRARIES@'],
                        include_dirs=["@ASAPO_CXX_COMMON_INCLUDE_DIR@","@ASAPO_CONSUMER_INCLUDE_DIR@",numpy.get_include()],
                        extra_compile_args=@EXTRA_COMPILE_ARGS@,
diff --git a/consumer/api/python/source_dist_linux/CMakeLists.txt b/consumer/api/python/source_dist_linux/CMakeLists.txt
index 1fb9c9c47db66105dbee3bdeaf1d98ad1dfdab07..cc3b1dc5e259f1098c8811cf4754bc6a1aadd618 100644
--- a/consumer/api/python/source_dist_linux/CMakeLists.txt
+++ b/consumer/api/python/source_dist_linux/CMakeLists.txt
@@ -17,6 +17,7 @@ ADD_CUSTOM_TARGET(copy_python_dist ALL
         COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/lib
         COMMAND ${CMAKE_COMMAND} -E copy ${CURL_LIBRARIES} ${CMAKE_CURRENT_BINARY_DIR}/lib
         COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:asapo-consumer> ${CMAKE_CURRENT_BINARY_DIR}/lib
+        COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:asapo-fabric> ${CMAKE_CURRENT_BINARY_DIR}/lib
 
         )
 
diff --git a/consumer/api/python/source_dist_linux/setup.py.in b/consumer/api/python/source_dist_linux/setup.py.in
index a171722aba0b394c126e6469cb7155adb19497e8..5da1c0dfd88338da59fa5fa0ed342a963c30b4da 100644
--- a/consumer/api/python/source_dist_linux/setup.py.in
+++ b/consumer/api/python/source_dist_linux/setup.py.in
@@ -2,9 +2,10 @@ from distutils.core import setup
 from distutils.core import Extension
 import numpy
 
+# TODO 'lib/libcurl.a' seems to have no effect, because there is a libcurl.so file
 ext_modules = [
     Extension("asapo_consumer", ["asapo_consumer.cpp"],
-              extra_objects=['lib/libasapo-consumer.a', 'lib/libcurl.a'],
+              extra_objects=['lib/libasapo-consumer.a', 'lib/libasapo-fabric.a', 'lib/libcurl.a'],
                        include_dirs=["include/common","include",numpy.get_include()],
                        extra_compile_args=@EXTRA_COMPILE_ARGS@,
                        extra_link_args=@EXTRA_LINK_ARGS@,
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json b/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json
index 3c9e6b1554fc58141d37736fb46719bbac1db4da..f2f6b0f2eb404de90825b1924a8c5493ef8591d2 100644
--- a/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json
@@ -9,7 +9,8 @@
   "DataServer": {
     "AdvertiseURI": "auto",
     "NThreads": {{ .Values.ownServices.receiver.dataServer.nThreads }},
-    "ListenPort": {{ .Values.ownServices.receiver.dataServer.port }}
+    "ListenPort": {{ .Values.ownServices.receiver.dataServer.port }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": {{ .Values.ownServices.receiver.dataCache.enable }},
diff --git a/deploy/asapo_services/scripts/asapo-receivers.nmd.tpl b/deploy/asapo_services/scripts/asapo-receivers.nmd.tpl
index bbbf4dfd86e25e1297a69fdac15fa76456923d31..efe823c70e6294c890b7293cfd621a547f3c0758 100644
--- a/deploy/asapo_services/scripts/asapo-receivers.nmd.tpl
+++ b/deploy/asapo_services/scripts/asapo-receivers.nmd.tpl
@@ -83,7 +83,7 @@ job "asapo-receivers" {
 
 
       template {
-         source        = "${scripts_dir}/receiver.json.tpl"
+         source        = "${scripts_dir}/receiver_tcp.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
diff --git a/deploy/asapo_services/scripts/receiver.json.tpl b/deploy/asapo_services/scripts/receiver.json.tpl
index a5f8192358812e3af2b281e48db0f2cdef0becb8..3216854994de0dc9698bfefc78d6e2c15b7218de 100644
--- a/deploy/asapo_services/scripts/receiver.json.tpl
+++ b/deploy/asapo_services/scripts/receiver.json.tpl
@@ -9,7 +9,8 @@
   "DataServer": {
     "AdvertiseURI": "{{ if or (env "meta.ib_address") "none" | regexMatch "none" }}{{ env "NOMAD_IP_recv" }}{{ else }}{{ env "meta.ib_address" }}{{ end }}:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": {{ env "NOMAD_META_receiver_dataserver_nthreads" }},
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,
diff --git a/examples/consumer/getnext_broker/getnext_broker.cpp b/examples/consumer/getnext_broker/getnext_broker.cpp
index a792e1f2199cab85cfa32f01e1ae6cfca3e43b14..037eaf4606823b9ea51acabbd87e77df4ee12ed1 100644
--- a/examples/consumer/getnext_broker/getnext_broker.cpp
+++ b/examples/consumer/getnext_broker/getnext_broker.cpp
@@ -1,15 +1,13 @@
 #include <iostream>
 #include <memory>
 #include <vector>
-#include <algorithm>
 #include <thread>
 #include <chrono>
-#include <iomanip>
 #include <numeric>
 #include <mutex>
 #include <string>
 #include <sstream>
-
+#include <condition_variable>
 
 #include "asapo_consumer.h"
 
@@ -21,6 +19,17 @@ std::mutex lock;
 
 uint64_t file_size = 0;
 
+inline std::string ConnectionTypeToString(asapo::NetworkConnectionType type) {
+    switch (type) {
+    case asapo::NetworkConnectionType::kUndefined:
+        return "No connection";
+    case asapo::NetworkConnectionType::kAsapoTcp:
+        return "TCP";
+    case asapo::NetworkConnectionType::kFabric:
+        return "Fabric";
+    }
+    return "Unknown type";
+}
 
 struct Args {
     std::string server;
@@ -34,6 +43,41 @@ struct Args {
     bool datasets;
 };
 
+class LatchedTimer {
+  private:
+    int count_; // guarded by 'mutex'; volatile is neither needed nor sufficient for thread safety
+    std::chrono::high_resolution_clock::time_point start_time_ = std::chrono::high_resolution_clock::time_point::max();
+    std::mutex mutex;
+    std::condition_variable waiter;
+  public:
+    explicit LatchedTimer(int count) : count_{count} {}
+
+    void count_down_and_wait() {
+        std::unique_lock<std::mutex> local_lock(mutex);
+        if (0 == count_) {
+            return;
+        }
+        count_--;
+        if (0 == count_) {
+            waiter.notify_all();
+            const std::chrono::high_resolution_clock::time_point now = std::chrono::high_resolution_clock::now();
+            start_time_ = now;
+        } else {
+            while (count_ > 0) {
+                waiter.wait(local_lock);
+            }
+        }
+    }
+
+    bool was_triggered() const {
+        return start_time() != std::chrono::high_resolution_clock::time_point::max();
+    }
+
+    std::chrono::high_resolution_clock::time_point start_time() const {
+        return start_time_;
+    };
+};
+
 void WaitThreads(std::vector<std::thread>* threads) {
     for (auto& thread : *threads) {
         thread.join();
@@ -46,16 +90,20 @@ int ProcessError(const Error& err) {
     return err == asapo::ConsumerErrorTemplates::kEndOfStream ? 0 : 1;
 }
 
-std::vector<std::thread> StartThreads(const Args& params,
-                                      std::vector<int>* nfiles,
-                                      std::vector<int>* errors,
-                                      std::vector<int>* nbuf,
-                                      std::vector<int>* nfiles_total) {
-    auto exec_next = [&params, nfiles, errors, nbuf, nfiles_total](int i) {
+std::vector<std::thread>
+StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* errors, std::vector<int>* nbuf,
+             std::vector<int>* nfiles_total, std::vector<asapo::NetworkConnectionType>* connection_type,
+             LatchedTimer* timer) {
+    auto exec_next = [&params, nfiles, errors, nbuf, nfiles_total, connection_type, timer](int i) {
         asapo::FileInfo fi;
         Error err;
         auto broker = asapo::DataBrokerFactory::CreateServerBroker(params.server, params.file_path, true,
                       asapo::SourceCredentials{params.beamtime_id, "", params.stream, params.token}, &err);
+        if (err) {
+            std::cout << "Error CreateServerBroker: " << err << std::endl;
+            exit(EXIT_FAILURE);
+        }
+        //broker->ForceNoRdma();
 
         broker->SetTimeout((uint64_t) params.timeout_ms);
         asapo::FileData data;
@@ -80,6 +128,8 @@ std::vector<std::thread> StartThreads(const Args& params,
                 std::cout << "Cannot get metadata: " << err->Explain() << std::endl;
             }
         }
+
+        bool isFirstFile = true;
         while (true) {
             if (params.datasets) {
                 auto dataset = broker->GetNextDataset(group_id, &err);
@@ -91,6 +141,10 @@ std::vector<std::thread> StartThreads(const Args& params,
                 }
             } else {
                 err = broker->GetNext(&fi, group_id, params.read_data ? &data : nullptr);
+                if (isFirstFile) {
+                    isFirstFile = false;
+                    timer->count_down_and_wait();
+                }
                 if (err == nullptr) {
                     (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1;
                     if (file_size == 0) {
@@ -106,14 +160,14 @@ std::vector<std::thread> StartThreads(const Args& params,
             }
 
             if (err) {
-                (*errors)[i] += ProcessError(err);
-                if (err) {
-                    std::cout << "Thread exit: " << i << std::endl;
-                    break;
-                }
+                (*errors)[i] += ProcessError(err); // If the error is significant it will be printed here
+                std::cout << "Thread exit: " << i << std::endl;
+                break;
             }
             (*nfiles)[i]++;
         }
+
+        (*connection_type)[i] = broker->CurrentConnectionType();
     };
 
     std::vector<std::thread> threads;
@@ -123,26 +177,64 @@ std::vector<std::thread> StartThreads(const Args& params,
     return threads;
 }
 
-int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nbuf, int* nfiles_total) {
+int ReadAllData(const Args& params, uint64_t* duration_ms, uint64_t* duration_without_first_ms, int* nerrors, int* nbuf,
+                int* nfiles_total,
+                asapo::NetworkConnectionType* connection_type) {
     asapo::FileInfo fi;
-    system_clock::time_point t1 = system_clock::now();
+    std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
 
     std::vector<int> nfiles(params.nthreads, 0);
     std::vector<int> errors(params.nthreads, 0);
     std::vector<int> nfiles_frombuf(params.nthreads, 0);
     std::vector<int> nfiles_total_in_datasets(params.nthreads, 0);
+    std::vector<asapo::NetworkConnectionType> connection_types(params.nthreads, asapo::NetworkConnectionType::kUndefined);
+
+    LatchedTimer latched_timer(params.nthreads);
 
-    auto threads = StartThreads(params, &nfiles, &errors, &nfiles_frombuf, &nfiles_total_in_datasets);
+    auto threads = StartThreads(params, &nfiles, &errors, &nfiles_frombuf, &nfiles_total_in_datasets, &connection_types,
+                                &latched_timer);
     WaitThreads(&threads);
 
+    std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
+    auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
+    *duration_ms = duration_read.count();
+    if (latched_timer.was_triggered()) {
+        auto duration_without_first = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - latched_timer.start_time());
+        *duration_without_first_ms = duration_without_first.count();
+    } else {
+        *duration_without_first_ms = 0;
+    }
+
     int n_total = std::accumulate(nfiles.begin(), nfiles.end(), 0);
     *nerrors = std::accumulate(errors.begin(), errors.end(), 0);
     *nbuf = std::accumulate(nfiles_frombuf.begin(), nfiles_frombuf.end(), 0);
     *nfiles_total = std::accumulate(nfiles_total_in_datasets.begin(), nfiles_total_in_datasets.end(), 0);
 
-    system_clock::time_point t2 = system_clock::now();
-    auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
-    *duration_ms = duration_read.count();
+    // The following two loops will check if all threads that processed some data were using the same network type
+    {
+        int firstThreadThatActuallyProcessedData = 0;
+        for (int i = 0; i < params.nthreads; i++) {
+            if (nfiles[i] > 0) {
+                firstThreadThatActuallyProcessedData = i;
+                break;
+            }
+        }
+
+        *connection_type = connection_types[firstThreadThatActuallyProcessedData];
+        for (int i = 0; i < params.nthreads; i++) {
+            if (*connection_type != connection_types[i] && nfiles[i] > 0) {
+                // The output will look like this:
+                // ERROR thread[0](processed 5 files) connection type is 'No connection' but thread[1](processed 3 files) is 'TCP'
+
+                std::cout << "ERROR thread[" << i << "](processed " << nfiles[i] << " files) connection type is '" <<
+                          ConnectionTypeToString(connection_types[i]) << "' but thread["
+                          << firstThreadThatActuallyProcessedData << "](processed "
+                          << nfiles[firstThreadThatActuallyProcessedData] << " files) is '" << ConnectionTypeToString(
+                              *connection_type) << "'" << std::endl;
+            }
+        }
+    }
+
     return n_total;
 }
 
@@ -165,7 +257,6 @@ void TryGetStream(Args* args) {
 
 }
 
-
 int main(int argc, char* argv[]) {
     asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
     Args params;
@@ -188,9 +279,19 @@ int main(int argc, char* argv[]) {
     if (argc == 9) {
         params.datasets = atoi(argv[8]) == 1;
     }
+
+    if (params.read_data) {
+        std::cout << "Will read metadata+payload" << std::endl;
+    } else {
+        std::cout << "Will only read metadata" << std::endl;
+    }
+
     uint64_t duration_ms;
+    uint64_t duration_without_first_ms;
     int nerrors, nbuf, nfiles_total;
-    auto nfiles = ReadAllData(params, &duration_ms, &nerrors, &nbuf, &nfiles_total);
+    asapo::NetworkConnectionType connectionType;
+    auto nfiles = ReadAllData(params, &duration_ms, &duration_without_first_ms, &nerrors, &nbuf, &nfiles_total,
+                              &connectionType);
     std::cout << "Processed " << nfiles << (params.datasets ? " dataset(s)" : " file(s)") << std::endl;
     if (params.datasets) {
         std::cout << "  with " << nfiles_total << " file(s)" << std::endl;
@@ -201,13 +302,21 @@ int main(int argc, char* argv[]) {
         std::cout << "  from filesystem: " << nfiles - nerrors - nbuf << std::endl;
     }
     std::cout << "Errors : " << nerrors << std::endl;
-    std::cout << "Elapsed : " << duration_ms << "ms" << std::endl;
-    auto rate = 1000.0f * nfiles / (duration_ms - params.timeout_ms);
+    float rate;
+    if (duration_without_first_ms == 0) {
+        std::cout << "Elapsed : " << duration_ms << "ms" << std::endl;
+        rate = 1000.0f * nfiles / (duration_ms - params.timeout_ms);
+    } else {
+        std::cout << "Elapsed : " << duration_without_first_ms << "ms (With handshake: " << duration_ms << "ms)" << std::endl;
+        rate = 1000.0f * nfiles / (duration_without_first_ms - params.timeout_ms);
+    }
     auto bw_gbytes = rate * file_size / 1000.0f / 1000.0f / 1000.0f;
     std::cout << "Rate : " << rate << std::endl;
     if (file_size > 0) {
         std::cout << "Bandwidth " << bw_gbytes * 8 << " Gbit/s" << std::endl;
         std::cout << "Bandwidth " << bw_gbytes << " GBytes/s" << std::endl;
     }
+
+    std::cout << "Using connection type: " << ConnectionTypeToString(connectionType) << std::endl;
     return nerrors == 0 ? 0 : 1;
 }
diff --git a/examples/consumer/getnext_broker_python/check_windows.bat b/examples/consumer/getnext_broker_python/check_windows.bat
index 44980989369cddff912cad3685b940f2c42ef69b..7562b1734294dd3dc066d278075de4cd48b954ef 100644
--- a/examples/consumer/getnext_broker_python/check_windows.bat
+++ b/examples/consumer/getnext_broker_python/check_windows.bat
@@ -16,22 +16,22 @@ echo db.meta.insert({"_id":0,"meta_test":"test"}) | %mongo_exe% %database_name%
 
 set PYTHONPATH=%1
 
-python3 getnext.py 127.0.0.1:8400  %source_path% %beamtime_id%  %token_test_run% %group_id% > out
+python3 getnext.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_test_run% %group_id% > out
 type out
 type out | findstr /c:"100" || goto :error
 type out | findstr /c:"\"_id\": 1" || goto :error
 type out | findstr /c:"\"meta_test\": \"test\"" || goto :error
 
-python3 getnext.py 127.0.0.1:8400  %source_path% %beamtime_id%  %token_test_run% %group_id% > out
+python3 getnext.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_test_run% %group_id% > out
 type out
 type out | findstr /c:"\"_id\": 2" || goto :error
 
-python3 getnext.py 127.0.0.1:8400  %source_path% %beamtime_id%  %token_test_run% %group_id% > out
+python3 getnext.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_test_run% %group_id% > out
 type out
 type out | findstr /c:"\"_id\": 3" || goto :error
 
 
-python3 getnext.py 127.0.0.1:8400  %source_path% %beamtime_id%  %token_test_run% new > out
+python3 getnext.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_test_run% new > out
 type out
 type out | findstr /c:"100" || goto :error
 type out | findstr /c:"\"_id\": 1" || goto :error
diff --git a/examples/pipeline/in_to_out/check_linux.sh b/examples/pipeline/in_to_out/check_linux.sh
index 8d5d293f9be46fd6ddc7e79e92c850ff5155ca59..6490fbadbf464a8708f57769e9b0715bdb28483f 100644
--- a/examples/pipeline/in_to_out/check_linux.sh
+++ b/examples/pipeline/in_to_out/check_linux.sh
@@ -43,7 +43,7 @@ Cleanup() {
 nomad run nginx.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run authorizer.nmd
 
 mkdir -p $receiver_folder
diff --git a/examples/pipeline/in_to_out_python/check_linux.sh b/examples/pipeline/in_to_out_python/check_linux.sh
index b1780ca9beab0634fb69b5ad644bc4001258f5d2..330f409736963582fe673954211dd6106b83b8ff 100644
--- a/examples/pipeline/in_to_out_python/check_linux.sh
+++ b/examples/pipeline/in_to_out_python/check_linux.sh
@@ -45,7 +45,7 @@ Cleanup() {
 nomad run nginx.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run authorizer.nmd
 
 mkdir -p $receiver_folder
diff --git a/examples/pipeline/in_to_out_python/in_to_out.py b/examples/pipeline/in_to_out_python/in_to_out.py
index e3444b92a91b28ab1dcd5aa83ab6cfa9fdc8318e..fa9c2c08314b53356c71b67ab5a018f39fab321e 100644
--- a/examples/pipeline/in_to_out_python/in_to_out.py
+++ b/examples/pipeline/in_to_out_python/in_to_out.py
@@ -2,11 +2,9 @@ from __future__ import print_function
 
 import asapo_consumer
 import asapo_producer
-import json
 import sys
-import time
-
 import threading
+
 lock = threading.Lock()
 
 
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index 25f6e18851512689427c7fe321e9ce3db3d93500..e8a40201c74d8c6ce139364fada60aae6cf07b16 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -70,7 +70,7 @@ void ProcessCommandArguments(int argc, char* argv[], Args* args) {
     if (argc != 8 && argc != 9) {
         std::cout <<
                   "Usage: " << argv[0] <<
-                  " <destination> <beamtime_id[%<stream>%<token>]> <number_of_byte> <iterations> <nthreads>"
+                  " <destination> <beamtime_id[%<stream>%<token>]> <number_of_kbyte> <iterations> <nthreads>"
                   " <mode x0 -t tcp, x1 - filesystem, 0x - write files, 1x - do not write files> <timeout (sec)> [n images in set (default 1)]"
                   << std::endl;
         exit(EXIT_FAILURE);
@@ -194,7 +194,7 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
 void PrintOutput(const Args& args, const system_clock::time_point& start) {
     system_clock::time_point t2 = system_clock::now();
     double duration_sec = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - start ).count() / 1000.0;
-    double size_gb = double(args.number_of_bytes) * args.iterations / 1000.0  / 1000.0 / 1000.0 * 8.0;
+    double size_gb = double(args.number_of_bytes) * args.iterations / 1000.0 / 1000.0 / 1000.0 * 8.0;
     double rate = args.iterations / duration_sec;
     std::cout << "Rate: " << rate << " Hz" << std::endl;
     std::cout << "Bandwidth " << size_gb / duration_sec << " Gbit/s" << std::endl;
diff --git a/examples/producer/simple-producer/produce.cpp b/examples/producer/simple-producer/produce.cpp
index c1f850bc0c428a66cac85dcdc92e8b8942b23bb1..68f15c8d2575d567e6bfe94ae29f94a89c734874 100644
--- a/examples/producer/simple-producer/produce.cpp
+++ b/examples/producer/simple-producer/produce.cpp
@@ -25,14 +25,14 @@ int main(int argc, char* argv[]) {
     auto source = "asapo-services2:8400";
     auto beamtime = "asapo_test";
 
-    auto producer = asapo::Producer::Create(source, 1,asapo::RequestHandlerType::kTcp,
+    auto producer = asapo::Producer::Create(source, 1, asapo::RequestHandlerType::kTcp,
                                             asapo::SourceCredentials{beamtime, "", "", ""}, 60, &err);
     exit_if_error("Cannot start producer", err);
 
     std::string to_send = "hello";
     auto send_size = to_send.size() + 1;
     auto buffer =  asapo::FileData(new uint8_t[send_size]);
-    memcpy(buffer.get(),to_send.c_str(),send_size);
+    memcpy(buffer.get(), to_send.c_str(), send_size);
 
     asapo::EventHeader event_header{1, send_size, "test_file"};
     err = producer->SendData(event_header, std::move(buffer), asapo::kDefaultIngestMode, &ProcessAfterSend);
diff --git a/producer/api/cpp/include/producer/common.h b/producer/api/cpp/include/producer/common.h
index 289f5099922f0eb2bedb905d1454be40ebb3a579..998d74f1ae358079e00f652a7d18ddc536defb67 100644
--- a/producer/api/cpp/include/producer/common.h
+++ b/producer/api/cpp/include/producer/common.h
@@ -13,8 +13,8 @@ const uint8_t kMaxProcessingThreads = 32;
 
 
 struct RequestCallbackPayload {
-  GenericRequestHeader original_header;
-  std::string response; 
+    GenericRequestHeader original_header;
+    std::string response;
 };
 
 using RequestCallback =  std::function<void(RequestCallbackPayload, Error)>;
diff --git a/producer/api/cpp/include/producer/producer.h b/producer/api/cpp/include/producer/producer.h
index 783c92c7e0ffe62187e888341b08cb7441d09e9c..ffd752a1e7e49831fbc796bd4b13b0c8eabba9aa 100644
--- a/producer/api/cpp/include/producer/producer.h
+++ b/producer/api/cpp/include/producer/producer.h
@@ -25,15 +25,15 @@ class Producer {
 
     virtual ~Producer() = default;
 
-  //! Get substream information from receiver
-  /*!
-    \param substream (optional) - substream
-    \return StreamInfo - a structure with substream information
-  */
-  virtual StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_ms, Error* err) const = 0;
-  virtual StreamInfo GetStreamInfo(uint64_t timeout_ms, Error* err) const = 0;
-
-  //! Sends data to the receiver
+    //! Get substream information from receiver
+    /*!
+      \param substream (optional) - substream
+      \return StreamInfo - a structure with substream information
+    */
+    virtual StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_ms, Error* err) const = 0;
+    virtual StreamInfo GetStreamInfo(uint64_t timeout_ms, Error* err) const = 0;
+
+    //! Sends data to the receiver
     /*!
       \param event_header - A stucture with the meta information (file name, size, a string with user metadata (JSON format)).
       \param data - A pointer to the data to send
diff --git a/producer/api/cpp/src/producer_impl.cpp b/producer/api/cpp/src/producer_impl.cpp
index 50760be369e229de6cd2cc5f9fa223efe4be270e..e6054e30a073e197f70c21283ec7bae7b2f7e1aa 100644
--- a/producer/api/cpp/src/producer_impl.cpp
+++ b/producer/api/cpp/src/producer_impl.cpp
@@ -261,34 +261,36 @@ Error ProducerImpl::SendFile(const EventHeader& event_header,
 
 }
 
-using RequestCallbackWithPromise = void (*)(std::shared_ptr<std::promise<StreamInfoResult>>, RequestCallbackPayload header, Error err);
+using RequestCallbackWithPromise = void (*)(std::shared_ptr<std::promise<StreamInfoResult>>,
+                                            RequestCallbackPayload header, Error err);
 
-RequestCallback unwrap_callback(RequestCallbackWithPromise callback, std::unique_ptr<std::promise<StreamInfoResult>> promise) {
+RequestCallback unwrap_callback(RequestCallbackWithPromise callback,
+                                std::unique_ptr<std::promise<StreamInfoResult>> promise) {
     auto shared_promise = std::shared_ptr<std::promise<StreamInfoResult>>(std::move(promise));
     RequestCallback wrapper = [ = ](RequestCallbackPayload payload, Error err) -> void {
-      callback(shared_promise, std::move(payload), std::move(err));
+        callback(shared_promise, std::move(payload), std::move(err));
     };
     return wrapper;
 }
 
-void ActivatePromise(std::shared_ptr<std::promise<StreamInfoResult>> promise, RequestCallbackPayload payload, Error err) {
+void ActivatePromise(std::shared_ptr<std::promise<StreamInfoResult>> promise, RequestCallbackPayload payload,
+                     Error err) {
     StreamInfoResult res;
     if (err == nullptr) {
         auto ok = res.sinfo.SetFromJson(payload.response);
-        res.err=ok?nullptr:ProducerErrorTemplates::kInternalServerError.Generate(
-            std::string("cannot read JSON string from server response: ")+payload.response).release();
+        res.err = ok ? nullptr : ProducerErrorTemplates::kInternalServerError.Generate(
+                      std::string("cannot read JSON string from server response: ") + payload.response).release();
     } else {
-        res.err=err.release();
+        res.err = err.release();
     }
     try {
         promise->set_value(res);
-    }
-    catch(...){}
+    } catch(...) {}
 }
 
-StreamInfo GetInfroFromCallback(std::future<StreamInfoResult>* promiseResult,uint64_t timeout_sec, Error* err) {
+StreamInfo GetInfroFromCallback(std::future<StreamInfoResult>* promiseResult, uint64_t timeout_sec, Error* err) {
     try {
-        auto status = promiseResult->wait_for(std::chrono::milliseconds(timeout_sec*1000));
+        auto status = promiseResult->wait_for(std::chrono::milliseconds(timeout_sec * 1000));
         if (status == std::future_status::ready) {
             auto res = promiseResult->get();
             if (res.err == nullptr) {
@@ -298,32 +300,33 @@ StreamInfo GetInfroFromCallback(std::future<StreamInfoResult>* promiseResult,uin
                 return StreamInfo{};
             }
         }
-    } catch(...){}
+    } catch(...) {}
 
     *err = ProducerErrorTemplates::kTimeout.Generate();
     return StreamInfo{};
 }
 
 StreamInfo ProducerImpl::GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const {
-    GenericRequestHeader request_header{kOpcodeStreamInfo, 0, 0,0, "", substream};
+    GenericRequestHeader request_header{kOpcodeStreamInfo, 0, 0, 0, "", substream};
     std::unique_ptr<std::promise<StreamInfoResult>> promise {new std::promise<StreamInfoResult>};
-    std::future<StreamInfoResult> promiseResult= promise->get_future();
+    std::future<StreamInfoResult> promiseResult = promise->get_future();
 
     *err = request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(request_header),
-                                                                                            nullptr, "", "",
-                                                                                            unwrap_callback(ActivatePromise,std::move(promise)), true,
-                                                                                            timeout_sec*1000}
+                nullptr, "", "",
+                unwrap_callback(ActivatePromise, std::move(promise)), true,
+                timeout_sec * 1000}
     }, true);
     if (*err) {
         return StreamInfo{};
     }
 
-    return GetInfroFromCallback(&promiseResult,timeout_sec+2,err); // we give two more sec for request to exit by timeout
+    return GetInfroFromCallback(&promiseResult, timeout_sec + 2,
+                                err); // we give two more sec for request to exit by timeout
 
 }
 
 StreamInfo ProducerImpl::GetStreamInfo(uint64_t timeout_sec, Error* err) const {
-    return GetStreamInfo(kDefaultSubstream,timeout_sec, err);
+    return GetStreamInfo(kDefaultSubstream, timeout_sec, err);
 }
 
 }
\ No newline at end of file
diff --git a/producer/api/cpp/src/producer_impl.h b/producer/api/cpp/src/producer_impl.h
index dc78bd75e1d618a509467787f51d75b6bc3376eb..c5f0583f2a80dd06256f4d6b0b98c6742f796a65 100644
--- a/producer/api/cpp/src/producer_impl.h
+++ b/producer/api/cpp/src/producer_impl.h
@@ -70,8 +70,8 @@ class ProducerImpl : public Producer {
 };
 
 struct StreamInfoResult {
-  StreamInfo sinfo;
-  ErrorInterface* err;
+    StreamInfo sinfo;
+    ErrorInterface* err;
 };
 
 
diff --git a/producer/api/cpp/src/request_handler_filesystem.cpp b/producer/api/cpp/src/request_handler_filesystem.cpp
index 1e77c7de82f124ff01a688e8f7e81cf95f45a623..c548f70c713a4e6fc40ac88f0b1f0373759f3d3c 100644
--- a/producer/api/cpp/src/request_handler_filesystem.cpp
+++ b/producer/api/cpp/src/request_handler_filesystem.cpp
@@ -31,7 +31,7 @@ bool RequestHandlerFilesystem::ProcessRequestUnlocked(GenericRequest* request, b
     err = io__->WriteDataToFile(destination_folder_, request->header.message, (uint8_t*)producer_request->data.get(),
                                 (size_t)request->header.data_size, true, true);
     if (producer_request->callback) {
-        producer_request->callback(RequestCallbackPayload{request->header,""}, std::move(err));
+        producer_request->callback(RequestCallbackPayload{request->header, ""}, std::move(err));
     }
     *retry = false;
     return true;
diff --git a/producer/api/cpp/src/request_handler_tcp.cpp b/producer/api/cpp/src/request_handler_tcp.cpp
index f395d63e91f003df6e0b8f1d50194e35096ef40e..e0931b03e542cae5a20516e0f73e3d8ff024c5bb 100644
--- a/producer/api/cpp/src/request_handler_tcp.cpp
+++ b/producer/api/cpp/src/request_handler_tcp.cpp
@@ -75,7 +75,7 @@ Error RequestHandlerTcp::SendRequestContent(const ProducerRequest* request) {
     return nullptr;
 }
 
-Error RequestHandlerTcp::ReceiveResponse(const GenericRequestHeader& request_header,std::string* response) {
+Error RequestHandlerTcp::ReceiveResponse(const GenericRequestHeader& request_header, std::string* response) {
     Error err;
     SendDataResponse sendDataResponse;
     io__->Receive(sd_, &sendDataResponse, sizeof(sendDataResponse), &err);
@@ -115,13 +115,13 @@ Error RequestHandlerTcp::ReceiveResponse(const GenericRequestHeader& request_hea
     }
 }
 
-Error RequestHandlerTcp::TrySendToReceiver(const ProducerRequest* request,std::string* response) {
+Error RequestHandlerTcp::TrySendToReceiver(const ProducerRequest* request, std::string* response) {
     auto err = SendRequestContent(request);
     if (err)  {
         return err;
     }
 
-    err = ReceiveResponse(request->header,response);
+    err = ReceiveResponse(request->header, response);
     if (err == nullptr || err == ProducerErrorTemplates::kServerWarning)  {
         log__->Debug("successfully sent data, opcode: " + std::to_string(request->header.op_code) +
                      ", id: " + std::to_string(request->header.data_id) + " to " + connected_receiver_uri_);
@@ -214,9 +214,9 @@ bool RequestHandlerTcp::ProcessErrorFromReceiver(const Error& error,
 }
 
 
-void RequestHandlerTcp::ProcessRequestCallback(Error err, ProducerRequest* request,std::string response, bool* retry) {
+void RequestHandlerTcp::ProcessRequestCallback(Error err, ProducerRequest* request, std::string response, bool* retry) {
     if (request->callback) {
-        request->callback(RequestCallbackPayload{request->header,std::move(response)}, std::move(err));
+        request->callback(RequestCallbackPayload{request->header, std::move(response)}, std::move(err));
     }
     *retry = false;
 }
@@ -235,7 +235,7 @@ bool RequestHandlerTcp::SendDataToOneOfTheReceivers(ProducerRequest* request, bo
         }
 
         std::string response;
-        auto err = TrySendToReceiver(request,&response);
+        auto err = TrySendToReceiver(request, &response);
         bool server_error_can_retry = ProcessErrorFromReceiver(err, request, receiver_uri);
         if (server_error_can_retry)  {
             continue;
@@ -295,13 +295,14 @@ void RequestHandlerTcp::TearDownProcessingRequestLocked(bool request_processed_s
 
 void RequestHandlerTcp::ProcessRequestTimeout(GenericRequest* request) {
     auto producer_request = static_cast<ProducerRequest*>(request);
-    auto err_string ="request id:" + std::to_string(request->header.data_id) + ", opcode: "+std::to_string(request->header.op_code) + " for " + request->header.substream +
-        " substream";
-    log__->Error("timeout "+err_string);
+    auto err_string = "request id:" + std::to_string(request->header.data_id) + ", opcode: " + std::to_string(
+                          request->header.op_code) + " for " + request->header.substream +
+                      " substream";
+    log__->Error("timeout " + err_string);
 
     auto err = ProducerErrorTemplates::kTimeout.Generate(err_string);
     if (producer_request->callback) {
-        producer_request->callback(RequestCallbackPayload{request->header,""}, std::move(err));
+        producer_request->callback(RequestCallbackPayload{request->header, ""}, std::move(err));
     }
 
 }
diff --git a/producer/api/cpp/src/request_handler_tcp.h b/producer/api/cpp/src/request_handler_tcp.h
index 38841d6c8da80376de94454974522a65b2e26f5b..822b75da29c7c9990f3f3c66ee5739476744ec2e 100644
--- a/producer/api/cpp/src/request_handler_tcp.h
+++ b/producer/api/cpp/src/request_handler_tcp.h
@@ -34,8 +34,8 @@ class RequestHandlerTcp: public RequestHandler {
     Error ConnectToReceiver(const std::string& source_credentials, const std::string& receiver_address);
     bool SendDataToOneOfTheReceivers(ProducerRequest* request, bool* retry);
     Error SendRequestContent(const ProducerRequest* request);
-    Error ReceiveResponse(const GenericRequestHeader& request_header,std::string* response);
-    Error TrySendToReceiver(const ProducerRequest* request,std::string* response);
+    Error ReceiveResponse(const GenericRequestHeader& request_header, std::string* response);
+    Error TrySendToReceiver(const ProducerRequest* request, std::string* response);
     SocketDescriptor sd_{kDisconnectedSocketDescriptor};
     void UpdateIfNewConnection();
     bool UpdateReceiversList();
diff --git a/producer/api/cpp/unittests/mocking.h b/producer/api/cpp/unittests/mocking.h
index 109fa23823bb693f79cd0924e0227a19d7b3e66c..ac806babd1c0b1a6c722d80df253b27fa66a2ad3 100644
--- a/producer/api/cpp/unittests/mocking.h
+++ b/producer/api/cpp/unittests/mocking.h
@@ -28,11 +28,11 @@ class MockRequestPull : public RequestPool {
         RequestPool{1, request_handler_factory, log} {};
     asapo::Error AddRequest(std::unique_ptr<asapo::GenericRequest> request, bool top_priority = false) override {
         if (request == nullptr) {
-            return asapo::Error{AddRequest_t(nullptr,top_priority)};
+            return asapo::Error{AddRequest_t(nullptr, top_priority)};
         }
-        return asapo::Error{AddRequest_t(request.get(),top_priority)};
+        return asapo::Error{AddRequest_t(request.get(), top_priority)};
     }
-    MOCK_METHOD2(AddRequest_t, asapo::ErrorInterface * (GenericRequest*,bool));
+    MOCK_METHOD2(AddRequest_t, asapo::ErrorInterface * (GenericRequest*, bool));
     MOCK_METHOD0(NRequestsInPool, uint64_t ());
 
     MOCK_METHOD1(WaitRequestsFinished_t, asapo::ErrorInterface * (uint64_t timeout_ms));
diff --git a/producer/api/cpp/unittests/test_producer_impl.cpp b/producer/api/cpp/unittests/test_producer_impl.cpp
index 798a49c17f435da1f12b7151ba9319f6771ce126..fdc8f4c04eaa985068f4497c81bc6a9634e918e0 100644
--- a/producer/api/cpp/unittests/test_producer_impl.cpp
+++ b/producer/api/cpp/unittests/test_producer_impl.cpp
@@ -95,8 +95,8 @@ class ProducerImplTests : public testing::Test {
 };
 
 TEST_F(ProducerImplTests, SendReturnsError) {
-    EXPECT_CALL(mock_pull, AddRequest_t(_,false)).WillOnce(Return(
-            asapo::ProducerErrorTemplates::kRequestPoolIsFull.Generate().release()));
+    EXPECT_CALL(mock_pull, AddRequest_t(_, false)).WillOnce(Return(
+                asapo::ProducerErrorTemplates::kRequestPoolIsFull.Generate().release()));
     asapo::EventHeader event_header{1, 1, "test"};
     auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kRequestPoolIsFull));
@@ -176,7 +176,7 @@ TEST_F(ProducerImplTests, UsesDefaultStream) {
                                         asapo::kDefaultSubstream.c_str(),
                                         expected_ingest_mode,
                                         0,
-                                        0),false)).WillOnce(Return(nullptr));
+                                        0), false)).WillOnce(Return(nullptr));
 
     asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
     auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
@@ -197,7 +197,7 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequest) {
                                         expected_ingest_mode,
                                         0,
                                         0
-                                                              ),false)).WillOnce(Return(
+                                                              ), false)).WillOnce(Return(
                                                                       nullptr));
 
     asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
@@ -219,7 +219,7 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequestWithSubstream) {
                                         expected_ingest_mode,
                                         0,
                                         0
-                                                              ),false)).WillOnce(Return(
+                                                              ), false)).WillOnce(Return(
                                                                       nullptr));
 
     asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
@@ -244,7 +244,7 @@ TEST_F(ProducerImplTests, OKSendingSubstreamFinish) {
                                         asapo::IngestModeFlags::kTransferMetaDataOnly,
                                         0,
                                         0
-                                                              ),false)).WillOnce(Return(
+                                                              ), false)).WillOnce(Return(
                                                                       nullptr));
 
     auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, expected_next_substream, nullptr);
@@ -269,7 +269,7 @@ TEST_F(ProducerImplTests, OKSendingSubstreamFinishWithNoNextStream) {
                                         asapo::IngestModeFlags::kTransferMetaDataOnly,
                                         0,
                                         0
-                                                              ),false)).WillOnce(Return(
+                                                              ), false)).WillOnce(Return(
                                                                       nullptr));
 
     auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, "", nullptr);
@@ -285,7 +285,7 @@ TEST_F(ProducerImplTests, OKSendingSendSubsetDataRequest) {
                                         expected_credentials_str, expected_metadata,
                                         expected_id, expected_size, expected_name, asapo::kDefaultSubstream.c_str(),
                                         expected_ingest_mode,
-                                        expected_subset_id, expected_subset_size),false)).WillOnce(
+                                        expected_subset_id, expected_subset_size), false)).WillOnce(
                                             Return(
                                                 nullptr));
 
@@ -312,7 +312,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
                                         "",
                                         expected_ingest_mode,
                                         10,
-                                        10),false)).WillOnce(Return(
+                                        10), false)).WillOnce(Return(
                                                     nullptr));
 
     auto err = producer.SendMetaData(expected_metadata, nullptr);
@@ -324,7 +324,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
 TEST_F(ProducerImplTests, ErrorSendingEmptyFileName) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(_,_)).Times(0);
+    EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
     asapo::EventHeader event_header{expected_id, 0, expected_name};
     auto err = producer.SendFile(event_header, "", expected_ingest_mode, nullptr);
@@ -337,7 +337,7 @@ TEST_F(ProducerImplTests, ErrorSendingEmptyFileName) {
 TEST_F(ProducerImplTests, ErrorSendingEmptyRelativeFileName) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(_,_)).Times(0);
+    EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
     asapo::EventHeader event_header{expected_id, 0, ""};
     auto err = producer.SendFile(event_header, expected_fullpath, expected_ingest_mode, nullptr);
@@ -359,8 +359,8 @@ TEST_F(ProducerImplTests, OKSendingSendFileRequest) {
                                         asapo::kDefaultSubstream.c_str(),
                                         expected_ingest_mode,
                                         0,
-                                        0),false)).WillOnce(Return(
-                                                nullptr));
+                                        0), false)).WillOnce(Return(
+                                                    nullptr));
 
     asapo::EventHeader event_header{expected_id, 0, expected_name};
     auto err = producer.SendFile(event_header, expected_fullpath, expected_ingest_mode, nullptr);
@@ -380,8 +380,8 @@ TEST_F(ProducerImplTests, OKSendingSendFileRequestWithSubstream) {
                                         expected_substream,
                                         expected_ingest_mode,
                                         0,
-                                        0),false)).WillOnce(Return(
-                                                nullptr));
+                                        0), false)).WillOnce(Return(
+                                                    nullptr));
 
     asapo::EventHeader event_header{expected_id, 0, expected_name};
     auto err = producer.SendFile(event_header, expected_substream, expected_fullpath, expected_ingest_mode, nullptr);
@@ -411,7 +411,7 @@ TEST_F(ProducerImplTests, ErrorSettingSecondTime) {
 TEST_F(ProducerImplTests, ErrorSendingWrongIngestMode) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(_,_)).Times(0);
+    EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
     asapo::EventHeader event_header{expected_id, 0, expected_name};
     auto ingest_mode = asapo::IngestModeFlags::kTransferMetaDataOnly | asapo::IngestModeFlags::kTransferData;
@@ -445,23 +445,23 @@ TEST_F(ProducerImplTests, WaitRequestsFinished) {
 
 
 MATCHER_P3(M_CheckGetSubstreamInfoRequest, op_code, source_credentials, substream,
-            "Checks if a valid GenericRequestHeader was Send") {
+           "Checks if a valid GenericRequestHeader was sent") {
     auto request = static_cast<ProducerRequest*>(arg);
     return ((asapo::GenericRequestHeader) (arg->header)).op_code == op_code
-        && request->source_credentials == source_credentials
-        && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
+           && request->source_credentials == source_credentials
+           && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
 }
 
 
 TEST_F(ProducerImplTests, GetStreamInfoMakesCorerctRequest) {
     producer.SetCredentials(expected_credentials);
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetSubstreamInfoRequest(asapo::kOpcodeStreamInfo,
-                                                               expected_credentials_str,
-                                                               expected_substream),true)).WillOnce(
-        Return(nullptr));
+                                        expected_credentials_str,
+                                        expected_substream), true)).WillOnce(
+                                            Return(nullptr));
 
     asapo::Error err;
-    producer.GetStreamInfo(expected_substream,1, &err);
+    producer.GetStreamInfo(expected_substream, 1, &err);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
diff --git a/producer/api/cpp/unittests/test_request_handler_filesystem.cpp b/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
index 321b60e4d330380666af87182dbf70ff1dc24f1c..120d79bcac7694cf65813365e1b138957f5d69b6 100644
--- a/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
+++ b/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
@@ -57,9 +57,9 @@ class RequestHandlerFilesystemTests : public testing::Test {
               expected_meta_size, expected_file_name};
     bool called = false;
     asapo::GenericRequestHeader callback_header;
-  std::string callback_response;
+    std::string callback_response;
 
-  asapo::ProducerRequest request{"", header, nullptr, "", "", [this](asapo::RequestCallbackPayload payload, asapo::Error err) {
+    asapo::ProducerRequest request{"", header, nullptr, "", "", [this](asapo::RequestCallbackPayload payload, asapo::Error err) {
             called = true;
             callback_err = std::move(err);
             callback_header = payload.original_header;
diff --git a/producer/api/cpp/unittests/test_request_handler_tcp.cpp b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
index 784c25cc5c2b42519d59ade138bcfd301f3b5e6a..687b86c06522b9a6a4032d4edd85d97695d4576d 100644
--- a/producer/api/cpp/unittests/test_request_handler_tcp.cpp
+++ b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
@@ -59,7 +59,7 @@ class RequestHandlerTcpTests : public testing::Test {
     std::string expected_warning = "warning";
     std::string expected_response = "response";
 
-  char  expected_file_name[asapo::kMaxMessageSize] = "test_name";
+    char  expected_file_name[asapo::kMaxMessageSize] = "test_name";
     char  expected_beamtime_id[asapo::kMaxMessageSize] = "test_beamtime_id";
     char  expected_substream[asapo::kMaxMessageSize] = "test_substream";
 
@@ -851,7 +851,7 @@ TEST_F(RequestHandlerTcpTests, FileRequestOK) {
     ExpectOKSendHeader(true);
     ExpectOKSendMetaData(true);
     ExpectOKSendFile(true);
-    ExpectOKReceive(true,asapo::kNetErrorNoError,expected_response);
+    ExpectOKReceive(true, asapo::kNetErrorNoError, expected_response);
 
     request_handler.PrepareProcessingRequestLocked();
 
@@ -870,7 +870,7 @@ TEST_F(RequestHandlerTcpTests, SendOK) {
     ExpectOKConnect(true);
     ExpectOKAuthorize(true);
     ExpectOKSendAll(true);
-    ExpectOKReceive(true,asapo::kNetErrorNoError,expected_response);
+    ExpectOKReceive(true, asapo::kNetErrorNoError, expected_response);
 
 
     request_handler.PrepareProcessingRequestLocked();
diff --git a/producer/api/python/asapo_wrappers.h b/producer/api/python/asapo_wrappers.h
index 5c9929da5127287fffb422869e37e4547bdd4647..794e97004dc59a66183b41fc0aea5f44fd9654d5 100644
--- a/producer/api/python/asapo_wrappers.h
+++ b/producer/api/python/asapo_wrappers.h
@@ -13,7 +13,7 @@ inline std::string GetErrorString(asapo::Error* err) {
     return "";
 }
 
-using RequestCallbackCython = void (*)(void*, void*, RequestCallbackPayload payload , Error err);
+using RequestCallbackCython = void (*)(void*, void*, RequestCallbackPayload payload, Error err);
 using RequestCallbackCythonMemory = void (*)(void*, void*, void*, RequestCallbackPayload payload, Error err);
 
 RequestCallback unwrap_callback(RequestCallbackCython callback, void* c_self, void* py_func) {
diff --git a/receiver/CMakeLists.txt b/receiver/CMakeLists.txt
index bbf62d388e76b126bf2e8ded4247c7cfa3a7cba5..398f86a6c54a0b6fff54bd2d831a4ff45711bb7d 100644
--- a/receiver/CMakeLists.txt
+++ b/receiver/CMakeLists.txt
@@ -28,10 +28,12 @@ set(RECEIVER_CORE_FILES
         )
 
 set(RDS_FILES
+        src/receiver_data_server/net_server/fabric_rds_request.cpp
         src/receiver_data_server/receiver_data_server.cpp
         src/receiver_data_server/receiver_data_server_request.cpp
         src/receiver_data_server/receiver_data_server_logger.cpp
         src/receiver_data_server/net_server/rds_tcp_server.cpp
+        src/receiver_data_server/net_server/rds_fabric_server.cpp
         src/receiver_data_server/request_handler/receiver_data_server_request_handler_factory.cpp
         src/receiver_data_server/request_handler/receiver_data_server_request_handler.cpp
         )
@@ -48,12 +50,14 @@ set(SOURCE_FILES
 ################################
 #SET( CMAKE_EXE_LINKER_FLAGS  "${CMAKE_EXE_LINKER_FLAGS} -static")
 
+GET_PROPERTY(ASAPO_COMMON_FABRIC_LIBRARIES GLOBAL PROPERTY ASAPO_COMMON_FABRIC_LIBRARIES)
 
 add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io> $<TARGET_OBJECTS:curl_http_client>
-         $<TARGET_OBJECTS:json_parser> $<TARGET_OBJECTS:logger> $<TARGET_OBJECTS:request_pool>)
+        $<TARGET_OBJECTS:json_parser> $<TARGET_OBJECTS:logger> $<TARGET_OBJECTS:request_pool>)
 set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
-target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR} ${CURL_INCLUDE_DIRS})
-target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} database)
+target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR} ${LIBFABRIC_INCLUDE_DIR} ${CURL_INCLUDE_DIRS})
+target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} database
+        asapo-fabric ${ASAPO_COMMON_FABRIC_LIBRARIES})
 
 
 add_executable(${TARGET_NAME}-bin src/main.cpp)
@@ -112,6 +116,7 @@ gtest(${TARGET_NAME} "${TEST_SOURCE_FILES}" "${TEST_LIBRARIES}"
 
 set(TEST_SOURCE_FILES_RDS
         unittests/receiver_data_server/test_receiver_data_server.cpp
+        unittests/receiver_data_server/net_server/test_rds_fabric_server.cpp
         unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
         unittests/receiver_data_server/request_handler/test_request_handler_factory.cpp
         unittests/receiver_data_server/request_handler/test_request_handler.cpp
diff --git a/receiver/src/main.cpp b/receiver/src/main.cpp
index e09fefd4d19eb96abd4af1b1b0f39b1e63266974..ab00fa5457bbd9e5e70f9417cacb10df67dfc5e8 100644
--- a/receiver/src/main.cpp
+++ b/receiver/src/main.cpp
@@ -5,11 +5,12 @@
 #include "receiver_config_factory.h"
 #include "receiver_config.h"
 
-#include "receiver_logger.h"
+#include "receiver_data_server/receiver_data_server_logger.h"
 #include "common/version.h"
 
 #include "receiver_data_server/receiver_data_server.h"
 #include "receiver_data_server/net_server/rds_tcp_server.h"
+#include "receiver_data_server/net_server/rds_fabric_server.h"
 
 asapo::Error ReadConfigFile(int argc, char* argv[]) {
     if (argc != 2) {
@@ -22,8 +23,20 @@ asapo::Error ReadConfigFile(int argc, char* argv[]) {
 
 void AddDataServers(const asapo::ReceiverConfig* config, asapo::SharedCache cache,
                     std::vector<asapo::RdsNetServerPtr>& netServers) {
-    // Add TCP
-    netServers.emplace_back(new asapo::RdsTcpServer("0.0.0.0:" + std::to_string(config->dataserver.listen_port)));
+    auto logger = asapo::GetDefaultReceiverDataServerLogger();
+    logger->SetLogLevel(config->log_level);
+
+    auto ds_config = config->dataserver;
+    auto networkingMode = ds_config.network_mode;
+    if (std::find(networkingMode.begin(), networkingMode.end(), "tcp") != networkingMode.end()) {
+        // Add TCP
+        netServers.emplace_back(new asapo::RdsTcpServer("0.0.0.0:" + std::to_string(ds_config.listen_port), logger));
+    }
+
+    if (std::find(networkingMode.begin(), networkingMode.end(), "fabric") != networkingMode.end()) {
+        // Add Fabric
+        netServers.emplace_back(new asapo::RdsFabricServer(ds_config.advertise_uri, logger));
+    }
 }
 
 std::vector<std::thread> StartDataServers(const asapo::ReceiverConfig* config, asapo::SharedCache cache,
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index f0dffeef0455e5de6c090d99b193ca14747672f8..a3334cbe2f0ef282aace6d2621894e61342863b0 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -35,6 +35,7 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
     (err = parser.GetUInt64("AuthorizationInterval", &config.authorization_interval_ms)) ||
     (err = parser.GetString("PerformanceDbName", &config.performance_db_name)) ||
     (err = parser.Embedded("DataServer").GetString("AdvertiseURI", &config.dataserver.advertise_uri)) ||
+    (err = parser.Embedded("DataServer").GetArrayString("NetworkMode", &config.dataserver.network_mode)) ||
     (err = parser.GetString("LogLevel", &log_level));
 
     if (err) {
diff --git a/receiver/src/receiver_data_server/net_server/fabric_rds_request.cpp b/receiver/src/receiver_data_server/net_server/fabric_rds_request.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0edde1019f0301655b758ddc76a82b46074ec85f
--- /dev/null
+++ b/receiver/src/receiver_data_server/net_server/fabric_rds_request.cpp
@@ -0,0 +1,14 @@
+#include "fabric_rds_request.h"
+
+using namespace asapo;
+
+FabricRdsRequest::FabricRdsRequest(const GenericRequestHeader& header,
+                                   fabric::FabricAddress sourceId, fabric::FabricMessageId messageId)
+    : ReceiverDataServerRequest(header, sourceId), message_id{messageId} {
+
+}
+
+const fabric::MemoryRegionDetails* asapo::FabricRdsRequest::GetMemoryRegion() const {
+    return reinterpret_cast<const fabric::MemoryRegionDetails*>(header.message);
+}
+
diff --git a/receiver/src/receiver_data_server/net_server/fabric_rds_request.h b/receiver/src/receiver_data_server/net_server/fabric_rds_request.h
new file mode 100644
index 0000000000000000000000000000000000000000..bcbd3a180b1cd80c090c408a78603a47f65ce8e3
--- /dev/null
+++ b/receiver/src/receiver_data_server/net_server/fabric_rds_request.h
@@ -0,0 +1,19 @@
+#ifndef ASAPO_FABRIC_RDS_REQUEST_H
+#define ASAPO_FABRIC_RDS_REQUEST_H
+
+#include <asapo_fabric/asapo_fabric.h>
+#include "../receiver_data_server_request.h"
+
+namespace asapo {
+
+class FabricRdsRequest : public ReceiverDataServerRequest {
+  public:
+    explicit FabricRdsRequest(const GenericRequestHeader& header, fabric::FabricAddress source_id,
+                              fabric::FabricMessageId messageId);
+    fabric::FabricMessageId message_id;
+    const fabric::MemoryRegionDetails* GetMemoryRegion() const;
+};
+
+}
+
+#endif //ASAPO_FABRIC_RDS_REQUEST_H
diff --git a/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp b/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a0d682b30ebaead730c6c1cfbe33c8931ed46075
--- /dev/null
+++ b/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp
@@ -0,0 +1,79 @@
+#include <io/io_factory.h>
+
+#include <utility>
+#include "rds_fabric_server.h"
+#include "../receiver_data_server_logger.h"
+#include "fabric_rds_request.h"
+
+using namespace asapo;
+
+RdsFabricServer::RdsFabricServer(std::string listenAddress,
+                                 const AbstractLogger* logger): factory__(fabric::GenerateDefaultFabricFactory()), io__{GenerateDefaultIO()},
+    log__{logger}, listenAddress_(std::move(listenAddress)) {
+
+}
+
+RdsFabricServer::~RdsFabricServer() {
+
+}
+
+Error RdsFabricServer::Initialize() {
+    if (server__) {
+        return TextError("Server was already initialized");
+    }
+    Error err;
+    std::string hostname;
+    uint16_t port;
+    std::tie(hostname, port) = *io__->SplitAddressToHostnameAndPort(listenAddress_);
+    server__ = factory__->CreateAndBindServer(log__, hostname, port, &err);
+    if (err) {
+        return err;
+    }
+
+    log__->Info("Started Fabric ReceiverDataServer at '" + server__->GetAddress() + "'");
+
+    return err;
+}
+
+GenericRequests RdsFabricServer::GetNewRequests(Error* err) {
+    // TODO: Needs performance testing — only a single request is returned per call
+    fabric::FabricAddress srcAddress;
+    fabric::FabricMessageId messageId;
+
+    GenericRequestHeader header;
+    server__->RecvAny(&srcAddress, &messageId, &header, sizeof(header), err);
+    if (*err) {
+        return {}; // empty result
+    }
+    auto requestPtr = new FabricRdsRequest(header, srcAddress, messageId);
+
+    GenericRequests genericRequests;
+    genericRequests.emplace_back(GenericRequestPtr(requestPtr));
+    return genericRequests;
+}
+
+Error RdsFabricServer::SendResponse(const ReceiverDataServerRequest* request, const GenericNetworkResponse* response) {
+    Error err;
+    auto fabricRequest = dynamic_cast<const FabricRdsRequest*>(request);
+    server__->Send(request->source_id, fabricRequest->message_id, response, sizeof(*response), &err);
+    return err;
+}
+
+Error RdsFabricServer::SendResponseAndSlotData(const ReceiverDataServerRequest* request,
+                                               const GenericNetworkResponse* response, const CacheMeta* cache_slot) {
+    Error err;
+    auto fabricRequest = dynamic_cast<const FabricRdsRequest*>(request);
+
+    server__->RdmaWrite(fabricRequest->source_id, fabricRequest->GetMemoryRegion(), cache_slot->addr, cache_slot->size,
+                        &err);
+    if (err) {
+        return err;
+    }
+
+    server__->Send(request->source_id, fabricRequest->message_id, response, sizeof(*response), &err);
+    return err;
+}
+
+void RdsFabricServer::HandleAfterError(uint64_t source_id) {
+    /* Intentionally a no-op — no per-source cleanup is performed here */
+}
diff --git a/receiver/src/receiver_data_server/net_server/rds_fabric_server.h b/receiver/src/receiver_data_server/net_server/rds_fabric_server.h
new file mode 100644
index 0000000000000000000000000000000000000000..2733ade39e86817717b9252d51fa1d09bb5d2a9c
--- /dev/null
+++ b/receiver/src/receiver_data_server/net_server/rds_fabric_server.h
@@ -0,0 +1,36 @@
+#ifndef ASAPO_RDS_FABRIC_SERVER_H
+#define ASAPO_RDS_FABRIC_SERVER_H
+
+#include "rds_net_server.h"
+#include "asapo_fabric/asapo_fabric.h"
+
+namespace asapo {
+
+class RdsFabricServer : public RdsNetServer {
+  public:
+    explicit RdsFabricServer(std::string  listenAddress, const AbstractLogger* logger);
+    ~RdsFabricServer() override;
+
+    // modified in tests to mock system calls; otherwise do not touch
+    std::unique_ptr<fabric::FabricFactory> factory__;
+    std::unique_ptr<IO> io__;
+    const AbstractLogger* log__;
+    std::unique_ptr<fabric::FabricServer> server__;
+  private:
+    std::string listenAddress_;
+  public: // NetServer implementation
+    Error Initialize() override;
+
+    GenericRequests GetNewRequests(Error* err) override;
+
+    Error SendResponse(const ReceiverDataServerRequest* request, const GenericNetworkResponse* response) override;
+
+    Error SendResponseAndSlotData(const ReceiverDataServerRequest* request, const GenericNetworkResponse* response,
+                                  const CacheMeta* cache_slot) override;
+
+    void HandleAfterError(uint64_t source_id) override;
+};
+
+}
+
+#endif //ASAPO_RDS_FABRIC_SERVER_H
diff --git a/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp b/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
index 8d681e9f400213dd9cdd7e354b2c393a7c861717..6f9e586fd71e7818e63ff1ff8c1793d54ed683bf 100644
--- a/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
+++ b/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
@@ -6,7 +6,7 @@
 
 namespace asapo {
 
-RdsTcpServer::RdsTcpServer(std::string address) : io__{GenerateDefaultIO()}, log__{GetDefaultReceiverDataServerLogger()},
+RdsTcpServer::RdsTcpServer(std::string address, const AbstractLogger* logger) : io__{GenerateDefaultIO()}, log__{logger},
     address_{std::move(address)} {}
 
 Error RdsTcpServer::Initialize() {
@@ -14,9 +14,9 @@ Error RdsTcpServer::Initialize() {
     if (master_socket_ == kDisconnectedSocketDescriptor) {
         master_socket_ = io__->CreateAndBindIPTCPSocketListener(address_, kMaxPendingConnections, &err);
         if (!err) {
-            log__->Info("data server listening on " + address_);
+            log__->Info("Started TCP ReceiverDataServer at '" + address_ + "'");
         } else {
-            log__->Error("dataserver cannot listen on " + address_ + ": " + err->Explain());
+            log__->Error("TCP ReceiverDataServer cannot listen on " + address_ + ": " + err->Explain());
         }
     } else {
         err = TextError("Server was already initialized");
diff --git a/receiver/src/receiver_data_server/net_server/rds_tcp_server.h b/receiver/src/receiver_data_server/net_server/rds_tcp_server.h
index 5d28f042c5800f68a4791b26712545dfd92419c4..9a588c145dad9725d0054e36a72ade3d70785aa8 100644
--- a/receiver/src/receiver_data_server/net_server/rds_tcp_server.h
+++ b/receiver/src/receiver_data_server/net_server/rds_tcp_server.h
@@ -11,7 +11,7 @@ const int kMaxPendingConnections = 5;
 
 class RdsTcpServer : public RdsNetServer {
   public:
-    explicit RdsTcpServer(std::string address);
+    explicit RdsTcpServer(std::string address, const AbstractLogger* logger);
     ~RdsTcpServer() override;
 
     Error Initialize() override;
diff --git a/receiver/src/receiver_data_server/receiver_data_server.cpp b/receiver/src/receiver_data_server/receiver_data_server.cpp
index 5526d602a9f34283fe65bab55dd71418910f1665..4b26097544fcd93cf72bf89e2ff9ef88aa4e2227 100644
--- a/receiver/src/receiver_data_server/receiver_data_server.cpp
+++ b/receiver/src/receiver_data_server/receiver_data_server.cpp
@@ -24,6 +24,7 @@ void ReceiverDataServer::Run() {
         if (err == IOErrorTemplates::kTimeout) {
             continue;
         }
+
         if (!err) {
             err = request_pool__->AddRequests(std::move(requests));
         }
diff --git a/receiver/src/receiver_data_server/receiver_data_server_config.h b/receiver/src/receiver_data_server/receiver_data_server_config.h
index 46a3594dd645bbe61a4e59753974aeeceb7bdba5..208c87d96eeeb61e2cebaf6ecbb170ac42b518d4 100644
--- a/receiver/src/receiver_data_server/receiver_data_server_config.h
+++ b/receiver/src/receiver_data_server/receiver_data_server_config.h
@@ -10,6 +10,7 @@ struct ReceiverDataServerConfig {
     uint64_t nthreads = 0;
     std::string tag;
     std::string advertise_uri;
+    std::vector<std::string> network_mode;
 };
 
 }
diff --git a/receiver/src/receiver_data_server/receiver_data_server_logger.h b/receiver/src/receiver_data_server/receiver_data_server_logger.h
index c391dbe0d827627bf27edb7eb904235a16b62733..cf50590d69467a1a419222f14c1f2068fff4df53 100644
--- a/receiver/src/receiver_data_server/receiver_data_server_logger.h
+++ b/receiver/src/receiver_data_server/receiver_data_server_logger.h
@@ -1,5 +1,5 @@
-#ifndef ASAPO_RECEIVER_LOGGER_H
-#define ASAPO_RECEIVER_LOGGER_H
+#ifndef ASAPO_DATA_RECEIVER_LOGGER_H
+#define ASAPO_DATA_RECEIVER_LOGGER_H
 
 #include "logger/logger.h"
 
@@ -11,4 +11,4 @@ AbstractLogger* GetDefaultReceiverDataServerLogger();
 }
 
 
-#endif //ASAPO_RECEIVER_LOGGER_H
+#endif //ASAPO_DATA_RECEIVER_LOGGER_H
diff --git a/receiver/src/receiver_data_server/receiver_data_server_request.cpp b/receiver/src/receiver_data_server/receiver_data_server_request.cpp
index 6c30cff9e646cc4b18e7d6ed308f1a6c4b913d68..41d1477c0f29aba325e247190c4ef431d0e3a1fd 100644
--- a/receiver/src/receiver_data_server/receiver_data_server_request.cpp
+++ b/receiver/src/receiver_data_server/receiver_data_server_request.cpp
@@ -4,10 +4,10 @@
 namespace asapo {
 
 ReceiverDataServerRequest::ReceiverDataServerRequest(GenericRequestHeader header, uint64_t source_id) :
-    GenericRequest(std::move(header), 0),
+    GenericRequest(header, 0),
     source_id{source_id} {
 }
 
 
 
-}
\ No newline at end of file
+}
diff --git a/receiver/src/request.h b/receiver/src/request.h
index 22e80ecfee3cc0a87af391a92cabbbb67c1e19f1..6bfbcc38f84c53203e1a2c0e66342128cd7286ae 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -24,8 +24,8 @@ namespace asapo {
 using RequestHandlerList = std::vector<const ReceiverRequestHandler*>;
 
 enum class ResponseMessageType {
-  kWarning,
-  kInfo
+    kWarning,
+    kInfo
 };
 
 class Request {
diff --git a/receiver/src/request_handler/request_handler_db_stream_info.cpp b/receiver/src/request_handler/request_handler_db_stream_info.cpp
index 1c2695f422f93fbdb576b00df236e60dcf71aa05..ec52adb5bef28ed886c343143b41def912c5cab1 100644
--- a/receiver/src/request_handler/request_handler_db_stream_info.cpp
+++ b/receiver/src/request_handler/request_handler_db_stream_info.cpp
@@ -16,11 +16,11 @@ Error RequestHandlerDbStreamInfo::ProcessRequest(Request* request) const {
 
     auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
     StreamInfo info;
-    auto err =  db_client__->GetStreamInfo(col_name,&info);
+    auto err =  db_client__->GetStreamInfo(col_name, &info);
     if (!err) {
-            log__->Debug(std::string{"get stream info from "} + col_name + " in " +
-             db_name_ + " at " + GetReceiverConfig()->database_uri);
-            request->SetResponseMessage(info.Json(),ResponseMessageType::kInfo);
+        log__->Debug(std::string{"get stream info from "} + col_name + " in " +
+                     db_name_ + " at " + GetReceiverConfig()->database_uri);
+        request->SetResponseMessage(info.Json(), ResponseMessageType::kInfo);
     }
     return err;
 }
diff --git a/receiver/src/request_handler/request_handler_db_stream_info.h b/receiver/src/request_handler/request_handler_db_stream_info.h
index 5ec17c9dd69a56ef303369d67354bbc5c153e958..6562b6d7909ebb2faf5cd3f540856dcf105dfcb5 100644
--- a/receiver/src/request_handler/request_handler_db_stream_info.h
+++ b/receiver/src/request_handler/request_handler_db_stream_info.h
@@ -7,9 +7,9 @@
 namespace asapo {
 
 class RequestHandlerDbStreamInfo final: public RequestHandlerDb {
- public:
-  RequestHandlerDbStreamInfo(std::string collection_name_prefix);
-  Error ProcessRequest(Request* request) const override;
+  public:
+    RequestHandlerDbStreamInfo(std::string collection_name_prefix);
+    Error ProcessRequest(Request* request) const override;
 };
 
 }
diff --git a/receiver/src/request_handler/request_handler_db_write.cpp b/receiver/src/request_handler/request_handler_db_write.cpp
index 5570dad6a2dc8868fa3a05d051f3add7f92fd4c3..b37232ed55ad3485f188ebdbbabab7bb5e8ed771 100644
--- a/receiver/src/request_handler/request_handler_db_write.cpp
+++ b/receiver/src/request_handler/request_handler_db_write.cpp
@@ -38,7 +38,7 @@ Error RequestHandlerDbWrite::ProcessDuplicateRecordSituation(Request* request) c
     auto check_err = request->CheckForDuplicates();
     if (check_err == ReceiverErrorTemplates::kWarningDuplicatedRequest) {
         std::string warn_str = "ignoring duplicate record for id " + std::to_string(request->GetDataID());
-        request->SetResponseMessage(warn_str,ResponseMessageType::kWarning);
+        request->SetResponseMessage(warn_str, ResponseMessageType::kWarning);
         log__->Warning(warn_str);
         return nullptr;
     }
diff --git a/receiver/src/request_handler/request_handler_file_process.cpp b/receiver/src/request_handler/request_handler_file_process.cpp
index 47bf3dd21cdfe469f21326acca90bf401556a641..0d07f793c20921d8691b33789aac80573af22e01 100644
--- a/receiver/src/request_handler/request_handler_file_process.cpp
+++ b/receiver/src/request_handler/request_handler_file_process.cpp
@@ -20,14 +20,14 @@ Error RequestHandlerFileProcess::ProcessRequest(Request* request) const {
 Error RequestHandlerFileProcess::ProcessFileExistSituation(Request* request) const {
     auto err_duplicate = request->CheckForDuplicates();
     if (err_duplicate == nullptr) {
-        request->SetResponseMessage("file has been overwritten",ResponseMessageType::kWarning);
+        request->SetResponseMessage("file has been overwritten", ResponseMessageType::kWarning);
         log__->Warning(std::string("overwriting file " ) + request->GetOfflinePath() + kPathSeparator + request->GetFileName());
         return file_processor_->ProcessFile(request, true);
     }
 
     if (err_duplicate == ReceiverErrorTemplates::kWarningDuplicatedRequest) {
         request->SetAlreadyProcessedFlag();
-        request->SetResponseMessage("duplicated request, ignored",ResponseMessageType::kWarning);
+        request->SetResponseMessage("duplicated request, ignored", ResponseMessageType::kWarning);
         log__->Warning("duplicated request, id: " + std::to_string(request->GetDataID()));
         return nullptr;
     }
diff --git a/receiver/src/request_handler/requests_dispatcher.cpp b/receiver/src/request_handler/requests_dispatcher.cpp
index 460e82861bfe20ceb7dd8d9e90a96aee6b64790a..0ce7e34bffba7d6383670f8406a6092d7f9f7d00 100644
--- a/receiver/src/request_handler/requests_dispatcher.cpp
+++ b/receiver/src/request_handler/requests_dispatcher.cpp
@@ -39,7 +39,7 @@ GenericNetworkResponse RequestsDispatcher::CreateResponseToRequest(const std::un
         strncpy(generic_response.message, handle_error->Explain().c_str(), kMaxMessageSize);
     }
     if (request->GetResponseMessage().size() > 0) {
-        if (request->GetResponseMessageType()==ResponseMessageType::kWarning) {
+        if (request->GetResponseMessageType() == ResponseMessageType::kWarning) {
             generic_response.error_code = kNetErrorWarning;
         }
         strncpy(generic_response.message, request->GetResponseMessage().c_str(), kMaxMessageSize);
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 6a837eb31ce193d8bd1bfaca86a118f20e6a7451..e84d07cedde38fb560959808f24f89b9adf3d233 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -50,6 +50,17 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
     config_string += "," + Key("DataServer", error_field) + "{";
     config_string += Key("ListenPort", error_field) + std::to_string(config.dataserver.listen_port);
     config_string += "," +  Key("AdvertiseURI", error_field) + "\"" + config.dataserver.advertise_uri + "\"";
+    config_string += "," +  Key("NetworkMode", error_field);
+
+    config_string += "[";
+    for (auto it = config.dataserver.network_mode.begin() ; it != config.dataserver.network_mode.end(); ++it) {
+        if (it != config.dataserver.network_mode.begin()) {
+            config_string += ",";
+        }
+        config_string += "\"" + (*it) + "\"";
+    }
+    config_string += "]";
+
     config_string += "," + Key("NThreads", error_field) + std::to_string(config.dataserver.nthreads);
     config_string += "}";
     config_string += "," + Key("DataCache", error_field) + "{";
diff --git a/receiver/unittests/receiver_data_server/net_server/test_rds_fabric_server.cpp b/receiver/unittests/receiver_data_server/net_server/test_rds_fabric_server.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ff15633f4734cd5a4c7a79d5c45a1a2865286699
--- /dev/null
+++ b/receiver/unittests/receiver_data_server/net_server/test_rds_fabric_server.cpp
@@ -0,0 +1,265 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <string>
+#include <unittests/MockIO.h>
+#include <unittests/MockLogger.h>
+#include <unittests/MockFabric.h>
+#include "../../../src/receiver_data_server/net_server/rds_fabric_server.h"
+#include "../../../src/receiver_data_server/net_server/fabric_rds_request.h"
+#include "../../../../common/cpp/src/system_io/system_io.h"
+
+using ::testing::Ne;
+using ::testing::Eq;
+using ::testing::Test;
+using ::testing::NiceMock;
+using ::testing::StrictMock;
+using ::testing::DoAll;
+using ::testing::SetArgPointee;
+using ::testing::Return;
+using ::testing::_;
+
+using namespace asapo;
+
+std::string expected_address = "somehost:123";
+
+TEST(RdsFabricServer, Constructor) {
+    NiceMock<MockLogger> mock_logger;
+    RdsFabricServer fabric_server("", &mock_logger);
+    ASSERT_THAT(dynamic_cast<SystemIO*>(fabric_server.io__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<fabric::FabricFactory*>(fabric_server.factory__.get()), Ne(nullptr));
+    ASSERT_THAT(fabric_server.log__, Eq(&mock_logger));
+}
+
+class RdsFabricServerTests : public Test {
+  public:
+    RdsFabricServer rds_server{expected_address, &mock_logger};
+    NiceMock<MockLogger> mock_logger;
+    StrictMock<MockIO> mock_io;
+    StrictMock<fabric::MockFabricFactory> mock_fabric_factory;
+    StrictMock<fabric::MockFabricServer> mock_fabric_server;
+
+    void SetUp() override {
+        rds_server.log__ = &mock_logger;
+        rds_server.io__ = std::unique_ptr<IO> {&mock_io};
+        rds_server.factory__ = std::unique_ptr<fabric::FabricFactory> {&mock_fabric_factory};
+    }
+    void TearDown() override {
+        rds_server.io__.release();
+        rds_server.factory__.release();
+        rds_server.server__.release();
+    }
+
+  public:
+    void InitServer();
+};
+
+void RdsFabricServerTests::InitServer() {
+    EXPECT_CALL(mock_io, SplitAddressToHostnameAndPort_t(expected_address)).WillOnce(Return(
+                new std::tuple<std::string, uint16_t>("abc", 123)
+            ));
+
+    EXPECT_CALL(mock_fabric_factory, CreateAndBindServer_t(_, "abc", 123, _)).WillOnce(DoAll(
+                SetArgPointee<3>(fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                Return(&mock_fabric_server)
+            ));
+
+    Error err = rds_server.Initialize();
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(RdsFabricServerTests, Initialize_Ok) {
+    InitServer();
+}
+
+TEST_F(RdsFabricServerTests, Initialize_Error_CreateAndBindServer) {
+    EXPECT_CALL(mock_io, SplitAddressToHostnameAndPort_t(expected_address)).WillOnce(Return(
+                new std::tuple<std::string, uint16_t>("abc", 123)
+            ));
+
+    EXPECT_CALL(mock_fabric_factory, CreateAndBindServer_t(_, "abc", 123, _)).WillOnce(DoAll(
+                SetArgPointee<3>(fabric::FabricErrorTemplates::kInternalError.Generate().release()),
+                Return(nullptr)
+            ));
+
+    Error err = rds_server.Initialize();
+
+    ASSERT_THAT(rds_server.server__, Eq(nullptr));
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(RdsFabricServerTests, Initialize_Error_DoubleInitialize) {
+    EXPECT_CALL(mock_io, SplitAddressToHostnameAndPort_t(expected_address)).WillOnce(Return(
+                new std::tuple<std::string, uint16_t>("abc", 123)
+            ));
+
+    EXPECT_CALL(mock_fabric_factory, CreateAndBindServer_t(_, "abc", 123, _)).WillOnce(Return(
+                &mock_fabric_server
+            ));
+
+    EXPECT_CALL(mock_fabric_server, GetAddress()).WillOnce(Return(
+                "TestAddress"
+            ));
+
+    Error err = rds_server.Initialize();
+    ASSERT_THAT(rds_server.server__, Ne(nullptr));
+    ASSERT_THAT(err, Eq(nullptr));
+
+    err = rds_server.Initialize();
+    ASSERT_THAT(rds_server.server__, Ne(nullptr));
+    ASSERT_THAT(err, Ne(nullptr));
+}
+
+ACTION_P5(A_WriteToRecvAnyBuffer, op_code, expected_id, remote_mem_addr, remote_mem_length, remote_mem_key) {
+    ((GenericRequestHeader*)arg2)->op_code = op_code;
+    ((GenericRequestHeader*)arg2)->data_id = expected_id;
+    ((fabric::MemoryRegionDetails*) & ((GenericRequestHeader*)arg2)->message)->addr = remote_mem_addr;
+    ((fabric::MemoryRegionDetails*) & ((GenericRequestHeader*)arg2)->message)->length = remote_mem_length;
+    ((fabric::MemoryRegionDetails*) & ((GenericRequestHeader*)arg2)->message)->key = remote_mem_key;
+}
+
+TEST_F(RdsFabricServerTests, GetNewRequests_Ok) {
+    InitServer();
+
+    EXPECT_CALL(mock_fabric_server, RecvAny_t(_/*&src*/, _/*&msgId*/, _/*&buf*/, sizeof(GenericRequestHeader), _/*err*/))
+    .WillOnce(DoAll(
+                  SetArgPointee<0>(542),
+                  SetArgPointee<1>(123),
+                  A_WriteToRecvAnyBuffer(asapo::kOpcodeGetBufferData, 30,
+                                         90, 10, 23)
+              ));
+
+    Error err;
+    GenericRequests requests = rds_server.GetNewRequests(&err);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(requests.size(), Eq(1));
+    auto req = dynamic_cast<FabricRdsRequest*>(requests[0].get());
+    ASSERT_THAT(req->source_id, Eq(542));
+    ASSERT_THAT(req->message_id, Eq(123));
+    ASSERT_THAT(req->header.op_code, Eq(asapo::kOpcodeGetBufferData));
+    ASSERT_THAT(req->header.data_id, Eq(30));
+    ASSERT_THAT(req->GetMemoryRegion()->addr, Eq(90));
+    ASSERT_THAT(req->GetMemoryRegion()->length, Eq(10));
+    ASSERT_THAT(req->GetMemoryRegion()->key, Eq(23));
+}
+
+TEST_F(RdsFabricServerTests, GetNewRequests_Error_RecvAny_InternalError) {
+    InitServer();
+
+    EXPECT_CALL(mock_fabric_server, RecvAny_t(_/*&src*/, _/*&msgId*/, _/*&buf*/, _/*bufSize*/, _/*err*/))
+    .WillOnce(
+        SetArgPointee<4>(fabric::FabricErrorTemplates::kInternalError.Generate().release())
+    );
+
+    Error err;
+    GenericRequests requests = rds_server.GetNewRequests(&err);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+    ASSERT_THAT(requests.size(), Eq(0));
+}
+
+TEST_F(RdsFabricServerTests, GetNewRequests_Error_RecvAny_Timeout) {
+    InitServer();
+
+    EXPECT_CALL(mock_fabric_server, RecvAny_t(_/*&src*/, _/*&msgId*/, _/*&buf*/, _/*bufSize*/, _/*err*/))
+    .WillOnce(
+        SetArgPointee<4>(IOErrorTemplates::kTimeout.Generate().release())
+    );
+
+    Error err;
+    GenericRequests requests = rds_server.GetNewRequests(&err);
+
+    ASSERT_THAT(err, Eq(IOErrorTemplates::kTimeout));
+    ASSERT_THAT(requests.size(), Eq(0));
+}
+
+TEST_F(RdsFabricServerTests, SendResponse_Ok) {
+    InitServer();
+
+    FabricRdsRequest request(GenericRequestHeader{}, 41, 87);
+    GenericNetworkResponse response;
+
+    EXPECT_CALL(mock_fabric_server, Send_t(41, 87, &response, sizeof(response), _/*err*/)).Times(1);
+
+    Error err = rds_server.SendResponse(&request, &response);
+
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST_F(RdsFabricServerTests, SendResponse_Error_SendError) {
+    InitServer();
+
+    FabricRdsRequest request(GenericRequestHeader{}, 41, 87);
+    GenericNetworkResponse response;
+
+    EXPECT_CALL(mock_fabric_server, Send_t(41, 87, &response, sizeof(response), _/*err*/)).WillOnce(
+        SetArgPointee<4>(fabric::FabricErrorTemplates::kInternalError.Generate().release())
+    );
+
+    Error err = rds_server.SendResponse(&request, &response);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(RdsFabricServerTests, SendResponseAndSlotData_Ok) {
+    InitServer();
+
+    GenericRequestHeader dummyHeader{};
+    FabricRdsRequest request(GenericRequestHeader{}, 41, 87);
+    GenericNetworkResponse response;
+    CacheMeta cacheSlot;
+    cacheSlot.addr = (void*)0xABC;
+    cacheSlot.size = 200;
+
+    EXPECT_CALL(mock_fabric_server, RdmaWrite_t(41, request.GetMemoryRegion(), (void*)0xABC, 200, _/*err*/)).Times(1);
+    EXPECT_CALL(mock_fabric_server, Send_t(41, 87, &response, sizeof(response), _/*err*/)).Times(1);
+
+    Error err = rds_server.SendResponseAndSlotData(&request, &response, &cacheSlot);
+
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST_F(RdsFabricServerTests, SendResponseAndSlotData_RdmaWrite_Error) {
+    InitServer();
+
+    GenericRequestHeader dummyHeader{};
+    FabricRdsRequest request(GenericRequestHeader{}, 41, 87);
+    GenericNetworkResponse response;
+    CacheMeta cacheSlot;
+    cacheSlot.addr = (void*)0xABC;
+    cacheSlot.size = 200;
+
+    EXPECT_CALL(mock_fabric_server, RdmaWrite_t(41, request.GetMemoryRegion(), (void*)0xABC, 200, _/*err*/)).WillOnce(
+        SetArgPointee<4>(fabric::FabricErrorTemplates::kInternalError.Generate().release())
+    );
+
+    Error err = rds_server.SendResponseAndSlotData(&request, &response, &cacheSlot);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(RdsFabricServerTests, SendResponseAndSlotData_Send_Error) {
+    InitServer();
+
+    GenericRequestHeader dummyHeader{};
+    FabricRdsRequest request(GenericRequestHeader{}, 41, 87);
+    GenericNetworkResponse response;
+    CacheMeta cacheSlot;
+    cacheSlot.addr = (void*)0xABC;
+    cacheSlot.size = 200;
+
+    EXPECT_CALL(mock_fabric_server, RdmaWrite_t(41, request.GetMemoryRegion(), (void*)0xABC, 200, _/*err*/)).Times(1);
+    EXPECT_CALL(mock_fabric_server, Send_t(41, 87, &response, sizeof(response), _/*err*/)).WillOnce(
+        SetArgPointee<4>(fabric::FabricErrorTemplates::kInternalError.Generate().release())
+    );
+
+    Error err = rds_server.SendResponseAndSlotData(&request, &response, &cacheSlot);
+
+    ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
+}
+
+TEST_F(RdsFabricServerTests, HandleAfterError) {
+    InitServer();
+    rds_server.HandleAfterError(2); /* Function does nothing */
+}
diff --git a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
index 7b2327616500620e89649c0f3f92814f99c411e4..0c87f50d47f5715320edad6473cd69061152fd26 100644
--- a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
+++ b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
@@ -26,14 +26,16 @@ using ::testing::DoAll;
 
 using asapo::RdsTcpServer;
 using asapo::MockIO;
+using asapo::MockLogger;
 using asapo::Error;
 using asapo::ListSocketDescriptors;
 namespace {
 
 TEST(RdsTCPServer, Constructor) {
-    RdsTcpServer tcp_server("");
+    NiceMock<MockLogger> mock_logger;
+    RdsTcpServer tcp_server("", &mock_logger);
     ASSERT_THAT(dynamic_cast<asapo::IO*>(tcp_server.io__.get()), Ne(nullptr));
-    ASSERT_THAT(dynamic_cast<const asapo::AbstractLogger*>(tcp_server.log__), Ne(nullptr));
+    ASSERT_THAT(tcp_server.log__, Eq(&mock_logger));
 
 }
 
@@ -41,7 +43,7 @@ std::string expected_address = "somehost:123";
 
 class RdsTCPServerTests : public Test {
   public:
-    RdsTcpServer tcp_server {expected_address};
+    RdsTcpServer tcp_server {expected_address, &mock_logger};
     NiceMock<MockIO> mock_io;
     NiceMock<asapo::MockLogger> mock_logger;
     asapo::SocketDescriptor expected_master_socket = 1;
@@ -49,7 +51,6 @@ class RdsTCPServerTests : public Test {
     std::vector<std::string> expected_new_connections = {"test1", "test2"};
     void SetUp() override {
         tcp_server.io__ = std::unique_ptr<asapo::IO> {&mock_io};
-        tcp_server.log__ = &mock_logger;
         for (auto conn : expected_client_sockets) {
             std::string connected_uri = std::to_string(conn);
             ON_CALL(mock_io, AddressFromSocket_t(conn)).WillByDefault(Return(connected_uri));
diff --git a/receiver/unittests/receiver_mocking.h b/receiver/unittests/receiver_mocking.h
index 8aebb8a3e3a2cbcee95023881582dd5864cd8a9c..c8bd31122bd88571e3c9b28ef9ff312762aba355 100644
--- a/receiver/unittests/receiver_mocking.h
+++ b/receiver/unittests/receiver_mocking.h
@@ -94,12 +94,12 @@ class MockRequest: public Request {
 
     MOCK_CONST_METHOD0(WasAlreadyProcessed, bool());
     MOCK_METHOD0(SetAlreadyProcessedFlag, void());
-    MOCK_METHOD2(SetResponseMessage, void(std::string,ResponseMessageType));
+    MOCK_METHOD2(SetResponseMessage, void(std::string, ResponseMessageType));
     MOCK_CONST_METHOD0(GetResponseMessage, const std::string & ());
     MOCK_CONST_METHOD0(GetResponseMessageType_t, ResponseMessageType ());
 
     const ResponseMessageType GetResponseMessageType() const override {
-      return GetResponseMessageType_t();
+        return GetResponseMessageType_t();
     };
 
     Error CheckForDuplicates()  override {
diff --git a/receiver/unittests/request_handler/test_request_handler_authorizer.cpp b/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
index 92b50cdb65fc1d36ad0bdc95911965431a7392ee..bfb8e8f1147253987e7178c93ab4fd43aad1916a 100644
--- a/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
@@ -92,7 +92,7 @@ class AuthorizerHandlerTests : public Test {
     }
     void MockAuthRequest(bool error, HttpCode code = HttpCode::OK) {
         if (error) {
-            EXPECT_CALL(mock_http_client, Post_t(expected_authorization_server + "/authorize", _,expect_request_string, _, _)).
+            EXPECT_CALL(mock_http_client, Post_t(expected_authorization_server + "/authorize", _, expect_request_string, _, _)).
             WillOnce(
                 DoAll(SetArgPointee<4>(new asapo::SimpleError("http error")),
                       Return("")
@@ -104,7 +104,7 @@ class AuthorizerHandlerTests : public Test {
                                                  HasSubstr(expected_authorization_server))));
 
         } else {
-            EXPECT_CALL(mock_http_client, Post_t(expected_authorization_server + "/authorize", _,expect_request_string, _, _)).
+            EXPECT_CALL(mock_http_client, Post_t(expected_authorization_server + "/authorize", _, expect_request_string, _, _)).
             WillOnce(
                 DoAll(SetArgPointee<4>(nullptr),
                       SetArgPointee<3>(code),
@@ -256,7 +256,7 @@ TEST_F(AuthorizerHandlerTests, DataTransferRequestAuthorizeUsesCachedValue) {
     MockFirstAuthorization(false);
     EXPECT_CALL(*mock_request, GetOpCode())
     .WillOnce(Return(asapo::kOpcodeTransferData));
-    EXPECT_CALL(mock_http_client, Post_t(_, _, _,_, _)).Times(0);
+    EXPECT_CALL(mock_http_client, Post_t(_, _, _, _, _)).Times(0);
     EXPECT_CALL(*mock_request, SetBeamtimeId(expected_beamtime_id));
     EXPECT_CALL(*mock_request, SetBeamline(expected_beamline));
     EXPECT_CALL(*mock_request, SetStream(expected_stream));
diff --git a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
index fa78df3cd4c35c9e967f1c404022d556a17166ab..155ac6f9045cff168fa6fdc0abb2923b883e96cb 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
@@ -67,7 +67,7 @@ class DbMetaStreamInfoTests : public Test {
     std::string info_str = R"({"lastId":10})";
     const uint8_t* expected_info_str = reinterpret_cast<const uint8_t*>(info_str.c_str());
     asapo::StreamInfo expected_stream_info;
-  void SetUp() override {
+    void SetUp() override {
         GenericRequestHeader request_header;
         expected_stream_info.last_id = 10;
         request_header.data_id = 0;
@@ -93,20 +93,20 @@ TEST_F(DbMetaStreamInfoTests, CallsUpdate) {
     ;
 
     EXPECT_CALL(*mock_request, GetSubstream())
-        .WillOnce(Return(expected_substream))
-        ;
+    .WillOnce(Return(expected_substream))
+    ;
 
     EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_stream)).
     WillOnce(testing::Return(nullptr));
 
 
     EXPECT_CALL(mock_db, GetStreamInfo_t(expected_collection_name, _)).
-        WillOnce(DoAll(
-        SetArgPointee<1>(expected_stream_info),
-        testing::Return(nullptr)
-    ));
+    WillOnce(DoAll(
+                 SetArgPointee<1>(expected_stream_info),
+                 testing::Return(nullptr)
+             ));
 
-    EXPECT_CALL(*mock_request, SetResponseMessage(info_str,asapo::ResponseMessageType::kInfo));
+    EXPECT_CALL(*mock_request, SetResponseMessage(info_str, asapo::ResponseMessageType::kInfo));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("get stream info"),
                                          HasSubstr(config.database_uri),
diff --git a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
index 703095754b840d050155bed3a4043b1508cfcc48..a2536c22c05bf5847c406b13ea80a3aab4c528d3 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
@@ -94,7 +94,7 @@ class DbWriterHandlerTests : public Test {
         handler.log__ = &mock_logger;
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, "", &mock_db_check_handler});
         config.database_uri = "127.0.0.1:27017";
-        config.dataserver.advertise_uri = expected_host_ip+":"+std::to_string(expected_port);
+        config.dataserver.advertise_uri = expected_host_ip + ":" + std::to_string(expected_port);
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
 
@@ -254,7 +254,7 @@ TEST_F(DbWriterHandlerTests, SkipIfWasAlreadyProcessed) {
 TEST_F(DbWriterHandlerTests, DuplicatedRequest_SameRecord) {
     ExpectDuplicatedID();
 
-    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("duplicate record"),asapo::ResponseMessageType::kWarning));
+    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("duplicate record"), asapo::ResponseMessageType::kWarning));
     EXPECT_CALL(*mock_request, CheckForDuplicates_t())
     .WillOnce(
         Return(asapo::ReceiverErrorTemplates::kWarningDuplicatedRequest.Generate().release())
diff --git a/receiver/unittests/request_handler/test_request_handler_file_process.cpp b/receiver/unittests/request_handler/test_request_handler_file_process.cpp
index 11e32e3708c1006ed55b1cc65d25de03009f073c..65e1d9df922b6c76d5e6847e0a2dd7379898cde2 100644
--- a/receiver/unittests/request_handler/test_request_handler_file_process.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_file_process.cpp
@@ -81,7 +81,7 @@ void FileWriteHandlerTests::ExpecFileProcess(const asapo::SimpleErrorTemplate* e
 }
 
 TEST_F(FileWriteHandlerTests, FileAlreadyExists_NoRecordInDb) {
-    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("overwritten"),asapo::ResponseMessageType::kWarning));
+    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("overwritten"), asapo::ResponseMessageType::kWarning));
     EXPECT_CALL(*mock_request, CheckForDuplicates_t())
     .WillOnce(
         Return(nullptr)
@@ -106,7 +106,7 @@ TEST_F(FileWriteHandlerTests, FileAlreadyExists_NoRecordInDb) {
 
 TEST_F(FileWriteHandlerTests, FileAlreadyExists_DuplicatedRecordInDb) {
 
-    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("ignore"),asapo::ResponseMessageType::kWarning));
+    EXPECT_CALL(*mock_request, SetResponseMessage(HasSubstr("ignore"), asapo::ResponseMessageType::kWarning));
     EXPECT_CALL(*mock_request, SetAlreadyProcessedFlag());
     EXPECT_CALL(mock_logger, Warning(HasSubstr("duplicated")));
     EXPECT_CALL(*mock_request, GetDataID()).WillOnce(Return(1));
diff --git a/receiver/unittests/request_handler/test_requests_dispatcher.cpp b/receiver/unittests/request_handler/test_requests_dispatcher.cpp
index f3513ddfb80662de2b68ee0eea9a93e445359050..eda6db4cdad870b5f4f1a6df80fb2c09cf16ee66 100644
--- a/receiver/unittests/request_handler/test_requests_dispatcher.cpp
+++ b/receiver/unittests/request_handler/test_requests_dispatcher.cpp
@@ -262,7 +262,7 @@ TEST_F(RequestsDispatcherTests, OkProcessRequestSendOK) {
 TEST_F(RequestsDispatcherTests, ProcessRequestReturnsOkWithWarning) {
     MockHandleRequest(0);
     MockSendResponse(&response, false);
-    request->SetResponseMessage("duplicate",asapo::ResponseMessageType::kWarning);
+    request->SetResponseMessage("duplicate", asapo::ResponseMessageType::kWarning);
 
     auto err = dispatcher->ProcessRequest(request);
 
@@ -274,7 +274,7 @@ TEST_F(RequestsDispatcherTests, ProcessRequestReturnsOkWithWarning) {
 TEST_F(RequestsDispatcherTests, ProcessRequestReturnsOkWithInfo) {
     MockHandleRequest(0);
     MockSendResponse(&response, false);
-    request->SetResponseMessage("some info",asapo::ResponseMessageType::kInfo);
+    request->SetResponseMessage("some info", asapo::ResponseMessageType::kInfo);
 
     auto err = dispatcher->ProcessRequest(request);
 
diff --git a/receiver/unittests/statistics/test_statistics_sender_influx_db.cpp b/receiver/unittests/statistics/test_statistics_sender_influx_db.cpp
index bdb33de72a7c0357a4bb2e9a54b271fcce5feb83..f27906f7cc5034d96886a2a8eee522b6429c1263 100644
--- a/receiver/unittests/statistics/test_statistics_sender_influx_db.cpp
+++ b/receiver/unittests/statistics/test_statistics_sender_influx_db.cpp
@@ -82,7 +82,7 @@ class SenderInfluxDbTests : public Test {
 TEST_F(SenderInfluxDbTests, SendStatisticsCallsPost) {
     std::string expect_string = "statistics,name1=value1,name2=value2 elapsed_ms=100,data_volume=1000,"
                                 "n_requests=4,db_share=0.1000,network_share=0.3000,disk_share=0.6000";
-    EXPECT_CALL(mock_http_client, Post_t("test_uri/write?db=test_name",_, expect_string, _, _)).
+    EXPECT_CALL(mock_http_client, Post_t("test_uri/write?db=test_name", _, expect_string, _, _)).
     WillOnce(
         DoAll(SetArgPointee<4>(new asapo::IOError("Test Read Error", asapo::IOErrorType::kReadError)),
               Return("")
@@ -95,7 +95,7 @@ TEST_F(SenderInfluxDbTests, SendStatisticsCallsPost) {
 }
 
 TEST_F(SenderInfluxDbTests, LogErrorWithWrongResponceSendStatistics) {
-    EXPECT_CALL(mock_http_client, Post_t(_,_, _, _, _)).
+    EXPECT_CALL(mock_http_client, Post_t(_, _, _, _, _)).
     WillOnce(
         DoAll(SetArgPointee<3>(asapo::HttpCode::BadRequest), SetArgPointee<4>(nullptr), Return("error response")
              ));
@@ -107,7 +107,7 @@ TEST_F(SenderInfluxDbTests, LogErrorWithWrongResponceSendStatistics) {
 }
 
 TEST_F(SenderInfluxDbTests, LogDebugSendStatistics) {
-    EXPECT_CALL(mock_http_client, Post_t(_,_, _, _, _)).
+    EXPECT_CALL(mock_http_client, Post_t(_, _, _, _, _)).
     WillOnce(
         DoAll(SetArgPointee<4>(nullptr), SetArgPointee<3>(asapo::HttpCode::OK), Return("ok response")
              ));
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index d3c81a5003b8203c3c94d9fc9a6b6310b12b929e..3a3c86451f06f9e15b623dfe613d2d9b874930ae 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -45,7 +45,6 @@ class ConfigTests : public Test {
     }
     void PrepareConfig() {
         test_config.listen_port = 4200;
-        test_config.dataserver.listen_port = 4201;
         test_config.tag = "receiver1";
         test_config.performance_db_name = "db_test";
         test_config.performance_db_uri = "localhost:8086";
@@ -58,9 +57,11 @@ class ConfigTests : public Test {
         test_config.use_datacache = false;
         test_config.datacache_reserved_share = 10;
         test_config.datacache_size_gb = 2;
-        test_config.dataserver.nthreads = 5;
         test_config.discovery_server = "discovery";
+        test_config.dataserver.nthreads = 5;
+        test_config.dataserver.listen_port = 4201;
         test_config.dataserver.advertise_uri = "0.0.0.1:4201";
+        test_config.dataserver.network_mode = {"tcp", "fabric"};
         test_config.receive_to_disk_threshold_mb = 50;
 
     }
@@ -80,7 +81,6 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->performance_db_name, Eq("db_test"));
     ASSERT_THAT(config->database_uri, Eq("localhost:27017"));
     ASSERT_THAT(config->listen_port, Eq(4200));
-    ASSERT_THAT(config->dataserver.listen_port, Eq(4201));
     ASSERT_THAT(config->authorization_interval_ms, Eq(10000));
     ASSERT_THAT(config->authorization_server, Eq("AuthorizationServer/aa"));
     ASSERT_THAT(config->write_to_disk, Eq(true));
@@ -90,10 +90,14 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->use_datacache, Eq(false));
     ASSERT_THAT(config->datacache_reserved_share, Eq(10));
     ASSERT_THAT(config->datacache_size_gb, Eq(2));
+    ASSERT_THAT(config->discovery_server, Eq("discovery"));
     ASSERT_THAT(config->dataserver.nthreads, Eq(5));
     ASSERT_THAT(config->dataserver.tag, Eq("receiver1_ds"));
-    ASSERT_THAT(config->discovery_server, Eq("discovery"));
+    ASSERT_THAT(config->dataserver.listen_port, Eq(4201));
     ASSERT_THAT(config->dataserver.advertise_uri, Eq("0.0.0.1:4201"));
+    ASSERT_THAT(config->dataserver.network_mode.size(), Eq(2));
+    ASSERT_THAT(config->dataserver.network_mode[0], Eq("tcp"));
+    ASSERT_THAT(config->dataserver.network_mode[1], Eq("fabric"));
     ASSERT_THAT(config->receive_to_disk_threshold_mb, Eq(50));
 }
 
@@ -104,7 +108,8 @@ TEST_F(ConfigTests, ErrorReadSettings) {
     std::vector<std::string>fields {"PerformanceDbServer", "ListenPort", "DataServer", "ListenPort", "WriteToDisk",
                                     "WriteToDb", "DataCache", "Use", "SizeGB", "ReservedShare", "DatabaseServer", "Tag",
                                     "AuthorizationServer", "AuthorizationInterval", "PerformanceDbName", "LogLevel",
-                                    "NThreads", "DiscoveryServer", "AdvertiseURI", "ReceiveToDiskThresholdMB"};
+                                    "NThreads", "DiscoveryServer", "AdvertiseURI", "NetworkMode",
+                                    "ReceiveToDiskThresholdMB"};
     for (const auto& field : fields) {
         auto err = asapo::SetReceiverConfig(test_config, field);
         ASSERT_THAT(err, Ne(nullptr));
diff --git a/receiver/unittests/test_request.cpp b/receiver/unittests/test_request.cpp
index 2a25efbdeb3b8aa528fd4c59d5a11da1b8205bd3..32784f11dae8066c2561f29004dd0c00db910198 100644
--- a/receiver/unittests/test_request.cpp
+++ b/receiver/unittests/test_request.cpp
@@ -257,7 +257,7 @@ TEST_F(RequestTests, RequestTests_SetGetBeamtimeYear_Test) {
 }
 
 TEST_F(RequestTests, SetGetWarningMessage) {
-    request->SetResponseMessage("warn",asapo::ResponseMessageType::kWarning);
+    request->SetResponseMessage("warn", asapo::ResponseMessageType::kWarning);
 
     ASSERT_THAT(request->GetResponseMessage(), "warn");
     ASSERT_THAT(request->GetResponseMessageType(), asapo::ResponseMessageType::kWarning);
@@ -265,7 +265,7 @@ TEST_F(RequestTests, SetGetWarningMessage) {
 }
 
 TEST_F(RequestTests, SetGetInfossage) {
-    request->SetResponseMessage("info",asapo::ResponseMessageType::kInfo);
+    request->SetResponseMessage("info", asapo::ResponseMessageType::kInfo);
 
     ASSERT_THAT(request->GetResponseMessage(), "info");
     ASSERT_THAT(request->GetResponseMessageType(), asapo::ResponseMessageType::kInfo);
diff --git a/tests/automatic/CMakeLists.txt b/tests/automatic/CMakeLists.txt
index 9df59ca928b4dcae80fdf225a5f317419b3dbdc0..b7282bce4e6266677adf18754ee7409c5ac6a685 100644
--- a/tests/automatic/CMakeLists.txt
+++ b/tests/automatic/CMakeLists.txt
@@ -39,5 +39,9 @@ endif()
 add_subdirectory(bug_fixes)
 
 if (ENABLE_LIBFABRIC)
-    add_subdirectory(asapo_fabric)
+    if (ENABLE_LIBFABRIC_LOCALHOST)
+        add_subdirectory(asapo_fabric)
+    else ()
+        message(WARNING "Disabled automated LibFabric tests because 'ENABLE_LIBFABRIC_LOCALHOST' is not enabled.")
+    endif()
 endif()
diff --git a/tests/automatic/asapo_fabric/CMakeLists.txt b/tests/automatic/asapo_fabric/CMakeLists.txt
index c8da6e1c73e90e79bf3fdcd4674ef1a2042cf5aa..c9e75c82675d933e1357de941a40f44fda02a715 100644
--- a/tests/automatic/asapo_fabric/CMakeLists.txt
+++ b/tests/automatic/asapo_fabric/CMakeLists.txt
@@ -11,7 +11,7 @@ foreach(file ${files})
     # Executable and link
     add_executable(${TARGET_NAME} ${SOURCE_FILES} $<TARGET_OBJECTS:logger> $<TARGET_OBJECTS:curl_http_client>)
     target_link_libraries(${TARGET_NAME} test_common asapo-fabric ${CURL_LIBRARIES} ${ASAPO_COMMON_FABRIC_LIBRARIES})
-    target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR})
+    target_include_directories(${TARGET_NAME} PUBLIC ${ASAPO_CXX_COMMON_INCLUDE_DIR}  ${LIBFABRIC_INCLUDE_DIR})
     set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 
     # Add test
diff --git a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
index fc99295efc270c7b364c28946a1b19363bf1f2ef..07c5fcc5600af523c0d95d572c3549b6cf6ce6b1 100644
--- a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
@@ -30,7 +30,12 @@ void ServerChildThread(FabricServer* server, std::atomic<int>* serverTotalReques
 
         FabricAddress clientAddress;
         FabricMessageId messageId;
-        server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+        // Retry so the tests run more stably; otherwise a timeout could occur under valgrind
+        int tries = 0;
+        do {
+            err = nullptr;
+            server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+        } while (err == IOErrorTemplates::kTimeout && tries++ < 4);
         M_AssertEq(nullptr, err, "server->RecvAny");
         M_AssertEq("Hello World", request.message);
         M_AssertEq(messageId / kEachInstanceRuns, request.data_id); // is client index
diff --git a/tests/automatic/asapo_fabric/simple_data_transfer.cpp b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
index 5c35f5a2cadc7a021e20a606906c288698ed0871..00da5b38caa82feef7a05ca5ff941557d5337c2c 100644
--- a/tests/automatic/asapo_fabric/simple_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
@@ -35,7 +35,12 @@ void ServerMasterThread(const std::string& hostname, uint16_t port, char* expect
 
                 FabricAddress clientAddress;
                 FabricMessageId messageId;
-                server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+                // Retry so the tests run more stably; otherwise a timeout could occur under valgrind
+                int tries = 0;
+                do {
+                    err = nullptr;
+                    server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+                } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
                 M_AssertEq(nullptr, err, "server->RecvAny");
                 M_AssertEq(123 + instanceRuns, messageId);
                 M_AssertEq("Hello World", request.message);
diff --git a/tests/automatic/asapo_fabric/timeout_test.cpp b/tests/automatic/asapo_fabric/timeout_test.cpp
index 42abd640883fb8304d00bc4fd982d63acf304d1e..7f752c6e3d08e86c5c88adb09337ac1d1e3f81dd 100644
--- a/tests/automatic/asapo_fabric/timeout_test.cpp
+++ b/tests/automatic/asapo_fabric/timeout_test.cpp
@@ -4,6 +4,7 @@
 #include <logger/logger.h>
 #include <testing.h>
 #include <asapo_fabric/asapo_fabric.h>
+#include <common/io_error.h>
 
 using namespace asapo;
 using namespace fabric;
@@ -28,7 +29,13 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
         int dummyBuffer;
         FabricAddress clientAddress;
         FabricMessageId messageId;
-        server->RecvAny(&clientAddress, &messageId, &dummyBuffer, sizeof(dummyBuffer), &err);
+
+        // Retry so the tests run more stably; otherwise a timeout could occur under valgrind
+        int tries = 0;
+        do {
+            err = nullptr;
+            server->RecvAny(&clientAddress, &messageId, &dummyBuffer, sizeof(dummyBuffer), &err);
+        } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
         M_AssertEq(nullptr, err, "server->RecvAny");
 
         server->Send(clientAddress, messageId, &dummyBuffer, sizeof(dummyBuffer), &err);
@@ -64,7 +71,7 @@ void ClientThread(const std::string& hostname, uint16_t port) {
               "The following call might take a while since its able to reach the server but the server is not responding"
               << std::endl;
     client->Recv(serverAddress, 0, &dummyBuffer, sizeof(dummyBuffer), &err);
-    M_AssertEq(FabricErrorTemplates::kTimeout, err, "client->Recv");
+    M_AssertEq(IOErrorTemplates::kTimeout, err, "client->Recv");
     err = nullptr;
 
     serverShutdown.set_value();
@@ -76,7 +83,7 @@ void ClientThread(const std::string& hostname, uint16_t port) {
     err = nullptr;
 
     client->Send(serverAddress, 2, &dummyBuffer, sizeof(dummyBuffer), &err);
-    M_AssertEq(FabricErrorTemplates::kInternalConnectionError, err, "client->Send");
+    M_AssertEq(FabricErrorTemplates::kConnectionRefusedError, err, "client->Send");
     err = nullptr;
 }
 
diff --git a/tests/automatic/asapo_fabric/wrong_memory_info.cpp b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
index f94fc6c658136570b9eba5d650facfd10f4d5886..8281b4ab0e41683b5b38c8fa33687f2d1a2fe03d 100644
--- a/tests/automatic/asapo_fabric/wrong_memory_info.cpp
+++ b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
@@ -47,7 +47,11 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     M_AssertEq(nullptr, err, "server->Send(1)");
 
     // Simulate correct memory details
-    server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+    int tries = 0;
+    do {
+        err = nullptr;
+        server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &err);
+    } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(2)");
     M_AssertEq(2, messageId);
     server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
@@ -57,7 +61,11 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
 
     // Simulate old (unregistered) memory details
     GenericRequestHeader request2{};
-    server->RecvAny(&clientAddress, &messageId, &request2, sizeof(request2), &err);
+    tries = 0;
+    do {
+        err = nullptr;
+        server->RecvAny(&clientAddress, &messageId, &request2, sizeof(request2), &err);
+    } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(3)");
     M_AssertEq(3, messageId);
     server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
diff --git a/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh b/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
index edf36d29606436cd033909680251472d05767f59..766c57341e630336c0733cc0371ddd2ef2c34bc8 100644
--- a/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
+++ b/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
@@ -53,4 +53,4 @@ leak=$(( $mem2 - $mem1 ))
 cat out
 echo leak: $leak
 
-test $leak -lt 300000
\ No newline at end of file
+test $leak -lt 300000
diff --git a/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py b/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
index 2f5eaac9bc5eeda3289c08e2de21e7828b169f98..2f414425c392b87b468978b8816dbe502ea0222d 100644
--- a/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
+++ b/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
@@ -1,6 +1,6 @@
 import asapo_consumer
-import time
 import sys
+import time
 
 source, path, beamtime, token = sys.argv[1:]
 
diff --git a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
index 67bf2b9a4cd680750662368ef86fe26c7870ab67..5fe621a3a80c63b4770fa81a2416f4f7bd2b8ecf 100644
--- a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
+++ b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
@@ -30,7 +30,7 @@ echo "db.${beamtime_id}_${stream}.insert({dummy:1})" | mongo ${beamtime_id}_${st
 
 nomad run authorizer.nmd >/dev/null
 nomad run nginx.nmd >/dev/null
-nomad run receiver.nmd >/dev/null
+nomad run receiver_tcp.nmd >/dev/null
 nomad run discovery.nmd >/dev/null
 
 mkdir -p ${receiver_folder}
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt b/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
index 56a0067a3f0fb59f618d56247db84c8f4141ca15..cea0d39ae010441c92bb22e191294998d5fc32d8 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
@@ -13,4 +13,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/check_linux.sh b/tests/automatic/bug_fixes/producer_send_after_restart/check_linux.sh
index f9ef5a944ede561b54c50b891dbff9cf73571735..a572806f4eca1327b2912ed74e68a74db5fe3e10 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/check_linux.sh
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/check_linux.sh
@@ -4,6 +4,11 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
 
 monitor_database_name=db_test
@@ -31,14 +36,14 @@ Cleanup() {
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 
 sleep 1
 
 #producer
 mkdir -p ${receiver_folder}
-$1 test.json &> output &
+$producer_bin test.json &> output &
 producerid=`echo $!`
 
 sleep 1
@@ -47,7 +52,7 @@ echo hello > /tmp/asapo/test_in/test1/file1
 sleep 1
 nomad stop receiver
 sleep 1
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 
 echo hello > /tmp/asapo/test_in/test1/file1
 sleep 1
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat b/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
index 2421a18add8a7711b43c85fb2319db3b8b07d725..815dffc5431972fd07bd4d8be6c725b7de66a483 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
@@ -32,7 +32,7 @@ echo hello > c:\tmp\asapo\test_in\test1\file2
 ping 1.0.0.0 -n 3 -w 100 > nul
 
 c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run receiver_tcp.nmd
 
 ping 1.0.0.0 -n 3 -w 100 > nul
 ping 1.0.0.0 -n 3 -w 100 > nul
diff --git a/tests/automatic/bug_fixes/receiver_cpu_usage/check_linux.sh b/tests/automatic/bug_fixes/receiver_cpu_usage/check_linux.sh
index dd9d95deb376a73cedda62744a46509de3401c15..8877ee587ddafbdabadb2fcf61b9ce2a55856564 100644
--- a/tests/automatic/bug_fixes/receiver_cpu_usage/check_linux.sh
+++ b/tests/automatic/bug_fixes/receiver_cpu_usage/check_linux.sh
@@ -33,7 +33,7 @@ Cleanup() {
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 
 sleep 1
diff --git a/tests/automatic/common_scripts/start_services.bat b/tests/automatic/common_scripts/start_services.bat
index 9383d4aced21329cc23e324589d2af2e34a0cca1..680ed1a56bcd5f964cdf6ed2d0075c69b0d198be 100644
--- a/tests/automatic/common_scripts/start_services.bat
+++ b/tests/automatic/common_scripts/start_services.bat
@@ -1,4 +1,4 @@
-c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run receiver_tcp.nmd
 c:\opt\consul\nomad run authorizer.nmd
 c:\opt\consul\nomad run discovery.nmd
 c:\opt\consul\nomad run broker.nmd
diff --git a/tests/automatic/consumer/consumer_api/consumer_api.cpp b/tests/automatic/consumer/consumer_api/consumer_api.cpp
index 0ba8d1b644717f3e8587ad5c152025ddd8057031..b18fa79469171bb1cc4d03855f20d1eb564f291a 100644
--- a/tests/automatic/consumer/consumer_api/consumer_api.cpp
+++ b/tests/automatic/consumer/consumer_api/consumer_api.cpp
@@ -147,24 +147,24 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     M_AssertTrue(err == asapo::ConsumerErrorTemplates::kNoData, "last ack default stream no data");
     M_AssertTrue(id == 0, "last ack default stream no data id = 0");
 
-    auto nacks = broker->GetUnacknowledgedTupleIds(group_id,0,0,&err);
+    auto nacks = broker->GetUnacknowledgedTupleIds(group_id, 0, 0, &err);
     M_AssertTrue(err == nullptr, "nacks default stream all");
     M_AssertTrue(nacks.size() == 10, "nacks default stream size = 10");
 
-    err = broker->Acknowledge(group_id,1);
+    err = broker->Acknowledge(group_id, 1);
     M_AssertTrue(err == nullptr, "ack default stream no error");
 
-    nacks = broker->GetUnacknowledgedTupleIds(group_id,0,0,&err);
+    nacks = broker->GetUnacknowledgedTupleIds(group_id, 0, 0, &err);
     M_AssertTrue(nacks.size() == 9, "nacks default stream size = 9 after ack");
 
     id = broker->GetLastAcknowledgedTulpeId(group_id, &err);
     M_AssertTrue(err == nullptr, "last ack default stream no error");
     M_AssertTrue(id == 1, "last ack default stream id = 1");
 
-    err = broker->Acknowledge(group_id,1,"stream1");
+    err = broker->Acknowledge(group_id, 1, "stream1");
     M_AssertTrue(err == nullptr, "ack stream1 no error");
 
-    nacks = broker->GetUnacknowledgedTupleIds(group_id,"stream1",0,0,&err);
+    nacks = broker->GetUnacknowledgedTupleIds(group_id, "stream1", 0, 0, &err);
     M_AssertTrue(nacks.size() == 4, "nacks stream1 size = 4 after ack");
 
 // negative acks
@@ -172,7 +172,7 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     err = broker->GetNext(&fi, group_id, nullptr);
     M_AssertTrue(err == nullptr, "GetNextNegAckBeforeResend no error");
     M_AssertTrue(fi.name == "1", "GetNextNegAckBeforeResend filename");
-    err = broker->NegativeAcknowledge(group_id,1,0);
+    err = broker->NegativeAcknowledge(group_id, 1, 0);
     M_AssertTrue(err == nullptr, "NegativeAcknowledge no error");
     err = broker->GetNext(&fi, group_id, nullptr);
     M_AssertTrue(err == nullptr, "GetNextNegAckWithResend no error");
@@ -180,7 +180,7 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
 
 // automatic resend
     broker->ResetLastReadMarker(group_id);
-    broker->SetResendNacs(true,0,1);
+    broker->SetResendNacs(true, 0, 1);
     err = broker->GetNext(&fi, group_id, nullptr);
     M_AssertTrue(err == nullptr, "GetNextBeforeResend no error");
     M_AssertTrue(fi.name == "1", "GetNextBeforeResend filename");
@@ -189,7 +189,7 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     M_AssertTrue(err == nullptr, "GetNextWithResend no error");
     M_AssertTrue(fi.name == "1", "GetNextWithResend filename");
 
-    broker->SetResendNacs(false,0,1);
+    broker->SetResendNacs(false, 0, 1);
     err = broker->GetNext(&fi, group_id, nullptr);
     M_AssertTrue(err == nullptr, "GetNextAfterResend no error");
     M_AssertTrue(fi.name == "2", "GetNextAfterResend filename");
@@ -238,6 +238,11 @@ void TestAll(const Args& args) {
     asapo::Error err;
     auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, ".", true,
                   asapo::SourceCredentials{args.run_name, "", "", args.token}, &err);
+    if (err) {
+        std::cout << "Error CreateServerBroker: " << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+
     broker->SetTimeout(100);
     auto group_id = broker->GenerateNewGroupId(&err);
 
diff --git a/tests/automatic/consumer/consumer_api_python/consumer_api.py b/tests/automatic/consumer/consumer_api_python/consumer_api.py
index e0901a0bb9b9d950bb30565f56d7b33e8d45e59e..2fba3c8a2def108942c360793ea7ab0a1a1e242e 100644
--- a/tests/automatic/consumer/consumer_api_python/consumer_api.py
+++ b/tests/automatic/consumer/consumer_api_python/consumer_api.py
@@ -4,6 +4,7 @@ import asapo_consumer
 import json
 import sys
 
+
 def exit_on_noerr(name):
     print (name)
     sys.exit(1)
@@ -248,11 +249,12 @@ def check_dataset(broker,group_id):
 
 source, path, beamtime, token, mode = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(source,path,True, beamtime,"",token,60000)
-broker_fts = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,60000)
+broker = asapo_consumer.create_server_broker(source, path, True, beamtime, "", token, 60000)
+broker_fts = asapo_consumer.create_server_broker(source, path, False, beamtime, "", token, 60000)
 
 group_id = broker.generate_group_id()
 
+
 group_id_fts = broker_fts.generate_group_id()
 
 if mode == "single":
@@ -263,4 +265,4 @@ if mode == "datasets":
     check_dataset(broker,group_id)
 
 print ("tests done")
-sys.exit(0)
\ No newline at end of file
+sys.exit(0)
diff --git a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
index 93917960d16747306a8189b0e4be975018d12e59..33ff6eccc35866c85a17c8948d1159019d03513f 100644
--- a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
+++ b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
@@ -54,6 +54,11 @@ Args GetArgs(int argc, char* argv[]) {
 void TestAll(const Args& args) {
     asapo::Error err;
     auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, "dummy", true, asapo::SourceCredentials{args.run_name, "", "", args.token}, &err);
+    if (err) {
+        std::cout << "Error CreateServerBroker: " << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+
     auto group_id = broker->GenerateNewGroupId(&err);
     broker->SetTimeout(10000);
     std::vector<asapo::FileInfos>file_infos(args.nthreads);
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
index 75bc3d65bcd9ec7b5cc6b194c888cc669669da35..45527f88ef291784b6ffb62d433a4b40063602b5 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
+++ b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
@@ -34,6 +34,7 @@ int main(int argc, char* argv[]) {
     asapo::Error err;
     auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.uri_authorizer, "", true, asapo::SourceCredentials{"", "", "", ""}, &err);
     auto server_broker = static_cast<asapo::ServerDataBroker*>(broker.get());
+    M_AssertEq(nullptr, err);
 
     asapo::HttpCode code;
     std::string response;
@@ -66,15 +67,15 @@ int main(int argc, char* argv[]) {
 
     transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"random\"}";
     auto io = asapo::GenerateDefaultIO();
-    auto fname = args.folder+asapo::kPathSeparator+"random";
-    uint64_t size=0;
-    auto expected_data = io->GetDataFromFile(fname,&size,&err);
+    auto fname = args.folder + asapo::kPathSeparator + "random";
+    uint64_t size = 0;
+    auto expected_data = io->GetDataFromFile(fname, &size, &err);
     M_AssertEq(nullptr, err);
     err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, size, &code);
     M_AssertTrue(code == asapo::HttpCode::OK);
-    for (uint64_t i=0;i<size;i++) {
+    for (uint64_t i = 0; i < size; i++) {
         if (expected_data[i] != data[i]) {
-            M_AssertTrue(false,"recieve array equal to sent array");
+            M_AssertTrue(false, "receive array equal to sent array");
         }
     }
 
diff --git a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt b/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
index 2eff5008291dff153400eb9194ff409f014dadf3..7ddfbbfbea7efa3dd31162a130bd557e15cf3f17 100644
--- a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
@@ -11,5 +11,5 @@ set_target_properties(${TARGET_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY
 
 prepare_asapo()
 
-add_script_test("${TARGET_NAME}" "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}" nomem)
+add_script_test("${TARGET_NAME}-tcp" "${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME} tcp" nomem)
 
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_linux.sh b/tests/automatic/full_chain/send_recv_substreams/check_linux.sh
index 4defc53a4b89f197faaab7e5fe37a850d6360a1c..f7f1f0241bd2b973a45ef4c8141053c3209dbccf 100644
--- a/tests/automatic/full_chain/send_recv_substreams/check_linux.sh
+++ b/tests/automatic/full_chain/send_recv_substreams/check_linux.sh
@@ -13,6 +13,8 @@ set -e
 
 trap Cleanup EXIT
 
+network_type=$2
+
 Cleanup() {
     set +e
     nomad stop nginx
@@ -27,9 +29,9 @@ Cleanup() {
 nomad run nginx.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run authorizer.nmd
 
 
-$1 127.0.0.1:8400 $beamtime_id $token > out
-cat out
\ No newline at end of file
+$1 127.0.0.1:8400 $beamtime_id $token | tee out
+
diff --git a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp b/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
index 2862a87bf0f148a140b1197e0877cd1008298df1..0a4a0b910d302b9f433bd1d2979f4da59dd661bf 100644
--- a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
+++ b/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
@@ -18,7 +18,7 @@ using BrokerPtr = std::unique_ptr<asapo::DataBroker>;
 using ProducerPtr = std::unique_ptr<asapo::Producer>;
 std::string group_id = "";
 
-int files_sent;
+uint64_t files_sent;
 
 struct Args {
     std::string server;
@@ -70,9 +70,9 @@ ProducerPtr CreateProducer(const Args& args) {
 int main(int argc, char* argv[]) {
     asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
     Args args;
-    if (argc != 4) {
+    if (argc != 5) {
         std::cout << "Usage: " + std::string{argv[0]}
-                  + " <server>  <beamtime_id> <token>"
+                  + " <server> <network_type> <beamtime_id> <token>"
                   <<
                   std::endl;
         exit(EXIT_FAILURE);
@@ -82,7 +82,7 @@ int main(int argc, char* argv[]) {
     args.token = std::string{argv[3]};
     auto producer = CreateProducer(args);
 
-    auto n = 1;
+    uint64_t n = 1;
 
     for (uint64_t i = 0; i < n; i++) {
         asapo::EventHeader event_header{i + 1, 0, std::to_string(i + 1)};
@@ -93,6 +93,10 @@ int main(int argc, char* argv[]) {
 
     Error err;
     auto consumer = CreateBrokerAndGroup(args, &err);
+    if (err) {
+        std::cout << "Error CreateBrokerAndGroup: " << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
 
     asapo::FileInfo fi;
     for (uint64_t i = 0; i < n; i++) {
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt b/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
index 4312c3ca1e80f8d6b9e7530323d2ffaea2fb99cd..26c97503083f48685dd6341f098c6f1a0af57bd8 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
@@ -11,4 +11,4 @@ endif()
 
 file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/send_recv_substreams.py TEST_SCRIPT )
 
-add_script_test("${TARGET_NAME}" "${Python_EXECUTABLE} ${PYTHON_LIBS_CONSUMER} ${PYTHON_LIBS_PRODUCER} ${TEST_SCRIPT} " nomem)
+add_script_test("${TARGET_NAME}" "${Python_EXECUTABLE} ${PYTHON_LIBS_CONSUMER} ${PYTHON_LIBS_PRODUCER} ${TEST_SCRIPT}" nomem)
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh b/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh
index fed341d8cbddf42ba90d0a2949e6a42aeba0e327..024acde6816e99ac39d9bcee2dabae58a9fbfe7b 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh
+++ b/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh
@@ -27,11 +27,11 @@ Cleanup() {
 nomad run nginx.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run authorizer.nmd
 
 
 export PYTHONPATH=$2:$3:${PYTHONPATH}
 
 
-$1 $4 127.0.0.1:8400 $beamtime_id $token
\ No newline at end of file
+$1 $4 127.0.0.1:8400 $beamtime_id $token | tee out
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py b/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
index 60ad65264b71f728a5f16c2a8babfb3d03d9c2f4..c35fb9c0b8757bb46c2b573c0ccffdd37b03045d 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
+++ b/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
@@ -3,10 +3,8 @@ from __future__ import print_function
 import asapo_consumer
 import asapo_producer
 import sys
-import os
-
-
 import threading
+
 lock = threading.Lock()
 
 timeout = 10 * 1000
@@ -56,5 +54,6 @@ while True:
 
 assert_eq(n_recv, n_send, "send=recv")
 assert_eq(substream_finished, True, "substream finished")
+print('Using connection type: ' + broker.current_connection_type())
 
 
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
old mode 100644
new mode 100755
index a52875354109b60d77642588ac7024e1a31b05d6..fcc4a10f02dace183ace3188fd824821bed088e6
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -4,8 +4,12 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -39,18 +43,18 @@ echo "db.dropDatabase()" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd # Only use TCP because the consumer will only use metadata anyways
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 1000 4 0 100
+$producer_bin localhost:8400 ${beamtime_id} 100 1000 4 0 100
 #producerid=`echo $!`
 
-
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
-cat out
-cat out   | grep "Processed 1000 file(s)"
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 | tee out
+grep "Processed 1000 file(s)" out
+grep -i "Using connection type: No connection" out
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
index fd9244556c2a3c0775a069d8d7aac95d63bfaa49..7b2024362594b5997cfc2dbb034d402df511677e 100644
--- a/tests/automatic/full_chain/simple_chain/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -21,7 +21,7 @@ start /B "" "%1" %proxy_address% %beamtime_id% 100 1000 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM consumer
-"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 5000  1 > out.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 5000 1 > out.txt
 type out.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out.txt || goto :error
 
diff --git a/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
index c66bfad61d973da55b0573a5e1599d753853accb..cf7580f6ea6f586300e671d29792dd31600bed1a 100644
--- a/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_simple_chain_dataset)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh b/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
index 7ff5911214b9ab88a4387346c04d0396ea7ad3e9..6972a6ef1174e7224eeed0c020aac6f6308919d0 100644
--- a/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -35,20 +40,19 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 100 4 0 100 5 &
-
-
+$producer_bin localhost:8400 ${beamtime_id} 100 100 4 0 100 5 &
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 1 > out
-cat out
-cat out   | grep "Processed 100 dataset(s)"
-cat out   | grep "with 500 file(s)"
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 1 | tee out
+grep "Processed 100 dataset(s)" out
+grep "with 500 file(s)" out
+grep -i "Using connection type: No connection" out
 
diff --git a/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
index 768562c03d2341c99607114de84028cef78ac9c6..8bd0d723e8f5e9fd26f0413495370b9391cf327c 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
@@ -12,4 +12,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen/check_linux.sh
index 581c064b6b146ff95e52b35690c7047af8723385..4e88475f4060545fb191dbdc280f7f8009bb1257 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -38,15 +43,15 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 test.json &
+$producer_bin test.json &
 producerid=`echo $!`
 
 sleep 1
@@ -55,7 +60,10 @@ echo hello > /tmp/asapo/test_in/test1/file1
 echo hello > /tmp/asapo/test_in/test1/file2
 echo hello > /tmp/asapo/test_in/test2/file2
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 | tee /dev/stderr | grep "Processed 3 file(s)"
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 | tee /dev/stderr out
+grep "Processed 3 file(s)" out
+grep -i "Using connection type: No connection" out
 
 test ! -f /tmp/asapo/test_in/test1/file1
 test ! -f /tmp/asapo/test_in/test1/file2
diff --git a/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
index 500e19ccdfdafaaa065ad5b4f4898523fcd0642c..7de08baad3f1b7f826e60109449a8c3e7e7d30f1 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
@@ -53,5 +53,3 @@ Taskkill /IM "%producer_short_name%" /F
 
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
-
-
diff --git a/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
index c319a11ec8fbd3ac56918c0879d365c47c277f81..6a8d10614a88cb7b2e722efe226c1a7d70d64b5c 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
@@ -12,4 +12,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen_batches/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen_batches/check_linux.sh
index 830a312ea727633cb32798529408febbde77495a..a9a31bb2e782ccdccfdf737fb3ee48e4e58f4abb 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_batches/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen_batches/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -39,15 +44,15 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 test.json &
+$producer_bin test.json &
 producerid=`echo $!`
 
 sleep 1
@@ -56,10 +61,11 @@ echo hello > /tmp/asapo/test_in/test1/file1
 echo hello > /tmp/asapo/test_in/test1/file2
 echo hello > /tmp/asapo/test_in/test2/file2
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 2000 1 1 > out
-cat out
-cat out   | grep "Processed 1 dataset(s)"
-cat out   | grep "with 3 file(s)"
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 2000 1 1 | tee out
+grep "Processed 1 dataset(s)" out
+grep "with 3 file(s)" out
+grep -i "Using connection type: No connection" out
 
 test -f /tmp/asapo/test_in/test1/file1
 test -f /tmp/asapo/test_in/test1/file2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_linux.sh
index 33709677881f49282e0c128a2dd7ec81c4d74ec9..bcdbfa2c31ab8be4bda172aafd3bc70b86c620c6 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_linux.sh
@@ -4,8 +4,12 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -40,20 +44,21 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
 mkdir -p ${receiver_folder}
-#producer1
-$1 test1.json &
+
+echo "Start producer 1"
+$producer_bin test1.json &
 producerid1=`echo $!`
-#producer2
-$1 test2.json &
-producerid2=`echo $!`
 
+echo "Start producer 2"
+$producer_bin test2.json &
+producerid2=`echo $!`
 
 sleep 1
 
@@ -62,7 +67,8 @@ echo hello > /tmp/asapo/test_in/test1/file2
 echo hello > /tmp/asapo/test_in/test2/file1
 echo hello > /tmp/asapo/test_in/test2/file2
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 2000 1 1 > out
-cat out
-cat out   | grep "Processed 2 dataset(s)"
-cat out   | grep "with 4 file(s)"
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 2000 1 1 | tee out
+grep "Processed 2 dataset(s)" out
+grep "with 4 file(s)" out
+grep -i "Using connection type: No connection" out
diff --git a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
index a8cf1670079f3c700aa5e395cc4dd8d8fbb1f1eb..edb87126c1d87a8b6f5ba547fb793b6acfb5bb5c 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
@@ -42,8 +42,9 @@ ping 1.0.0.0 -n 10 -w 100 > nul
 REM consumer
 "%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 1000 1 1 > out.txt
 type out.txt
-findstr /i /l /c:"Processed 2 dataset(s)"  out.txt || goto :error
-findstr /i /l /c:"with 4 file(s)"  out.txt || goto :error
+findstr /i /l /c:"Processed 2 dataset(s)" out.txt || goto :error
+findstr /i /l /c:"with 4 file(s)" out.txt || goto :error
+findstr /i /l /c:"Using connection type: No connection" out.txt || goto :error
 
 goto :clean
 
@@ -60,5 +61,3 @@ Taskkill /IM "%producer_short_name%" /F
 
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
-
-
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
index 7acab0298869b8bd336b8c6d5dd610aebd638451..ec3f269413aaa944deca4abb18dbe3a1e789c672 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
@@ -13,4 +13,12 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+
+if (ENABLE_LIBFABRIC)
+    if (ENABLE_LIBFABRIC_LOCALHOST)
+        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
+    else ()
+        message(WARNING "Disabled automated LibFabric test '${TARGET_NAME}-fabric' because 'ENABLE_LIBFABRIC_LOCALHOST' is not enabled.")
+    endif()
+endif()
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
index 3a60b5c862ac86f91d3b7c14095babfd749f2ee0..05b1e16e160b9bc409af4defc7b985306ff6e349 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
@@ -4,6 +4,11 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
 token=`$3 token -secret auth_secret.key $beamtime_id`
 
@@ -40,15 +45,15 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 test.json &
+$producer_bin test.json &
 producerid=`echo $!`
 
 sleep 1
@@ -57,12 +62,13 @@ echo -n hello1 > /tmp/asapo/test_in/test1/file1
 echo -n hello2 > /tmp/asapo/test_in/test1/file2
 echo -n hello3 > /tmp/asapo/test_in/test2/file2
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 0 > out.txt
-cat out.txt
+echo "Start consumer in $network_type mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 0 | tee out.txt
 grep "Processed 3 file(s)" out.txt
 grep "hello1" out.txt
 grep "hello2" out.txt
 grep "hello3" out.txt
+grep -i "Using connection type: $network_type" out.txt
 
 sleep 12
 
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
index 1e3fcd21031106b6e5fd830611bdfe60ffcd8ea6..dc674898e8a6de37125f77147c700529e6628394 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
@@ -34,7 +34,7 @@ ping 1.0.0.0 -n 10 -w 100 > nul
 
 
 REM consumer
-"%2" %proxy_address%  %receiver_folder% %beamtime_id% 2 %token% 1000 0 > out.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 1000 0 > out.txt
 type out.txt
 findstr /i /l /c:"Processed 3 file(s)" out.txt || goto :error
 findstr /i /l /c:"hello1" out.txt || goto :error
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
index b5bfac21ef5109e54f34d314bcf07a4f9b7f9f93..c54130a4ac26bcdfb6c7b5adf3a92830141642c9 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
@@ -13,4 +13,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_linux.sh
index 4acd7a74d0bd915bcce1538f9348e43358900cbe..8b65535b5995be56fa9a0232e750c8a6e6c8651a 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -39,15 +44,15 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 test.json &
+$producer_bin test.json &
 producerid=`echo $!`
 
 sleep 1
@@ -56,10 +61,11 @@ echo -n hello1 > /tmp/asapo/test_in/test1/file1
 echo -n hello2 > /tmp/asapo/test_in/test1/file2
 echo -n hello3 > /tmp/asapo/test_in/test2/file2
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 0 > out.txt
-cat out.txt
+echo "Start consumer in $network_type mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 0 | tee out.txt
 grep "Processed 3 file(s)" out.txt
 grep "hello1" out.txt
 grep "hello2" out.txt
 grep "hello3" out.txt
+grep -i "Using connection type: No connection" out.txt
 
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
index dc674898e8a6de37125f77147c700529e6628394..68347e567d6ee78de314efea94f6985f1c968898 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
@@ -40,6 +40,7 @@ findstr /i /l /c:"Processed 3 file(s)" out.txt || goto :error
 findstr /i /l /c:"hello1" out.txt || goto :error
 findstr /i /l /c:"hello2" out.txt || goto :error
 findstr /i /l /c:"hello3" out.txt || goto :error
+findstr /i /l /c:"Using connection type: No connection" out.txt || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
index 4ac8f929ab76fb3a26be3e2acbcccc5b6a7cb7b6..2ff284ef9ad078f5f16274ce7ed69f9eb9f21185 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
@@ -1,7 +1,7 @@
-set(TARGET_NAME full_chain_simple_chain_meta)
+set(TARGET_NAME full_chain_simple_chain_metadata)
 
 ################################
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
index 4f1689f50bc5554d932169c3c407dc82939ed281..d766e7ae9933ba3d88167dd64f3b05dbb5a410ce 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
@@ -4,8 +4,12 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -35,16 +39,17 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd # Only use TCP because the consumer will only use metadata anyway
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 1
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 0 1 0 1000
+$producer_bin localhost:8400 ${beamtime_id} 100 0 1 0 1000
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 > out
-cat out
-cat out | grep "dummy_meta"
\ No newline at end of file
+echo "Start consumer in metadata only mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 | tee out
+grep "dummy_meta" out
+grep -i "Using connection type: No connection" out
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
index fca9fca048d39be93477e413caf1095977289158..64671af15df57480a57f41ad512f6ad66c790ded 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
@@ -4,8 +4,11 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+asapo_tool_bin=$2
+
 beamtime_id=asapo_test
-token=`$2 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -35,21 +38,20 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd # Only use TCP because the consumer will only use metadata anyway
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 2
 
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 100 1 0 100
+$producer_bin localhost:8400 ${beamtime_id} 100 100 1 0 100
 
 export PYTHONPATH=$4:${PYTHONPATH}
 export Python_EXECUTABLE=$5
 
-
-$Python_EXECUTABLE $3/get_user_meta.py $proxy_address $receiver_folder $beamtime_id $token new > out
-cat out
-cat out | grep "found images: 100"
-cat out | grep "test100"
+echo "Start python consumer in metadata only mode"
+$Python_EXECUTABLE $3/get_user_meta.py $proxy_address $receiver_folder $beamtime_id $token new | tee out
+grep "found images: 100" out
+grep "test100" out
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
index 3fc487795810b2546dc28f6a45a2ab93fe69f801..2a67b0c4a582883669db4057beadd582269caa0c 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
@@ -21,7 +21,7 @@ mkdir %receiver_folder%
 REM consumer
 set PYTHONPATH=%4
 
-python3 %3/get_user_meta.py %proxy_address%  %receiver_folder% %beamtime_id%  %token% new > out
+python3 %3/get_user_meta.py %proxy_address% %receiver_folder% %beamtime_id%  %token% new > out
 type out
 type out | findstr /c:"found images: 100" || goto :error
 type out | findstr /c:"test100" || goto :error
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py b/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
index 5fb497e041b462bf7290286f805bf1fe646c0737..eaa115726de4fea638cf1658d26fc1111196ce8a 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
@@ -1,7 +1,6 @@
 from __future__ import print_function
 
 import asapo_consumer
-import json
 import sys
 
 source, path, beamtime, token, group_id = sys.argv[1:]
@@ -12,6 +11,3 @@ images = broker.query_images("meta.user_meta regexp 'test*' order by _id")
 
 print ('found images:',len(images))
 print (images[99]['meta']['user_meta'])
-
-
-
diff --git a/tests/automatic/full_chain/two_beamlines/CMakeLists.txt b/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
index 5a352cfc77a224b717ed42e5fe0b89efb086c1f2..88c67cc9255bf4c8ec089c86730d83bda8561968 100644
--- a/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
+++ b/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
@@ -4,4 +4,13 @@ set(TARGET_NAME full_chain_two_beamlines)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+
+if (ENABLE_LIBFABRIC)
+    if (ENABLE_LIBFABRIC_LOCALHOST)
+        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
+    else ()
+        message(WARNING "Disabled automated LibFabric test '${TARGET_NAME}-fabric' because 'ENABLE_LIBFABRIC_LOCALHOST' is not enabled.")
+    endif()
+endif()
+
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index a64bc009d4393331f723631ea44e756cc656cf18..f6c7e26f028ec6ba2c906540d4b6842acdf8988f 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -4,13 +4,18 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 stream=detector
 
 beamtime_id1=asapo_test1
-token1=`$3 token -secret auth_secret.key $beamtime_id1`
+token1=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id1`
 
 beamtime_id2=asapo_test2
-token2=`$3 token -secret auth_secret.key $beamtime_id2`
+token2=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id2`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -45,19 +50,25 @@ echo "db.${beamtime_id2}_${stream}.insert({dummy:1})" | mongo ${beamtime_id2}_${
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_${network_type}.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 3
 
-#producer
+echo "Start producers"
 mkdir -p ${receiver_folder1}
 mkdir -p ${receiver_folder2}
-$1 localhost:8400 ${beamtime_id1} 100 1000 4 0 100 &
-$1 localhost:8400 ${beamtime_id2} 100 900 4 0 100 &
+$producer_bin localhost:8400 ${beamtime_id1} 100 1000 4 0 100 &
+$producer_bin localhost:8400 ${beamtime_id2} 100 900 4 0 100 &
 #producerid=`echo $!`
 
-#consumers
-$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 12000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
-$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 12000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
+echo "Start consumers in $network_type mode"
+$consumer_bin ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 12000 0 | tee /dev/stderr consumer_1.out
+$consumer_bin ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 12000 0 | tee /dev/stderr consumer_2.out
+
+grep "from memory buffer: 1000" consumer_1.out
+grep -i "Using connection type: $network_type" consumer_1.out
+
+grep "from memory buffer: 900" consumer_2.out
+grep -i "Using connection type: $network_type" consumer_2.out
diff --git a/tests/automatic/full_chain/two_streams/CMakeLists.txt b/tests/automatic/full_chain/two_streams/CMakeLists.txt
index 35ce65b50d944f713621826b17d82f8797ba1b87..505c1ff22dde1cbc00fcb4a4a1a08f62d4e04795 100644
--- a/tests/automatic/full_chain/two_streams/CMakeLists.txt
+++ b/tests/automatic/full_chain/two_streams/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_two_streams)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/two_streams/check_linux.sh b/tests/automatic/full_chain/two_streams/check_linux.sh
index 38bcdc4b30c911c8c068842fd13f3325161a7c91..fbbe34ab9801818131ae7443a2a6203092b88579 100644
--- a/tests/automatic/full_chain/two_streams/check_linux.sh
+++ b/tests/automatic/full_chain/two_streams/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 stream1=s1
 stream2=s2
@@ -38,18 +43,23 @@ echo "db.${beamtime_id}_${stream2}.insert({dummy:1})" | mongo ${beamtime_id}_${s
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
 sleep 3
 
-#producer
+echo "Start producers"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id}%${stream1} 100 1000 4 0 100 &
-$1 localhost:8400 ${beamtime_id}%${stream2} 100 900 4 0 100 &
+$producer_bin localhost:8400 ${beamtime_id}%${stream1} 100 1000 4 0 100 &
+$producer_bin localhost:8400 ${beamtime_id}%${stream2} 100 900 4 0 100 &
+
+echo "Start consumers in $network_type mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id}%${stream1} 2 $token 10000 0 | tee /dev/stderr consumer_1.out
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id}%${stream2} 2 $token 10000 0 | tee /dev/stderr consumer_2.out
 
+grep "from memory buffer: 1000" consumer_1.out
+grep -i "Using connection type: $network_type" consumer_1.out
 
-#consumers
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id}%${stream1} 2 $token 10000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id}%${stream2} 2 $token 10000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
+grep "from memory buffer: 900" consumer_2.out
+grep -i "Using connection type: $network_type" consumer_2.out
diff --git a/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt b/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
index 0ba1d0de4810af2b0cdc35cbb688b7a7e2f8f043..244d2522065c3899e612801b4eaddc32b93491f4 100644
--- a/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
+++ b/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
@@ -5,4 +5,4 @@ set(TARGET_NAME broker_mongo_restart)
 ################################
 set(RECEIVER_WRITE_TO_DISK false)
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
old mode 100644
new mode 100755
index 71612bfabc4f8588472eb72c9ed07f8ed011c425..440d88d8ad6e6f2e018905e0bf7264c5afbd301c
--- a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
+++ b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT SIGHUP SIGINT SIGTERM
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$4
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -60,7 +65,7 @@ Cleanup() {
 }
 
 
-sed -i 's/27017/27016/g' receiver.json.tpl
+sed -i 's/27017/27016/g' receiver_tcp.json.tpl
 sed -i 's/27017/27016/g' discovery.json.tpl
 sed -i 's/info/debug/g' broker.json.tpl
 
@@ -70,7 +75,7 @@ wait_mongo 27016
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
@@ -79,15 +84,15 @@ sleep 1
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo --port 27016 ${beamtime_id}_detector
 
 
-
-#producer
+echo "Start producer"
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 1000 4 0 100 &
+$producer_bin localhost:8400 ${beamtime_id} 100 1000 4 0 100 &
 producerid=`echo $!`
 
 wait
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 10000 0 &> output.txt &
+echo "Start consumer in $network_type mode"
+$consumer_bin ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 10000 0 &> output.txt &
 workerid=`echo $!`
 
 sleep 2
@@ -106,4 +111,4 @@ wait
 cat output.txt
 nfiles=`cat output.txt | grep "Processed" | awk   '{print $2;}'`
 test  $nfiles -ge 1000
-rm output.txt
\ No newline at end of file
+rm output.txt
diff --git a/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh b/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
index 45fbc528df1a720d31bfbb8061386a211744f971..11199a46a2c3cb343df16f8a9eab4274ce7531ef 100644
--- a/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
+++ b/tests/automatic/high_avail/receiver_mongo_restart/check_linux.sh
@@ -63,7 +63,7 @@ sed -i 's/27017/27016/g' discovery.json.tpl
 
 nomad run authorizer.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 
 mkdir -p ${receiver_folder}
diff --git a/tests/automatic/high_avail/services_restart/CMakeLists.txt b/tests/automatic/high_avail/services_restart/CMakeLists.txt
index 5f7029bf9725d9a2c23e59c6556c73f02c3d0a73..b62a7b6f95a462ad2a2d4ab4c384907e21436892 100644
--- a/tests/automatic/high_avail/services_restart/CMakeLists.txt
+++ b/tests/automatic/high_avail/services_restart/CMakeLists.txt
@@ -5,5 +5,5 @@ set(TARGET_NAME service_restart)
 ################################
 set(RECEIVER_WRITE_TO_DISK false)
 prepare_asapo()
-add_script_test("${TARGET_NAME}-all" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> broker 1000 998" nomem)
-add_script_test("${TARGET_NAME}-all-but-broker" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> receiver 1000 1000" nomem)
+add_script_test("${TARGET_NAME}-all-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> broker 1000 998 tcp" nomem)
+add_script_test("${TARGET_NAME}-all-but-broker-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> receiver 1000 1000 tcp" nomem)
diff --git a/tests/automatic/high_avail/services_restart/check_linux.sh b/tests/automatic/high_avail/services_restart/check_linux.sh
index aafbb88340f1f3219b4e41c138d5a9422e8d8e96..dd3253f2ff0f3198f9042e6188d23cd548cae4c0 100644
--- a/tests/automatic/high_avail/services_restart/check_linux.sh
+++ b/tests/automatic/high_avail/services_restart/check_linux.sh
@@ -4,8 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+producer_bin=$1
+consumer_bin=$2
+asapo_tool_bin=$3
+network_type=$7
+
 beamtime_id=asapo_test
-token=`$3 token -secret auth_secret.key $beamtime_id`
+token=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id`
 
 monitor_database_name=db_test
 proxy_address=127.0.0.1:8400
@@ -28,7 +33,7 @@ sed -i 's/info/debug/g' broker.json.tpl
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run broker.nmd
 
@@ -36,15 +41,12 @@ sleep 1
 
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo  ${beamtime_id}_detector
 
-
-
-#producer
-$1 localhost:8400 ${beamtime_id} 100 $5 4 0 100 &
+echo "Start producer"
+$producer_bin localhost:8400 ${beamtime_id} 100 $5 4 0 100 &
 #producerid=`echo $!`
 
-
-#consumer
-$2 ${proxy_address} dummy_path ${beamtime_id} 2 $token 30000 1 &> output.txt &
+echo "Start consumer in $network_type mode"
+$consumer_bin ${proxy_address} dummy_path ${beamtime_id} 2 $token 30000 1 &> output.txt &
 
 sleep 1
 
@@ -58,13 +60,17 @@ nomad stop receiver
 nomad run nginx.nmd
 nomad run authorizer.nmd
 nomad run discovery.nmd
-nomad run receiver.nmd
+nomad run receiver_$7.nmd
 
-nomad run $4.nmd
+if [[ "$4" == "receiver" ]]; then
+  nomad run $4_$7.nmd
+else
+  nomad run $4.nmd
+fi
 
 wait
 
 cat output.txt
 nfiles=`cat output.txt | grep "Processed" | awk   '{print $2;}'`
 test  $nfiles -ge $6
-rm output.txt
\ No newline at end of file
+rm output.txt
diff --git a/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp b/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
index 289362011cd1a42d51af187adcdf82ab06af7439..e245e6c407d2728cc7e2fbd79a5df9be5eb65dc0 100644
--- a/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
+++ b/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
@@ -66,9 +66,9 @@ int main(int argc, char* argv[]) {
         Assert(err, "No record");
 
         asapo::StreamInfo info;
-        err = db.GetStreamInfo("test",&info);
+        err = db.GetStreamInfo("test", &info);
         M_AssertEq(nullptr, err);
-        M_AssertEq(fi.id,info.last_id);
+        M_AssertEq(fi.id, info.last_id);
     }
 
     return 0;
diff --git a/tests/automatic/producer/aai/check_linux.sh b/tests/automatic/producer/aai/check_linux.sh
index 553319166de9bb1bfa933bac07b0da0eb141c33e..6a40b0b091ef7f83d551ef71efa63d19dcd41771 100644
--- a/tests/automatic/producer/aai/check_linux.sh
+++ b/tests/automatic/producer/aai/check_linux.sh
@@ -32,7 +32,7 @@ export PYTHONPATH=$2:${PYTHONPATH}
 
 nomad run authorizer.nmd >/dev/null
 nomad run nginx.nmd >/dev/null
-nomad run receiver.nmd >/dev/null
+nomad run receiver_tcp.nmd >/dev/null
 nomad run discovery.nmd >/dev/null
 
 mkdir -p ${receiver_folder} ${receiver_folder2}
diff --git a/tests/automatic/producer/python_api/check_linux.sh b/tests/automatic/producer/python_api/check_linux.sh
index 657146af910c638faee761df87059dafe35d27fc..26e843a56a7df0bc2b6de443c374d7e9c955c88d 100644
--- a/tests/automatic/producer/python_api/check_linux.sh
+++ b/tests/automatic/producer/python_api/check_linux.sh
@@ -30,7 +30,7 @@ echo "db.${beamtime_id}_${stream}.insert({dummy:1})" | mongo ${beamtime_id}_${st
 
 nomad run authorizer.nmd >/dev/null
 nomad run nginx.nmd >/dev/null
-nomad run receiver.nmd >/dev/null
+nomad run receiver_tcp.nmd >/dev/null
 nomad run discovery.nmd >/dev/null
 
 mkdir -p ${receiver_folder}
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index c0212535d07d50bea4eb3f596bb53c1a606942fe..f1579684745cfe43871ab0fb4aafd32e0e16491c 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -27,7 +27,7 @@ Cleanup() {
 mkdir -p ${receiver_folder}
 
 nomad run authorizer.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 nomad run nginx.nmd
 
diff --git a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
index 27dc92b8656704a96ddd5699bbba98d0a73043d2..0f581ea7ed2b9a7b46c8f8cf3e48de99d8b6f01b 100644
--- a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
@@ -33,7 +33,7 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run authorizer.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 
 mkdir -p ${receiver_folder}
@@ -44,4 +44,4 @@ ls -ln ${receiver_folder}/1_1 | awk '{ print $5 }'| grep 100000
 ls -ln ${receiver_folder}/1_2 | awk '{ print $5 }'| grep 100000
 ls -ln ${receiver_folder}/1_3 | awk '{ print $5 }'| grep 100000
 
-echo 'db.data_default.find({"images._id":{$gt:0}},{"images.name":1})' | mongo asapo_test_detector | grep 1_1 | grep 1_2 | grep 1_3
\ No newline at end of file
+echo 'db.data_default.find({"images._id":{$gt:0}},{"images.name":1})' | mongo asapo_test_detector | grep 1_1 | grep 1_2 | grep 1_3
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
index 0b764bdd5711e8ff2246baa09bc001155c277080..1d703fe89d1fcbfa8f3e82be863571a959ab8731 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
@@ -30,7 +30,7 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run authorizer.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 
 mkdir -p ${receiver_folder}
diff --git a/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_linux.sh
index 59e9600aa36a97a8500811f1e74c628b303d2c69..105baa23426235a2078a53ac93b57a38990a1332 100644
--- a/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_linux.sh
@@ -31,7 +31,7 @@ echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detec
 
 nomad run authorizer.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run discovery.nmd
 
 mkdir -p ${receiver_folder}
diff --git a/tests/automatic/settings/receiver_fabric.json.tpl.lin.in b/tests/automatic/settings/receiver_fabric.json.tpl.lin.in
new file mode 100644
index 0000000000000000000000000000000000000000..3c7ec5ef1e2e0b8d9353e5c475af2a2f39ab8e03
--- /dev/null
+++ b/tests/automatic/settings/receiver_fabric.json.tpl.lin.in
@@ -0,0 +1,25 @@
+{
+  "PerformanceDbServer":"localhost:8086",
+  "PerformanceDbName": "db_test",
+  "DatabaseServer":"auto",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
+  "DataServer": {
+    "AdvertiseURI": "127.0.0.1:{{ env "NOMAD_PORT_recv_ds" }}",
+    "NThreads": 2,
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["fabric"]
+  },
+  "DataCache": {
+    "Use": @RECEIVER_USE_CACHE@,
+    "SizeGB": 1,
+    "ReservedShare": 10
+  },
+  "AuthorizationServer": "localhost:8400/asapo-authorizer",
+  "AuthorizationInterval": 1000,
+  "ListenPort": {{ env "NOMAD_PORT_recv" }},
+  "Tag": "{{ env "NOMAD_ADDR_recv" }}",
+  "WriteToDisk": @RECEIVER_WRITE_TO_DISK@,
+  "ReceiveToDiskThresholdMB":50,
+  "WriteToDb": true,
+  "LogLevel" : "debug"
+  }
diff --git a/tests/automatic/settings/receiver.json.tpl.lin.in b/tests/automatic/settings/receiver_tcp.json.tpl.lin.in
similarity index 89%
rename from tests/automatic/settings/receiver.json.tpl.lin.in
rename to tests/automatic/settings/receiver_tcp.json.tpl.lin.in
index 1177861e12ba54dcf0e0a0e0c4c21cb30a079283..4414e4692d60a7f9180b0781f73a8889d9215b33 100644
--- a/tests/automatic/settings/receiver.json.tpl.lin.in
+++ b/tests/automatic/settings/receiver_tcp.json.tpl.lin.in
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI": "127.0.0.1:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": 2,
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": @RECEIVER_USE_CACHE@,
diff --git a/tests/automatic/settings/receiver.json.tpl.win.in b/tests/automatic/settings/receiver_tcp.json.tpl.win.in
similarity index 89%
rename from tests/automatic/settings/receiver.json.tpl.win.in
rename to tests/automatic/settings/receiver_tcp.json.tpl.win.in
index 02fdd657bff47c4f8988b677e57092917e5db1e9..b2afbf0d4a22b17512ff7e7e09ad70c001fe29aa 100644
--- a/tests/automatic/settings/receiver.json.tpl.win.in
+++ b/tests/automatic/settings/receiver_tcp.json.tpl.win.in
@@ -9,7 +9,8 @@
   "DataServer": {
     "AdvertiseURI": "127.0.0.1:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": 2,
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": @RECEIVER_USE_CACHE@,
diff --git a/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp b/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
index a0a0c224b1ad74a57888953c06129652a5bdd21d..267d82a7044cfd9cd546f93c794d2bc6cb2f953e 100644
--- a/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
+++ b/tests/automatic/system_io/ip_tcp_network/client_serv/ip_tcp_network.cpp
@@ -185,7 +185,7 @@ int main(int argc, char* argv[]) {
 
     std::cout << "[META] Check unknown host" << std::endl;
     io->CreateAndConnectIPTCPSocket("some-host-that-might-not-exists.aa:1234", &err);
-    if(err != asapo::IOErrorTemplates::kAddressNotValid) {
+    if(err != asapo::IOErrorTemplates::kUnableToResolveHostname) {
         ExitIfErrIsNotOk(&err, 303);
     }
 
diff --git a/tests/automatic/system_io/resolve_hostname_to_ip/resolve_hostname_to_ip.cpp b/tests/automatic/system_io/resolve_hostname_to_ip/resolve_hostname_to_ip.cpp
index c45cd6b193e1b0b8a12926cce025bb8ecc1cef97..56b9384a0accd5fc2bf469843aa3e252e120895e 100644
--- a/tests/automatic/system_io/resolve_hostname_to_ip/resolve_hostname_to_ip.cpp
+++ b/tests/automatic/system_io/resolve_hostname_to_ip/resolve_hostname_to_ip.cpp
@@ -11,12 +11,12 @@ void Check(const std::string& expected_ip_address, const std::string& hostname)
     Error err;
     auto io = std::unique_ptr<asapo::IO> {asapo::GenerateDefaultIO()};
     std::string ip_address = io->ResolveHostnameToIp(hostname, &err);
-    M_AssertEq(expected_ip_address, ip_address);
     if(expected_ip_address.empty()) {
-        M_AssertTrue(err != nullptr && asapo::IOErrorTemplates::kUnableToResolveHostname == err);
-        return;
+        M_AssertEq(asapo::IOErrorTemplates::kUnableToResolveHostname, err);
+    } else {
+        M_AssertEq(nullptr, err);
     }
-    M_AssertTrue(err == nullptr);
+    M_AssertEq(expected_ip_address, ip_address);
 }
 
 int main(int argc, char* argv[]) {
@@ -30,5 +30,9 @@ int main(int argc, char* argv[]) {
 
     Check("", "some-address-that-does-not-exists.ff");
     Check("", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.ff");
+
+    // Fallthrough tests
+    Check("123.123.123.123", "123.123.123.123");
+    Check("8.8.8.8", "8.8.8.8");
     return 0;
 }
diff --git a/tests/manual/asapo_fabric/fabric_client.cpp b/tests/manual/asapo_fabric/fabric_client.cpp
index 792a8293ff7d5cad7a9945fb68fcd9b04b28f758..072b8e1cb9e9bc10c3e01b488abff529ca8bcded 100644
--- a/tests/manual/asapo_fabric/fabric_client.cpp
+++ b/tests/manual/asapo_fabric/fabric_client.cpp
@@ -7,16 +7,28 @@ using namespace asapo;
 using namespace asapo::fabric;
 
 int main(int argc, char* argv[]) {
-    if (argc != 3) {
+    if (argc < 3 || argc > 5) {
         std::cout
-                << "Usage: " << argv[0] << " <serverAddress> <serverPort>" << std::endl
+                << "Usage: " << argv[0] <<
+                " <serverAddress> <serverPort> [kiByte=1024*400/*400MiByte*/ /*MUST BE SYNC WITH SERVER*/] [count=10]" << std::endl
+#ifdef LIBFARBIC_ALLOW_LOCALHOST
                 << "If the address is localhost or 127.0.0.1 the verbs connection will be emulated" << std::endl
+#endif
                 ;
         return 1;
     }
 
     std::string serverAddressString = std::string(argv[1]) + ':' + std::string(argv[2]);
 
+    int kByte = 1024 * 400 /*400 MiByte*/;
+    if (argc >= 4) {
+        kByte = std::stoi(argv[3]);
+    }
+    int count = 10;
+    if (argc >= 5) {
+        count = std::stoi(argv[4]);
+    }
+
     Error error;
     auto factory = GenerateDefaultFabricFactory();
 
@@ -26,8 +38,9 @@ int main(int argc, char* argv[]) {
         return 1;
     }
 
-    size_t dataBufferSize = 1024 * 1024 * 400 /*400 MiByte*/;
+    size_t dataBufferSize = 1024 * kByte;
     FileData dataBuffer = FileData{new uint8_t[dataBufferSize]};
+    std::cout << "Expected file size: " << dataBufferSize << " byte" << std::endl;
 
     auto serverAddress = client->AddServerAddress(serverAddressString, &error);
     if (error) {
@@ -46,7 +59,7 @@ int main(int argc, char* argv[]) {
     auto start = std::chrono::high_resolution_clock::now();
 
     std::cout << "Starting message loop" << std::endl;
-    for (FabricMessageId messageId = 0; messageId < 10 && !error; messageId++) {
+    for (FabricMessageId messageId = 0; messageId < count && !error; messageId++) {
         GenericRequestHeader request{};
         memcpy(&request.message, mr->GetDetails(), sizeof(MemoryRegionDetails));
         client->Send(serverAddress, messageId, &request, sizeof(request), &error);
diff --git a/tests/manual/asapo_fabric/fabric_server.cpp b/tests/manual/asapo_fabric/fabric_server.cpp
index fb973398bbc07b93cf75fecc9972235382596e5a..298f99c44683cbe7eb01267465b22e6718c4dc2b 100644
--- a/tests/manual/asapo_fabric/fabric_server.cpp
+++ b/tests/manual/asapo_fabric/fabric_server.cpp
@@ -16,7 +16,7 @@ void ServerThread(FabricServer* server, size_t bufferSize, FileData* buffer) {
         GenericRequestHeader request;
 
         server->RecvAny(&clientAddress, &messageId, &request, sizeof(request), &error);
-        if (error == FabricErrorTemplates::kTimeout) {
+        if (error == IOErrorTemplates::kTimeout) {
             error = nullptr;
             continue;
         }
@@ -37,10 +37,13 @@ void ServerThread(FabricServer* server, size_t bufferSize, FileData* buffer) {
 }
 
 int main(int argc, char* argv[]) {
-    if (argc != 3) {
+    if (argc < 3 || argc > 4) {
         std::cout
-                << "Usage: " << argv[0] << " <listenAddress> <listenPort>" << std::endl
+                << "Usage: " << argv[0] << " <listenAddress> <listenPort> [kiByte=1024*400/*400MiByte*/ /*MUST BE SYNC WITH CLIENT*/]"
+                << std::endl
+#ifdef LIBFARBIC_ALLOW_LOCALHOST
                 << "If the address is localhost or 127.0.0.1 the verbs connection will be emulated" << std::endl
+#endif
                 ;
         return 1;
     }
@@ -57,11 +60,17 @@ int main(int argc, char* argv[]) {
         return 1;
     }
 
+    int kByte = 1024 * 400 /*400 MiByte*/;
+    if (argc >= 4) {
+        kByte = std::stoi(argv[3]);
+    }
+
     std::cout << "Server is listening on " << server->GetAddress() << std::endl;
 
-    size_t dataBufferSize = 1024 * 1024 * 400 /*400 MiByte*/;
+    size_t dataBufferSize = 1024 * kByte;
     FileData dataBuffer = FileData{new uint8_t[dataBufferSize]};
     strcpy((char*)dataBuffer.get(), "I (the server) wrote into your buffer.");
+    std::cout << "Expected file size: " << dataBufferSize << " byte" << std::endl;
 
     running = true;
     auto thread = io->NewThread("ServerThread", [&server, &dataBufferSize, &dataBuffer]() {
diff --git a/tests/manual/broker_debug_local/receiver.json b/tests/manual/broker_debug_local/receiver.json
index 5567105bf672adfedefa41bacd655e2c24d20351..62c11ec5a306342dd8996a409bc9e64d91f35365 100644
--- a/tests/manual/broker_debug_local/receiver.json
+++ b/tests/manual/broker_debug_local/receiver.json
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI":"127.0.0.1",
     "NThreads": 2,
-    "ListenPort": 22000
+    "ListenPort": 22000,
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,
diff --git a/tests/manual/broker_debug_local/receiver.json.tpl b/tests/manual/broker_debug_local/receiver.json.tpl
index a6c0887d9c96c0f22c59808f7c438009704f7896..234e78b51f86e603e6bf5b9c7e5b184d16f4537c 100644
--- a/tests/manual/broker_debug_local/receiver.json.tpl
+++ b/tests/manual/broker_debug_local/receiver.json.tpl
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,
diff --git a/tests/manual/broker_debug_local/receiver.nmd b/tests/manual/broker_debug_local/receiver.nmd
index bb4cfe877376ba98f0137725db8c074f5a93d75a..6d7986648fb477ff6446018f871e1f78c31fc173 100644
--- a/tests/manual/broker_debug_local/receiver.nmd
+++ b/tests/manual/broker_debug_local/receiver.nmd
@@ -36,7 +36,7 @@ job "receiver" {
       }
 
       template {
-         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/producer_receiver/check_monitoring/receiver.json.tpl"
+         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/producer_receiver/check_monitoring/receiver_tcp.json.tpl"
          destination   = "local/receiver.json"
          change_mode   = "signal"
          change_signal = "SIGHUP"
diff --git a/tests/manual/broker_debug_local/start_services.sh b/tests/manual/broker_debug_local/start_services.sh
index 0de9567bff5b2665537da0cba9fca69a1b386877..fe817656d3d39911b55c731da9ea569ca287fb84 100755
--- a/tests/manual/broker_debug_local/start_services.sh
+++ b/tests/manual/broker_debug_local/start_services.sh
@@ -3,5 +3,5 @@
 nomad run authorizer.nmd
 nomad run discovery.nmd
 #nomad run broker.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
 nomad run nginx.nmd
diff --git a/tests/manual/performance_broker_receiver/getlast_broker.cpp b/tests/manual/performance_broker_receiver/getlast_broker.cpp
index 1adcda25b2a1edee2db8379dfabc6229fa565987..d4ce9cf5190ebe684a533c1f4c90543f34686784 100644
--- a/tests/manual/performance_broker_receiver/getlast_broker.cpp
+++ b/tests/manual/performance_broker_receiver/getlast_broker.cpp
@@ -16,6 +16,19 @@ using asapo::Error;
 std::string group_id = "";
 std::mutex lock;
 
+
+inline std::string ConnectionTypeToString(asapo::NetworkConnectionType type) {
+    switch (type) {
+    case asapo::NetworkConnectionType::kUndefined:
+        return "No connection";
+    case asapo::NetworkConnectionType::kAsapoTcp:
+        return "TCP";
+    case asapo::NetworkConnectionType::kFabric:
+        return "Fabric";
+    }
+    return "Unknown type";
+}
+
 struct Args {
     std::string server;
     std::string file_path;
@@ -102,7 +115,8 @@ std::vector<std::thread> StartThreads(const Args& params,
     return threads;
 }
 
-int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nbuf, int* nfiles_total) {
+int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nbuf, int* nfiles_total,
+                asapo::NetworkConnectionType* connection_type) {
     asapo::FileInfo fi;
     system_clock::time_point t1 = system_clock::now();
 
@@ -110,6 +124,7 @@ int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nb
     std::vector<int> errors(params.nthreads, 0);
     std::vector<int> nfiles_frombuf(params.nthreads, 0);
     std::vector<int> nfiles_total_in_datasets(params.nthreads, 0);
+    std::vector<asapo::NetworkConnectionType> connection_types(params.nthreads, asapo::NetworkConnectionType::kUndefined);
 
     auto threads = StartThreads(params, &nfiles, &errors, &nfiles_frombuf, &nfiles_total_in_datasets);
     WaitThreads(&threads);
@@ -122,6 +137,32 @@ int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nb
     system_clock::time_point t2 = system_clock::now();
     auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
     *duration_ms = duration_read.count();
+
+    // The following two loops will check if all threads that processed some data were using the same network type
+    {
+        int firstThreadThatActuallyProcessedData = 0;
+        for (int i = 0; i < params.nthreads; i++) {
+            if (nfiles[i] > 0) {
+                firstThreadThatActuallyProcessedData = i;
+                break;
+            }
+        }
+
+        *connection_type = connection_types[firstThreadThatActuallyProcessedData];
+        for (int i = 0; i < params.nthreads; i++) {
+            if (*connection_type != connection_types[i] && nfiles[i] > 0) {
+                // The output will look like this:
+                // ERROR thread[1](processed 3 files) connection type is 'TCP' but thread[0](processed 5 files) is 'No connection'
+
+                std::cout << "ERROR thread[" << i << "](processed " << nfiles[i] << " files) connection type is '" <<
+                          ConnectionTypeToString(connection_types[i]) << "' but thread["
+                          << firstThreadThatActuallyProcessedData << "](processed "
+                          << nfiles[firstThreadThatActuallyProcessedData] << " files) is '" << ConnectionTypeToString(
+                              *connection_type) << "'" << std::endl;
+            }
+        }
+    }
+
     return n_total;
 }
 
@@ -129,9 +170,9 @@ int main(int argc, char* argv[]) {
     asapo::ExitAfterPrintVersionIfNeeded("GetLast Broker Example", argc, argv);
     Args params;
     params.datasets = false;
-    if (argc != 8 && argc != 9) {
+    if (argc != 9 && argc != 10) {
         std::cout << "Usage: " + std::string{argv[0]}
-                  + " <server> <files_path> <run_name> <nthreads> <token> <timeout ms> <metaonly> [use datasets]"
+                  + " <server> <network_type> <files_path> <run_name> <nthreads> <token> <timeout ms> <metaonly> [use datasets]"
                   <<
                   std::endl;
         exit(EXIT_FAILURE);
@@ -148,7 +189,8 @@ int main(int argc, char* argv[]) {
     }
     uint64_t duration_ms;
     int nerrors, nbuf, nfiles_total;
-    auto nfiles = ReadAllData(params, &duration_ms, &nerrors, &nbuf, &nfiles_total);
+    asapo::NetworkConnectionType connectionType;
+    auto nfiles = ReadAllData(params, &duration_ms, &nerrors, &nbuf, &nfiles_total, &connectionType);
     std::cout << "Processed " << nfiles << (params.datasets ? " dataset(s)" : " file(s)") << std::endl;
     if (params.datasets) {
         std::cout << "  with " << nfiles_total << " file(s)" << std::endl;
@@ -161,5 +203,8 @@ int main(int argc, char* argv[]) {
     std::cout << "Errors : " << nerrors << std::endl;
     std::cout << "Elapsed : " << duration_ms << "ms" << std::endl;
     std::cout << "Rate : " << 1000.0f * nfiles / (duration_ms) << " Hz" << std::endl;
+
+    std::cout << "Using connection type: " << ConnectionTypeToString(connectionType) << std::endl;
+
     return nerrors == 0 ? 0 : 1;
 }
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index 733b99f2c7eb8cad90f14484817fe5f243d9dffe..56b1536ad37cac5a6365e4a6e74b1cf74e63983b 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -6,6 +6,7 @@ trap Cleanup EXIT
 
 #clean-up
 Cleanup() {
+echo cleanup
 set +e
 ssh ${receiver_node} rm -f ${receiver_dir}/files/${beamline}/${beamtime_id}/*
 ssh ${receiver_node} killall receiver
diff --git a/tests/manual/python_tests/consumer/consumer_api.py b/tests/manual/python_tests/consumer/consumer_api.py
index b3b8a20e9a6fd0243a9ba14e7c098bc715d59748..60c82c03cdcf5d5a3ced09a71ecbee7a37bce78f 100644
--- a/tests/manual/python_tests/consumer/consumer_api.py
+++ b/tests/manual/python_tests/consumer/consumer_api.py
@@ -3,7 +3,7 @@ from __future__ import print_function
 import asapo_consumer
 import sys
 
-source, path,beamtime, token = sys.argv[1:]
+source, path, beamtime, token = sys.argv[1:]
 broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
 group_id = broker.generate_group_id()
 
@@ -20,4 +20,4 @@ print(res)
 #print (len(data),data[0:100])
 #data.tofile("out")
 
-sys.exit(0)
\ No newline at end of file
+sys.exit(0)
diff --git a/tests/manual/python_tests/consumer/test.sh b/tests/manual/python_tests/consumer/test.sh
index 6b5254e86fc977cf3afda84823a48a00f5ae6386..9c9712593b009f4bc895e54907e27dd56b7d7e51 100644
--- a/tests/manual/python_tests/consumer/test.sh
+++ b/tests/manual/python_tests/consumer/test.sh
@@ -1 +1 @@
-python3 consumer_api.py asapo-services.desy.de:8400 asapo_test /shared_data/test_facility/gpfs/test/2019/data/asapo_test KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc=
\ No newline at end of file
+python3 consumer_api.py asapo-services.desy.de:8400 asapo_test /shared_data/test_facility/gpfs/test/2019/data/asapo_test KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc=
diff --git a/tests/manual/python_tests/consumer/test_k8s.sh b/tests/manual/python_tests/consumer/test_k8s.sh
index 3cd69a940ae900b7c389d53eff4922a05bf30eb3..9ee6bb429689c22b6be068b889a89f1ad5a62094 100755
--- a/tests/manual/python_tests/consumer/test_k8s.sh
+++ b/tests/manual/python_tests/consumer/test_k8s.sh
@@ -1,4 +1,4 @@
 export PYTHONPATH=/Users/yakubov/projects/asapo/cmake-build-debug/consumer/api/python
 export token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 python3 consumer_api.py gest-k8s-test2.desy.de/yakser /test_offline/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token
-#python3 getnext.py gest-k8s-test2.desy.de/yakser /test_offline/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token new
\ No newline at end of file
+#python3 getnext.py gest-k8s-test2.desy.de/yakser /test_offline/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token new
diff --git a/tests/manual/python_tests/producer/receiver.json.tpl b/tests/manual/python_tests/producer/receiver.json.tpl
index 6f80d44bbde2b6531ff5c05406b0a9ded02e11a5..ec637c58473d65b47548df1266e65e0185bcbee1 100644
--- a/tests/manual/python_tests/producer/receiver.json.tpl
+++ b/tests/manual/python_tests/producer/receiver.json.tpl
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,
diff --git a/tests/manual/python_tests/producer/receiver.nmd b/tests/manual/python_tests/producer/receiver.nmd
index 75fbca5749a82d13f1d3d44ad90b0afb2cec68e0..a25edbf7b68a661e2ee25261e3dcb2a3a8f17101 100644
--- a/tests/manual/python_tests/producer/receiver.nmd
+++ b/tests/manual/python_tests/producer/receiver.nmd
@@ -36,7 +36,7 @@ job "receiver" {
       }
 
       template {
-         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/full_chain/simple_chain/receiver.json.tpl"
+         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/full_chain/simple_chain/receiver_tcp.json.tpl"
          destination   = "local/receiver.json"
          change_mode   = "signal"
          change_signal = "SIGHUP"
diff --git a/tests/manual/python_tests/producer/short_test.py b/tests/manual/python_tests/producer/short_test.py
index 849b22c359c3a2039c262a0974839a7a0330237b..e5e1b98e3039eef5259a022a9a63b2c0629d2d06 100644
--- a/tests/manual/python_tests/producer/short_test.py
+++ b/tests/manual/python_tests/producer/short_test.py
@@ -2,9 +2,8 @@ from __future__ import print_function
 
 import asapo_producer
 import sys
-import time
-import numpy as np
 import threading
+
 lock = threading.Lock()
 
 
diff --git a/tests/manual/python_tests/producer/start_services.sh b/tests/manual/python_tests/producer/start_services.sh
index bd0128b53c38ed3181131fdde33bc90ba02c3683..1b882a4a885571fcfa7e2e83340be585d0d9110a 100755
--- a/tests/manual/python_tests/producer/start_services.sh
+++ b/tests/manual/python_tests/producer/start_services.sh
@@ -3,4 +3,4 @@
 nomad run authorizer.nmd
 nomad run discovery.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
diff --git a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
index 6f80d44bbde2b6531ff5c05406b0a9ded02e11a5..ec637c58473d65b47548df1266e65e0185bcbee1 100644
--- a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
+++ b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
-    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
+    "ListenPort": {{ env "NOMAD_PORT_recv_ds" }},
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,
diff --git a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.nmd b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.nmd
index 75fbca5749a82d13f1d3d44ad90b0afb2cec68e0..a25edbf7b68a661e2ee25261e3dcb2a3a8f17101 100644
--- a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.nmd
+++ b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.nmd
@@ -36,7 +36,7 @@ job "receiver" {
       }
 
       template {
-         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/full_chain/simple_chain/receiver.json.tpl"
+         source        = "/home/yakubov/projects/asapo/cmake-build-debug/tests/automatic/full_chain/simple_chain/receiver_tcp.json.tpl"
          destination   = "local/receiver.json"
          change_mode   = "signal"
          change_signal = "SIGHUP"
diff --git a/tests/manual/python_tests/producer_wait_bug_mongo/start_services.sh b/tests/manual/python_tests/producer_wait_bug_mongo/start_services.sh
index bd0128b53c38ed3181131fdde33bc90ba02c3683..1b882a4a885571fcfa7e2e83340be585d0d9110a 100755
--- a/tests/manual/python_tests/producer_wait_bug_mongo/start_services.sh
+++ b/tests/manual/python_tests/producer_wait_bug_mongo/start_services.sh
@@ -3,4 +3,4 @@
 nomad run authorizer.nmd
 nomad run discovery.nmd
 nomad run nginx.nmd
-nomad run receiver.nmd
+nomad run receiver_tcp.nmd
diff --git a/tests/manual/receiver_debug_local/receiver.json b/tests/manual/receiver_debug_local/receiver.json
index 5567105bf672adfedefa41bacd655e2c24d20351..62c11ec5a306342dd8996a409bc9e64d91f35365 100644
--- a/tests/manual/receiver_debug_local/receiver.json
+++ b/tests/manual/receiver_debug_local/receiver.json
@@ -6,7 +6,8 @@
   "DataServer": {
     "AdvertiseURI":"127.0.0.1",
     "NThreads": 2,
-    "ListenPort": 22000
+    "ListenPort": 22000,
+    "NetworkMode": ["tcp"]
   },
   "DataCache": {
     "Use": true,