diff --git a/common/cpp/include/common/error.h b/common/cpp/include/common/error.h
index 12488250f0c878fb05463c40bdfe84276efac1b2..0efb3605971c62071902a02baff670a8aec44bae 100644
--- a/common/cpp/include/common/error.h
+++ b/common/cpp/include/common/error.h
@@ -13,6 +13,7 @@ enum class ErrorType {
     kAsapoError,
     kHttpError,
     kIOError,
+    kDBError,
     kReceiverError,
     kProducerError,
 
diff --git a/common/cpp/include/common/io_error.h b/common/cpp/include/common/io_error.h
index a04af80203e35cd82cd9f13fe64803775141fc7c..bed8165b601ee3668fd36c871996adb69362c58e 100644
--- a/common/cpp/include/common/io_error.h
+++ b/common/cpp/include/common/io_error.h
@@ -26,7 +26,8 @@ enum class IOErrorType {
     kUnableToResolveHostname,
     kSocketOperationUnknownAtLevel,
     kSocketOperationValueOutOfBound,
-    kAddressNotValid
+    kAddressNotValid,
+    kBrokenPipe
 
 };
 
@@ -98,6 +99,11 @@ auto const kAddressNotValid =  IOErrorTemplate {
     "Address not valid", IOErrorType::kAddressNotValid
 };
 
+auto const kBrokenPipe =  IOErrorTemplate {
+    "Broken pipe/connection", IOErrorType::kBrokenPipe
+};
+
+
 }
 
 }
diff --git a/common/cpp/include/database/database.h b/common/cpp/include/database/database.h
index 2bbd8ed8e1740cf53e848f84a838b22dcaf6b09d..129a5e3a6ec927ceb1e3d9a64e088e85d378db0c 100644
--- a/common/cpp/include/database/database.h
+++ b/common/cpp/include/database/database.h
@@ -8,18 +8,6 @@
 
 namespace asapo {
 
-namespace DBError {
-auto const KUnknownError = "Inknown error";
-auto const kConnectionError = "Connection error";
-auto const kInsertError = "Insert error";
-auto const kDuplicateID = "Duplicate ID";
-auto const kAlreadyConnected = "Already connected";
-auto const kNotConnected = "Not connected";
-auto const kBadAddress = "Bad address";
-auto const kMemoryError = "Memory error";
-
-}
-
 constexpr char kDBDataCollectionName[] = "data";
 constexpr char kDBMetaCollectionName[] = "meta";
 
diff --git a/common/cpp/include/database/db_error.h b/common/cpp/include/database/db_error.h
new file mode 100644
index 0000000000000000000000000000000000000000..c36e52458ac127e484513428b63fbe4c5e863973
--- /dev/null
+++ b/common/cpp/include/database/db_error.h
@@ -0,0 +1,68 @@
+#ifndef ASAPO_SYSTEM__DB_ERROR_H
+#define ASAPO_SYSTEM__DB_ERROR_H
+
+#include "common/error.h"
+
+namespace asapo {
+
+
+enum class DBErrorType {
+    kJsonParseError,
+    kUnknownError,
+    kConnectionError,
+    kNotConnected,
+    kInsertError,
+    kDuplicateID,
+    kAlreadyConnected,
+    kBadAddress,
+    kMemoryError
+};
+
+using DBError = ServiceError<DBErrorType, ErrorType::kDBError>;
+using DBErrorTemplate = ServiceErrorTemplate<DBErrorType, ErrorType::kDBError>;
+
+namespace DBErrorTemplates {
+
+auto const kNotConnected = DBErrorTemplate {
+    "Not connected", DBErrorType::kNotConnected
+};
+
+
+auto const kDBError = DBErrorTemplate {
+    "Unknown Error", DBErrorType::kUnknownError
+};
+
+auto const kConnectionError = DBErrorTemplate {
+    "Connection error", DBErrorType::kConnectionError
+};
+
+auto const kInsertError = DBErrorTemplate {
+    "Insert error", DBErrorType::kInsertError
+};
+
+auto const kJsonParseError = DBErrorTemplate {
+    "JSON parse error", DBErrorType::kJsonParseError
+};
+
+auto const kDuplicateID = DBErrorTemplate {
+    "Duplicate ID", DBErrorType::kDuplicateID
+};
+
+auto const kAlreadyConnected = DBErrorTemplate {
+    "Not connected", DBErrorType::kAlreadyConnected
+};
+
+auto const kBadAddress = DBErrorTemplate {
+    "Bad address", DBErrorType::kBadAddress
+};
+
+auto const kMemoryError = DBErrorTemplate {
+    "Memory error", DBErrorType::kMemoryError
+};
+
+
+}
+
+}
+
+#endif //ASAPO_SYSTEM__DB_ERROR_H
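Usage note (illustrative, not part of the patch): the new DB templates follow the same ServiceErrorTemplate pattern as the IO errors - an Error is produced with Generate(), optionally with an extra message, and checked by comparing against the template. A minimal sketch, with the wrapper function name purely hypothetical:

#include <iostream>
#include "database/db_error.h"

void ErrorTemplateSketch() {
    asapo::Error err = asapo::DBErrorTemplates::kInsertError.Generate("duplicate key");
    if (err == asapo::DBErrorTemplates::kInsertError) {   // comparison is against the template, not the text
        std::cout << err->Explain() << std::endl;         // base text plus the supplied message
    }
}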
diff --git a/common/cpp/include/unittests/MockDatabase.h b/common/cpp/include/unittests/MockDatabase.h
index de44724a0fd951759da3944257a1f761e812c358..9216e8c4615019e0a31a7f0f73bcf8705242717f 100644
--- a/common/cpp/include/unittests/MockDatabase.h
+++ b/common/cpp/include/unittests/MockDatabase.h
@@ -20,14 +20,14 @@ class MockDatabase : public Database {
         return Error{Insert_t(file, ignore_duplicates)};
     }
 
-    MOCK_METHOD3(Connect_t, SimpleError * (const std::string&, const std::string&, const std::string&));
-    MOCK_CONST_METHOD2(Insert_t, SimpleError * (const FileInfo&, bool));
+    MOCK_METHOD3(Connect_t, ErrorInterface * (const std::string&, const std::string&, const std::string&));
+    MOCK_CONST_METHOD2(Insert_t, ErrorInterface * (const FileInfo&, bool));
 
     Error Upsert(uint64_t id, const uint8_t* data, uint64_t size) const override {
         return Error{Upsert_t(id, data, size)};
 
     }
-    MOCK_CONST_METHOD3(Upsert_t, SimpleError * (uint64_t id, const uint8_t* data, uint64_t size));
+    MOCK_CONST_METHOD3(Upsert_t, ErrorInterface * (uint64_t id, const uint8_t* data, uint64_t size));
 
 
 
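Since the mocks now return a raw ErrorInterface*, test expectations hand over a released pointer and the Insert/Connect/Upsert wrappers re-own it as Error - the same pattern the folder_to_db tests switch to further down. A minimal gmock sketch (file_info is a placeholder, and MockDatabase is assumed to live in the asapo namespace):

// inside a gtest/gmock test body
asapo::MockDatabase mock_db;
EXPECT_CALL(mock_db, Insert_t(testing::_, false))
    .WillOnce(testing::Return(asapo::DBErrorTemplates::kDuplicateID.Generate().release()));
auto err = mock_db.Insert(file_info, false);   // wrapper re-owns the raw pointer as Error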
diff --git a/common/cpp/src/database/database.cpp b/common/cpp/src/database/database.cpp
index c207779a58fa4b2b9441ced6f77bd7121e7406d3..b4397eebf9c774a7844842b5f7eb884a10dcf30e 100644
--- a/common/cpp/src/database/database.cpp
+++ b/common/cpp/src/database/database.cpp
@@ -9,7 +9,7 @@ std::unique_ptr<Database> DatabaseFactory::Create(Error* err) const noexcept {
         p.reset(new MongoDBClient());
         *err = nullptr;
     } catch (...) {
-        *err = TextError(DBError::kMemoryError);
+        *err = ErrorTemplates::kMemoryAllocationError.Generate();
     }
     return p;
 };
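With this change the factory's only failure mode is the generic allocation error; a minimal caller sketch, assuming DatabaseFactory is instantiated directly as in the folder_to_db tool:

asapo::DatabaseFactory factory;
asapo::Error err;
auto db = factory.Create(&err);
if (err != nullptr) {
    // only possible failure in this hunk: ErrorTemplates::kMemoryAllocationError
}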
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index 5d398425c437d54f04af96520e1e29a69e4c2b3f..4b65b5c63431168cddae537187e1ff8492780d3c 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -1,4 +1,5 @@
 #include "mongodb_client.h"
+#include "database/db_error.h"
 
 namespace asapo {
 
@@ -25,7 +26,7 @@ Error MongoDBClient::Ping() {
     bson_destroy (&reply);
     bson_destroy (command);
 
-    return !retval ? TextError(DBError::kConnectionError) : nullptr;
+    return !retval ? DBErrorTemplates::kConnectionError.Generate() : nullptr;
 
 }
 MongoDBClient::MongoDBClient() {
@@ -37,7 +38,7 @@ Error MongoDBClient::InitializeClient(const string& address) {
     client_ = mongoc_client_new (uri_str.c_str());
 
     if (client_ == nullptr) {
-        return TextError(DBError::kBadAddress);
+        return DBErrorTemplates::kBadAddress.Generate();
     }
     return nullptr;
 
@@ -65,7 +66,7 @@ Error MongoDBClient::TryConnectDatabase() {
 Error MongoDBClient::Connect(const string& address, const string& database_name,
                              const string& collection_name) {
     if (connected_) {
-        return TextError(DBError::kAlreadyConnected);
+        return DBErrorTemplates::kAlreadyConnected.Generate();
     }
 
     auto err = InitializeClient(address);
@@ -98,7 +99,7 @@ bson_p PrepareBsonDocument(const FileInfo& file, Error* err) {
     auto json = reinterpret_cast<const uint8_t*>(s.c_str());
     auto bson = bson_new_from_json(json, -1, &mongo_err);
     if (!bson) {
-        *err = TextError(std::string(DBError::kInsertError) + ": " + mongo_err.message);
+        *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
         return nullptr;
     }
 
@@ -110,7 +111,7 @@ bson_p PrepareBsonDocument(const uint8_t* json, ssize_t len, Error* err) {
     bson_error_t mongo_err;
     auto bson = bson_new_from_json(json, len, &mongo_err);
     if (!bson) {
-        *err = TextError(std::string(DBError::kInsertError) + ": " + mongo_err.message);
+        *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
         return nullptr;
     }
 
@@ -123,9 +124,9 @@ Error MongoDBClient::InsertBsonDocument(const bson_p& document, bool ignore_dupl
     bson_error_t mongo_err;
     if (!mongoc_collection_insert_one(collection_, document.get(), NULL, NULL, &mongo_err)) {
         if (mongo_err.code == MONGOC_ERROR_DUPLICATE_KEY) {
-            return ignore_duplicates ? nullptr : TextError(DBError::kDuplicateID);
+            return ignore_duplicates ? nullptr : DBErrorTemplates::kDuplicateID.Generate();
         }
-        return TextError(std::string(DBError::kInsertError) + " - " + mongo_err.message);
+        return DBErrorTemplates::kInsertError.Generate(mongo_err.message);
     }
 
     return nullptr;
@@ -141,7 +142,7 @@ Error MongoDBClient::UpdateBsonDocument(uint64_t id, const bson_p& document, boo
     Error err = nullptr;
 
     if (!mongoc_collection_replace_one(collection_, selector, document.get(), opts, NULL, &mongo_err)) {
-        err = TextError(std::string(DBError::kInsertError) + " - " + mongo_err.message);
+        err = DBErrorTemplates::kInsertError.Generate(mongo_err.message);
     }
 
     bson_free (opts);
@@ -153,7 +154,7 @@ Error MongoDBClient::UpdateBsonDocument(uint64_t id, const bson_p& document, boo
 
 Error MongoDBClient::Insert(const FileInfo& file, bool ignore_duplicates) const {
     if (!connected_) {
-        return TextError(DBError::kNotConnected);
+        return DBErrorTemplates::kNotConnected.Generate();
     }
 
     Error err;
@@ -175,7 +176,7 @@ MongoDBClient::~MongoDBClient() {
 
 Error MongoDBClient::Upsert(uint64_t id, const uint8_t* data, uint64_t size) const {
     if (!connected_) {
-        return TextError(DBError::kNotConnected);
+        return DBErrorTemplates::kNotConnected.Generate();
     }
 
     Error err;
@@ -185,7 +186,7 @@ Error MongoDBClient::Upsert(uint64_t id, const uint8_t* data, uint64_t size) con
     }
 
     if (!BSON_APPEND_INT64(document.get(), "_id", id)) {
-        err = TextError(std::string(DBError::kInsertError) + "- cannot assign document id " );
+        return DBErrorTemplates::kInsertError.Generate("cannot assign document id");
     }
 
     return UpdateBsonDocument(id, document, true);
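Callers of Insert() can now branch on a specific template instead of parsing error strings; a minimal sketch (db and file are assumed to exist):

auto err = db->Insert(file, false);
if (err == asapo::DBErrorTemplates::kDuplicateID) {
    // a record with this _id already exists (and ignore_duplicates was false)
} else if (err == asapo::DBErrorTemplates::kJsonParseError) {
    // the FileInfo could not be converted to a valid BSON document
} else if (err != nullptr) {
    // kNotConnected, kInsertError, ...
}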
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index 7312cd5839c86669e6beb432c73643f7cebb72af..74d41e9e57c8a6ab0764d0ac281647c108f7cab8 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -58,7 +58,8 @@ Error GetLastErrorFromErrno() {
         return IOErrorTemplates::kSocketOperationUnknownAtLevel.Generate();
     case EDOM:
         return IOErrorTemplates::kSocketOperationValueOutOfBound.Generate();
-
+    case EPIPE:
+        return IOErrorTemplates::kBrokenPipe.Generate();
     default:
         std::cout << "[IOErrorsFromErrno] Unknown error code: " << errno << std::endl;
         Error err = IOErrorTemplates::kUnknownIOError.Generate();
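Callers of the system IO layer can now detect a closed peer explicitly; a minimal sketch, err being whatever Error a failed send/receive call returned:

if (err == asapo::IOErrorTemplates::kBrokenPipe) {
    // the peer closed the connection (EPIPE); reconnect before retrying the request
}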
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index 4ae3656b21a5565c6d8d32b232fc21db00d2f392..dee9462a5b32aa65bf4b0fdb79e980537fce0570 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -72,6 +72,18 @@ void ProcessAfterSend(asapo::GenericRequestHeader header, asapo::Error err) {
     mutex.unlock();
 }
 
+void ProcessAfterMetaDataSend(asapo::GenericRequestHeader header, asapo::Error err) {
+    mutex.lock();
+    iterations_remained--;
+    if (err) {
+        std::cerr << "Metadata was not successfully sent: " << err << std::endl;
+    } else {
+        std::cout << "Metadata was successfully sent" << std::endl;
+    }
+    mutex.unlock();
+    return;
+}
+
 asapo::FileData CreateMemoryBuffer(size_t size) {
     return asapo::FileData(new uint8_t[size]);
 }
@@ -79,6 +91,17 @@ asapo::FileData CreateMemoryBuffer(size_t size) {
 
 bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations) {
 
+    asapo::Error err;
+    if (iterations > 0) { // deliberately send invalid metadata, used by the negative integration tests
+        err = producer->SendMetaData("bla", &ProcessAfterMetaDataSend);
+    } else {
+        err = producer->SendMetaData("{\"dummy_meta\":\"test\"}", &ProcessAfterMetaDataSend);
+    }
+    if (err) {
+        std::cerr << "Cannot send metadata: " << err << std::endl;
+        return false;
+    }
+
     for(uint64_t i = 0; i < iterations; i++) {
         auto buffer = CreateMemoryBuffer(number_of_byte);
         asapo::EventHeader event_header{i + 1, number_of_byte, std::to_string(i + 1)};
@@ -142,7 +165,7 @@ int main (int argc, char* argv[]) {
 
     auto producer = CreateProducer(args);
 
-    iterations_remained = args.iterations;
+    iterations_remained = args.iterations + 1;
 
     high_resolution_clock::time_point start_time = high_resolution_clock::now();
 
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index 0410877de75c546f0cf13c9fd69d973d2bf56ed0..7fa75a351a8036e71ddcbae16542d88265d721b0 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -62,6 +62,14 @@ std::vector<std::thread> StartThreads(const Params& params,
 
         lock.unlock();
 
+        if (i == 0) {
+            auto meta = broker->GetBeamtimeMeta(&err);
+            if (err == nullptr) {
+                std::cout << meta << std::endl;
+            } else {
+                std::cout << "Cannot get metadata: " << err->Explain() << std::endl;
+            }
+        }
         while (true) {
             err = broker->GetNext(&fi, group_id, params.read_data ? &data : nullptr);
             if (err == nullptr) {
@@ -102,7 +110,6 @@ int ReadAllData(const Params& params, uint64_t* duration_ms, int* nerrors, int*
     *nerrors = std::accumulate(errors.begin(), errors.end(), 0);
     *nbuf = std::accumulate(nfiles_frombuf.begin(), nfiles_frombuf.end(), 0);
 
-
     high_resolution_clock::time_point t2 = high_resolution_clock::now();
     auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
     *duration_ms = duration_read.count();
diff --git a/examples/worker/getnext_broker_python/check_linux.sh b/examples/worker/getnext_broker_python/check_linux.sh
index 707c48bbc60f6bf4cf1443d357b35f041709587f..1df181c3f6eb383cd6a12b0ec79f4e671493a0dc 100644
--- a/examples/worker/getnext_broker_python/check_linux.sh
+++ b/examples/worker/getnext_broker_python/check_linux.sh
@@ -27,6 +27,8 @@ do
 	echo 'db.data.insert({"_id":'$i',"size":100,"name":"'$i'","lastchange":1,"source":"none","buf_id":0})' | mongo ${database_name}
 done
 
+echo 'db.meta.insert({"_id":0,"meta_test":"test"})' | mongo ${database_name}
+
 sleep 1
 
 export PYTHONPATH=$1:${PYTHONPATH}
@@ -35,6 +37,7 @@ python getnext.py 127.0.0.1:8400 $source_path $database_name $token_test_run $gr
 cat out
 cat out | grep '"size": 100'
 cat out | grep '"_id": 1'
+cat out | grep '"meta_test": "test"'
 
 python getnext.py 127.0.0.1:8400 $source_path $database_name $token_test_run $group_id> out
 cat out
diff --git a/examples/worker/getnext_broker_python/check_windows.bat b/examples/worker/getnext_broker_python/check_windows.bat
index 7bc8389650b19bd367e203d5c4c9d7a98324a621..a1815b4abb651669268a60c69b44b81f18b26465 100644
--- a/examples/worker/getnext_broker_python/check_windows.bat
+++ b/examples/worker/getnext_broker_python/check_windows.bat
@@ -13,12 +13,16 @@ ping 1.0.0.0 -n 10 -w 100 > nul
 
 for /l %%x in (1, 1, 3) do echo db.data.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0}) | %mongo_exe% %database_name%  || goto :error
 
+
+echo db.meta.insert({"_id":0,"meta_test":"test"}) | %mongo_exe% %database_name%  || goto :error
+
 set PYTHONPATH=%1
 
 python3 getnext.py 127.0.0.1:8400  %source_path% %database_name%  %token_test_run% %group_id% > out
 type out
 type out | findstr /c:"100" || goto :error
 type out | findstr /c:"\"_id\": 1" || goto :error
+type out | findstr /c:"\"meta_test\": \"test\"" || goto :error
 
 python3 getnext.py 127.0.0.1:8400  %source_path% %database_name%  %token_test_run% %group_id% > out
 type out
diff --git a/examples/worker/getnext_broker_python/getnext.py b/examples/worker/getnext_broker_python/getnext.py
index f124d484aa41995fa3b8e34b77d378a1a3dd409d..8648ca8ffb596462029ce14bf45030bb3b6f5f80 100644
--- a/examples/worker/getnext_broker_python/getnext.py
+++ b/examples/worker/getnext_broker_python/getnext.py
@@ -24,3 +24,10 @@ if err != None:
 else:
     print ('filename: ', meta['name'])
     print ('meta: ', json.dumps(meta, indent=4, sort_keys=True))
+
+
+beamtime_meta,err = broker.get_beamtime_meta()
+if err != None:
+    print ('error getting beamtime meta: ', err)
+else:
+    print ('beamtime meta: ', json.dumps(beamtime_meta, indent=4, sort_keys=True))
diff --git a/producer/api/src/producer_impl.cpp b/producer/api/src/producer_impl.cpp
index 5cdc8445be2b34d8ce140003f89949679875560d..fc52dd56104a98c3c165e4e761cfccf65cf5eb0c 100644
--- a/producer/api/src/producer_impl.cpp
+++ b/producer/api/src/producer_impl.cpp
@@ -106,7 +106,7 @@ Error ProducerImpl::SetBeamtimeId(std::string beamtime_id) {
 }
 
 Error ProducerImpl::SendMetaData(const std::string& metadata, RequestCallback callback) {
-    GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), beamtime_id_ + ".meta"};
+    GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), "beamtime_global.meta"};
     FileData data{new uint8_t[metadata.size()]};
     strncpy((char*)data.get(), metadata.c_str(), metadata.size());
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{beamtime_id_, std::move(request_header),
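For orientation, the producer-side call that ends up here, mirroring dummy_data_producer.cpp above (a minimal sketch; OnMetaSent and the metadata string are illustrative, producer/iostream includes omitted):

void OnMetaSent(asapo::GenericRequestHeader header, asapo::Error err) {
    if (err) {
        std::cerr << "metadata not sent: " << err << std::endl;
    }
}

// metadata is now stored server-side as beamtime_global.meta for the whole beamtime
auto err = producer->SendMetaData("{\"run_number\":1}", &OnMetaSent);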
diff --git a/producer/api/src/request_handler_tcp.cpp b/producer/api/src/request_handler_tcp.cpp
index 5c534bb423c29a0a316727ea6aad7b389a00e8b9..9f6d911f8ab03a43f8ddab1d1101261e70c4dbc9 100644
--- a/producer/api/src/request_handler_tcp.cpp
+++ b/producer/api/src/request_handler_tcp.cpp
@@ -86,7 +86,9 @@ Error RequestHandlerTcp::ReceiveResponse() {
     case kNetErrorNoError :
         return nullptr;
     default:
-        return ProducerErrorTemplates::kInternalServerError.Generate();
+        auto res_err = ProducerErrorTemplates::kInternalServerError.Generate();
+        res_err->Append(sendDataResponse.message);
+        return res_err;
     }
 }
 
@@ -174,8 +176,9 @@ Error RequestHandlerTcp::SendDataToOneOfTheReceivers(ProducerRequest* request) {
         auto err = TrySendToReceiver(request);
         if (ServerError(err))  {
             Disconnect();
-            log__->Debug("cannot send data id " + std::to_string(request->header.data_id) + " to " + receiver_uri + ": " +
-                         err->Explain());
+            log__->Warning("cannot send data, opcode: " + std::to_string(request->header.op_code) +
+                           ", id: " + std::to_string(request->header.data_id) + " to " + receiver_uri + ": " +
+                           err->Explain());
             continue;
         }
 
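With the receiver's message appended to kInternalServerError, producer callbacks now see the server-side reason (e.g. the metadata parse error) directly; a minimal callback sketch:

void OnSent(asapo::GenericRequestHeader header, asapo::Error err) {
    if (err) {
        // Explain() now also carries the message returned by the receiver
        std::cerr << "request failed: " << err->Explain() << std::endl;
    }
}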
diff --git a/producer/api/unittests/test_producer_impl.cpp b/producer/api/unittests/test_producer_impl.cpp
index 423193f74035fa9b05170a400891c110b56bd2c6..f7aacf76ef0378bd408ab7e16d35244e870e31d4 100644
--- a/producer/api/unittests/test_producer_impl.cpp
+++ b/producer/api/unittests/test_producer_impl.cpp
@@ -116,7 +116,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
 
     producer.SetBeamtimeId(expected_beamtimeid);
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferMetaData,
-                                        expected_beamtimeid, expected_id, expected_size, "beamtime_id.meta"))).WillOnce(Return(
+                                        expected_beamtimeid, expected_id, expected_size, "beamtime_global.meta"))).WillOnce(Return(
                                                     nullptr));
 
     auto err = producer.SendMetaData(expected_metadata, nullptr);
diff --git a/producer/api/unittests/test_request_handler_tcp.cpp b/producer/api/unittests/test_request_handler_tcp.cpp
index 516c7e7683c32928d3c3a56546695caf8691b81f..185f257dc890ea0417a8c6dc5060e104f44230e4 100644
--- a/producer/api/unittests/test_request_handler_tcp.cpp
+++ b/producer/api/unittests/test_request_handler_tcp.cpp
@@ -232,11 +232,11 @@ void RequestHandlerTcpTests::ExpectFailSendHeader(bool only_once) {
                                        )
                                       ));
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
@@ -260,11 +260,11 @@ void RequestHandlerTcpTests::ExpectFailSendData(bool only_once) {
                                        )
                                       ));
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
@@ -290,11 +290,11 @@ void RequestHandlerTcpTests::ExpectFailReceive(bool only_once) {
                                       ));
 
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
diff --git a/producer/event_monitor_producer/src/system_folder_watch_linux.cpp b/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
index c8f6249c8227438682413df9ba56be5e920e5085..bdfacc408165fbd7372f85fcf5d1431531a83a0c 100644
--- a/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
+++ b/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
@@ -94,6 +94,7 @@ Error SystemFolderWatch::ProcessFileEvent(const InotifyEvent& event, FilesToSend
         return err;
     }
     GetDefaultEventMonLogger()->Debug(((event.GetMask() & IN_CLOSE_WRITE) ? "file closed: " : "file moved: ") + fname);
+    // todo - check whether the filename was already added manually
     processed_filenames_[processed_filenames_counter_] = fname;
     processed_filenames_counter_++;
     if (processed_filenames_counter_ == kProcessedFilenamesBufLen) {
diff --git a/receiver/src/requests_dispatcher.cpp b/receiver/src/requests_dispatcher.cpp
index c50319f847b4efe9d13055fa2affd4b44f7304e4..bb44cb5ebdbbe47239b02f126333be29cafe5a39 100644
--- a/receiver/src/requests_dispatcher.cpp
+++ b/receiver/src/requests_dispatcher.cpp
@@ -2,7 +2,7 @@
 #include "request.h"
 #include "io/io_factory.h"
 #include "receiver_logger.h"
-
+#include "database/db_error.h"
 namespace asapo {
 
 RequestsDispatcher::RequestsDispatcher(SocketDescriptor socket_fd, std::string address,
@@ -20,6 +20,8 @@ NetworkErrorCode GetNetworkCodeFromError(const Error& err) {
             return NetworkErrorCode::kNetErrorFileIdAlreadyInUse;
         } else if (err == ReceiverErrorTemplates::kAuthorizationFailure) {
             return NetworkErrorCode::kNetAuthorizationError;
+        } else if (err == DBErrorTemplates::kJsonParseError) {
+            return NetworkErrorCode::kNetErrorErrorInMetadata;
         } else {
             return NetworkErrorCode::kNetErrorInternalServerError;
         }
diff --git a/receiver/unittests/test_requests_dispatcher.cpp b/receiver/unittests/test_requests_dispatcher.cpp
index 47da03eb533095f59fe708cd4466405a693f4dcc..3413de1b499fced9761cfccb9ae774b91df2d0dc 100644
--- a/receiver/unittests/test_requests_dispatcher.cpp
+++ b/receiver/unittests/test_requests_dispatcher.cpp
@@ -10,6 +10,7 @@
 #include "mock_receiver_config.h"
 
 #include "../src/requests_dispatcher.h"
+#include "database/db_error.h"
 
 
 using ::testing::Test;
@@ -262,5 +263,17 @@ TEST_F(RequestsDispatcherTests, ProcessRequestReturnsAuthorizationFailure) {
 }
 
 
+TEST_F(RequestsDispatcherTests, ProcessRequestReturnsMetaDataFailure) {
+    MockHandleRequest(true, asapo::DBErrorTemplates::kJsonParseError.Generate());
+    MockSendResponse(&response, false);
+
+    auto err = dispatcher->ProcessRequest(request);
+
+    ASSERT_THAT(err, Eq(asapo::DBErrorTemplates::kJsonParseError));
+    ASSERT_THAT(response.error_code, Eq(asapo::kNetErrorErrorInMetadata));
+    ASSERT_THAT(std::string(response.message), HasSubstr("parse"));
+}
+
+
 
 }
diff --git a/tests/automatic/broker/get_meta/check_windows.bat b/tests/automatic/broker/get_meta/check_windows.bat
index 8e111ea82520404ddb13e186748953f9af671401..9fde26734668cd6fd1bb82ac12b20d6527f179b1 100644
--- a/tests/automatic/broker/get_meta/check_windows.bat
+++ b/tests/automatic/broker/get_meta/check_windows.bat
@@ -13,8 +13,8 @@ start /B "" "%full_name%" -config settings.json
 
 ping 1.0.0.0 -n 1 -w 100 > nul
 
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/1/meta?token=%token% --stderr - | findstr /c:"not found"  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta/0?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta/1?token=%token% --stderr - | findstr /c:"not found"  || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/full_chain/CMakeLists.txt b/tests/automatic/full_chain/CMakeLists.txt
index 0b813e3898f37c65a48e2b9063b788fbde3a954f..35c1e00165fbc6880dd9988907e659978ce95b04 100644
--- a/tests/automatic/full_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_subdirectory(simple_chain)
+add_subdirectory(simple_chain_metadata)
 add_subdirectory(two_beamlines)
 add_subdirectory(simple_chain_filegen)
 add_subdirectory(simple_chain_filegen_readdata_cache)
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index 47b94d971cbb3d55a0b312a13451a9403d52a968..c410fcf2869157b2f3657e5231f9acda572985e5 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -22,6 +22,7 @@ Cleanup() {
     nomad stop discovery
     nomad stop broker
     nomad stop authorizer
+    rm -rf out
 #    kill $producerid
     echo "db.dropDatabase()" | mongo ${beamtime_id}
     influx -execute "drop database ${monitor_database_name}"
@@ -44,4 +45,7 @@ $1 localhost:8400 ${beamtime_id} 100 1000 4 0 100 &
 #producerid=`echo $!`
 
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 | grep "Processed 1000 file(s)"
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
+cat out
+cat out   | grep "Processed 1000 file(s)"
+cat out | grep "Cannot get metadata"
\ No newline at end of file
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
index fffc1e3bfff95818d53669dd7ee124af47540454..e79cb8c07259e9500998f4e15bd03d56a32ffba4 100644
--- a/tests/automatic/full_chain/simple_chain/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -26,9 +26,10 @@ start /B "" "%1" %proxy_address% %beamtime_id% 100 1000 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM worker
-"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 1000  1 > out.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 5000  1 > out.txt
 type out.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out.txt || goto :error
+findstr /i /l /c:"Cannot get metadata"  out.txt || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ac8f929ab76fb3a26be3e2acbcccc5b6a7cb7b6
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(TARGET_NAME full_chain_simple_chain_meta)
+
+################################
+# Testing
+################################
+prepare_asapo()
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
new file mode 100644
index 0000000000000000000000000000000000000000..035c5408871ac627ce97283723ad992043262df6
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap Cleanup EXIT
+
+beamtime_id=asapo_test
+token=`$3 token -secret broker_secret.key $beamtime_id`
+
+monitor_database_name=db_test
+proxy_address=127.0.0.1:8400
+
+beamline=test
+receiver_root_folder=/tmp/asapo/receiver/files
+receiver_folder=${receiver_root_folder}/${beamline}/${beamtime_id}
+
+Cleanup() {
+    echo cleanup
+    rm -rf ${receiver_root_folder}
+    nomad stop nginx
+    nomad stop receiver
+    nomad stop discovery
+    nomad stop broker
+    nomad stop authorizer
+    rm -rf out
+    echo "db.dropDatabase()" | mongo ${beamtime_id}
+    influx -execute "drop database ${monitor_database_name}"
+}
+
+influx -execute "create database ${monitor_database_name}"
+echo "db.${beamtime_id}.insert({dummy:1})" | mongo ${beamtime_id}
+
+nomad run nginx.nmd
+nomad run authorizer.nmd
+nomad run receiver.nmd
+nomad run discovery.nmd
+nomad run broker.nmd
+
+sleep 1
+
+#producer
+mkdir -p ${receiver_folder}
+$1 localhost:8400 ${beamtime_id} 100 0 1 0 100
+
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 0 1 > out
+cat out
+cat out | grep "dummy_meta"
\ No newline at end of file
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
new file mode 100644
index 0000000000000000000000000000000000000000..58ae6e83d6ba5e02866d04fa54ccddf32473520a
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
@@ -0,0 +1,49 @@
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+SET beamtime_id=asapo_test
+SET beamline=test
+SET receiver_root_folder=c:\tmp\asapo\receiver\files
+SET receiver_folder="%receiver_root_folder%\%beamline%\%beamtime_id%"
+
+
+"%3" token -secret broker_secret.key %beamtime_id% > token
+set /P token=< token
+
+set proxy_address="127.0.0.1:8400"
+
+echo db.%beamtime_id%.insert({dummy:1}) | %mongo_exe% %beamtime_id%
+
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run authorizer.nmd
+c:\opt\consul\nomad run discovery.nmd
+c:\opt\consul\nomad run broker.nmd
+c:\opt\consul\nomad run nginx.nmd
+
+ping 1.0.0.0 -n 10 -w 100 > nul
+
+REM producer
+mkdir %receiver_folder%
+"%1" %proxy_address% %beamtime_id% 100 0 1 0 100
+
+REM worker
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 0  1 > out.txt
+type out.txt
+findstr /i /l /c:"dummy_meta"  out.txt || goto :error
+
+
+goto :clean
+
+:error
+call :clean
+exit /b 1
+
+:clean
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop discovery
+c:\opt\consul\nomad stop broker
+c:\opt\consul\nomad stop authorizer
+c:\opt\consul\nomad stop nginx
+rmdir /S /Q %receiver_root_folder%
+del /f token
+echo db.dropDatabase() | %mongo_exe% %beamtime_id%
+
+
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index c688445e93c279a292921b82366d700b91971511..5c1455d75b9624b325dfe549976aeeac3dd6a938 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -53,5 +53,5 @@ $1 localhost:8400 ${beamtime_id2} 100 900 4 0 100 &
 #producerid=`echo $!`
 
 #workers
-$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 2000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
-$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 2000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
+$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 10000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
+$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 10000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
diff --git a/tests/automatic/full_chain/two_beamlines/check_windows.bat b/tests/automatic/full_chain/two_beamlines/check_windows.bat
index 00b6c1a89a67f5677381a737d39509c5b603e5a3..4e27899de77104b10ff346e2bab3c19711738d68 100644
--- a/tests/automatic/full_chain/two_beamlines/check_windows.bat
+++ b/tests/automatic/full_chain/two_beamlines/check_windows.bat
@@ -35,11 +35,11 @@ start /B "" "%1" %proxy_address% %beamtime_id2% 100 900 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM worker
-"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 2000  0 > out1.txt
+"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 10000  0 > out1.txt
 type out1.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out1.txt || goto :error
 
-"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 2000  0 > out2.txt
+"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 10000  0 > out2.txt
 type out2.txt
 findstr /i /l /c:"Processed 900 file(s)"  out2.txt || goto :error
 
diff --git a/tests/automatic/mongo_db/connect/connect_mongodb.cpp b/tests/automatic/mongo_db/connect/connect_mongodb.cpp
index e16f4f5c85d8ada24d96f51c569db745f3a79b3b..eb928e05fd9a49b39385614c2037c47468c58a5b 100644
--- a/tests/automatic/mongo_db/connect/connect_mongodb.cpp
+++ b/tests/automatic/mongo_db/connect/connect_mongodb.cpp
@@ -3,7 +3,7 @@
 
 #include "../../../common/cpp/src/database/mongodb_client.h"
 #include "testing.h"
-
+#include "database/db_error.h"
 
 using asapo::M_AssertContains;
 using asapo::Error;
@@ -44,7 +44,7 @@ int main(int argc, char* argv[]) {
 
     if (err == nullptr) {
         err = db.Connect(args.address, args.database_name, args.collection_name);
-        Assert(err, asapo::DBError::kAlreadyConnected);
+        Assert(err, asapo::DBErrorTemplates::kAlreadyConnected.Generate()->Explain());
     }
     return 0;
 }
diff --git a/tests/automatic/producer/beamtime_metadata/check_linux.sh b/tests/automatic/producer/beamtime_metadata/check_linux.sh
index 2a30b1139fc8136b5c0007a6e2a92df75f85ce28..fcbe0520f0017fcd284b235a0405cf0c7ed600b2 100644
--- a/tests/automatic/producer/beamtime_metadata/check_linux.sh
+++ b/tests/automatic/producer/beamtime_metadata/check_linux.sh
@@ -12,5 +12,5 @@ mkdir files
 
 $@ files beamtime_id 1
 
-cat files/beamtime_id.meta | grep hello
-ls -ln files/beamtime_id.meta | awk '{ print $5 }'| grep 5
+cat files/beamtime_global.meta | grep hello
+ls -ln files/beamtime_global.meta | awk '{ print $5 }'| grep 5
diff --git a/tests/automatic/producer/beamtime_metadata/check_windows.bat b/tests/automatic/producer/beamtime_metadata/check_windows.bat
index 13fd63f99b7148dc72764252b3e8fcf69f90d910..fce4c0293e46833b3b081ad32088b4258a6f4687 100644
--- a/tests/automatic/producer/beamtime_metadata/check_windows.bat
+++ b/tests/automatic/producer/beamtime_metadata/check_windows.bat
@@ -4,10 +4,10 @@ mkdir %folder%
 
 "%1" %folder% beamtime_id 1
 
-FOR /F "usebackq" %%A IN ('%folder%\beamtime_id.meta') DO set size=%%~zA
+FOR /F "usebackq" %%A IN ('%folder%\beamtime_global.meta') DO set size=%%~zA
 if %size% NEQ 5 goto :error
 
-type %folder%\beamtime_id.meta | findstr /c:"hello"  || goto :error
+type %folder%\beamtime_global.meta | findstr /c:"hello"  || goto :error
 
 goto :clean
 
diff --git a/tests/automatic/producer/file_monitor_producer/check_linux.sh b/tests/automatic/producer/file_monitor_producer/check_linux.sh
index aed6e502a59123e63c622b932e12449b829eec90..8da14669bc2328a7beb3718f161ccebbb4295359 100644
--- a/tests/automatic/producer/file_monitor_producer/check_linux.sh
+++ b/tests/automatic/producer/file_monitor_producer/check_linux.sh
@@ -24,6 +24,8 @@ sleep 0.5
 echo test1 > /tmp/asapo/test_in/test1/test1.dat
 echo test2 > /tmp/asapo/test_in/test2/test2.tmp
 mkdir -p /tmp/asapo/test_in/test2/subdir
+sleep 0.1
+
 echo test3 > /tmp/asapo/test_in/test2/subdir/test3.dat
 
 sleep 0.1
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index e357471e3df091d19fe1bc2142f114d1c0829a23..6e8bad6f3b658fa2c60d869d163ee201146c6b97 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -35,5 +35,5 @@ $1 localhost:8400 ${beamtime_id} 100 112 4  0 100
 
 sleep 2
 
-# should be 116 requests (112 data transfers and 4 authorizations)
-influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json  | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 116
+# should be 117 requests (112 data transfers + 5 authorizations (4 + 1 after reconnection due to wrong meta))
+influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json  | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 117
diff --git a/worker/api/cpp/include/worker/data_broker.h b/worker/api/cpp/include/worker/data_broker.h
index ea4123b244d17dfc2250e4ae3dd0dd9a4985f2d6..fbd5da3ebc896000f1a5b7c700fae84c932cbd82 100644
--- a/worker/api/cpp/include/worker/data_broker.h
+++ b/worker/api/cpp/include/worker/data_broker.h
@@ -56,6 +56,12 @@ class DataBroker {
     */
     virtual std::string GenerateNewGroupId(Error* err) = 0;
 
+    //! Get Beamtime metadata.
+    /*!
+      \param err - set to nullptr if the operation succeeded, an error otherwise.
+      \return beamtime metadata.
+    */
+    virtual std::string GetBeamtimeMeta(Error* err) = 0;
 
     //! Receive next available image.
     /*!
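Usage matches the getnext_broker example earlier in this patch (a minimal sketch, broker being a connected DataBroker):

asapo::Error err;
auto meta = broker->GetBeamtimeMeta(&err);
if (err == nullptr) {
    std::cout << meta << std::endl;
} else {
    std::cout << "Cannot get metadata: " << err->Explain() << std::endl;
}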
diff --git a/worker/api/cpp/src/folder_data_broker.cpp b/worker/api/cpp/src/folder_data_broker.cpp
index 845c0d4cdb2e7121de26f3b804edc7f8556a8de7..f5eab447d13615c36fb104fa9f40564b207233b3 100644
--- a/worker/api/cpp/src/folder_data_broker.cpp
+++ b/worker/api/cpp/src/folder_data_broker.cpp
@@ -1,6 +1,7 @@
 #include "folder_data_broker.h"
 
 #include "io/io_factory.h"
+#include "preprocessor/definitions.h"
 
 namespace asapo {
 
@@ -93,4 +94,8 @@ Error FolderDataBroker::GetById(uint64_t id, FileInfo* info, std::string group_i
     return GetFileByIndex(id - 1 , info, data);
 }
 
+std::string FolderDataBroker::GetBeamtimeMeta(Error* err) {
+    return io__->ReadFileToString(base_path_ + kPathSeparator + "beamtime_global.meta", err);
+}
+
 }
diff --git a/worker/api/cpp/src/folder_data_broker.h b/worker/api/cpp/src/folder_data_broker.h
index 4c4ec17705ca5c4eb6f4741b146c875ecd462d28..54319bbccb208a94ecf797dd0cd42c3b85674fec 100644
--- a/worker/api/cpp/src/folder_data_broker.h
+++ b/worker/api/cpp/src/folder_data_broker.h
@@ -20,6 +20,7 @@ class FolderDataBroker final : public asapo::DataBroker {
     void SetTimeout(uint64_t timeout_ms) override {}; // to timeout in this case
     std::string GenerateNewGroupId(Error* err)
     override; // return "0" always and no error - no group ids for folder datra broker
+    std::string GetBeamtimeMeta(Error* err) override;
     uint64_t GetNDataSets(Error* err) override;
     Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
     std::unique_ptr<asapo::IO> io__; // modified in testings to mock system calls,otherwise do not touch
diff --git a/worker/api/cpp/src/server_data_broker.cpp b/worker/api/cpp/src/server_data_broker.cpp
index f5b005f227bf91315a16fcb9d26c9e98f9fcf7d2..bf939ba5e02bb8a5e4cb80ab0675766475a94186 100644
--- a/worker/api/cpp/src/server_data_broker.cpp
+++ b/worker/api/cpp/src/server_data_broker.cpp
@@ -164,8 +164,9 @@ std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) {
         return "next";
     case GetImageServerOperation::GetLast:
         return "last";
+    default:
+        return "last";
     }
-    return "";
 }
 
 Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id,
@@ -281,4 +282,9 @@ Error ServerDataBroker::GetFileInfoFromServerById(uint64_t id, FileInfo* info, s
     return nullptr;
 }
 
+std::string ServerDataBroker::GetBeamtimeMeta(Error* err) {
+    std::string request_string =  "database/" + source_name_ + "/0/meta/0";
+    return BrokerRequestWithTimeout(request_string, "", false, err);
+}
+
 }
diff --git a/worker/api/cpp/src/server_data_broker.h b/worker/api/cpp/src/server_data_broker.h
index d6ed5f6d8cfe76de995d160cd6a1ad58b32e3283..292477a4bd9f65a85822a18ec6abbf53254907b3 100644
--- a/worker/api/cpp/src/server_data_broker.h
+++ b/worker/api/cpp/src/server_data_broker.h
@@ -24,6 +24,7 @@ class ServerDataBroker final : public asapo::DataBroker {
     Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
     Error GetLast(FileInfo* info, std::string group_id, FileData* data) override;
     std::string GenerateNewGroupId(Error* err) override;
+    std::string GetBeamtimeMeta(Error* err) override;
     uint64_t GetNDataSets(Error* err) override;
     Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
     void SetTimeout(uint64_t timeout_ms) override;
diff --git a/worker/api/cpp/unittests/test_folder_broker.cpp b/worker/api/cpp/unittests/test_folder_broker.cpp
index 56b55a946ba7295af4bb6b6bd2fecd8974418b66..322089e9618b4da3058d8e561ddcc3da53396c7e 100644
--- a/worker/api/cpp/unittests/test_folder_broker.cpp
+++ b/worker/api/cpp/unittests/test_folder_broker.cpp
@@ -38,10 +38,6 @@ TEST(FolderDataBroker, SetCorrectIO) {
 class FakeIO: public asapo::MockIO {
   public:
 
-    virtual std::string ReadFileToString(const std::string& fname, Error* err) const noexcept override {
-        return "OK";
-    }
-
     FileInfos FilesInFolder(const std::string& folder, Error* err) const override {
         *err = nullptr;
         FileInfos file_infos;
@@ -253,8 +249,9 @@ class GetDataFromFileTests : public Test {
     OpenFileMock mock;
     FileInfo fi;
     FileData data;
+    std::string expected_base_path = "/path/to/file";
     void SetUp() override {
-        data_broker = std::unique_ptr<FolderDataBroker> {new FolderDataBroker("/path/to/file")};
+        data_broker = std::unique_ptr<FolderDataBroker> {new FolderDataBroker(expected_base_path)};
         data_broker->io__ = std::unique_ptr<IO> {&mock};
         data_broker->Connect();
     }
@@ -326,5 +323,27 @@ TEST_F(FolderDataBrokerTests, GetByIdReturnsError) {
     ASSERT_THAT(err2, Ne(nullptr));
 }
 
+TEST_F(GetDataFromFileTests, GetMetaDataReturnsError) {
+    EXPECT_CALL(mock, ReadFileToString_t(_, _)).
+    WillOnce(DoAll(testing::SetArgPointee<1>(asapo::IOErrorTemplates::kReadError.Generate().release()),
+                   testing::Return("")));
+
+    Error err;
+    auto meta = data_broker->GetBeamtimeMeta(&err);
+    ASSERT_THAT(err, Eq(asapo::IOErrorTemplates::kReadError));
+}
+
+TEST_F(GetDataFromFileTests, GetMetaDataReturnsOK) {
+    EXPECT_CALL(mock, ReadFileToString_t(expected_base_path + asapo::kPathSeparator + "beamtime_global.meta", _)).
+    WillOnce(DoAll(testing::SetArgPointee<1>(nullptr),
+                   testing::Return("OK")));
+
+    Error err;
+    auto meta = data_broker->GetBeamtimeMeta(&err);
+    ASSERT_THAT(meta, Eq("OK"));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+
 
 }
diff --git a/worker/api/cpp/unittests/test_server_broker.cpp b/worker/api/cpp/unittests/test_server_broker.cpp
index e63bf56057568fe2e2722c7162698f480edb908a..a3f921cc25ed250766bb52e7e62c988ad7d86ea7 100644
--- a/worker/api/cpp/unittests/test_server_broker.cpp
+++ b/worker/api/cpp/unittests/test_server_broker.cpp
@@ -62,6 +62,7 @@ class ServerDataBrokerTests : public Test {
     std::string expected_filename = "filename";
     std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename;
     std::string expected_group_id = "groupid";
+    std::string expected_metadata = "{\"meta\":1}";
     uint64_t expected_dataset_id = 1;
     static const uint64_t expected_buf_id = 123;
     void SetUp() override {
@@ -471,24 +472,23 @@ TEST_F(ServerDataBrokerTests, GetByIdUsesCorrectUri) {
 
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdReturnsNoData) {
+TEST_F(ServerDataBrokerTests, GetMetaDataOK) {
     MockGetBrokerUri();
     data_broker->SetTimeout(100);
-    auto to_send = CreateFI();
-    auto json = to_send.Json();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_group_id
-                                        + "/" + std::to_string(
-                                            expected_dataset_id) + "?token="
-                                        + expected_token + "&reset=true", _,
+
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/0/meta/0?token="
+                                        + expected_token, _,
                                         _)).WillOnce(DoAll(
-                                                SetArgPointee<1>(HttpCode::Conflict),
+                                                SetArgPointee<1>(HttpCode::OK),
                                                 SetArgPointee<2>(nullptr),
-                                                Return("{\"id\":1}")));
+                                                Return(expected_metadata)));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr);
+    asapo::Error err;
+    auto res = data_broker->GetBeamtimeMeta(&err);
 
-    ASSERT_THAT(err->GetErrorType(), Eq(asapo::ErrorType::kEndOfFile));
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(res, Eq(expected_metadata));
 
 }
 
diff --git a/worker/api/python/asapo_worker.pxd b/worker/api/python/asapo_worker.pxd
index 05e8d826b874fc2c0e3cf64da38211e673840dc2..0cd548ea26769f1b699895c2cc94fb037c220e91 100644
--- a/worker/api/python/asapo_worker.pxd
+++ b/worker/api/python/asapo_worker.pxd
@@ -34,6 +34,8 @@ cdef extern from "asapo_worker.h" namespace "asapo":
         uint64_t GetNDataSets(Error* err)
         Error ResetCounter(string group_id)
         string GenerateNewGroupId(Error* err)
+        string GetBeamtimeMeta(Error* err)
+
 
 cdef extern from "asapo_worker.h" namespace "asapo":
     cdef cppclass DataBrokerFactory:
diff --git a/worker/api/python/asapo_worker.pyx.in b/worker/api/python/asapo_worker.pyx.in
index 38bc41ec9a491e314605f02881287690cb3b9dfa..6c8adf0202dc2232328557e4461b5e9bd2fea194 100644
--- a/worker/api/python/asapo_worker.pyx.in
+++ b/worker/api/python/asapo_worker.pyx.in
@@ -82,6 +82,17 @@ cdef class PyDataBroker:
             return None, err_str
         else:
             return _str(group_id), None
+    def get_beamtime_meta(self):
+        cdef Error err
+        cdef string meta_str
+        meta_str = self.c_broker.GetBeamtimeMeta(&err)
+        err_str = _str(GetErrorString(&err))
+        if err_str.strip():
+            return None, err_str
+        else:
+            meta = json.loads(_str(meta_str))
+            del meta['_id']
+            return meta, None
 
 cdef class PyDataBrokerFactory:
     cdef DataBrokerFactory c_factory
diff --git a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
index a8d3bb863471b983a9bef1fb4f6242e362d1d6c1..2d38106ad160e8b98b8be92be567c219faf690ca 100644
--- a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
+++ b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
@@ -5,6 +5,8 @@
 #include "io/io.h"
 #include "../../../../common/cpp/src/system_io/system_io.h"
 
+#include "database/db_error.h"
+
 
 #include "database/database.h"
 
@@ -12,6 +14,7 @@
 #include "unittests/MockDatabase.h"
 
 #include "../src/folder_db_importer.h"
+#include "database/db_error.h"
 
 #include "unittests/MockIO.h"
 
@@ -85,7 +88,7 @@ class MockDatabaseFactory : public DatabaseFactory {
 
 class FakeDatabaseFactory : public DatabaseFactory {
     std::unique_ptr<Database> Create(Error* err) const noexcept override {
-        *err = asapo::TextError(asapo::DBError::kMemoryError);
+        *err = asapo::ErrorTemplates::kMemoryAllocationError.Generate();
         return {};
     }
 };
@@ -129,11 +132,9 @@ class FolderDBConverterTests : public Test {
     }
 };
 
-
-
 TEST_F(FolderDBConverterTests, ErrorWhenCannotConnect) {
     EXPECT_CALL(*(mock_dbf->db[0]), Connect_t(uri, db_name, kDBDataCollectionName)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
 
     auto error = converter.Convert(uri, folder, db_name);
     ASSERT_THAT(error, Ne(nullptr));
@@ -142,11 +143,11 @@ TEST_F(FolderDBConverterTests, ErrorWhenCannotConnect) {
 TEST_F(FolderDBConverterTests, ErrorWhenCannotCreateDbParallel) {
     int nparallel = 3;
     EXPECT_CALL(*(mock_dbf->db[0]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
     EXPECT_CALL(*(mock_dbf->db[1]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
     EXPECT_CALL(*(mock_dbf->db[2]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
 
     converter.SetNParallelTasks(nparallel);
     auto error = converter.Convert(uri, folder, db_name);
@@ -184,7 +185,7 @@ TEST_F(FolderDBConverterTests, PassesIgnoreDuplicates) {
 TEST_F(FolderDBConverterTests, ErrorWhenCannotImportFileListToDb) {
 
     EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(_, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kInsertError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kInsertError.Generate().release()));
 
     auto error = converter.Convert(uri, folder, db_name);
     ASSERT_THAT(error, Ne(nullptr));