From acf110b0b56936343c54aaffb7a951ae259795ec Mon Sep 17 00:00:00 2001
From: Sergey Yakubov <sergey.yakubov@desy.de>
Date: Tue, 14 May 2019 16:35:04 +0200
Subject: [PATCH] bug fixes, integration tests for metadata

---
 common/cpp/include/common/error.h             |  1 +
 common/cpp/include/common/io_error.h          |  8 ++-
 common/cpp/include/database/database.h        | 12 ----
 common/cpp/include/database/db_error.h        | 68 +++++++++++++++++++
 common/cpp/include/unittests/MockDatabase.h   |  6 +-
 common/cpp/src/database/database.cpp          |  2 +-
 common/cpp/src/database/mongodb_client.cpp    | 23 ++++---
 .../cpp/src/system_io/system_io_linux_mac.cpp |  3 +-
 .../dummy_data_producer.cpp                   | 25 ++++++-
 .../worker/getnext_broker/getnext_broker.cpp  |  9 ++-
 .../getnext_broker_python/check_linux.sh      |  3 +
 .../getnext_broker_python/check_windows.bat   |  4 ++
 .../worker/getnext_broker_python/getnext.py   |  7 ++
 producer/api/src/producer_impl.cpp            |  2 +-
 producer/api/src/request_handler_tcp.cpp      |  9 ++-
 producer/api/unittests/test_producer_impl.cpp |  2 +-
 .../unittests/test_request_handler_tcp.cpp    | 30 ++++----
 .../src/system_folder_watch_linux.cpp         |  1 +
 receiver/src/requests_dispatcher.cpp          |  4 +-
 .../unittests/test_requests_dispatcher.cpp    | 13 ++++
 .../broker/get_meta/check_windows.bat         |  4 +-
 tests/automatic/full_chain/CMakeLists.txt     |  1 +
 .../full_chain/simple_chain/check_linux.sh    |  6 +-
 .../full_chain/simple_chain/check_windows.bat |  3 +-
 .../simple_chain_metadata/CMakeLists.txt      |  7 ++
 .../simple_chain_metadata/check_linux.sh      | 47 +++++++++++++
 .../simple_chain_metadata/check_windows.bat   | 49 +++++++++++++
 .../full_chain/two_beamlines/check_linux.sh   |  4 +-
 .../two_beamlines/check_windows.bat           |  4 +-
 .../mongo_db/connect/connect_mongodb.cpp      |  4 +-
 .../producer/beamtime_metadata/check_linux.sh |  4 +-
 .../beamtime_metadata/check_windows.bat       |  4 +-
 .../file_monitor_producer/check_linux.sh      |  2 +
 .../check_monitoring/check_linux.sh           |  4 +-
 worker/api/cpp/include/worker/data_broker.h   |  6 ++
 worker/api/cpp/src/folder_data_broker.cpp     |  5 ++
 worker/api/cpp/src/folder_data_broker.h       |  1 +
 worker/api/cpp/src/server_data_broker.cpp     |  8 ++-
 worker/api/cpp/src/server_data_broker.h       |  1 +
 .../api/cpp/unittests/test_folder_broker.cpp  | 29 ++++++--
 .../api/cpp/unittests/test_server_broker.cpp  | 22 +++---
 worker/api/python/asapo_worker.pxd            |  2 +
 worker/api/python/asapo_worker.pyx.in         | 11 +++
 .../unittests/test_folder_to_db.cpp           | 17 ++---
 44 files changed, 384 insertions(+), 93 deletions(-)
 create mode 100644 common/cpp/include/database/db_error.h
 create mode 100644 tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
 create mode 100644 tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
 create mode 100644 tests/automatic/full_chain/simple_chain_metadata/check_windows.bat

diff --git a/common/cpp/include/common/error.h b/common/cpp/include/common/error.h
index 12488250f..0efb36059 100644
--- a/common/cpp/include/common/error.h
+++ b/common/cpp/include/common/error.h
@@ -13,6 +13,7 @@ enum class ErrorType {
     kAsapoError,
     kHttpError,
     kIOError,
+    kDBError,
     kReceiverError,
     kProducerError,
 
diff --git a/common/cpp/include/common/io_error.h b/common/cpp/include/common/io_error.h
index a04af8020..bed8165b6 100644
--- a/common/cpp/include/common/io_error.h
+++ b/common/cpp/include/common/io_error.h
@@ -26,7 +26,8 @@ enum class IOErrorType {
     kUnableToResolveHostname,
     kSocketOperationUnknownAtLevel,
     kSocketOperationValueOutOfBound,
-    kAddressNotValid
+    kAddressNotValid,
+    kBrokenPipe
 
 };
 
@@ -98,6 +99,11 @@ auto const kAddressNotValid =  IOErrorTemplate {
     "Address not valid", IOErrorType::kAddressNotValid
 };
 
+auto const kBrokenPipe =  IOErrorTemplate {
+    "Broken pipe/connection", IOErrorType::kBrokenPipe
+};
+
+
 }
 
 }
diff --git a/common/cpp/include/database/database.h b/common/cpp/include/database/database.h
index 2bbd8ed8e..129a5e3a6 100644
--- a/common/cpp/include/database/database.h
+++ b/common/cpp/include/database/database.h
@@ -8,18 +8,6 @@
 
 namespace asapo {
 
-namespace DBError {
-auto const KUnknownError = "Inknown error";
-auto const kConnectionError = "Connection error";
-auto const kInsertError = "Insert error";
-auto const kDuplicateID = "Duplicate ID";
-auto const kAlreadyConnected = "Already connected";
-auto const kNotConnected = "Not connected";
-auto const kBadAddress = "Bad address";
-auto const kMemoryError = "Memory error";
-
-}
-
 constexpr char kDBDataCollectionName[] = "data";
 constexpr char kDBMetaCollectionName[] = "meta";
 
diff --git a/common/cpp/include/database/db_error.h b/common/cpp/include/database/db_error.h
new file mode 100644
index 000000000..c36e52458
--- /dev/null
+++ b/common/cpp/include/database/db_error.h
@@ -0,0 +1,68 @@
+#ifndef ASAPO_SYSTEM__DB_ERROR_H
+#define ASAPO_SYSTEM__DB_ERROR_H
+
+#include "common/error.h"
+
+namespace asapo {
+
+
+enum class DBErrorType {
+    kJsonParseError,
+    kUnknownError,
+    kConnectionError,
+    kNotConnected,
+    kInsertError,
+    kDuplicateID,
+    kAlreadyConnected,
+    kBadAddress,
+    kMemoryError
+};
+
+using DBError = ServiceError<DBErrorType, ErrorType::kDBError>;
+using DBErrorTemplate = ServiceErrorTemplate<DBErrorType, ErrorType::kDBError>;
+
+namespace DBErrorTemplates {
+
+auto const kNotConnected = DBErrorTemplate {
+    "Not connected", DBErrorType::kNotConnected
+};
+
+
+auto const kDBError = DBErrorTemplate {
+    "Unknown Error", DBErrorType::kUnknownError
+};
+
+auto const kConnectionError = DBErrorTemplate {
+    "Connection error", DBErrorType::kConnectionError
+};
+
+auto const kInsertError = DBErrorTemplate {
+    "Insert error", DBErrorType::kInsertError
+};
+
+auto const kJsonParseError = DBErrorTemplate {
+    "JSON parse error", DBErrorType::kJsonParseError
+};
+
+auto const kDuplicateID = DBErrorTemplate {
+    "Duplicate ID", DBErrorType::kDuplicateID
+};
+
+auto const kAlreadyConnected = DBErrorTemplate {
+    "Not connected", DBErrorType::kAlreadyConnected
+};
+
+auto const kBadAddress = DBErrorTemplate {
+    "Bad address", DBErrorType::kBadAddress
+};
+
+auto const kMemoryError = DBErrorTemplate {
+    "Memory error", DBErrorType::kMemoryError
+};
+
+
+}
+
+}
+
+#endif //ASAPO_SYSTEM__DB_ERROR_H
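The templates above are used in two ways throughout this patch: Generate(), optionally with an extra message, produces an asapo::Error, and a template can be compared directly against an Error to branch on the error kind. A minimal sketch of that pattern, assuming the common headers are on the include path; ParseMeta is a hypothetical helper, not part of the codebase:

#include <iostream>
#include "database/db_error.h"

// Hypothetical helper: fails the way mongodb_client.cpp does when
// bson_new_from_json rejects the metadata string.
asapo::Error ParseMeta(bool ok) {
    if (!ok) {
        return asapo::DBErrorTemplates::kJsonParseError.Generate("unexpected token");
    }
    return nullptr;
}

int main() {
    auto err = ParseMeta(false);
    if (err == asapo::DBErrorTemplates::kJsonParseError) {  // compare an Error against a template
        std::cout << err->Explain() << std::endl;
    }
    return 0;
}
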
diff --git a/common/cpp/include/unittests/MockDatabase.h b/common/cpp/include/unittests/MockDatabase.h
index de44724a0..9216e8c46 100644
--- a/common/cpp/include/unittests/MockDatabase.h
+++ b/common/cpp/include/unittests/MockDatabase.h
@@ -20,14 +20,14 @@ class MockDatabase : public Database {
         return Error{Insert_t(file, ignore_duplicates)};
     }
 
-    MOCK_METHOD3(Connect_t, SimpleError * (const std::string&, const std::string&, const std::string&));
-    MOCK_CONST_METHOD2(Insert_t, SimpleError * (const FileInfo&, bool));
+    MOCK_METHOD3(Connect_t, ErrorInterface * (const std::string&, const std::string&, const std::string&));
+    MOCK_CONST_METHOD2(Insert_t, ErrorInterface * (const FileInfo&, bool));
 
     Error Upsert(uint64_t id, const uint8_t* data, uint64_t size) const override {
         return Error{Upsert_t(id, data, size)};
 
     }
-    MOCK_CONST_METHOD3(Upsert_t, SimpleError * (uint64_t id, const uint8_t* data, uint64_t size));
+    MOCK_CONST_METHOD3(Upsert_t, ErrorInterface * (uint64_t id, const uint8_t* data, uint64_t size));
 
 
 
diff --git a/common/cpp/src/database/database.cpp b/common/cpp/src/database/database.cpp
index c207779a5..b4397eebf 100644
--- a/common/cpp/src/database/database.cpp
+++ b/common/cpp/src/database/database.cpp
@@ -9,7 +9,7 @@ std::unique_ptr<Database> DatabaseFactory::Create(Error* err) const noexcept {
         p.reset(new MongoDBClient());
         *err = nullptr;
     } catch (...) {
-        *err = TextError(DBError::kMemoryError);
+        *err = ErrorTemplates::kMemoryAllocationError.Generate();
     }
     return p;
 };
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index 5d398425c..4b65b5c63 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -1,4 +1,5 @@
 #include "mongodb_client.h"
+#include "database/db_error.h"
 
 namespace asapo {
 
@@ -25,7 +26,7 @@ Error MongoDBClient::Ping() {
     bson_destroy (&reply);
     bson_destroy (command);
 
-    return !retval ? TextError(DBError::kConnectionError) : nullptr;
+    return !retval ? DBErrorTemplates::kConnectionError.Generate() : nullptr;
 
 }
 MongoDBClient::MongoDBClient() {
@@ -37,7 +38,7 @@ Error MongoDBClient::InitializeClient(const string& address) {
     client_ = mongoc_client_new (uri_str.c_str());
 
     if (client_ == nullptr) {
-        return TextError(DBError::kBadAddress);
+        return DBErrorTemplates::kBadAddress.Generate();
     }
     return nullptr;
 
@@ -65,7 +66,7 @@ Error MongoDBClient::TryConnectDatabase() {
 Error MongoDBClient::Connect(const string& address, const string& database_name,
                              const string& collection_name) {
     if (connected_) {
-        return TextError(DBError::kAlreadyConnected);
+        return DBErrorTemplates::kAlreadyConnected.Generate();
     }
 
     auto err = InitializeClient(address);
@@ -98,7 +99,7 @@ bson_p PrepareBsonDocument(const FileInfo& file, Error* err) {
     auto json = reinterpret_cast<const uint8_t*>(s.c_str());
     auto bson = bson_new_from_json(json, -1, &mongo_err);
     if (!bson) {
-        *err = TextError(std::string(DBError::kInsertError) + ": " + mongo_err.message);
+        *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
         return nullptr;
     }
 
@@ -110,7 +111,7 @@ bson_p PrepareBsonDocument(const uint8_t* json, ssize_t len, Error* err) {
     bson_error_t mongo_err;
     auto bson = bson_new_from_json(json, len, &mongo_err);
     if (!bson) {
-        *err = TextError(std::string(DBError::kInsertError) + ": " + mongo_err.message);
+        *err = DBErrorTemplates::kJsonParseError.Generate(mongo_err.message);
         return nullptr;
     }
 
@@ -123,9 +124,9 @@ Error MongoDBClient::InsertBsonDocument(const bson_p& document, bool ignore_dupl
     bson_error_t mongo_err;
     if (!mongoc_collection_insert_one(collection_, document.get(), NULL, NULL, &mongo_err)) {
         if (mongo_err.code == MONGOC_ERROR_DUPLICATE_KEY) {
-            return ignore_duplicates ? nullptr : TextError(DBError::kDuplicateID);
+            return ignore_duplicates ? nullptr : DBErrorTemplates::kDuplicateID.Generate();
         }
-        return TextError(std::string(DBError::kInsertError) + " - " + mongo_err.message);
+        return DBErrorTemplates::kInsertError.Generate(mongo_err.message);
     }
 
     return nullptr;
@@ -141,7 +142,7 @@ Error MongoDBClient::UpdateBsonDocument(uint64_t id, const bson_p& document, boo
     Error err = nullptr;
 
     if (!mongoc_collection_replace_one(collection_, selector, document.get(), opts, NULL, &mongo_err)) {
-        err = TextError(std::string(DBError::kInsertError) + " - " + mongo_err.message);
+        err = DBErrorTemplates::kInsertError.Generate(mongo_err.message);
     }
 
     bson_free (opts);
@@ -153,7 +154,7 @@ Error MongoDBClient::UpdateBsonDocument(uint64_t id, const bson_p& document, boo
 
 Error MongoDBClient::Insert(const FileInfo& file, bool ignore_duplicates) const {
     if (!connected_) {
-        return TextError(DBError::kNotConnected);
+        return DBErrorTemplates::kNotConnected.Generate();
     }
 
     Error err;
@@ -175,7 +176,7 @@ MongoDBClient::~MongoDBClient() {
 
 Error MongoDBClient::Upsert(uint64_t id, const uint8_t* data, uint64_t size) const {
     if (!connected_) {
-        return TextError(DBError::kNotConnected);
+        return DBErrorTemplates::kNotConnected.Generate();
     }
 
     Error err;
@@ -185,7 +186,7 @@ Error MongoDBClient::Upsert(uint64_t id, const uint8_t* data, uint64_t size) con
     }
 
     if (!BSON_APPEND_INT64(document.get(), "_id", id)) {
-        err = TextError(std::string(DBError::kInsertError) + "- cannot assign document id " );
+        err = DBErrorTemplates::kInsertError.Generate("cannot assign document id ");
     }
 
     return UpdateBsonDocument(id, document, true);
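The Upsert path patched above is how a beamtime metadata blob is stored: the JSON is parsed into BSON, gets the caller-supplied _id (0 in the tests), and replaces any previous record, while an invalid JSON string now surfaces as kJsonParseError instead of a generic insert error. A rough sketch against the Database interface, assuming a reachable MongoDB instance; the address, database name, and use of the meta collection here are placeholder assumptions rather than the receiver's actual wiring:

#include <iostream>
#include <string>
#include "database/database.h"
#include "database/db_error.h"

int main() {
    asapo::Error err;
    asapo::DatabaseFactory factory;
    auto db = factory.Create(&err);   // MongoDBClient behind the Database interface
    if (err) {
        std::cout << err->Explain() << std::endl;
        return 1;
    }
    err = db->Connect("127.0.0.1:27017", "asapo_test", asapo::kDBMetaCollectionName);
    if (err) {
        std::cout << err->Explain() << std::endl;
        return 1;
    }
    std::string meta = R"({"dummy_meta":"test"})";
    err = db->Upsert(0, reinterpret_cast<const uint8_t*>(meta.c_str()), meta.size());
    if (err == asapo::DBErrorTemplates::kJsonParseError) {
        std::cout << "metadata is not valid JSON: " << err->Explain() << std::endl;
    }
    return 0;
}
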
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index 7312cd583..74d41e9e5 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -58,7 +58,8 @@ Error GetLastErrorFromErrno() {
         return IOErrorTemplates::kSocketOperationUnknownAtLevel.Generate();
     case EDOM:
         return IOErrorTemplates::kSocketOperationValueOutOfBound.Generate();
-
+    case EPIPE:
+        return IOErrorTemplates::kBrokenPipe.Generate();
     default:
         std::cout << "[IOErrorsFromErrno] Unknown error code: " << errno << std::endl;
         Error err = IOErrorTemplates::kUnknownIOError.Generate();
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index 4ae3656b2..dee9462a5 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -72,6 +72,18 @@ void ProcessAfterSend(asapo::GenericRequestHeader header, asapo::Error err) {
     mutex.unlock();
 }
 
+void ProcessAfterMetaDataSend(asapo::GenericRequestHeader header, asapo::Error err) {
+    mutex.lock();
+    iterations_remained--;
+    if (err) {
+        std::cerr << "Metadata was not successfully send: " << err << std::endl;
+    } else {
+        std::cout << "Metadata was successfully send" << std::endl;
+    }
+    mutex.unlock();
+    return;
+}
+
 asapo::FileData CreateMemoryBuffer(size_t size) {
     return asapo::FileData(new uint8_t[size]);
 }
@@ -79,6 +91,17 @@ asapo::FileData CreateMemoryBuffer(size_t size) {
 
 bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations) {
 
+    asapo::Error err;
+    if (iterations > 0) { // send invalid metadata for negative integration tests
+        err = producer->SendMetaData("bla", &ProcessAfterMetaDataSend);
+    } else {
+        err = producer->SendMetaData("{\"dummy_meta\":\"test\"}", &ProcessAfterMetaDataSend);
+    }
+    if (err) {
+        std::cerr << "Cannot send metadata: " << err << std::endl;
+        return false;
+    }
+
     for(uint64_t i = 0; i < iterations; i++) {
         auto buffer = CreateMemoryBuffer(number_of_byte);
         asapo::EventHeader event_header{i + 1, number_of_byte, std::to_string(i + 1)};
@@ -142,7 +165,7 @@ int main (int argc, char* argv[]) {
 
     auto producer = CreateProducer(args);
 
-    iterations_remained = args.iterations;
+    iterations_remained = args.iterations + 1;
 
     high_resolution_clock::time_point start_time = high_resolution_clock::now();
 
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index 0410877de..7fa75a351 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -62,6 +62,14 @@ std::vector<std::thread> StartThreads(const Params& params,
 
         lock.unlock();
 
+        if (i == 0) {
+            auto meta = broker->GetBeamtimeMeta(&err);
+            if (err == nullptr) {
+                std::cout << meta << std::endl;
+            } else {
+                std::cout << "Cannot get metadata: " << err->Explain() << std::endl;
+            }
+        }
         while (true) {
             err = broker->GetNext(&fi, group_id, params.read_data ? &data : nullptr);
             if (err == nullptr) {
@@ -102,7 +110,6 @@ int ReadAllData(const Params& params, uint64_t* duration_ms, int* nerrors, int*
     *nerrors = std::accumulate(errors.begin(), errors.end(), 0);
     *nbuf = std::accumulate(nfiles_frombuf.begin(), nfiles_frombuf.end(), 0);
 
-
     high_resolution_clock::time_point t2 = high_resolution_clock::now();
     auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
     *duration_ms = duration_read.count();
diff --git a/examples/worker/getnext_broker_python/check_linux.sh b/examples/worker/getnext_broker_python/check_linux.sh
index 707c48bbc..1df181c3f 100644
--- a/examples/worker/getnext_broker_python/check_linux.sh
+++ b/examples/worker/getnext_broker_python/check_linux.sh
@@ -27,6 +27,8 @@ do
 	echo 'db.data.insert({"_id":'$i',"size":100,"name":"'$i'","lastchange":1,"source":"none","buf_id":0})' | mongo ${database_name}
 done
 
+echo 'db.meta.insert({"_id":0,"meta_test":"test"})' | mongo ${database_name}
+
 sleep 1
 
 export PYTHONPATH=$1:${PYTHONPATH}
@@ -35,6 +37,7 @@ python getnext.py 127.0.0.1:8400 $source_path $database_name $token_test_run $gr
 cat out
 cat out | grep '"size": 100'
 cat out | grep '"_id": 1'
+cat out | grep '"meta_test": "test"'
 
 python getnext.py 127.0.0.1:8400 $source_path $database_name $token_test_run $group_id> out
 cat out
diff --git a/examples/worker/getnext_broker_python/check_windows.bat b/examples/worker/getnext_broker_python/check_windows.bat
index 7bc838965..a1815b4ab 100644
--- a/examples/worker/getnext_broker_python/check_windows.bat
+++ b/examples/worker/getnext_broker_python/check_windows.bat
@@ -13,12 +13,16 @@ ping 1.0.0.0 -n 10 -w 100 > nul
 
 for /l %%x in (1, 1, 3) do echo db.data.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0}) | %mongo_exe% %database_name%  || goto :error
 
+
+echo db.meta.insert({"_id":0,"meta_test":"test"}) | %mongo_exe% %database_name%  || goto :error
+
 set PYTHONPATH=%1
 
 python3 getnext.py 127.0.0.1:8400  %source_path% %database_name%  %token_test_run% %group_id% > out
 type out
 type out | findstr /c:"100" || goto :error
 type out | findstr /c:"\"_id\": 1" || goto :error
+type out | findstr /c:"\"meta_test\": \"test\"" || goto :error
 
 python3 getnext.py 127.0.0.1:8400  %source_path% %database_name%  %token_test_run% %group_id% > out
 type out
diff --git a/examples/worker/getnext_broker_python/getnext.py b/examples/worker/getnext_broker_python/getnext.py
index f124d484a..8648ca8ff 100644
--- a/examples/worker/getnext_broker_python/getnext.py
+++ b/examples/worker/getnext_broker_python/getnext.py
@@ -24,3 +24,10 @@ if err != None:
 else:
     print ('filename: ', meta['name'])
     print ('meta: ', json.dumps(meta, indent=4, sort_keys=True))
+
+
+beamtime_meta,err = broker.get_beamtime_meta()
+if err != None:
+    print ('error getting beamtime meta: ', err)
+else:
+    print ('beamtime meta: ', json.dumps(beamtime_meta, indent=4, sort_keys=True))
diff --git a/producer/api/src/producer_impl.cpp b/producer/api/src/producer_impl.cpp
index 5cdc8445b..fc52dd561 100644
--- a/producer/api/src/producer_impl.cpp
+++ b/producer/api/src/producer_impl.cpp
@@ -106,7 +106,7 @@ Error ProducerImpl::SetBeamtimeId(std::string beamtime_id) {
 }
 
 Error ProducerImpl::SendMetaData(const std::string& metadata, RequestCallback callback) {
-    GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), beamtime_id_ + ".meta"};
+    GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), "beamtime_global.meta"};
     FileData data{new uint8_t[metadata.size()]};
     strncpy((char*)data.get(), metadata.c_str(), metadata.size());
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{beamtime_id_, std::move(request_header),
diff --git a/producer/api/src/request_handler_tcp.cpp b/producer/api/src/request_handler_tcp.cpp
index 5c534bb42..9f6d911f8 100644
--- a/producer/api/src/request_handler_tcp.cpp
+++ b/producer/api/src/request_handler_tcp.cpp
@@ -86,7 +86,9 @@ Error RequestHandlerTcp::ReceiveResponse() {
     case kNetErrorNoError :
         return nullptr;
     default:
-        return ProducerErrorTemplates::kInternalServerError.Generate();
+        auto res_err = ProducerErrorTemplates::kInternalServerError.Generate();
+        res_err->Append(sendDataResponse.message);
+        return res_err;
     }
 }
 
@@ -174,8 +176,9 @@ Error RequestHandlerTcp::SendDataToOneOfTheReceivers(ProducerRequest* request) {
         auto err = TrySendToReceiver(request);
         if (ServerError(err))  {
             Disconnect();
-            log__->Debug("cannot send data id " + std::to_string(request->header.data_id) + " to " + receiver_uri + ": " +
-                         err->Explain());
+            log__->Warning("cannot send data, opcode: " + std::to_string(request->header.op_code) +
+                           ", id: " + std::to_string(request->header.data_id) + " to " + receiver_uri + ": " +
+                           err->Explain());
             continue;
         }
 
diff --git a/producer/api/unittests/test_producer_impl.cpp b/producer/api/unittests/test_producer_impl.cpp
index 423193f74..f7aacf76e 100644
--- a/producer/api/unittests/test_producer_impl.cpp
+++ b/producer/api/unittests/test_producer_impl.cpp
@@ -116,7 +116,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
 
     producer.SetBeamtimeId(expected_beamtimeid);
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferMetaData,
-                                        expected_beamtimeid, expected_id, expected_size, "beamtime_id.meta"))).WillOnce(Return(
+                                        expected_beamtimeid, expected_id, expected_size, "beamtime_global.meta"))).WillOnce(Return(
                                                     nullptr));
 
     auto err = producer.SendMetaData(expected_metadata, nullptr);
diff --git a/producer/api/unittests/test_request_handler_tcp.cpp b/producer/api/unittests/test_request_handler_tcp.cpp
index 516c7e768..185f257dc 100644
--- a/producer/api/unittests/test_request_handler_tcp.cpp
+++ b/producer/api/unittests/test_request_handler_tcp.cpp
@@ -232,11 +232,11 @@ void RequestHandlerTcpTests::ExpectFailSendHeader(bool only_once) {
                                        )
                                       ));
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
@@ -260,11 +260,11 @@ void RequestHandlerTcpTests::ExpectFailSendData(bool only_once) {
                                        )
                                       ));
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
@@ -290,11 +290,11 @@ void RequestHandlerTcpTests::ExpectFailReceive(bool only_once) {
                                       ));
 
 
-        EXPECT_CALL(mock_logger, Debug(AllOf(
-                                           HasSubstr("cannot send"),
-                                           HasSubstr(receivers_list[i])
-                                       )
-                                      ));
+        EXPECT_CALL(mock_logger, Warning(AllOf(
+                                             HasSubstr("cannot send"),
+                                             HasSubstr(receivers_list[i])
+                                         )
+                                        ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
         if (only_once) break;
         i++;
diff --git a/producer/event_monitor_producer/src/system_folder_watch_linux.cpp b/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
index c8f6249c8..bdfacc408 100644
--- a/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
+++ b/producer/event_monitor_producer/src/system_folder_watch_linux.cpp
@@ -94,6 +94,7 @@ Error SystemFolderWatch::ProcessFileEvent(const InotifyEvent& event, FilesToSend
         return err;
     }
     GetDefaultEventMonLogger()->Debug(((event.GetMask() & IN_CLOSE_WRITE) ? "file closed: " : "file moved: ") + fname);
+// TODO: check whether the filename was already added manually
     processed_filenames_[processed_filenames_counter_] = fname;
     processed_filenames_counter_++;
     if (processed_filenames_counter_ == kProcessedFilenamesBufLen) {
diff --git a/receiver/src/requests_dispatcher.cpp b/receiver/src/requests_dispatcher.cpp
index c50319f84..bb44cb5eb 100644
--- a/receiver/src/requests_dispatcher.cpp
+++ b/receiver/src/requests_dispatcher.cpp
@@ -2,7 +2,7 @@
 #include "request.h"
 #include "io/io_factory.h"
 #include "receiver_logger.h"
-
+#include "database/db_error.h"
 namespace asapo {
 
 RequestsDispatcher::RequestsDispatcher(SocketDescriptor socket_fd, std::string address,
@@ -20,6 +20,8 @@ NetworkErrorCode GetNetworkCodeFromError(const Error& err) {
             return NetworkErrorCode::kNetErrorFileIdAlreadyInUse;
         } else if (err == ReceiverErrorTemplates::kAuthorizationFailure) {
             return NetworkErrorCode::kNetAuthorizationError;
+        } else if (err == DBErrorTemplates::kJsonParseError) {
+            return NetworkErrorCode::kNetErrorErrorInMetadata;
         } else {
             return NetworkErrorCode::kNetErrorInternalServerError;
         }
diff --git a/receiver/unittests/test_requests_dispatcher.cpp b/receiver/unittests/test_requests_dispatcher.cpp
index 47da03eb5..3413de1b4 100644
--- a/receiver/unittests/test_requests_dispatcher.cpp
+++ b/receiver/unittests/test_requests_dispatcher.cpp
@@ -10,6 +10,7 @@
 #include "mock_receiver_config.h"
 
 #include "../src/requests_dispatcher.h"
+#include "database/db_error.h"
 
 
 using ::testing::Test;
@@ -262,5 +263,17 @@ TEST_F(RequestsDispatcherTests, ProcessRequestReturnsAuthorizationFailure) {
 }
 
 
+TEST_F(RequestsDispatcherTests, ProcessRequestReturnsMetaDataFailure) {
+    MockHandleRequest(true, asapo::DBErrorTemplates::kJsonParseError.Generate());
+    MockSendResponse(&response, false);
+
+    auto err = dispatcher->ProcessRequest(request);
+
+    ASSERT_THAT(err, Eq(asapo::DBErrorTemplates::kJsonParseError));
+    ASSERT_THAT(response.error_code, Eq(asapo::kNetErrorErrorInMetadata));
+    ASSERT_THAT(std::string(response.message), HasSubstr("parse"));
+}
+
+
 
 }
diff --git a/tests/automatic/broker/get_meta/check_windows.bat b/tests/automatic/broker/get_meta/check_windows.bat
index 8e111ea82..9fde26734 100644
--- a/tests/automatic/broker/get_meta/check_windows.bat
+++ b/tests/automatic/broker/get_meta/check_windows.bat
@@ -13,8 +13,8 @@ start /B "" "%full_name%" -config settings.json
 
 ping 1.0.0.0 -n 1 -w 100 > nul
 
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/1/meta?token=%token% --stderr - | findstr /c:"not found"  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta/0?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/%database_name%/0/meta/1?token=%token% --stderr - | findstr /c:"not found"  || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/full_chain/CMakeLists.txt b/tests/automatic/full_chain/CMakeLists.txt
index 0b813e389..35c1e0016 100644
--- a/tests/automatic/full_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_subdirectory(simple_chain)
+add_subdirectory(simple_chain_metadata)
 add_subdirectory(two_beamlines)
 add_subdirectory(simple_chain_filegen)
 add_subdirectory(simple_chain_filegen_readdata_cache)
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index 47b94d971..c410fcf28 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -22,6 +22,7 @@ Cleanup() {
     nomad stop discovery
     nomad stop broker
     nomad stop authorizer
+    rm -rf out
 #    kill $producerid
     echo "db.dropDatabase()" | mongo ${beamtime_id}
     influx -execute "drop database ${monitor_database_name}"
@@ -44,4 +45,7 @@ $1 localhost:8400 ${beamtime_id} 100 1000 4 0 100 &
 #producerid=`echo $!`
 
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 | grep "Processed 1000 file(s)"
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 5000 1 > out
+cat out
+cat out | grep "Processed 1000 file(s)"
+cat out | grep "Cannot get metadata"
\ No newline at end of file
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
index fffc1e3bf..e79cb8c07 100644
--- a/tests/automatic/full_chain/simple_chain/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -26,9 +26,10 @@ start /B "" "%1" %proxy_address% %beamtime_id% 100 1000 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM worker
-"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 1000  1 > out.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 5000  1 > out.txt
 type out.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out.txt || goto :error
+findstr /i /l /c:"Cannot get metadata"  out.txt || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
new file mode 100644
index 000000000..4ac8f929a
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(TARGET_NAME full_chain_simple_chain_meta)
+
+################################
+# Testing
+################################
+prepare_asapo()
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
new file mode 100644
index 000000000..035c54088
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap Cleanup EXIT
+
+beamtime_id=asapo_test
+token=`$3 token -secret broker_secret.key $beamtime_id`
+
+monitor_database_name=db_test
+proxy_address=127.0.0.1:8400
+
+beamline=test
+receiver_root_folder=/tmp/asapo/receiver/files
+receiver_folder=${receiver_root_folder}/${beamline}/${beamtime_id}
+
+Cleanup() {
+    echo cleanup
+    rm -rf ${receiver_root_folder}
+    nomad stop nginx
+    nomad stop receiver
+    nomad stop discovery
+    nomad stop broker
+    nomad stop authorizer
+    rm -rf out
+    echo "db.dropDatabase()" | mongo ${beamtime_id}
+    influx -execute "drop database ${monitor_database_name}"
+}
+
+influx -execute "create database ${monitor_database_name}"
+echo "db.${beamtime_id}.insert({dummy:1})" | mongo ${beamtime_id}
+
+nomad run nginx.nmd
+nomad run authorizer.nmd
+nomad run receiver.nmd
+nomad run discovery.nmd
+nomad run broker.nmd
+
+sleep 1
+
+#producer
+mkdir -p ${receiver_folder}
+$1 localhost:8400 ${beamtime_id} 100 0 1 0 100
+
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 0 1 > out
+cat out
+cat out | grep "dummy_meta"
\ No newline at end of file
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
new file mode 100644
index 000000000..58ae6e83d
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
@@ -0,0 +1,49 @@
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+SET beamtime_id=asapo_test
+SET beamline=test
+SET receiver_root_folder=c:\tmp\asapo\receiver\files
+SET receiver_folder="%receiver_root_folder%\%beamline%\%beamtime_id%"
+
+
+"%3" token -secret broker_secret.key %beamtime_id% > token
+set /P token=< token
+
+set proxy_address="127.0.0.1:8400"
+
+echo db.%beamtime_id%.insert({dummy:1}) | %mongo_exe% %beamtime_id%
+
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run authorizer.nmd
+c:\opt\consul\nomad run discovery.nmd
+c:\opt\consul\nomad run broker.nmd
+c:\opt\consul\nomad run nginx.nmd
+
+ping 1.0.0.0 -n 10 -w 100 > nul
+
+REM producer
+mkdir %receiver_folder%
+"%1" %proxy_address% %beamtime_id% 100 0 1 0 100
+
+REM worker
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 0  1 > out.txt
+type out.txt
+findstr /i /l /c:"dummy_meta"  out.txt || goto :error
+
+
+goto :clean
+
+:error
+call :clean
+exit /b 1
+
+:clean
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop discovery
+c:\opt\consul\nomad stop broker
+c:\opt\consul\nomad stop authorizer
+c:\opt\consul\nomad stop nginx
+rmdir /S /Q %receiver_root_folder%
+del /f token
+echo db.dropDatabase() | %mongo_exe% %beamtime_id%
+
+
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index c688445e9..5c1455d75 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -53,5 +53,5 @@ $1 localhost:8400 ${beamtime_id2} 100 900 4 0 100 &
 #producerid=`echo $!`
 
 #workers
-$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 2000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
-$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 2000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
+$2 ${proxy_address} ${receiver_folder1} ${beamtime_id1} 2 $token1 10000 0  | tee /dev/stderr | grep "Processed 1000 file(s)"
+$2 ${proxy_address} ${receiver_folder2} ${beamtime_id2} 2 $token2 10000 0 | tee /dev/stderr | grep "Processed 900 file(s)"
diff --git a/tests/automatic/full_chain/two_beamlines/check_windows.bat b/tests/automatic/full_chain/two_beamlines/check_windows.bat
index 00b6c1a89..4e27899de 100644
--- a/tests/automatic/full_chain/two_beamlines/check_windows.bat
+++ b/tests/automatic/full_chain/two_beamlines/check_windows.bat
@@ -35,11 +35,11 @@ start /B "" "%1" %proxy_address% %beamtime_id2% 100 900 4 0 100
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 REM worker
-"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 2000  0 > out1.txt
+"%2" %proxy_address% %receiver_folder1% %beamtime_id1% 2 %token1% 10000  0 > out1.txt
 type out1.txt
 findstr /i /l /c:"Processed 1000 file(s)"  out1.txt || goto :error
 
-"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 2000  0 > out2.txt
+"%2" %proxy_address% %receiver_folder2% %beamtime_id2% 2 %token2% 10000  0 > out2.txt
 type out2.txt
 findstr /i /l /c:"Processed 900 file(s)"  out2.txt || goto :error
 
diff --git a/tests/automatic/mongo_db/connect/connect_mongodb.cpp b/tests/automatic/mongo_db/connect/connect_mongodb.cpp
index e16f4f5c8..eb928e05f 100644
--- a/tests/automatic/mongo_db/connect/connect_mongodb.cpp
+++ b/tests/automatic/mongo_db/connect/connect_mongodb.cpp
@@ -3,7 +3,7 @@
 
 #include "../../../common/cpp/src/database/mongodb_client.h"
 #include "testing.h"
-
+#include "database/db_error.h"
 
 using asapo::M_AssertContains;
 using asapo::Error;
@@ -44,7 +44,7 @@ int main(int argc, char* argv[]) {
 
     if (err == nullptr) {
         err = db.Connect(args.address, args.database_name, args.collection_name);
-        Assert(err, asapo::DBError::kAlreadyConnected);
+        Assert(err, asapo::DBErrorTemplates::kAlreadyConnected.Generate()->Explain());
     }
     return 0;
 }
diff --git a/tests/automatic/producer/beamtime_metadata/check_linux.sh b/tests/automatic/producer/beamtime_metadata/check_linux.sh
index 2a30b1139..fcbe0520f 100644
--- a/tests/automatic/producer/beamtime_metadata/check_linux.sh
+++ b/tests/automatic/producer/beamtime_metadata/check_linux.sh
@@ -12,5 +12,5 @@ mkdir files
 
 $@ files beamtime_id 1
 
-cat files/beamtime_id.meta | grep hello
-ls -ln files/beamtime_id.meta | awk '{ print $5 }'| grep 5
+cat files/beamtime_global.meta | grep hello
+ls -ln files/beamtime_global.meta | awk '{ print $5 }'| grep 5
diff --git a/tests/automatic/producer/beamtime_metadata/check_windows.bat b/tests/automatic/producer/beamtime_metadata/check_windows.bat
index 13fd63f99..fce4c0293 100644
--- a/tests/automatic/producer/beamtime_metadata/check_windows.bat
+++ b/tests/automatic/producer/beamtime_metadata/check_windows.bat
@@ -4,10 +4,10 @@ mkdir %folder%
 
 "%1" %folder% beamtime_id 1
 
-FOR /F "usebackq" %%A IN ('%folder%\beamtime_id.meta') DO set size=%%~zA
+FOR /F "usebackq" %%A IN ('%folder%\beamtime_global.meta') DO set size=%%~zA
 if %size% NEQ 5 goto :error
 
-type %folder%\beamtime_id.meta | findstr /c:"hello"  || goto :error
+type %folder%\beamtime_global.meta | findstr /c:"hello"  || goto :error
 
 goto :clean
 
diff --git a/tests/automatic/producer/file_monitor_producer/check_linux.sh b/tests/automatic/producer/file_monitor_producer/check_linux.sh
index aed6e502a..8da14669b 100644
--- a/tests/automatic/producer/file_monitor_producer/check_linux.sh
+++ b/tests/automatic/producer/file_monitor_producer/check_linux.sh
@@ -24,6 +24,8 @@ sleep 0.5
 echo test1 > /tmp/asapo/test_in/test1/test1.dat
 echo test2 > /tmp/asapo/test_in/test2/test2.tmp
 mkdir -p /tmp/asapo/test_in/test2/subdir
+sleep 0.1
+
 echo test3 > /tmp/asapo/test_in/test2/subdir/test3.dat
 
 sleep 0.1
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index e357471e3..6e8bad6f3 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -35,5 +35,5 @@ $1 localhost:8400 ${beamtime_id} 100 112 4  0 100
 
 sleep 2
 
-# should be 116 requests (112 data transfers and 4 authorizations)
-influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json  | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 116
+# should be 117 requests (112 data transfers + 5 authorizations (4 + 1 after reconnection due to wrong metadata))
+influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json  | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 117
diff --git a/worker/api/cpp/include/worker/data_broker.h b/worker/api/cpp/include/worker/data_broker.h
index ea4123b24..fbd5da3eb 100644
--- a/worker/api/cpp/include/worker/data_broker.h
+++ b/worker/api/cpp/include/worker/data_broker.h
@@ -56,6 +56,12 @@ class DataBroker {
     */
     virtual std::string GenerateNewGroupId(Error* err) = 0;
 
+    //! Get beamtime metadata.
+    /*!
+      \param err - set to nullptr if the operation succeeds, to an error otherwise.
+      \return beamtime metadata.
+    */
+    virtual std::string GetBeamtimeMeta(Error* err) = 0;
 
     //! Receive next available image.
     /*!
diff --git a/worker/api/cpp/src/folder_data_broker.cpp b/worker/api/cpp/src/folder_data_broker.cpp
index 845c0d4cd..f5eab447d 100644
--- a/worker/api/cpp/src/folder_data_broker.cpp
+++ b/worker/api/cpp/src/folder_data_broker.cpp
@@ -1,6 +1,7 @@
 #include "folder_data_broker.h"
 
 #include "io/io_factory.h"
+#include "preprocessor/definitions.h"
 
 namespace asapo {
 
@@ -93,4 +94,8 @@ Error FolderDataBroker::GetById(uint64_t id, FileInfo* info, std::string group_i
     return GetFileByIndex(id - 1 , info, data);
 }
 
+std::string FolderDataBroker::GetBeamtimeMeta(Error* err) {
+    return io__->ReadFileToString(base_path_ + kPathSeparator + "beamtime_global.meta", err);
+}
+
 }
diff --git a/worker/api/cpp/src/folder_data_broker.h b/worker/api/cpp/src/folder_data_broker.h
index 4c4ec1770..54319bbcc 100644
--- a/worker/api/cpp/src/folder_data_broker.h
+++ b/worker/api/cpp/src/folder_data_broker.h
@@ -20,6 +20,7 @@ class FolderDataBroker final : public asapo::DataBroker {
     void SetTimeout(uint64_t timeout_ms) override {}; // to timeout in this case
     std::string GenerateNewGroupId(Error* err)
     override; // return "0" always and no error - no group ids for folder datra broker
+    std::string GetBeamtimeMeta(Error* err) override;
     uint64_t GetNDataSets(Error* err) override;
     Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
     std::unique_ptr<asapo::IO> io__; // modified in testings to mock system calls,otherwise do not touch
diff --git a/worker/api/cpp/src/server_data_broker.cpp b/worker/api/cpp/src/server_data_broker.cpp
index f5b005f22..bf939ba5e 100644
--- a/worker/api/cpp/src/server_data_broker.cpp
+++ b/worker/api/cpp/src/server_data_broker.cpp
@@ -164,8 +164,9 @@ std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) {
         return "next";
     case GetImageServerOperation::GetLast:
         return "last";
+    default:
+        return "last";
     }
-    return "";
 }
 
 Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id,
@@ -281,4 +282,9 @@ Error ServerDataBroker::GetFileInfoFromServerById(uint64_t id, FileInfo* info, s
     return nullptr;
 }
 
+std::string ServerDataBroker::GetBeamtimeMeta(Error* err) {
+    std::string request_string =  "database/" + source_name_ + "/0/meta/0";
+    return BrokerRequestWithTimeout(request_string, "", false, err);
+}
+
 }
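On the worker side the new call maps to a broker GET at database/<beamtime_id>/0/meta/0 and returns the metadata JSON as a string. A minimal sketch of the calling pattern, mirroring the usage added to getnext_broker.cpp above; broker creation is omitted and the include path is assumed from the worker/api/cpp/include layout:

#include <iostream>
#include "worker/data_broker.h"

// Prints the beamtime metadata, or the reason it could not be fetched.
// The broker is assumed to be created and connected as in getnext_broker.cpp.
void PrintBeamtimeMeta(asapo::DataBroker* broker) {
    asapo::Error err;
    auto meta = broker->GetBeamtimeMeta(&err);
    if (err == nullptr) {
        std::cout << meta << std::endl;
    } else {
        std::cout << "Cannot get metadata: " << err->Explain() << std::endl;
    }
}
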
diff --git a/worker/api/cpp/src/server_data_broker.h b/worker/api/cpp/src/server_data_broker.h
index d6ed5f6d8..292477a4b 100644
--- a/worker/api/cpp/src/server_data_broker.h
+++ b/worker/api/cpp/src/server_data_broker.h
@@ -24,6 +24,7 @@ class ServerDataBroker final : public asapo::DataBroker {
     Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
     Error GetLast(FileInfo* info, std::string group_id, FileData* data) override;
     std::string GenerateNewGroupId(Error* err) override;
+    std::string GetBeamtimeMeta(Error* err) override;
     uint64_t GetNDataSets(Error* err) override;
     Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override;
     void SetTimeout(uint64_t timeout_ms) override;
diff --git a/worker/api/cpp/unittests/test_folder_broker.cpp b/worker/api/cpp/unittests/test_folder_broker.cpp
index 56b55a946..322089e96 100644
--- a/worker/api/cpp/unittests/test_folder_broker.cpp
+++ b/worker/api/cpp/unittests/test_folder_broker.cpp
@@ -38,10 +38,6 @@ TEST(FolderDataBroker, SetCorrectIO) {
 class FakeIO: public asapo::MockIO {
   public:
 
-    virtual std::string ReadFileToString(const std::string& fname, Error* err) const noexcept override {
-        return "OK";
-    }
-
     FileInfos FilesInFolder(const std::string& folder, Error* err) const override {
         *err = nullptr;
         FileInfos file_infos;
@@ -253,8 +249,9 @@ class GetDataFromFileTests : public Test {
     OpenFileMock mock;
     FileInfo fi;
     FileData data;
+    std::string expected_base_path = "/path/to/file";
     void SetUp() override {
-        data_broker = std::unique_ptr<FolderDataBroker> {new FolderDataBroker("/path/to/file")};
+        data_broker = std::unique_ptr<FolderDataBroker> {new FolderDataBroker(expected_base_path)};
         data_broker->io__ = std::unique_ptr<IO> {&mock};
         data_broker->Connect();
     }
@@ -326,5 +323,27 @@ TEST_F(FolderDataBrokerTests, GetByIdReturnsError) {
     ASSERT_THAT(err2, Ne(nullptr));
 }
 
+TEST_F(GetDataFromFileTests, GetMetaDataReturnsError) {
+    EXPECT_CALL(mock, ReadFileToString_t(_, _)).
+    WillOnce(DoAll(testing::SetArgPointee<1>(asapo::IOErrorTemplates::kReadError.Generate().release()),
+                   testing::Return("")));
+
+    Error err;
+    auto meta = data_broker->GetBeamtimeMeta(&err);
+    ASSERT_THAT(err, Eq(asapo::IOErrorTemplates::kReadError));
+}
+
+TEST_F(GetDataFromFileTests, GetMetaDataReturnsOK) {
+    EXPECT_CALL(mock, ReadFileToString_t(expected_base_path + asapo::kPathSeparator + "beamtime_global.meta", _)).
+    WillOnce(DoAll(testing::SetArgPointee<1>(nullptr),
+                   testing::Return("OK")));
+
+    Error err;
+    auto meta = data_broker->GetBeamtimeMeta(&err);
+    ASSERT_THAT(meta, Eq("OK"));
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+
 
 }
diff --git a/worker/api/cpp/unittests/test_server_broker.cpp b/worker/api/cpp/unittests/test_server_broker.cpp
index e63bf5605..a3f921cc2 100644
--- a/worker/api/cpp/unittests/test_server_broker.cpp
+++ b/worker/api/cpp/unittests/test_server_broker.cpp
@@ -62,6 +62,7 @@ class ServerDataBrokerTests : public Test {
     std::string expected_filename = "filename";
     std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename;
     std::string expected_group_id = "groupid";
+    std::string expected_metadata = "{\"meta\":1}";
     uint64_t expected_dataset_id = 1;
     static const uint64_t expected_buf_id = 123;
     void SetUp() override {
@@ -471,24 +472,23 @@ TEST_F(ServerDataBrokerTests, GetByIdUsesCorrectUri) {
 
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdReturnsNoData) {
+TEST_F(ServerDataBrokerTests, GetMetaDataOK) {
     MockGetBrokerUri();
     data_broker->SetTimeout(100);
-    auto to_send = CreateFI();
-    auto json = to_send.Json();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_group_id
-                                        + "/" + std::to_string(
-                                            expected_dataset_id) + "?token="
-                                        + expected_token + "&reset=true", _,
+
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/0/meta/0?token="
+                                        + expected_token, _,
                                         _)).WillOnce(DoAll(
-                                                SetArgPointee<1>(HttpCode::Conflict),
+                                                SetArgPointee<1>(HttpCode::OK),
                                                 SetArgPointee<2>(nullptr),
-                                                Return("{\"id\":1}")));
+                                                Return(expected_metadata)));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr);
+    asapo::Error err;
+    auto res = data_broker->GetBeamtimeMeta(&err);
 
-    ASSERT_THAT(err->GetErrorType(), Eq(asapo::ErrorType::kEndOfFile));
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(res, Eq(expected_metadata));
 
 }
 
diff --git a/worker/api/python/asapo_worker.pxd b/worker/api/python/asapo_worker.pxd
index 05e8d826b..0cd548ea2 100644
--- a/worker/api/python/asapo_worker.pxd
+++ b/worker/api/python/asapo_worker.pxd
@@ -34,6 +34,8 @@ cdef extern from "asapo_worker.h" namespace "asapo":
         uint64_t GetNDataSets(Error* err)
         Error ResetCounter(string group_id)
         string GenerateNewGroupId(Error* err)
+        string GetBeamtimeMeta(Error* err)
+
 
 cdef extern from "asapo_worker.h" namespace "asapo":
     cdef cppclass DataBrokerFactory:
diff --git a/worker/api/python/asapo_worker.pyx.in b/worker/api/python/asapo_worker.pyx.in
index 38bc41ec9..6c8adf020 100644
--- a/worker/api/python/asapo_worker.pyx.in
+++ b/worker/api/python/asapo_worker.pyx.in
@@ -82,6 +82,17 @@ cdef class PyDataBroker:
             return None, err_str
         else:
             return _str(group_id), None
+    def get_beamtime_meta(self):
+        cdef Error err
+        cdef string meta_str
+        meta_str = self.c_broker.GetBeamtimeMeta(&err)
+        err_str = _str(GetErrorString(&err))
+        if err_str.strip():
+            return None, err_str
+        else:
+            meta = json.loads(_str(meta_str))
+            del meta['_id']
+            return meta, None
 
 cdef class PyDataBrokerFactory:
     cdef DataBrokerFactory c_factory
diff --git a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
index a8d3bb863..2d38106ad 100644
--- a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
+++ b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
@@ -5,6 +5,8 @@
 #include "io/io.h"
 #include "../../../../common/cpp/src/system_io/system_io.h"
 
+#include "database/db_error.h"
+
 
 #include "database/database.h"
 
@@ -12,6 +14,7 @@
 #include "unittests/MockDatabase.h"
 
 #include "../src/folder_db_importer.h"
+#include "database/db_error.h"
 
 #include "unittests/MockIO.h"
 
@@ -85,7 +88,7 @@ class MockDatabaseFactory : public DatabaseFactory {
 
 class FakeDatabaseFactory : public DatabaseFactory {
     std::unique_ptr<Database> Create(Error* err) const noexcept override {
-        *err = asapo::TextError(asapo::DBError::kMemoryError);
+        *err = asapo::ErrorTemplates::kMemoryAllocationError.Generate();
         return {};
     }
 };
@@ -129,11 +132,9 @@ class FolderDBConverterTests : public Test {
     }
 };
 
-
-
 TEST_F(FolderDBConverterTests, ErrorWhenCannotConnect) {
     EXPECT_CALL(*(mock_dbf->db[0]), Connect_t(uri, db_name, kDBDataCollectionName)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
 
     auto error = converter.Convert(uri, folder, db_name);
     ASSERT_THAT(error, Ne(nullptr));
@@ -142,11 +143,11 @@ TEST_F(FolderDBConverterTests, ErrorWhenCannotConnect) {
 TEST_F(FolderDBConverterTests, ErrorWhenCannotCreateDbParallel) {
     int nparallel = 3;
     EXPECT_CALL(*(mock_dbf->db[0]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
     EXPECT_CALL(*(mock_dbf->db[1]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
     EXPECT_CALL(*(mock_dbf->db[2]), Connect_t(uri, _, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kConnectionError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kConnectionError.Generate().release()));
 
     converter.SetNParallelTasks(nparallel);
     auto error = converter.Convert(uri, folder, db_name);
@@ -184,7 +185,7 @@ TEST_F(FolderDBConverterTests, PassesIgnoreDuplicates) {
 TEST_F(FolderDBConverterTests, ErrorWhenCannotImportFileListToDb) {
 
     EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(_, _)).
-    WillOnce(testing::Return(new SimpleError(asapo::DBError::kInsertError)));
+    WillOnce(testing::Return(asapo::DBErrorTemplates::kInsertError.Generate().release()));
 
     auto error = converter.Convert(uri, folder, db_name);
     ASSERT_THAT(error, Ne(nullptr));
-- 
GitLab