diff --git a/broker/src/hidra2_broker/database/mongodb.go b/broker/src/hidra2_broker/database/mongodb.go
index c6e54099ea00a7eb53f0ba1f1ea3e6bc97368a40..bc24d52a61c19c681b214fa24d71435115da21df 100644
--- a/broker/src/hidra2_broker/database/mongodb.go
+++ b/broker/src/hidra2_broker/database/mongodb.go
@@ -7,6 +7,7 @@ import (
 	"gopkg.in/mgo.v2"
 	"gopkg.in/mgo.v2/bson"
 	"hidra2_broker/utils"
+	"sync"
 	"time"
 )
 
@@ -19,28 +20,37 @@ const data_collection_name = "data"
 const pointer_collection_name = "current_location"
 const pointer_field_name = "current_pointer"
 const no_session_msg = "database session not created"
+const wrong_id_type = "wrong id type"
 const already_connected_msg = "already connected"
 
+var dbListLock sync.RWMutex
+var dbPointersLock sync.RWMutex
+
 type Mongodb struct {
-	main_session *mgo.Session
-	timeout      time.Duration
-	databases    []string
+	session             *mgo.Session
+	timeout             time.Duration
+	databases           []string
+	parent_db           *Mongodb
+	db_pointers_created map[string]bool
 }
 
 func (db *Mongodb) Copy() Agent {
-	new_db:= new(Mongodb)
-	new_db.main_session = db.main_session.Copy()
-	new_db.databases = make([]string,len(db.databases))
-	copy(new_db.databases,db.databases)
+	new_db := new(Mongodb)
+	new_db.session = db.session.Copy()
+	new_db.parent_db = db
 	return new_db
 }
 
 func (db *Mongodb) databaseInList(dbname string) bool {
+	dbListLock.RLock()
+	defer dbListLock.RUnlock()
 	return utils.StringInSlice(dbname, db.databases)
 }
 
 func (db *Mongodb) updateDatabaseList() (err error) {
-	db.databases, err = db.main_session.DatabaseNames()
+	dbListLock.Lock()
+	db.databases, err = db.session.DatabaseNames()
+	dbListLock.Unlock()
 	return err
 }
 
@@ -61,11 +71,11 @@ func (db *Mongodb) dataBaseExist(dbname string) (err error) {
 }
 
 func (db *Mongodb) Connect(address string) (err error) {
-	if db.main_session != nil {
+	if db.session != nil {
 		return errors.New(already_connected_msg)
 	}
 
-	db.main_session, err = mgo.DialWithTimeout(address, time.Second)
+	db.session, err = mgo.DialWithTimeout(address, time.Second)
 	if err != nil {
 		return err
 	}
@@ -78,47 +88,72 @@ func (db *Mongodb) Connect(address string) (err error) {
 }
 
 func (db *Mongodb) Close() {
-	if db.main_session != nil {
-		db.main_session.Close()
-		db.main_session = nil
+	if db.session != nil {
+		db.session.Close()
+		db.session = nil
 	}
 
 }
 
 func (db *Mongodb) DeleteAllRecords(dbname string) (err error) {
-	if db.main_session == nil {
+	if db.session == nil {
 		return errors.New(no_session_msg)
 	}
-	return db.main_session.DB(dbname).DropDatabase()
+	return db.session.DB(dbname).DropDatabase()
 }
 
 func (db *Mongodb) InsertRecord(dbname string, s interface{}) error {
-	if db.main_session == nil {
+	if db.session == nil {
 		return errors.New(no_session_msg)
 	}
 
-	c := db.main_session.DB(dbname).C(data_collection_name)
+	c := db.session.DB(dbname).C(data_collection_name)
 
 	return c.Insert(s)
 }
 
-func (db *Mongodb) incrementField(dbname string, res interface{}) (err error) {
+func (db *Mongodb) getMaxIndex(dbname string) (max_id int, err error) {
+	c := db.session.DB(dbname).C(data_collection_name)
+	var id Pointer
+	err = c.Find(nil).Sort("-_id").Select(bson.M{"_id": 1}).One(&id)
+	if err == mgo.ErrNotFound {
+		return 0, nil
+	}
+	return id.ID, err
+}
+
+func (db *Mongodb) createLocationPointers(dbname string) (err error) {
 	change := mgo.Change{
-		Update:    bson.M{"$inc": bson.M{pointer_field_name: 1}},
-		Upsert:    true,
-		ReturnNew: true,
+		Update: bson.M{"$inc": bson.M{pointer_field_name: 0}},
+		Upsert: true,
 	}
 	q := bson.M{"_id": 0}
-	c := db.main_session.DB(dbname).C(pointer_collection_name)
-	_, err = c.Find(q).Apply(change, res)
+	c := db.session.DB(dbname).C(pointer_collection_name)
+	var res map[string]interface{}
+	_, err = c.Find(q).Apply(change, &res)
+	return err
+}
 
+func (db *Mongodb) incrementField(dbname string, max_ind int, res interface{}) (err error) {
+	update := bson.M{"$inc": bson.M{pointer_field_name: 1}}
+	change := mgo.Change{
+		Update:    update,
+		Upsert:    false,
+		ReturnNew: true,
+	}
+	q := bson.M{"_id": 0, pointer_field_name: bson.M{"$lt": max_ind}}
+	c := db.session.DB(dbname).C(pointer_collection_name)
+	_, err = c.Find(q).Apply(change, res)
+	if err == mgo.ErrNotFound {
+		return &DBError{utils.StatusNoData, err.Error()}
+	}
 	return err
 }
 
 func (db *Mongodb) getRecordByID(dbname string, id int) (interface{}, error) {
 	var res map[string]interface{}
 	q := bson.M{"_id": id}
-	c := db.main_session.DB(dbname).C(data_collection_name)
+	c := db.session.DB(dbname).C(data_collection_name)
 	err := c.Find(q).One(&res)
 	if err == mgo.ErrNotFound {
 		return nil, &DBError{utils.StatusNoData, err.Error()}
@@ -126,20 +161,58 @@ func (db *Mongodb) getRecordByID(dbname string, id int) (interface{}, error) {
 	return &res, err
 }
 
+func (db *Mongodb) needCreateLocationPointersInDb(db_name string) bool {
+	dbPointersLock.RLock()
+	needCreate := !db.db_pointers_created[db_name]
+	dbPointersLock.RUnlock()
+	return needCreate
+}
+
+func (db *Mongodb) SetLocationPointersCreateFlag(db_name string) {
+	dbPointersLock.Lock()
+	if db.db_pointers_created == nil {
+		db.db_pointers_created = make(map[string]bool)
+	}
+	db.db_pointers_created[db_name] = true
+	dbPointersLock.Unlock()
+}
+
+func (db *Mongodb) generateLocationPointersInDbIfNeeded(db_name string) {
+	if db.needCreateLocationPointersInDb(db_name) &&
+		db.createLocationPointers(db_name) == nil {
+		db.SetLocationPointersCreateFlag(db_name)
+	}
+}
+
+func (db *Mongodb) getParentDB() *Mongodb {
+	if db.parent_db == nil {
+		return db
+	} else {
+		return db.parent_db
+	}
+}
+
 func (db *Mongodb) checkDatabaseOperationPrerequisites(db_name string) error {
-	if db.main_session == nil {
+	if db.session == nil {
 		return &DBError{utils.StatusError, no_session_msg}
 	}
 
-	if err := db.dataBaseExist(db_name); err != nil {
+	if err := db.getParentDB().dataBaseExist(db_name); err != nil {
 		return &DBError{utils.StatusWrongInput, err.Error()}
 	}
+
+	db.getParentDB().generateLocationPointersInDbIfNeeded(db_name)
+
 	return nil
 }
 
 func (db *Mongodb) getCurrentPointer(db_name string) (Pointer, error) {
+	max_ind, err := db.getMaxIndex(db_name)
+	if err != nil {
+		return Pointer{}, err
+	}
 	var curPointer Pointer
-	err := db.incrementField(db_name, &curPointer)
+	err = db.incrementField(db_name, max_ind, &curPointer)
 	if err != nil {
 		return Pointer{}, err
 	}
@@ -148,6 +221,7 @@ func (db *Mongodb) getCurrentPointer(db_name string) (Pointer, error) {
 }
 
 func (db *Mongodb) GetNextRecord(db_name string) ([]byte, error) {
+
 	if err := db.checkDatabaseOperationPrerequisites(db_name); err != nil {
 		return nil, err
 	}
diff --git a/broker/src/hidra2_broker/database/mongodb_test.go b/broker/src/hidra2_broker/database/mongodb_test.go
index 484e2020fbb413186ef816165c4cc3cc7fbb88b7..dcad1b703ec343180a9c672ef49578548ee266e2 100644
--- a/broker/src/hidra2_broker/database/mongodb_test.go
+++ b/broker/src/hidra2_broker/database/mongodb_test.go
@@ -27,6 +27,7 @@ var rec2_expect, _ = json.Marshal(rec2)
 
 func cleanup() {
 	db.DeleteAllRecords(dbname)
+	db.db_pointers_created = nil
 	db.Close()
 }
 
@@ -59,10 +60,8 @@ func TestMongoDBGetNextErrorWhenWrongDatabasename(t *testing.T) {
 
 func TestMongoDBGetNextErrorWhenEmptyCollection(t *testing.T) {
 	db.Connect(dbaddress)
+	db.databases = append(db.databases, dbname)
 	defer cleanup()
-	var curPointer Pointer
-	db.incrementField(dbname, &curPointer)
-
 	_, err := db.GetNextRecord(dbname)
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 }
diff --git a/broker/src/hidra2_broker/server/get_next.go b/broker/src/hidra2_broker/server/get_next.go
index 672a398987e0a85d40f93d98537beb0e68d77263..b076696b41243d506f89a13b17104d6484b8f49a 100644
--- a/broker/src/hidra2_broker/server/get_next.go
+++ b/broker/src/hidra2_broker/server/get_next.go
@@ -28,19 +28,22 @@ func routeGetNext(w http.ResponseWriter, r *http.Request) {
 	w.Write(answer)
 }
 
+func returnError(err error) (answer []byte, code int) {
+	err_db, ok := err.(*database.DBError)
+	code = utils.StatusError
+	if ok {
+		code = err_db.Code
+	}
+	return []byte(err.Error()), code
+}
+
 func getNextRecord(db_name string) (answer []byte, code int) {
 	db_new := db.Copy()
 	defer db_new.Close()
 	statistics.IncreaseCounter()
 	answer, err := db_new.GetNextRecord(db_name)
 	if err != nil {
-		err_db, ok := err.(*database.DBError)
-		code = utils.StatusError
-		if ok {
-			code = err_db.Code
-		}
-		return []byte(err.Error()), code
+		return returnError(err)
 	}
-
 	return answer, utils.StatusOK
 }
diff --git a/broker/src/hidra2_broker/server/server.go b/broker/src/hidra2_broker/server/server.go
index eadd2ba6b3cc0b202c92f3901ecec985f0d7ceb8..df1fd327f05ddc6518e652b212c63182fda35479 100644
--- a/broker/src/hidra2_broker/server/server.go
+++ b/broker/src/hidra2_broker/server/server.go
@@ -7,10 +7,10 @@ import (
 var db database.Agent
 
 type serverSettings struct {
-	BrokerDbAddress string
+	BrokerDbAddress  string
 	MonitorDbAddress string
-	MonitorDbName string
-	Port            int
+	MonitorDbName    string
+	Port             int
 }
 
 var settings serverSettings
@@ -18,7 +18,8 @@ var statistics serverStatistics
 
 func InitDB(dbAgent database.Agent) error {
 	db = dbAgent
-	return db.Connect(settings.BrokerDbAddress)
+	err := db.Connect(settings.BrokerDbAddress)
+	return err
 }
 
 func CleanupDB() {
diff --git a/common/cpp/include/unittests/MockDatabase.h b/common/cpp/include/unittests/MockDatabase.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1bb10efc1915a9707f7b30fa85585673fc72f37
--- /dev/null
+++ b/common/cpp/include/unittests/MockDatabase.h
@@ -0,0 +1,37 @@
+#ifndef HIDRA2_MOCKDATABASE_H
+#define HIDRA2_MOCKDATABASE_H
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "database/database.h"
+#include "common/error.h"
+
+namespace hidra2 {
+
+class MockDatabase : public Database {
+  public:
+    Error Connect(const std::string& address, const std::string& database,
+                  const std::string& collection ) override {
+        return Error{Connect_t(address, database, collection)};
+
+    }
+    Error Insert(const FileInfo& file, bool ignore_duplicates) const override {
+        return Error{Insert_t(file, ignore_duplicates)};
+    }
+
+    MOCK_METHOD3(Connect_t, SimpleError * (const std::string&, const std::string&, const std::string&));
+    MOCK_CONST_METHOD2(Insert_t, SimpleError * (const FileInfo&, bool));
+
+    // stuff to test db destructor is called and avoid "uninteresting call" messages
+    MOCK_METHOD0(Die, void());
+    virtual ~MockDatabase() override {
+        if (check_destructor)
+            Die();
+    }
+    bool check_destructor{false};
+};
+
+}
+
+#endif //HIDRA2_MOCKDATABASE_H
diff --git a/common/cpp/src/http_client/CMakeLists.txt b/common/cpp/src/http_client/CMakeLists.txt
index b274c9319e20e484c12d9f22f8668fda3f6a39f9..42e3f0659a252680128db9bf06b3c731f290ec05 100644
--- a/common/cpp/src/http_client/CMakeLists.txt
+++ b/common/cpp/src/http_client/CMakeLists.txt
@@ -1,7 +1,7 @@
 set(TARGET_NAME curl_http_client)
 set(SOURCE_FILES
         curl_http_client.cpp
-        )
+        )
 
 
 ################################
diff --git a/config/grafana/ASAP__O.json b/config/grafana/ASAP__O.json
new file mode 100644
index 0000000000000000000000000000000000000000..7ed15c7eeef11c48042a4040e9350637865b477f
--- /dev/null
+++ b/config/grafana/ASAP__O.json
@@ -0,0 +1,459 @@
+{
+  "__inputs": [
+    {
+      "name": "DS_TEST",
+      "label": "test",
+      "description": "",
+      "type": "datasource",
+      "pluginId": "influxdb",
+      "pluginName": "InfluxDB"
+    }
+  ],
+  "__requires": [
+    {
+      "type": "grafana",
+      "id": "grafana",
+      "name": "Grafana",
+      "version": "5.0.0-beta5"
+    },
+    {
+      "type": "panel",
+      "id": "graph",
+      "name": "Graph",
+      "version": "5.0.0"
+    },
+    {
+      "type": "datasource",
+      "id": "influxdb",
+      "name": "InfluxDB",
+      "version": "5.0.0"
+    }
+  ],
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "-- Grafana --",
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": null,
+  "links": [],
+  "panels": [
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_TEST}",
+      "fill": 0,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 0
+      },
+      "id": 6,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "alias": "Database",
+          "groupBy": [],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "query": "SELECT \"db_share\" FROM \"statistics\" WHERE $timeFilter",
+          "rawQuery": false,
+          "refId": "A",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "db_share"
+                ],
+                "type": "field"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "alias": "Disk",
+          "groupBy": [],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "B",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "disk_share"
+                ],
+                "type": "field"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "alias": "Network",
+          "groupBy": [],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "C",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "network_share"
+                ],
+                "type": "field"
+              }
+            ]
+          ],
+          "tags": []
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Shares",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_TEST}",
+      "fill": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 11,
+        "x": 12,
+        "y": 0
+      },
+      "id": 2,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": true,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "alias": "Total",
+          "groupBy": [],
+          "measurement": "statistics",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "A",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "data_volume"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [
+                  " / elapsed_ms/1024/1024/1024*1000*8"
+                ],
+                "type": "math"
+              }
+            ]
+          ],
+          "tags": []
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Bandwidth",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "${DS_TEST}",
+      "fill": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 11,
+        "x": 12,
+        "y": 8
+      },
+      "id": 4,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "alias": "Receiver",
+          "groupBy": [
+            {
+              "params": [
+                "$__interval"
+              ],
+              "type": "time"
+            },
+            {
+              "params": [
+                "null"
+              ],
+              "type": "fill"
+            }
+          ],
+          "measurement": "RequestsRate",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "query": "SELECT \"n_requests\" / elapsed_ms*1000 FROM \"statistics\" WHERE $timeFilter",
+          "rawQuery": true,
+          "refId": "A",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "n_requests"
+                ],
+                "type": "field"
+              },
+              {
+                "params": [],
+                "type": "mean"
+              }
+            ]
+          ],
+          "tags": []
+        },
+        {
+          "alias": "Broker",
+          "groupBy": [],
+          "measurement": "RequestsRate",
+          "orderByTime": "ASC",
+          "policy": "default",
+          "refId": "B",
+          "resultFormat": "time_series",
+          "select": [
+            [
+              {
+                "params": [
+                  "rate"
+                ],
+                "type": "field"
+              }
+            ]
+          ],
+          "tags": []
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Number of Requests",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "transparent": false,
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    }
+  ],
+  "refresh": false,
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now/d",
+    "to": "now/d"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "ASAP::O",
+  "uid": "3JvTwliiz",
+  "version": 4
+}
\ No newline at end of file
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index c7bae8553d6b740df188573d851196d2089ba107..dd1e4cf48bd77ed8f1b437a63931adc9b599cb7c 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -30,7 +30,7 @@ bool SendDummyData(hidra2::Producer* producer, size_t number_of_byte, uint64_t i
     for(uint64_t i = 0; i < iterations; i++) {
 //        std::cerr << "Send file " << i + 1 << "/" << iterations << std::endl;
 
-        auto err = producer->Send(i, buffer.get(), number_of_byte);
+        auto err = producer->Send(i + 1, buffer.get(), number_of_byte);
 
         if (err) {
             std::cerr << "File was not successfully send: " << err << std::endl;
diff --git a/examples/worker/getnext_broker/CMakeLists.txt b/examples/worker/getnext_broker/CMakeLists.txt
index 903ff9db59315b9c4c03dd680adf631f48c8efd8..03483d371c1022600b70b4b67ba5e122360c7043 100644
--- a/examples/worker/getnext_broker/CMakeLists.txt
+++ b/examples/worker/getnext_broker/CMakeLists.txt
@@ -3,6 +3,7 @@ set(SOURCE_FILES getnext_broker.cpp)
 
 add_executable(${TARGET_NAME} ${SOURCE_FILES})
 target_link_libraries(${TARGET_NAME} hidra2-worker)
+
 #use expression generator to get rid of VS adding Debug/Release folders
 set_target_properties(${TARGET_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY
         ${CMAKE_CURRENT_BINARY_DIR}$<$<CONFIG:Debug>:>
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index 3c5a43d0677555276b0f25aaf2aed1768272c7fe..726c6410281ea2d4c8a685744c71b9be64d46b83 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -18,24 +18,26 @@ void WaitThreads(std::vector<std::thread>* threads) {
     }
 }
 
-void ProcessError(const Error& err) {
-    if (err == nullptr) return;
+int ProcessError(const Error& err) {
+    if (err == nullptr) return 0;
     if (err->GetErrorType() != hidra2::ErrorType::kEndOfFile) {
         std::cout << err->Explain() << std::endl;
-        exit(EXIT_FAILURE);
+        return 1;
     }
+    return 0;
 }
 
 std::vector<std::thread> StartThreads(const std::string& server, const std::string& run_name, int nthreads,
-                                      std::vector<int>* nfiles) {
-    auto exec_next = [server, run_name, nfiles](int i) {
+                                      std::vector<int>* nfiles, std::vector<int>* errors) {
+    auto exec_next = [server, run_name, nfiles, errors](int i) {
         hidra2::FileInfo fi;
         Error err;
         auto broker = hidra2::DataBrokerFactory::CreateServerBroker(server, run_name, &err);
+        broker->SetTimeout(1000);
         while ((err = broker->GetNext(&fi, nullptr)) == nullptr) {
             (*nfiles)[i] ++;
         }
-        ProcessError(err);
+        (*errors)[i] = ProcessError(err);
     };
 
     std::vector<std::thread> threads;
@@ -50,11 +52,16 @@ int ReadAllData(const std::string& server, const std::string& run_name, int nthr
     high_resolution_clock::time_point t1 = high_resolution_clock::now();
 
     std::vector<int>nfiles(nthreads, 0);
+    std::vector<int>errors(nthreads, 0);
 
-    auto threads = StartThreads(server, run_name, nthreads, &nfiles);
+    auto threads = StartThreads(server, run_name, nthreads, &nfiles, &errors);
     WaitThreads(&threads);
 
     int n_total = std::accumulate(nfiles.begin(), nfiles.end(), 0);
+    int errors_total = std::accumulate(errors.begin(), errors.end(), 0);
+    if (errors_total) {
+        exit(EXIT_FAILURE);
+    }
 
     high_resolution_clock::time_point t2 = high_resolution_clock::now();
     auto duration_read = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 );
diff --git a/receiver/CMakeLists.txt b/receiver/CMakeLists.txt
index 6f5ee995cbb7c61e4178e5c3687602836b493292..a5d795a7ccf763b0735c11fc5c603bc6df582889 100644
--- a/receiver/CMakeLists.txt
+++ b/receiver/CMakeLists.txt
@@ -7,7 +7,8 @@ set(SOURCE_FILES
         src/request_handler_file_write.cpp
         src/statistics.cpp
         src/statistics_sender_influx_db.cpp
-        src/receiver_config.cpp src/receiver_config.h)
+        src/receiver_config.cpp
+        src/request_handler_db_write.cpp)
 
 
 ################################
@@ -19,7 +20,7 @@ add_library(${TARGET_NAME} STATIC ${SOURCE_FILES} $<TARGET_OBJECTS:system_io> $<
          $<TARGET_OBJECTS:json_parser>)
 set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 target_include_directories(${TARGET_NAME} PUBLIC ${HIDRA2_CXX_COMMON_INCLUDE_DIR} ${CURL_INCLUDE_DIRS})
-target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} database)
 
 
 add_executable(${TARGET_NAME}-bin src/main.cpp)
@@ -38,7 +39,9 @@ set(TEST_SOURCE_FILES
         unittests/test_statistics.cpp
         unittests/test_config.cpp
         unittests/test_request.cpp
+        unittests/test_request_factory.cpp
         unittests/test_request_handler_file_write.cpp
+        unittests/test_request_handler_db_writer.cpp
         unittests/test_statistics_sender_influx_db.cpp
         unittests/mock_receiver_config.cpp
         )
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index ba4e4b250daea114344e47c6c8c6a300e9f6ab0d..984218ee0b60eef9f854962db0ff116a2d223396 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -17,6 +17,10 @@ Error ReceiverConfigFactory::SetConfigFromFile(std::string file_name) {
     (err = parser.GetString("MonitorDbAddress", &config.monitor_db_uri)) ||
     (err = parser.GetUInt64("ListenPort", &config.listen_port)) ||
     (err = parser.GetBool("WriteToDisk", &config.write_to_disk)) ||
+    (err = parser.GetBool("WriteToDb", &config.write_to_db)) ||
+    (err = parser.GetString("BrokerDbAddress", &config.broker_db_uri)) ||
+    (err = parser.GetString("BrokerDbName", &config.broker_db_name)) ||
+
     (err = parser.GetString("MonitorDbName", &config.monitor_db_name));
     return err;
 }
diff --git a/receiver/src/receiver_config.h b/receiver/src/receiver_config.h
index f0aa92adf8e6317c698f2a5621de6407ad25d4c2..e348dc704ea13276fe16da498c92d3710a134969 100644
--- a/receiver/src/receiver_config.h
+++ b/receiver/src/receiver_config.h
@@ -9,8 +9,12 @@ namespace hidra2 {
 struct ReceiverConfig {
     std::string monitor_db_uri;
     std::string monitor_db_name;
+    std::string broker_db_uri;
+    std::string broker_db_name;
     uint64_t listen_port = 0;
     bool write_to_disk = false;
+    bool write_to_db = false;
+
 };
 
 const ReceiverConfig* GetReceiverConfig();
diff --git a/receiver/src/request.cpp b/receiver/src/request.cpp
index 19ac5ca8a7dfd394f652412dcdb56e069ae0b54c..30862496da685bada5aff6ad2e456bc284e9adcb 100644
--- a/receiver/src/request.cpp
+++ b/receiver/src/request.cpp
@@ -60,6 +60,11 @@ void Request::AddHandler(const RequestHandler* handler) {
 }
 
 
+uint64_t Request::GetDataID() const {
+    return request_header_.data_id;
+}
+
+
 uint64_t Request::GetDataSize() const {
     return request_header_.data_size;
 }
@@ -72,7 +77,6 @@ std::string Request::GetFileName() const {
     return std::to_string(request_header_.data_id) + ".bin";
 }
 
-
 std::unique_ptr<Request> RequestFactory::GenerateRequest(const GenericNetworkRequestHeader&
         request_header, SocketDescriptor socket_fd,
         Error* err) const noexcept {
@@ -80,16 +84,22 @@ std::unique_ptr<Request> RequestFactory::GenerateRequest(const GenericNetworkReq
     switch (request_header.op_code) {
     case Opcode::kNetOpcodeSendData: {
         auto request = std::unique_ptr<Request> {new Request{request_header, socket_fd}};
+
         if (GetReceiverConfig()->write_to_disk) {
             request->AddHandler(&request_handler_filewrite_);
         }
+
+        if (GetReceiverConfig()->write_to_db) {
+            request->AddHandler(&request_handler_dbwrite_);
+        }
+
         return request;
     }
     default:
         *err = ReceiverErrorTemplates::kInvalidOpCode.Generate();
         return nullptr;
     }
-
 }
 
+
 }
\ No newline at end of file
diff --git a/receiver/src/request.h b/receiver/src/request.h
index b32a9e580401fa666c3fbcc404f03c68f8fb8ae8..5b297b83d2b5a67290495148bea62051d834e025 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -6,6 +6,7 @@
 #include "io/io.h"
 #include "request_handler.h"
 #include "request_handler_file_write.h"
+#include "request_handler_db_write.h"
 #include "statistics.h"
 
 namespace hidra2 {
@@ -20,6 +21,7 @@ class Request {
     void AddHandler(const RequestHandler*);
     const RequestHandlerList& GetListHandlers() const;
     virtual uint64_t GetDataSize() const;
+    virtual uint64_t GetDataID() const;
     virtual std::string GetFileName() const;
 
     virtual const FileData& GetData() const;
@@ -39,8 +41,10 @@ class RequestFactory {
                                                      SocketDescriptor socket_fd, Error* err) const noexcept;
   private:
     RequestHandlerFileWrite request_handler_filewrite_;
+    RequestHandlerDbWrite request_handler_dbwrite_;
 };
 
 }
 
 #endif //HIDRA2_REQUEST_H
+
diff --git a/receiver/src/request_handler_db_write.cpp b/receiver/src/request_handler_db_write.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b3b73bfb0d8d8f47ca9ae73630ca802c45fa558d
--- /dev/null
+++ b/receiver/src/request_handler_db_write.cpp
@@ -0,0 +1,43 @@
+#include "request_handler_db_write.h"
+#include "request.h"
+#include "receiver_config.h"
+
+namespace hidra2 {
+
+Error RequestHandlerDbWrite::ProcessRequest(const Request& request) const {
+    if (Error err = ConnectToDbIfNeeded() ) {
+        return err;
+    }
+
+    FileInfo file_info;
+    file_info.name = request.GetFileName();
+    file_info.size = request.GetDataSize();
+    file_info.id = request.GetDataID();
+    return db_client__->Insert(file_info, false);
+
+}
+
+RequestHandlerDbWrite::RequestHandlerDbWrite()  {
+    DatabaseFactory factory;
+    Error err;
+    db_client__ = factory.Create(&err);
+}
+
+StatisticEntity RequestHandlerDbWrite::GetStatisticEntity() const {
+    return StatisticEntity::kDatabase;
+}
+
+Error RequestHandlerDbWrite::ConnectToDbIfNeeded() const {
+    if (!connected_to_db) {
+        Error err = db_client__->Connect(GetReceiverConfig()->broker_db_uri, GetReceiverConfig()->broker_db_name,
+                                         kDBCollectionName);
+        if (err) {
+            return err;
+        }
+        connected_to_db = true;
+    }
+    return nullptr;
+}
+
+
+}
\ No newline at end of file
diff --git a/receiver/src/request_handler_db_write.h b/receiver/src/request_handler_db_write.h
new file mode 100644
index 0000000000000000000000000000000000000000..ddd57350abd5cb4bb35c6a20935c479dd252dbf0
--- /dev/null
+++ b/receiver/src/request_handler_db_write.h
@@ -0,0 +1,24 @@
+#ifndef HIDRA2_REQUEST_HANDLER_DB_WRITE_H
+#define HIDRA2_REQUEST_HANDLER_DB_WRITE_H
+
+#include "request_handler.h"
+#include "database/database.h"
+
+#include "io/io.h"
+
+namespace hidra2 {
+
+class RequestHandlerDbWrite final: public RequestHandler {
+  public:
+    RequestHandlerDbWrite();
+    StatisticEntity GetStatisticEntity() const override;
+    Error ProcessRequest(const Request& request) const override;
+    std::unique_ptr<Database> db_client__;
+  private:
+    Error ConnectToDbIfNeeded() const;
+    mutable bool connected_to_db = false;
+};
+
+}
+
+#endif //HIDRA2_REQUEST_HANDLER_DB_WRITE_H
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 7819fe13a934464ce8ddd2fbbb8f8abd90d7403c..c1d5108bfa7b5ac22a1605a03620aae5d5daa6dc 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -17,8 +17,12 @@ Error SetReceiverConfig (const ReceiverConfig& config) {
 
     auto config_string = std::string("{\"MonitorDbAddress\":") + "\"" + config.monitor_db_uri + "\"";
     config_string += "," + std::string("\"MonitorDbName\":") + "\"" + config.monitor_db_name + "\"";
+    config_string += "," + std::string("\"BrokerDbName\":") + "\"" + config.broker_db_name + "\"";
+    config_string += "," + std::string("\"BrokerDbAddress\":") + "\"" + config.broker_db_uri + "\"";
     config_string += "," + std::string("\"ListenPort\":") + std::to_string(config.listen_port);
     config_string += "," + std::string("\"WriteToDisk\":") + (config.write_to_disk ? "true" : "false");
+    config_string += "," + std::string("\"WriteToDb\":") + (config.write_to_db ? "true" : "false");
+
     config_string += "}";
 
     EXPECT_CALL(mock_io, ReadFileToString_t("fname", _)).WillOnce(
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index ea15cede3b0eb2ba3bb0511f4d3381ee575a1454..008b965a59949b4fdae28580caa528aea00996aa 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -53,7 +53,9 @@ TEST_F(ConfigTests, ReadSettings) {
     test_config.monitor_db_name = "db_test";
     test_config.monitor_db_uri = "localhost:8086";
     test_config.write_to_disk = true;
-
+    test_config.write_to_db = true;
+    test_config.broker_db_uri = "localhost:27017";
+    test_config.broker_db_name = "test";
 
     auto err = hidra2::SetReceiverConfig(test_config);
 
@@ -62,8 +64,11 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(config->monitor_db_uri, Eq("localhost:8086"));
     ASSERT_THAT(config->monitor_db_name, Eq("db_test"));
+    ASSERT_THAT(config->broker_db_uri, Eq("localhost:27017"));
+    ASSERT_THAT(config->broker_db_name, Eq("test"));
     ASSERT_THAT(config->listen_port, Eq(4200));
     ASSERT_THAT(config->write_to_disk, true);
+    ASSERT_THAT(config->write_to_db, true);
 
 }
 
diff --git a/receiver/unittests/test_request.cpp b/receiver/unittests/test_request.cpp
index 1d1e175279e9c6f3a2588b6a014c48ad22f9969c..9a213b69bf16a9be2d3ab81055e220105191d574 100644
--- a/receiver/unittests/test_request.cpp
+++ b/receiver/unittests/test_request.cpp
@@ -6,6 +6,8 @@
 #include "../src/request.h"
 #include "../src/request_handler.h"
 #include "../src/request_handler_file_write.h"
+#include "../src/request_handler_db_write.h"
+#include "database/database.h"
 
 #include "mock_statistics.h"
 #include "mock_receiver_config.h"
@@ -40,7 +42,7 @@ using hidra2::StatisticEntity;
 
 using hidra2::ReceiverConfig;
 using hidra2::SetReceiverConfig;
-
+using hidra2::RequestFactory;
 
 namespace {
 
@@ -58,47 +60,6 @@ class MockReqestHandler : public hidra2::RequestHandler {
 
 };
 
-class FactoryTests : public Test {
-  public:
-    hidra2::RequestFactory factory;
-    Error err{nullptr};
-    GenericNetworkRequestHeader generic_request_header;
-    ReceiverConfig config;
-    void SetUp() override {
-        config.write_to_disk = true;
-        SetReceiverConfig(config);
-    }
-    void TearDown() override {
-    }
-};
-
-TEST_F(FactoryTests, ErrorOnWrongCode) {
-    generic_request_header.op_code = hidra2::Opcode::kNetOpcodeUnknownOp;
-    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
-
-    ASSERT_THAT(err, Ne(nullptr));
-}
-
-TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCode) {
-    generic_request_header.op_code = hidra2::Opcode::kNetOpcodeSendData;
-    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
-
-    ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(dynamic_cast<hidra2::Request*>(request.get()), Ne(nullptr));
-    ASSERT_THAT(dynamic_cast<const hidra2::RequestHandlerFileWrite*>(request->GetListHandlers()[0]), Ne(nullptr));
-}
-
-TEST_F(FactoryTests, DoNotAddWriterIfNotWanted) {
-    generic_request_header.op_code = hidra2::Opcode::kNetOpcodeSendData;
-    config.write_to_disk = false;
-    SetReceiverConfig(config);
-
-    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
-    ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(request->GetListHandlers().size(), Eq(0));
-}
-
-
 
 class RequestTests : public Test {
   public:
@@ -212,6 +173,13 @@ TEST_F(RequestTests, GetDataIsNotNullptr) {
 
 }
 
+TEST_F(RequestTests, GetDataID) {
+    auto id = request->GetDataID();
+
+    ASSERT_THAT(id, Eq(data_id_));
+}
+
+
 
 TEST_F(RequestTests, GetDataSize) {
     auto size = request->GetDataSize();
diff --git a/receiver/unittests/test_request_factory.cpp b/receiver/unittests/test_request_factory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ec8b93855ee7104b04ed8b526f282b2806f7deb5
--- /dev/null
+++ b/receiver/unittests/test_request_factory.cpp
@@ -0,0 +1,106 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "unittests/MockIO.h"
+#include "unittests/MockDatabase.h"
+#include "../src/connection.h"
+#include "../src/receiver_error.h"
+#include "../src/request.h"
+#include "../src/request_handler.h"
+#include "../src/request_handler_file_write.h"
+#include "../src/request_handler_db_write.h"
+#include "database/database.h"
+
+#include "mock_statistics.h"
+#include "mock_receiver_config.h"
+
+
+using ::testing::Test;
+using ::testing::Return;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::InSequence;
+using ::testing::SetArgPointee;
+using ::hidra2::Error;
+using ::hidra2::ErrorInterface;
+using ::hidra2::GenericNetworkRequestHeader;
+using ::hidra2::GenericNetworkResponse;
+using ::hidra2::Opcode;
+using ::hidra2::Connection;
+using ::hidra2::MockIO;
+using hidra2::Request;
+using hidra2::MockStatistics;
+
+using hidra2::StatisticEntity;
+
+using hidra2::ReceiverConfig;
+using hidra2::SetReceiverConfig;
+using hidra2::RequestFactory;
+
+namespace {
+
+
+class FactoryTests : public Test {
+  public:
+    RequestFactory factory;
+    Error err{nullptr};
+    GenericNetworkRequestHeader generic_request_header;
+    ReceiverConfig config;
+
+    void SetUp() override {
+        generic_request_header.op_code = hidra2::Opcode::kNetOpcodeSendData;
+        config.write_to_disk = true;
+        config.write_to_db = true;
+        SetReceiverConfig(config);
+    }
+    void TearDown() override {
+    }
+};
+
+TEST_F(FactoryTests, ErrorOnWrongCode) {
+    generic_request_header.op_code = hidra2::Opcode::kNetOpcodeUnknownOp;
+    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
+
+    ASSERT_THAT(err, Ne(nullptr));
+}
+
+TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCode) {
+    generic_request_header.op_code = hidra2::Opcode::kNetOpcodeSendData;
+    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
+
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(dynamic_cast<hidra2::Request*>(request.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<const hidra2::RequestHandlerFileWrite*>(request->GetListHandlers()[0]), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<const hidra2::RequestHandlerDbWrite*>(request->GetListHandlers().back()), Ne(nullptr));
+}
+
+TEST_F(FactoryTests, DoNotAddDiskWriterIfNotWanted) {
+    config.write_to_disk = false;
+
+    SetReceiverConfig(config);
+
+    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(request->GetListHandlers().size(), Eq(1));
+    ASSERT_THAT(dynamic_cast<const hidra2::RequestHandlerDbWrite*>(request->GetListHandlers().back()), Ne(nullptr));
+}
+
+TEST_F(FactoryTests, DoNotAddDbWriterIfNotWanted) {
+    config.write_to_db = false;
+
+    SetReceiverConfig(config);
+
+    auto request = factory.GenerateRequest(generic_request_header, 1, &err);
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(request->GetListHandlers().size(), Eq(1));
+    ASSERT_THAT(dynamic_cast<const hidra2::RequestHandlerFileWrite*>(request->GetListHandlers()[0]), Ne(nullptr));
+}
+
+
+}
diff --git a/receiver/unittests/test_request_handler_db_writer.cpp b/receiver/unittests/test_request_handler_db_writer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..99075925a536429a92458a0c59bc1e731afbf040
--- /dev/null
+++ b/receiver/unittests/test_request_handler_db_writer.cpp
@@ -0,0 +1,194 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "unittests/MockIO.h"
+#include "unittests/MockDatabase.h"
+
+#include "../src/receiver_error.h"
+#include "../src/request.h"
+#include "../src/request_handler.h"
+#include "../src/request_handler_db_write.h"
+#include "common/networking.h"
+#include "../../common/cpp/src/database/mongodb_client.h"
+#include "mock_receiver_config.h"
+#include "common/data_structs.h"
+
+
+using hidra2::FileInfo;
+using ::testing::Test;
+using ::testing::Return;
+using ::testing::ReturnRef;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::SetArgReferee;
+using ::testing::Gt;
+using ::testing::Eq;
+using ::testing::Ne;
+using ::testing::Mock;
+using ::testing::NiceMock;
+using ::testing::InSequence;
+using ::testing::SetArgPointee;
+using ::hidra2::Error;
+using ::hidra2::ErrorInterface;
+using ::hidra2::FileDescriptor;
+using ::hidra2::SocketDescriptor;
+using ::hidra2::MockIO;
+using hidra2::Request;
+using hidra2::RequestHandlerDbWrite;
+using ::hidra2::GenericNetworkRequestHeader;
+
+using hidra2::MockDatabase;
+using hidra2::RequestFactory;
+using hidra2::SetReceiverConfig;
+using hidra2::ReceiverConfig;
+
+
+namespace {
+
+class MockRequest: public Request {
+  public:
+    MockRequest(const GenericNetworkRequestHeader& request_header, SocketDescriptor socket_fd):
+        Request(request_header, socket_fd) {};
+
+    MOCK_CONST_METHOD0(GetFileName, std::string());
+    MOCK_CONST_METHOD0(GetDataSize, uint64_t());
+    MOCK_CONST_METHOD0(GetDataID, uint64_t());
+    MOCK_CONST_METHOD0(GetData, const hidra2::FileData & ());
+};
+
+class DbWriterHandlerTests : public Test {
+  public:
+    RequestHandlerDbWrite handler;
+    NiceMock<MockIO> mock_io;
+    std::unique_ptr<NiceMock<MockRequest>> mock_request;
+    NiceMock<MockDatabase> mock_db;
+    ReceiverConfig config;
+    void SetUp() override {
+        GenericNetworkRequestHeader request_header;
+        request_header.data_id = 2;
+        handler.db_client__ = std::unique_ptr<hidra2::Database> {&mock_db};
+        mock_request.reset(new NiceMock<MockRequest> {request_header, 1});
+    }
+    void TearDown() override {
+        handler.db_client__.release();
+    }
+};
+
+TEST(DBWritewr, HandlerHasCorrectDbFactory) {
+    RequestHandlerDbWrite handler;
+    ASSERT_THAT(dynamic_cast<hidra2::MongoDBClient*>(handler.db_client__.get()), Ne(nullptr));
+}
+
+
+TEST_F(DbWriterHandlerTests, CheckStatisticEntity) {
+    auto entity = handler.GetStatisticEntity();
+    ASSERT_THAT(entity, Eq(hidra2::StatisticEntity::kDatabase));
+}
+
+
+TEST_F(DbWriterHandlerTests, ProcessRequestCallsConnectDbWhenNotConnected) {
+    config.broker_db_name = "test";
+    config.broker_db_uri = "127.0.0.1:27017";
+    SetReceiverConfig(config);
+
+    EXPECT_CALL(mock_db, Connect_t("127.0.0.1:27017", "test", hidra2::kDBCollectionName)).
+    WillOnce(testing::Return(nullptr));
+
+    auto err = handler.ProcessRequest(*mock_request);
+    ASSERT_THAT(err, Eq(nullptr));
+}
+
+TEST_F(DbWriterHandlerTests, ProcessRequestReturnsErrorWhenCannotConnect) {
+
+    EXPECT_CALL(mock_db, Connect_t(_, _, hidra2::kDBCollectionName)).
+    WillOnce(testing::Return(new hidra2::SimpleError("")));
+
+    auto err = handler.ProcessRequest(*mock_request);
+
+    ASSERT_THAT(err, Ne(nullptr));
+
+}
+
+
+TEST_F(DbWriterHandlerTests, ProcessRequestDoesNotCallConnectSecondTime) {
+
+    EXPECT_CALL(mock_db, Connect_t(_, _, hidra2::kDBCollectionName)).
+    WillOnce(testing::Return(nullptr));
+
+    handler.ProcessRequest(*mock_request);
+    handler.ProcessRequest(*mock_request);
+}
+
+MATCHER_P(CompareFileInfo, file, "") {
+    if (arg.size != file.size) return false;
+    if (arg.name != file.name) return false;
+    if (arg.id != file.id) return false;
+
+    return true;
+}
+
+
+
+TEST_F(DbWriterHandlerTests, CallsInsert) {
+
+    EXPECT_CALL(mock_db, Connect_t(_, _, hidra2::kDBCollectionName)).
+    WillOnce(testing::Return(nullptr));
+
+    std::string expected_file_name = "2.bin";
+    uint64_t expected_file_size = 10;
+    uint64_t expected_id = 15;
+    EXPECT_CALL(*mock_request, GetDataSize())
+    .WillOnce(Return(expected_file_size))
+    ;
+
+    EXPECT_CALL(*mock_request, GetFileName())
+    .WillOnce(Return(expected_file_name))
+    ;
+
+    EXPECT_CALL(*mock_request, GetDataID())
+    .WillOnce(Return(expected_id))
+    ;
+
+    FileInfo file_info;
+    file_info.size = expected_file_size;
+    file_info.name = expected_file_name;
+    file_info.id = expected_id;
+
+
+    EXPECT_CALL(mock_db, Insert_t(CompareFileInfo(file_info), _)).
+    WillOnce(testing::Return(nullptr));
+
+    handler.ProcessRequest(*mock_request);
+}
+
+
+/*
+TEST_F(DbWriterHandlerTests, CallsWriteFile) {
+    std::string expected_file_name = "2.bin";
+    uint64_t expected_file_size = 10;
+    EXPECT_CALL(*mock_request, GetDataSize())
+    .WillOnce(Return(expected_file_size))
+    ;
+
+    hidra2::FileData data;
+    EXPECT_CALL(*mock_request, GetData())
+    .WillOnce(ReturnRef(data))
+    ;
+
+    EXPECT_CALL(*mock_request, GetFileName())
+    .WillOnce(Return(expected_file_name))
+    ;
+
+
+    EXPECT_CALL(mock_io, WriteDataToFile_t("files/" + expected_file_name, _, expected_file_size))
+    .WillOnce(
+        Return(hidra2::IOErrorTemplates::kUnknownIOError.Generate().release())
+    );
+
+    auto err = handler.ProcessRequest(*mock_request);
+
+    ASSERT_THAT(err, Eq(hidra2::IOErrorTemplates::kUnknownIOError));
+}
+
+*/
+}
\ No newline at end of file
diff --git a/tests/automatic/CMakeLists.txt b/tests/automatic/CMakeLists.txt
index b0a661944ef8fd479b8d5aa8d4b4c69139995420..f041b5fe837e9ed5a7248cecbb6edad5f63de972 100644
--- a/tests/automatic/CMakeLists.txt
+++ b/tests/automatic/CMakeLists.txt
@@ -20,4 +20,6 @@ add_subdirectory(worker)
 
 add_subdirectory(curl_http_client)
 
-add_subdirectory(producer_receiver)
\ No newline at end of file
+add_subdirectory(producer_receiver)
+
+add_subdirectory(full_chain)
\ No newline at end of file
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/CMakeLists.txt b/tests/automatic/curl_http_client/curl_http_client_command/CMakeLists.txt
index 503b9e91b51343704ccd80b856da664d5c99aab7..3738e79d412d78df63f830582bb415c40510d043 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/CMakeLists.txt
+++ b/tests/automatic/curl_http_client/curl_http_client_command/CMakeLists.txt
@@ -19,7 +19,7 @@ target_link_libraries(${TARGET_NAME} test_common hidra2-worker)
 ################################
 
 #add_test_setup_cleanup(${TARGET_NAME})
-add_integration_test(${TARGET_NAME}  get_google "GET google.com moved 302")
+add_integration_test(${TARGET_NAME}  get_google "GET google.com moved 301")
 add_integration_test(${TARGET_NAME}  get_badaddress "GET google.com/badaddress found 404")
 add_integration_test(${TARGET_NAME}  get_badaddress2 "GET 111 clienterror 404")
 add_integration_test(${TARGET_NAME}  post "POST httpbin.org/post testdata 200")
diff --git a/tests/automatic/full_chain/CMakeLists.txt b/tests/automatic/full_chain/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..24928ac777e7b2c07adb7be8bc14984be2ba0c31
--- /dev/null
+++ b/tests/automatic/full_chain/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(simple_chain)
diff --git a/tests/automatic/full_chain/simple_chain/CMakeLists.txt b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5198c49bb2d94837f017ad38a644c6bf9059ae17
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
@@ -0,0 +1,9 @@
+set(TARGET_NAME full_chain_simple_chain)
+
+################################
+# Testing
+################################
+configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json receiver.json COPYONLY)
+configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/broker_settings.json broker.json COPYONLY)
+
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:receiver-bin> $<TARGET_PROPERTY:hidra2-broker,EXENAME> $<TARGET_FILE:getnext_broker>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
new file mode 100644
index 0000000000000000000000000000000000000000..26289c3e00f338b6561d6206582a470249879c53
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap Cleanup EXIT
+
+broker_database_name=test_run
+monitor_database_name=db_test
+broker_address=127.0.0.1:5005
+
+Cleanup() {
+	echo cleanup
+	rm -rf files
+    kill -9 $receiverid
+    kill -9 $brokerid
+    #kill -9 $producerrid
+    echo "db.dropDatabase()" | mongo ${broker_database_name}
+    influx -execute "drop database ${monitor_database_name}"
+}
+
+influx -execute "create database ${monitor_database_name}"
+
+
+#receiver
+$2 receiver.json &
+sleep 0.3
+receiverid=`echo $!`
+
+#broker
+$3 broker.json &
+sleep 0.3
+brokerid=`echo $!`
+
+
+#producer
+mkdir files
+$1 localhost:4200 100 100 &
+#producerrid=`echo $!`
+sleep 0.1
+
+$4 ${broker_address} ${broker_database_name} 2 | grep "Processed 100 file(s)"
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
new file mode 100644
index 0000000000000000000000000000000000000000..f0951cbc5df9eb2892a6b3b5481e8ca6d5a97f5c
--- /dev/null
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -0,0 +1,37 @@
+REM receiver
+set full_recv_name="%2"
+set short_recv_name="%~nx2"
+start /B "" "%full_recv_name%" receiver.json
+ping 1.0.0.0 -n 1 -w 100 > nul
+
+REM broker
+set full_broker_name="%3"
+set short_broker_name="%~nx3"
+start /B "" "%full_broker_name%" broker.json
+ping 1.0.0.0 -n 1 -w 100 > nul
+
+REM producer
+mkdir files
+start /B "" "%1" localhost:4200 100 100
+ping 1.0.0.0 -n 1 -w 100 > nul
+
+REM worker
+set broker_address="127.0.0.1:5005"
+set broker_database_name="test_run"
+"%4" %broker_address% %broker_database_name% 2 | findstr "Processed 100 file(s)"  || goto :error
+
+
+goto :clean
+
+:error
+call :clean
+exit /b 1
+
+:clean
+Taskkill /IM "%short_recv_name%" /F
+Taskkill /IM "%short_broker_name%" /F
+rmdir /S /Q files
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+echo db.dropDatabase() | %mongo_exe% %broker_database_name%
+
+
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index 3b4b82ec54b48718f5b00412ca0e9a83ddbbdd0e..5eeb022b926ff0efa59dbc3496a5af90c2749ef2 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -1,6 +1,7 @@
 #!/usr/bin/env bash
 
 database_name=db_test
+mongo_database_name=test_run
 
 set -e
 
@@ -11,7 +12,7 @@ Cleanup() {
 	influx -execute "drop database ${database_name}"
     kill $receiverid
 	rm -rf files
-
+    echo "db.dropDatabase()" | mongo ${mongo_database_name}
 }
 
 influx -execute "create database ${database_name}"
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
index d7c69e6a25b6189b0d84220d635a15b386f29449..48d4ff3dc0a304c83e88d3fc659acb39bcfa2014 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
@@ -4,10 +4,13 @@ set -e
 
 trap Cleanup EXIT
 
+database_name=test_run
+
 Cleanup() {
 	echo cleanup
 	rm -rf files
     kill $receiverid
+    echo "db.dropDatabase()" | mongo ${database_name}
 }
 
 nohup $2 receiver.json &>/dev/null &
@@ -18,4 +21,4 @@ mkdir files
 
 $1 localhost:4200 100 1
 
-ls -ln files/0.bin | awk '{ print $5 }'| grep 102400
+ls -ln files/1.bin | awk '{ print $5 }'| grep 102400
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
index ee15242cd4354e178f16d6e4059b635cf01b3db4..8a470b403ca5365ccd54e6fd4be67f649e15a922 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
@@ -11,7 +11,7 @@ mkdir files
 
 ping 1.0.0.0 -n 1 -w 100 > nul
 
-FOR /F "usebackq" %%A IN ('files\0.bin') DO set size=%%~zA
+FOR /F "usebackq" %%A IN ('files\1.bin') DO set size=%%~zA
 
 if %size% NEQ 102400 goto :error
 
@@ -24,5 +24,8 @@ exit /b 1
 :clean
 Taskkill /IM "%short_recv_name%" /F
 rmdir /S /Q files
+SET database_name=test_run
+SET mongo_exe="c:\Program Files\MongoDB\Server\3.6\bin\mongo.exe"
+echo db.dropDatabase() | %mongo_exe% %database_name%
 
 
diff --git a/tests/automatic/settings/receiver.json b/tests/automatic/settings/receiver.json
index e23a556d1cd5a8c03ce0c32b37b7b73161bfa89d..5939e7d82c0811804e36472acafafa2b48d09f10 100644
--- a/tests/automatic/settings/receiver.json
+++ b/tests/automatic/settings/receiver.json
@@ -1,6 +1,9 @@
 {
   "MonitorDbAddress":"localhost:8086",
   "MonitorDbName": "db_test",
+  "BrokerDbAddress":"localhost:27017",
+  "BrokerDbName": "test_run",
   "ListenPort":4200,
-  "WriteToDisk":true
+  "WriteToDisk":true,
+  "WriteToDb":true
 }
\ No newline at end of file
diff --git a/tests/manual/performance_full_chain_simple/broker.json b/tests/manual/performance_full_chain_simple/broker.json
new file mode 100644
index 0000000000000000000000000000000000000000..c45d16f2f7b59b7966ad9d2d406ef530da720a2b
--- /dev/null
+++ b/tests/manual/performance_full_chain_simple/broker.json
@@ -0,0 +1,6 @@
+{
+  "BrokerDbAddress":"localhost:27017",
+  "MonitorDbAddress": "localhost:8086",
+  "MonitorDbName": "db_test",
+  "port":5005
+}
\ No newline at end of file
diff --git a/tests/manual/performance_full_chain_simple/receiver.json b/tests/manual/performance_full_chain_simple/receiver.json
new file mode 100644
index 0000000000000000000000000000000000000000..5939e7d82c0811804e36472acafafa2b48d09f10
--- /dev/null
+++ b/tests/manual/performance_full_chain_simple/receiver.json
@@ -0,0 +1,9 @@
+{
+  "MonitorDbAddress":"localhost:8086",
+  "MonitorDbName": "db_test",
+  "BrokerDbAddress":"localhost:27017",
+  "BrokerDbName": "test_run",
+  "ListenPort":4200,
+  "WriteToDisk":true,
+  "WriteToDb":true
+}
\ No newline at end of file
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..77c63a85c1bb178b0ac11fb9c0779c7020c8b603
--- /dev/null
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap Cleanup EXIT
+
+#clean-up
+Cleanup() {
+set +e
+ssh ${receiver_node} rm -f ${receiver_dir}/files/*
+ssh ${receiver_node} killall receiver
+ssh ${broker_node} killall hidra2-broker
+ssh ${broker_node} docker rm -f -v mongo
+}
+
+#monitoring_setup
+monitor_node=zitpcx27016
+monitor_port=8086
+
+
+# starts receiver on $receiver_node
+# runs producer with various file sizes from $producer_node and measures performance
+
+file_size=10000
+file_num=$((10000000 / $file_size))
+echo filesize: ${file_size}K, filenum: $file_num
+
+# receiver_setup
+receiver_node=max-wgs
+receiver_ip=`resolveip -s ${receiver_node}`
+receiver_port=4201
+receiver_dir=/gpfs/petra3/scratch/yakubov/receiver_tests
+ssh ${receiver_node} mkdir -p ${receiver_dir}
+ssh ${receiver_node} mkdir -p ${receiver_dir}/files
+scp ../../../cmake-build-release/receiver/receiver ${receiver_node}:${receiver_dir}
+cat receiver.json |
+  jq "to_entries |
+       map(if .key == \"MonitorDbAddress\"
+          then . + {value:\"${monitor_node}:${monitor_port}\"}
+          elif .key == \"ListenPort\"
+          then . + {value:${receiver_port}}
+          else .
+          end
+         ) |
+      from_entries" > settings_tmp.json
+scp settings_tmp.json ${receiver_node}:${receiver_dir}/settings.json
+
+#producer_setup
+producer_node=max-display001
+#producer_node=max-wgs
+producer_dir=~/fullchain_tests
+ssh ${producer_node} mkdir -p ${producer_dir}
+scp ../../../cmake-build-release/examples/producer/dummy-data-producer/dummy-data-producer ${producer_node}:${producer_dir}
+
+#broker_setup
+broker_node=max-wgs
+broker_dir=~/fullchain_tests
+cat broker.json |
+  jq "to_entries |
+       map(if .key == \"MonitorDbAddress\"
+          then . + {value:\"${monitor_node}:${monitor_port}\"}
+          else .
+          end
+         ) |
+      from_entries" > settings_tmp.json
+scp settings_tmp.json ${broker_node}:${broker_dir}/broker.json
+rm settings_tmp.json
+scp ../../../cmake-build-release/broker/hidra2-broker ${broker_node}:${broker_dir}
+
+
+#worker_setup
+worker_node=max-display002
+worker_dir=~/fullchain_tests
+nthreads=16
+scp ../../../cmake-build-release/examples/worker/getnext_broker/getnext_broker ${worker_node}:${worker_dir}
+
+#monitoring_start
+ssh ${monitor_node} influx -execute \"create database db_test\"
+#ssh ${monitor_node} docker run -d -p 8086:8086 --name influxdb influxdb
+
+#mongo_start
+ssh ${broker_node} docker run -d -p 27017:27017 --name mongo mongo
+
+#receiver_start
+ssh ${receiver_node} "bash -c 'cd ${receiver_dir}; nohup ./receiver settings.json &> ${receiver_dir}/receiver.log &'"
+sleep 0.3
+
+#broker_start
+ssh ${broker_node} "bash -c 'cd ${broker_dir}; nohup ./hidra2-broker broker.json &> ${broker_dir}/broker.log &'"
+sleep 0.3
+
+#producer_start
+ssh ${producer_node} "bash -c 'cd ${producer_dir}; nohup ./dummy-data-producer ${receiver_ip}:${receiver_port} ${file_size} ${file_num} &> ${producer_dir}/producer.log &'"
+sleep 0.3
+
+#worker_start
+ssh ${worker_node} ${worker_dir}/getnext_broker ${broker_node}:5005 test_run ${nthreads}
+
diff --git a/tests/manual/performance_producer_receiver/receiver.json b/tests/manual/performance_producer_receiver/receiver.json
index e23a556d1cd5a8c03ce0c32b37b7b73161bfa89d..2479a88d54ce29b0e2d08e52ca02652a2c15d592 100644
--- a/tests/manual/performance_producer_receiver/receiver.json
+++ b/tests/manual/performance_producer_receiver/receiver.json
@@ -1,6 +1,9 @@
 {
   "MonitorDbAddress":"localhost:8086",
   "MonitorDbName": "db_test",
+  "BrokerDbAddress":"localhost:27017",
+  "BrokerDbName": "test_run",
   "ListenPort":4200,
-  "WriteToDisk":true
-}
\ No newline at end of file
+  "WriteToDisk":true,
+  "WriteToDb":true
+}
diff --git a/tests/manual/performance_producer_receiver/settings_tmp.json b/tests/manual/performance_producer_receiver/settings_tmp.json
deleted file mode 100644
index 35b3649bcb182f9b0121d9e85cac59e9c710e679..0000000000000000000000000000000000000000
--- a/tests/manual/performance_producer_receiver/settings_tmp.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "MonitorDbAddress": "zitpcx27016:8086",
-  "MonitorDbName": "db_test",
-  "ListenPort": 4201,
-  "WriteToDisk": false
-}
diff --git a/tests/manual/performance_producer_receiver/test.sh b/tests/manual/performance_producer_receiver/test.sh
index 317f32d77331e639f2d1ed564fd5ba4fc31aa70c..862ae2ec85bafd6df51f610336c10aaa2855e7bc 100755
--- a/tests/manual/performance_producer_receiver/test.sh
+++ b/tests/manual/performance_producer_receiver/test.sh
@@ -30,7 +30,6 @@ ssh ${worker_node} mkdir -p ${worker_dir}
 scp ../../../cmake-build-release/receiver/receiver ${service_node}:${service_dir}
 scp ../../../cmake-build-release/examples/producer/dummy-data-producer/dummy-data-producer ${worker_node}:${worker_dir}
 
-
 function do_work {
 cat receiver.json |
   jq "to_entries |
@@ -49,9 +48,11 @@ ssh ${service_node} "bash -c 'cd ${service_dir}; nohup ./receiver settings.json
 sleep 0.3
 for size  in 100 1000 10000
 do
+ssh ${service_node} docker run -d -p 27017:27017 --name mongo mongo
 echo ===================================================================
 ssh ${worker_node} ${worker_dir}/dummy-data-producer ${service_ip}:${service_port} ${size} 1000
 ssh ${service_node} rm -f ${service_dir}/files/*
+ssh ${service_node} docker rm -f -v mongo
 done
 ssh ${service_node} killall receiver
 }
@@ -67,3 +68,4 @@ do_work false
 
 #rm settings_tmp.json
 #ssh ${service_node} docker rm -f influxdb
+
diff --git a/worker/api/cpp/include/worker/data_broker.h b/worker/api/cpp/include/worker/data_broker.h
index e0859dcae75758ec98c58b6aedda5d5db7cc8d0b..a7baaa2a4da104025d5a0c895ae854e99c86361d 100644
--- a/worker/api/cpp/include/worker/data_broker.h
+++ b/worker/api/cpp/include/worker/data_broker.h
@@ -29,6 +29,8 @@ class DataBroker {
     //! Connect to the data source - will scan file folders or connect to the database.
 // TODO: do we need this?
     virtual Error Connect() = 0;
+    //! Set timeout for broker operations. Default - no timeout
+    virtual void SetTimeout(uint64_t timeout_ms) = 0;
     //! Receive next image.
     /*!
       \param info -  where to store image metadata. Can be set to nullptr only image data is needed.
diff --git a/worker/api/cpp/src/folder_data_broker.h b/worker/api/cpp/src/folder_data_broker.h
index f388e5b1fd726b34eae9035670b035ce1695af39..15bc208fda4d7a73f944440ae3d06b0ba71a8799 100644
--- a/worker/api/cpp/src/folder_data_broker.h
+++ b/worker/api/cpp/src/folder_data_broker.h
@@ -15,9 +15,9 @@ class FolderDataBroker final : public hidra2::DataBroker {
     explicit FolderDataBroker(const std::string& source_name);
     Error Connect() override;
     Error GetNext(FileInfo* info, FileData* data) override;
+    void SetTimeout(uint64_t timeout_ms) override {}; // no timeout in this case
 
     std::unique_ptr<hidra2::IO> io__; // modified in testings to mock system calls,otherwise do not touch
-
   private:
     std::string base_path_;
     bool is_connected_;
diff --git a/worker/api/cpp/src/server_data_broker.cpp b/worker/api/cpp/src/server_data_broker.cpp
index 0b739e6e7d720e713c91a8623ea9153d08f4678a..fcbd1d44992f9a7101708e763e659e2ce31cd3ab 100644
--- a/worker/api/cpp/src/server_data_broker.cpp
+++ b/worker/api/cpp/src/server_data_broker.cpp
@@ -1,9 +1,17 @@
 #include "server_data_broker.h"
+
+
+#include <chrono>
+
+
 #include "io/io_factory.h"
 #include "http_client/curl_http_client.h"
 #include "http_client/http_error.h"
 
 
+using std::chrono::high_resolution_clock;
+
+
 namespace hidra2 {
 
 Error HttpCodeToWorkerError(const HttpCode& code) {
@@ -42,20 +50,36 @@ Error ServerDataBroker::Connect() {
     return nullptr;
 }
 
+void ServerDataBroker::SetTimeout(uint64_t timeout_ms) {
+    timeout_ms_ = timeout_ms;
+}
+
 Error ServerDataBroker::GetFileInfoFromServer(FileInfo* info, const std::string& operation) {
     std::string full_uri = server_uri_ + "/database/" + source_name_ + "/" + operation;
     Error err;
     HttpCode code;
-    auto response = httpclient__->Get(full_uri, &code, &err);
 
-    if (err != nullptr) {
-        return err;
-    }
-
-    err = HttpCodeToWorkerError(code);
-    if (err != nullptr) {
-        err->Append(response);
-        return err;
+    std::string response;
+    uint64_t elapsed_ms = 0;
+    while (true) {
+        response = httpclient__->Get(full_uri, &code, &err);
+        if (err != nullptr) {
+            return err;
+        }
+
+        err = HttpCodeToWorkerError(code);
+        if (err == nullptr) break;
+        if (err->GetErrorType() != hidra2::ErrorType::kEndOfFile) {
+            err->Append(response);
+//            return err;
+        }
+
+        if (elapsed_ms >= timeout_ms_) {
+            err->Append("exit on timeout");
+            return err;
+        }
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+        elapsed_ms += 100;
     }
 
     if (!info->SetFromJson(response)) {
diff --git a/worker/api/cpp/src/server_data_broker.h b/worker/api/cpp/src/server_data_broker.h
index 5be1b4236e818fa303cde1829811219bc7e099d9..94a818b071a718fee57b20f51da4f806f2edaeb6 100644
--- a/worker/api/cpp/src/server_data_broker.h
+++ b/worker/api/cpp/src/server_data_broker.h
@@ -15,12 +15,14 @@ class ServerDataBroker final : public hidra2::DataBroker {
     explicit ServerDataBroker(const std::string& server_uri, const std::string& source_name);
     Error Connect() override;
     Error GetNext(FileInfo* info, FileData* data) override;
+    void SetTimeout(uint64_t timeout_ms) override;
     std::unique_ptr<IO> io__; // modified in testings to mock system calls,otherwise do not touch
     std::unique_ptr<HttpClient> httpclient__;
   private:
     Error GetFileInfoFromServer(FileInfo* info, const std::string& operation);
     std::string server_uri_;
     std::string source_name_;
+    uint64_t timeout_ms_ = 0;
 };
 
 }
diff --git a/worker/api/cpp/unittests/test_server_broker.cpp b/worker/api/cpp/unittests/test_server_broker.cpp
index 7987769fdd31e72e715f1959375ec806525e94aa..13d85ba3eaf0d7b80779aec40aaaf57e03f7bb8c 100644
--- a/worker/api/cpp/unittests/test_server_broker.cpp
+++ b/worker/api/cpp/unittests/test_server_broker.cpp
@@ -117,6 +117,20 @@ TEST_F(ServerDataBrokerTests, GetNextReturnsEOFFromHttpClient) {
     ASSERT_THAT(err->GetErrorType(), hidra2::ErrorType::kEndOfFile);
 }
 
+TEST_F(ServerDataBrokerTests, GetNextReturnsEOFFromHttpClientUntilTimeout) {
+    EXPECT_CALL(mock_http_client, Get_t(_, _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
+                SetArgPointee<1>(HttpCode::NoContent),
+                SetArgPointee<2>(nullptr),
+                Return("")));
+
+    data_broker->SetTimeout(100);
+    auto err = data_broker->GetNext(&info, nullptr);
+
+    ASSERT_THAT(err->Explain(), HasSubstr(hidra2::WorkerErrorMessage::kNoData));
+    ASSERT_THAT(err->GetErrorType(), hidra2::ErrorType::kEndOfFile);
+}
+
+
 
 FileInfo CreateFI() {
     FileInfo fi;
diff --git a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
index cd9f6991b7b0284efe3761217db56baebeeb43fe..5746bce28e8ac7b1e2aba9199e1eefe8e85f690c 100644
--- a/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
+++ b/worker/tools/folder_to_db/unittests/test_folder_to_db.cpp
@@ -9,6 +9,7 @@
 #include "database/database.h"
 
 #include "common/data_structs.h"
+#include "unittests/MockDatabase.h"
 
 #include "../src/folder_db_importer.h"
 
@@ -26,18 +27,7 @@ using ::testing::NiceMock;
 using ::testing::Ref;
 using ::testing::Return;
 
-using hidra2::FolderToDbImporter;
-using hidra2::Database;
-using hidra2::DatabaseFactory;
-using hidra2::IO;
-using hidra2::kDBCollectionName;
-using hidra2::FileInfos;
-using hidra2::FileInfo;
-using hidra2::MockIO;
-using hidra2::Error;
-using hidra2::TextError;
-using hidra2::SimpleError;
-
+using namespace hidra2;
 
 
 namespace {
@@ -45,12 +35,12 @@ namespace {
 
 TEST(FolderDBConverter, SetCorrectIO) {
     FolderToDbImporter converter{};
-    ASSERT_THAT(dynamic_cast<hidra2::SystemIO*>(converter.io__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<SystemIO*>(converter.io__.get()), Ne(nullptr));
 }
 
 TEST(FolderDBConverter, SetCorrectDBFactory) {
     FolderToDbImporter converter{};
-    ASSERT_THAT(dynamic_cast<hidra2::DatabaseFactory*>(converter.db_factory__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<DatabaseFactory*>(converter.db_factory__.get()), Ne(nullptr));
 }
 
 TEST(FolderDBConverter, SetNTasksCorrectly) {
@@ -70,29 +60,6 @@ TEST(FolderDBConverter, SetNTasksCorrectly) {
 }
 
 
-class MockDatabase : public Database {
-  public:
-    Error Connect(const std::string& address, const std::string& database,
-                  const std::string& collection ) override {
-        return Error{Connect_t(address, database, collection)};
-
-    }
-    Error Insert(const FileInfo& file, bool ignore_duplicates) const override {
-        return Error{Insert_t(file, ignore_duplicates)};
-    }
-
-    MOCK_METHOD3(Connect_t, SimpleError * (const std::string&, const std::string&, const std::string&));
-    MOCK_CONST_METHOD2(Insert_t, SimpleError * (const FileInfo&, bool));
-
-    // stuff to test db destructor is called and avoid "uninteresting call" messages
-    MOCK_METHOD0(Die, void());
-    virtual ~MockDatabase() override {
-        if (check_destructor)
-            Die();
-    }
-    bool check_destructor{false};
-};
-
 class MockDatabaseFactory : public DatabaseFactory {
   public:
     std::vector<NiceMock<MockDatabase>*> db;