diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2d5eafefdbd9fef7adbf9073f039828fa77af421..1a5ec00419269b0849b8be9e0285775a73271113 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,11 @@
-## 20.12.0 (in progress)
+## 20.12.0
 
 FEATURES
 * implemented possibility to send data without writing to database (no need of consecutive indexes, etc. but will not be able to consume such data)
* allow to return incomplete datasets (without error if one sets minimum dataset size, otherwise with "partial data" error)
 
IMPROVEMENTS
-* Consumer API - change behavior of GetLast/get_last - do not set current pointer after call to the last image
+* Consumer API - change behavior of GetLast/get_last - do not change current pointer after call
 * Consumer API - add interrupt_current_operation to allow interrupting (from a separate thread) long consumer operation  
 * Producer API - return original data in callback payload.  
 * Producer API - allow to set queue limits (number of pending requests and/or max memory), reject new requests if reached the limits  
@@ -14,7 +14,23 @@ FEATURES
 BREAKING CHANGES
 * Consumer API - get_next_dataset, get_last_dataset, get_dataset_by_id return dictionary with 'id','expected_size','content' fields, not tuple (id,content) as before
 * Consumer API - remove group_id argument from get_last/get_by_id/get_last_dataset/get_dataset_by_id functions
-* Producer API - changed meaning of subsets (subset_id replaced with id_in_subset and this means now id of the image within a subset (e.g. module number for multi-module detector)), file_id is now a global id of a multi-set data (i.g. multi-image id) 
+* Producer API - changed meaning of subsets (subset_id replaced with dataset_substream, which is now the id of the message within a dataset (e.g. module number for a multi-module detector)); message_id is now a global id of the multi-set data (e.g. a multi-image id)
+    #### renaming - general
+* stream -> data_source, substream -> stream
+* use milliseconds everywhere for timeouts/delays
+* use the term `message` for the blob of information we send around; rename related structs, parameters, ...
+* C++ - get rid of duplicate functions with default stream
+    #### renaming - Producer API
+* SendData/send_data -> Send/send    
+* SendXX/send_xx -> swap parameters (stream to the end)
+* id_in_subset -> dataset_substream
+* subset_size -> dataset_size (and in general replace subset with dataset)
+    #### renaming - Consumer API
+* broker -> consumer
+* SetLastReadMarker/set_lastread_marker -> swap arguments
+* GetUnacknowledgedTupleIds/get_unacknowledged_tuple_ids -> GetUnacknowledgedMessages/get_unacknowledged_messages
+* GetLastAcknowledgedTulpeId/get_last_acknowledged_tuple_id -> GetLastAcknowledgedMessage/get_last_acknowledged_message
+* GetUnacknowledgedMessages -> swap parameters (stream to the end)
 
 BUG FIXES
* fix memory leak bug in Python consumer library (led to problems when creating many consumer instances)
@@ -23,14 +39,14 @@ BUG FIXES
 ## 20.09.1
 
 FEATURES
-* New function GetLastSubstream/last_stream in Producer API - returns info for a substream which was created last 
+* New function GetLastStream/last_stream in Producer API - returns info for the stream that was created last
 
 IMPROVEMENTS
-* Each data tuple automatically gets a timestamp (nanoseconds from Linux epoch) at the moment it is being inserted to a database 
-* GetSubstreamList/get_substream_list returns now sorted (by timestamp of the earliest data tuple) list of substreams. Parameter `from` allows to limit the list
+* Each message automatically gets a timestamp (nanoseconds from Linux epoch) at the moment it is inserted into the database
+* GetStreamList/get_stream_list now returns a list of streams sorted by the timestamp of their earliest message. The parameter `from` allows limiting the list
 
 BREAKING CHANGES
-* GetSubstreamList/get_substream_list returns now not an array of strings, but array of StreamInfos/dictionaries
+* GetStreamList/get_stream_list now returns an array of StreamInfos/dictionaries instead of an array of strings
 
 ## 20.09.0
 
@@ -70,21 +86,21 @@ IMPROVEMENTS
 
 ## 20.06.0
 FEATURES
-* implemented acknowledeges - one can acknowledge a data tuple, get last acknowledged tuple id, get list of unacknowledged tuple ids
-* implement getting substream info (contains last id) by producer client (not need to have consumer client)
+* implemented acknowledgements - one can acknowledge a message, get the last acknowledged message id, get the list of unacknowledged message ids
+* implement getting stream info (contains last id) by the producer client (no need to have a consumer client)
 
 IMPROVEMENTS
-* change behavior when trying to get data from a substream that does not exist - return EndOfStream instead of WrongInput
-* change behavior of GetLastXX/get_lastXX functions - current pointer is not being set to the end of a substream after this command anymore
-* substream name added to producer callback output for Python
+* change behavior when trying to get data from a stream that does not exist - return EndOfStream instead of WrongInput
+* change behavior of GetLastXX/get_lastXX functions - current pointer is not being set to the end of a stream after this command anymore
+* stream name added to producer callback output for Python
 * added simple C++ examples
 
 BUG FIXES
-* check data tuple ids should be positive
+* check that message ids are positive
 
 ## 20.03.0
 FEATURES
-* introduced substreams for producer/consumer
+* introduced streams for producer/consumer
 * introduced timeout for producer requests
 * producer accepts "auto" for beamtime, will automatically select a current one for a given beamline
 * introduced file transfer service - possibility for consumer clients to receive data also in case filesystem is inaccessible
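Review note: the StreamInfo records that GetStreamList/get_stream_list now returns are defined in broker/src/asapo_broker/database/mongodb_streams.go below. A minimal sketch of how one such record serializes; the stream name and timestamp are made-up illustration values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors StreamInfo from mongodb_streams.go.
type StreamInfo struct {
	Name      string `json:"name"`
	Timestamp int64  `json:"timestampCreated"`
}

func main() {
	// Hypothetical stream entry: name and timestamp are illustration values.
	si := StreamInfo{Name: "scan1", Timestamp: 1600000000000000000}
	b, _ := json.Marshal(si)
	fmt.Println(string(b)) // {"name":"scan1","timestampCreated":1600000000000000000}
}
```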
diff --git a/authorizer/src/asapo_authorizer/server/authorize.go b/authorizer/src/asapo_authorizer/server/authorize.go
index 394b652a7c3c45b7cef75545c3ebb235e2839122..edcf0b703767f142d746a56235155bef6b942e91 100644
--- a/authorizer/src/asapo_authorizer/server/authorize.go
+++ b/authorizer/src/asapo_authorizer/server/authorize.go
@@ -13,7 +13,7 @@ import (
 type SourceCredentials struct {
 	BeamtimeId string
 	Beamline   string
-	Stream     string
+	DataSource     string
 	Token      string
 	Type 	   string
 }
@@ -30,8 +30,8 @@ func getSourceCredentials(request authorizationRequest) (SourceCredentials, erro
 		return SourceCredentials{}, errors.New("cannot get source credentials from " + request.SourceCredentials)
 	}
 	creds := SourceCredentials{vals[1], vals[2], vals[3], vals[4],vals[0]}
-	if creds.Stream == "" {
-		creds.Stream = "detector"
+	if creds.DataSource == "" {
+		creds.DataSource = "detector"
 	}
 
 	if creds.Beamline == "" {
@@ -124,7 +124,7 @@ func findBeamtimeMetaFromBeamline(beamline string) (beamtimeMeta, error) {
 func alwaysAllowed(creds SourceCredentials) (beamtimeMeta, bool) {
 	for _, pair := range settings.AlwaysAllowedBeamtimes {
 		if pair.BeamtimeId == creds.BeamtimeId {
-			pair.Stream = creds.Stream
+			pair.DataSource = creds.DataSource
 			pair.Type = creds.Type
 			return pair, true
 		}
@@ -198,7 +198,7 @@ func findMeta(creds SourceCredentials) (beamtimeMeta, error) {
 		return beamtimeMeta{}, err
 	}
 
-	meta.Stream = creds.Stream
+	meta.DataSource = creds.DataSource
 	meta.Type = creds.Type
 
 	return meta, nil
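Review note: the credentials string keeps its five %-separated fields, now read as type%beamtime_id%beamline%data_source%token, and an empty data_source still falls back to "detector". A standalone sketch of that parsing, assuming a plain strings.SplitN; the real parsing lives in getSourceCredentials:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type SourceCredentials struct {
	BeamtimeId string
	Beamline   string
	DataSource string
	Token      string
	Type       string
}

// parseCreds is a hypothetical stand-in for getSourceCredentials.
// Input format: "type%beamtime_id%beamline%data_source%token".
func parseCreds(s string) (SourceCredentials, error) {
	vals := strings.SplitN(s, "%", 5)
	if len(vals) != 5 {
		return SourceCredentials{}, errors.New("cannot get source credentials from " + s)
	}
	creds := SourceCredentials{vals[1], vals[2], vals[3], vals[4], vals[0]}
	if creds.DataSource == "" {
		creds.DataSource = "detector" // empty data_source falls back to the detector default
	}
	return creds, nil
}

func main() {
	creds, _ := parseCreds("processed%asapo_test%auto%%token")
	fmt.Printf("%+v\n", creds) // DataSource comes back as "detector"
}
```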
diff --git a/authorizer/src/asapo_authorizer/server/authorize_test.go b/authorizer/src/asapo_authorizer/server/authorize_test.go
index d3b8e36294e79e8c99a39c16a128135ba64b4671..4085b1b5b473ebbae4a06998e73e509ddee12c9b 100644
--- a/authorizer/src/asapo_authorizer/server/authorize_test.go
+++ b/authorizer/src/asapo_authorizer/server/authorize_test.go
@@ -58,16 +58,16 @@ var credTests = [] struct {
 	ok bool
 	message string
 } {
-	{"processed%asapo_test%auto%%", SourceCredentials{"asapo_test","auto","detector","","processed"},true,"auto beamline, stream and no token"},
-	{"processed%asapo_test%auto%%token", SourceCredentials{"asapo_test","auto","detector","token","processed"},true,"auto beamline, stream"},
-	{"processed%asapo_test%auto%stream%", SourceCredentials{"asapo_test","auto","stream","","processed"},true,"auto beamline, no token"},
-	{"processed%asapo_test%auto%stream%token", SourceCredentials{"asapo_test","auto","stream","token","processed"},true,"auto beamline,stream, token"},
-	{"processed%asapo_test%beamline%stream%token", SourceCredentials{"asapo_test","beamline","stream","token","processed"},true,"all set"},
-	{"processed%auto%beamline%stream%token", SourceCredentials{"auto","beamline","stream","token","processed"},true,"auto beamtime"},
-	{"raw%auto%auto%stream%token", SourceCredentials{},false,"auto beamtime and beamline"},
-	{"raw%%beamline%stream%token", SourceCredentials{"auto","beamline","stream","token","raw"},true,"empty beamtime"},
-	{"raw%asapo_test%%stream%token", SourceCredentials{"asapo_test","auto","stream","token","raw"},true,"empty bealine"},
-	{"raw%%%stream%token", SourceCredentials{},false,"both empty"},
+	{"processed%asapo_test%auto%%", SourceCredentials{"asapo_test","auto","detector","","processed"},true,"auto beamline, source and no token"},
+	{"processed%asapo_test%auto%%token", SourceCredentials{"asapo_test","auto","detector","token","processed"},true,"auto beamline, source"},
+	{"processed%asapo_test%auto%source%", SourceCredentials{"asapo_test","auto","source","","processed"},true,"auto beamline, no token"},
+	{"processed%asapo_test%auto%source%token", SourceCredentials{"asapo_test","auto","source","token","processed"},true,"auto beamline,source, token"},
+	{"processed%asapo_test%beamline%source%token", SourceCredentials{"asapo_test","beamline","source","token","processed"},true,"all set"},
+	{"processed%auto%beamline%source%token", SourceCredentials{"auto","beamline","source","token","processed"},true,"auto beamtime"},
+	{"raw%auto%auto%source%token", SourceCredentials{},false,"auto beamtime and beamline"},
+	{"raw%%beamline%source%token", SourceCredentials{"auto","beamline","source","token","raw"},true,"empty beamtime"},
+	{"raw%asapo_test%%source%token", SourceCredentials{"asapo_test","auto","source","token","raw"},true,"empty beamline"},
+	{"raw%%%source%token", SourceCredentials{},false,"both empty"},
 }
 
 func TestSplitCreds(t *testing.T) {
@@ -150,46 +150,46 @@ var authTests = [] struct {
 	source_type string
 	beamtime_id string
 	beamline string
-	stream string
+	dataSource string
 	token string
 	originHost string
 	status int
 	message string
 	answer string
 }{
-	{"processed","test","auto","stream", prepareToken("test"),"127.0.0.2",http.StatusOK,"user stream with correct token",
-		`{"beamtimeId":"test","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
-	{"processed","test_online","auto","stream", prepareToken("test_online"),"127.0.0.1",http.StatusOK,"with online path, processed type",
-		`{"beamtimeId":"test_online","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"","source-type":"processed"}`},
-	{"processed","test1","auto","stream", prepareToken("test1"),"127.0.0.1",http.StatusUnauthorized,"correct token, beamtime not found",
+	{"processed","test","auto","dataSource", prepareToken("test"),"127.0.0.2",http.StatusOK,"user source with correct token",
+		`{"beamtimeId":"test","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
+	{"processed","test_online","auto","dataSource", prepareToken("test_online"),"127.0.0.1",http.StatusOK,"with online path, processed type",
+		`{"beamtimeId":"test_online","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"","source-type":"processed"}`},
+	{"processed","test1","auto","dataSource", prepareToken("test1"),"127.0.0.1",http.StatusUnauthorized,"correct token, beamtime not found",
 		""},
-	{"processed","test","auto","stream", prepareToken("wrong"),"127.0.0.1",http.StatusUnauthorized,"user stream with wrong token",
+	{"processed","test","auto","dataSource", prepareToken("wrong"),"127.0.0.1",http.StatusUnauthorized,"user source with wrong token",
 		""},
-	{"processed","test","bl1","stream", prepareToken("test"),"127.0.0.1",http.StatusOK,"correct beamline given",
-		`{"beamtimeId":"test","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
-		{"processed","test","bl2","stream", prepareToken("test"),"127.0.0.1",http.StatusUnauthorized,"incorrect beamline given",
+	{"processed","test","bl1","dataSource", prepareToken("test"),"127.0.0.1",http.StatusOK,"correct beamline given",
+		`{"beamtimeId":"test","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
+		{"processed","test","bl2","dataSource", prepareToken("test"),"127.0.0.1",http.StatusUnauthorized,"incorrect beamline given",
 		""},
-	{"processed","auto","p07", "stream",prepareToken("bl_p07"),"127.0.0.1",http.StatusOK,"beamtime found",
-		`{"beamtimeId":"11111111","beamline":"p07","stream":"stream","core-path":"asap3/petra3/gpfs/p07/2020/data/11111111","beamline-path":"","source-type":"processed"}`},
-	{"processed","auto","p07", "stream",prepareToken("bl_p06"),"127.0.0.1",http.StatusUnauthorized,"wrong token",
+	{"processed","auto","p07", "dataSource",prepareToken("bl_p07"),"127.0.0.1",http.StatusOK,"beamtime found",
+		`{"beamtimeId":"11111111","beamline":"p07","dataSource":"dataSource","core-path":"asap3/petra3/gpfs/p07/2020/data/11111111","beamline-path":"","source-type":"processed"}`},
+	{"processed","auto","p07", "dataSource",prepareToken("bl_p06"),"127.0.0.1",http.StatusUnauthorized,"wrong token",
 		""},
-	{"processed","auto","p08", "stream",prepareToken("bl_p08"),"127.0.0.1",http.StatusUnauthorized,"beamtime not found",
+	{"processed","auto","p08", "dataSource",prepareToken("bl_p08"),"127.0.0.1",http.StatusUnauthorized,"beamtime not found",
 		""},
-	{"raw","test_online","auto","stream", prepareToken("test_online"),"127.0.0.1",http.StatusOK,"raw type",
-		`{"beamtimeId":"test_online","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"./bl1/current","source-type":"raw"}`},
-	{"raw","test_online","auto","stream", "","127.0.0.1",http.StatusOK,"raw type",
-		`{"beamtimeId":"test_online","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"./bl1/current","source-type":"raw"}`},
- 	{"raw","auto","p07","stream", "","127.0.0.1",http.StatusOK,"raw type, auto beamtime",
-		`{"beamtimeId":"11111111","beamline":"p07","stream":"stream","core-path":"asap3/petra3/gpfs/p07/2020/data/11111111","beamline-path":"./p07/current","source-type":"raw"}`},
+	{"raw","test_online","auto","dataSource", prepareToken("test_online"),"127.0.0.1",http.StatusOK,"raw type",
+		`{"beamtimeId":"test_online","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"./bl1/current","source-type":"raw"}`},
+	{"raw","test_online","auto","dataSource", "","127.0.0.1",http.StatusOK,"raw type",
+		`{"beamtimeId":"test_online","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test_online","beamline-path":"./bl1/current","source-type":"raw"}`},
+ 	{"raw","auto","p07","dataSource", "","127.0.0.1",http.StatusOK,"raw type, auto beamtime",
+		`{"beamtimeId":"11111111","beamline":"p07","dataSource":"dataSource","core-path":"asap3/petra3/gpfs/p07/2020/data/11111111","beamline-path":"./p07/current","source-type":"raw"}`},
	{"raw","auto","p07","noldap", "","127.0.0.1",http.StatusNotFound,"no connection to ldap",
 		""},
-	{"raw","test_online","auto","stream", "","127.0.0.2",http.StatusUnauthorized,"raw type, wrong origin host",
+	{"raw","test_online","auto","dataSource", "","127.0.0.2",http.StatusUnauthorized,"raw type, wrong origin host",
 		""},
-	{"raw","test","auto","stream", prepareToken("test"),"127.0.0.1",http.StatusUnauthorized,"raw when not online",
+	{"raw","test","auto","dataSource", prepareToken("test"),"127.0.0.1",http.StatusUnauthorized,"raw when not online",
 		""},
-	{"processed","test","auto","stream", "","127.0.0.1:1001",http.StatusOK,"processed without token",
-		`{"beamtimeId":"test","beamline":"bl1","stream":"stream","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
-	{"processed","test","auto","stream", "","127.0.0.2",http.StatusUnauthorized,"processed without token, wrong host",
+	{"processed","test","auto","dataSource", "","127.0.0.1:1001",http.StatusOK,"processed without token",
+		`{"beamtimeId":"test","beamline":"bl1","dataSource":"dataSource","core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}`},
+	{"processed","test","auto","dataSource", "","127.0.0.2",http.StatusUnauthorized,"processed without token, wrong host",
 		""},
 }
 
@@ -222,7 +222,7 @@ func TestAuthorize(t *testing.T) {
 				bl = "bl1"
 			}
 			expected_filter:="a3"+bl+"-hosts"
-			if test.stream == "noldap" {
+			if test.dataSource == "noldap" {
 				err := &common.ServerError{utils.StatusServiceUnavailable,""}
 				mockClient.On("GetAllowedIpsForBeamline", expected_uri, expected_base,expected_filter).Return([]string{}, err)
 			} else {
@@ -230,7 +230,7 @@ func TestAuthorize(t *testing.T) {
 			}
 		}
 
-		request :=  makeRequest(authorizationRequest{test.source_type+"%"+test.beamtime_id+"%"+test.beamline+"%"+test.stream+"%"+test.token,test.originHost})
+		request :=  makeRequest(authorizationRequest{test.source_type+"%"+test.beamtime_id+"%"+test.beamline+"%"+test.dataSource+"%"+test.token,test.originHost})
 		w := doPostRequest("/authorize",request)
 
 		body, _ := ioutil.ReadAll(w.Body)
diff --git a/authorizer/src/asapo_authorizer/server/server.go b/authorizer/src/asapo_authorizer/server/server.go
index e5f7518738ecce6cc70722de09161b39369f698e..7dc7aca8c467718c6324eb55e131b2da8c8defb7 100644
--- a/authorizer/src/asapo_authorizer/server/server.go
+++ b/authorizer/src/asapo_authorizer/server/server.go
@@ -8,7 +8,7 @@ import (
 type  beamtimeMeta struct {
 	BeamtimeId string  `json:"beamtimeId"`
 	Beamline string     `json:"beamline"`
-	Stream string       `json:"stream"`
+	DataSource string       `json:"dataSource"`
 	OfflinePath string `json:"core-path"`
 	OnlinePath string `json:"beamline-path"`
 	Type string `json:"source-type"`
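Review note: with the renamed field, a successful authorization reply serializes as the tests above expect. A sketch that re-declares beamtimeMeta locally to show the JSON shape; the values are taken from the test table:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local re-declaration of beamtimeMeta, with the renamed dataSource field.
type beamtimeMeta struct {
	BeamtimeId  string `json:"beamtimeId"`
	Beamline    string `json:"beamline"`
	DataSource  string `json:"dataSource"`
	OfflinePath string `json:"core-path"`
	OnlinePath  string `json:"beamline-path"`
	Type        string `json:"source-type"`
}

func main() {
	meta := beamtimeMeta{"test", "bl1", "dataSource", "./tf/gpfs/bl1/2019/data/test", "", "processed"}
	b, _ := json.Marshal(meta)
	fmt.Println(string(b))
	// {"beamtimeId":"test","beamline":"bl1","dataSource":"dataSource",
	//  "core-path":"./tf/gpfs/bl1/2019/data/test","beamline-path":"","source-type":"processed"}
}
```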
diff --git a/broker/src/asapo_broker/database/database.go b/broker/src/asapo_broker/database/database.go
index 2ec1142d4dde6c50ea2557dff8ea0af6401d939c..0bb12f25217b93f03e3fe4556e6c2c060864d5b1 100644
--- a/broker/src/asapo_broker/database/database.go
+++ b/broker/src/asapo_broker/database/database.go
@@ -22,7 +22,7 @@ type Agent interface {
 
 type DBSettings struct {
 	ReadFromInprocessPeriod int
-	UpdateSubstreamCachePeriodMs int
+	UpdateStreamCachePeriodMs int
 }
 
 type DBError struct {
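Review note: the renamed setting is a period in milliseconds, while the stream cache compares UnixNano timestamps, hence the *1e6 factor in tryGetFromCache below. A minimal sketch of that expiry check; the 100 ms period is an illustration value (the tests use 10 and 1000):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// UpdateStreamCachePeriodMs is in milliseconds; cache timestamps are
	// UnixNano, so the comparison converts via *1e6 (mirrors tryGetFromCache).
	updatePeriodMs := 100 // illustration value
	lastUpdated := time.Now().UnixNano()
	time.Sleep(200 * time.Millisecond)
	expired := lastUpdated < time.Now().UnixNano()-int64(updatePeriodMs)*1e6
	fmt.Println(expired) // true: 200 ms elapsed, the 100 ms period is over
}
```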
diff --git a/broker/src/asapo_broker/database/mongodb.go b/broker/src/asapo_broker/database/mongodb.go
index 48fe7150aa3dbcf5faaedf055c5b6a785d5ee503..6683be24c80d02489b474c8af254d8493cfc38a8 100644
--- a/broker/src/asapo_broker/database/mongodb.go
+++ b/broker/src/asapo_broker/database/mongodb.go
@@ -35,14 +35,14 @@ type InProcessingRecord struct {
 	ID       int `bson:"_id" json:"_id"`
 	MaxResendAttempts int `bson:"maxResendAttempts" json:"maxResendAttempts"`
 	ResendAttempts int `bson:"resendAttempts" json:"resendAttempts"`
-	DelaySec  int64 `bson:"delaySec" json:"delaySec"`
+	DelayMs  int64 `bson:"delayMs" json:"delayMs"`
 }
 
 type NegAckParamsRecord struct {
 	ID       int `bson:"_id" json:"_id"`
 	MaxResendAttempts int `bson:"maxResendAttempts" json:"maxResendAttempts"`
 	ResendAttempts int `bson:"resendAttempts" json:"resendAttempts"`
-	DelaySec  int64 `bson:"delaySec" json:"delaySec"`
+	DelayMs  int64 `bson:"delayMs" json:"delayMs"`
 }
 
 
@@ -69,8 +69,8 @@ const no_session_msg = "database client not created"
 const wrong_id_type = "wrong id type"
 const already_connected_msg = "already connected"
 
-const finish_substream_keyword = "asapo_finish_substream"
-const no_next_substream_keyword = "asapo_no_next"
+const finish_stream_keyword = "asapo_finish_stream"
+const no_next_stream_keyword = "asapo_no_next"
 
 var dbSessionLock sync.Mutex
 
@@ -168,7 +168,7 @@ func (db *Mongodb) getMaxIndex(request Request, returnIncompete bool) (max_id in
 		if request.MinDatasetSize>0 {
 			q = bson.M{"size": bson.M{"$gte": request.MinDatasetSize}}
 		} else {
-			q = bson.M{"$expr": bson.M{"$eq": []interface{}{"$size", bson.M{"$size": "$images"}}}}
+			q = bson.M{"$expr": bson.M{"$eq": []interface{}{"$size", bson.M{"$size": "$messages"}}}}
 		}
 	} else {
 		q = nil
@@ -230,13 +230,13 @@ func (db *Mongodb) incrementField(request Request, max_ind int, res interface{})
 	return nil
 }
 
-func encodeAnswer(id, id_max int, next_substream string) string {
+func encodeAnswer(id, id_max int, next_stream string) string {
 	var r = struct {
 		Op             string `json:"op"`
 		Id             int    `json:"id"`
 		Id_max         int    `json:"id_max"`
-		Next_substream string `json:"next_substream"`
-	}{"get_record_by_id", id, id_max, next_substream}
+		Next_stream    string `json:"next_stream"`
+	}{"get_record_by_id", id, id_max, next_stream}
 	answer, _ := json.Marshal(&r)
 	return string(answer)
 }
@@ -257,13 +257,13 @@ func (db *Mongodb) getRecordByIDRow(request Request, id, id_max int) ([]byte, er
 
 	partialData := false
 	if request.DatasetOp {
-		imgs,ok1 :=res["images"].(primitive.A)
+		messages,ok1 :=res["messages"].(primitive.A)
 		expectedSize,ok2 := utils.InterfaceToInt64(res["size"])
 		if !ok1 || !ok2 {
 			return nil, &DBError{utils.StatusTransactionInterrupted, "getRecordByIDRow: cannot parse database response" }
 		}
-		nImages := len(imgs)
-		if (request.MinDatasetSize==0 && int64(nImages)!=expectedSize) || (request.MinDatasetSize==0 && nImages<request.MinDatasetSize) {
+		nMessages := len(messages)
+		if (request.MinDatasetSize==0 && int64(nMessages)!=expectedSize) || (request.MinDatasetSize>0 && nMessages<request.MinDatasetSize) {
 			partialData = true
 		}
 	}
@@ -321,7 +321,7 @@ func (db *Mongodb) negAckRecord(request Request) ([]byte, error) {
 	input := struct {
 		Id int
 		Params struct {
-			DelaySec int
+			DelayMs int
 		}
 	}{}
 
@@ -330,7 +330,7 @@ func (db *Mongodb) negAckRecord(request Request) ([]byte, error) {
 		return nil, &DBError{utils.StatusWrongInput, err.Error()}
 	}
 
-	err =  db.InsertRecordToInprocess(request.DbName,inprocess_collection_name_prefix+request.GroupId,input.Id,input.Params.DelaySec, 1)
+	err =  db.InsertRecordToInprocess(request.DbName,inprocess_collection_name_prefix+request.GroupId,input.Id,input.Params.DelayMs, 1)
 	return []byte(""), err
 }
 
@@ -361,7 +361,7 @@ func (db *Mongodb) checkDatabaseOperationPrerequisites(request Request) error {
 	}
 
 	if len(request.DbName) == 0 || len(request.DbCollectionName) == 0 {
-		return &DBError{utils.StatusWrongInput, "beamtime_id ans substream must be set"}
+		return &DBError{utils.StatusWrongInput, "beamtime_id and stream must be set"}
 	}
 
 	return nil
@@ -386,18 +386,18 @@ func (db *Mongodb) getCurrentPointer(request Request) (LocationPointer, int, err
 	return curPointer, max_ind, nil
 }
 
-func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delaySec int,nResendAttempts int) (int, error) {
+func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delayMs int,nResendAttempts int) (int, error) {
 	var res InProcessingRecord
 	opts := options.FindOneAndUpdate().SetUpsert(false).SetReturnDocument(options.After)
-	tNow := time.Now().Unix()
+	tNow := time.Now().UnixNano()
  	var update bson.M
 	if nResendAttempts==0 {
-		update = bson.M{"$set": bson.M{"delaySec": tNow + int64(delaySec) ,"maxResendAttempts":math.MaxInt32}, "$inc": bson.M{"resendAttempts": 1}}
+		update = bson.M{"$set": bson.M{"delayMs": tNow + int64(delayMs*1e6) ,"maxResendAttempts":math.MaxInt32}, "$inc": bson.M{"resendAttempts": 1}}
 	} else {
-		update = bson.M{"$set": bson.M{"delaySec": tNow + int64(delaySec) ,"maxResendAttempts":nResendAttempts}, "$inc": bson.M{"resendAttempts": 1}}
+		update = bson.M{"$set": bson.M{"delayMs": tNow + int64(delayMs*1e6) ,"maxResendAttempts":nResendAttempts}, "$inc": bson.M{"resendAttempts": 1}}
 	}
 
-	q := bson.M{"delaySec": bson.M{"$lte": tNow},"$expr": bson.M{"$lt": []string{"$resendAttempts","$maxResendAttempts"}}}
+	q := bson.M{"delayMs": bson.M{"$lte": tNow},"$expr": bson.M{"$lt": []string{"$resendAttempts","$maxResendAttempts"}}}
 	c := db.client.Database(dbname).Collection(collection_name)
 	err := c.FindOneAndUpdate(context.TODO(), q, update, opts).Decode(&res)
 	if err != nil {
@@ -412,9 +412,9 @@ func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delay
 	return res.ID, nil
 }
 
-func (db *Mongodb) InsertRecordToInprocess(db_name string, collection_name string,id int,delaySec int, nResendAttempts int) error {
+func (db *Mongodb) InsertRecordToInprocess(db_name string, collection_name string,id int,delayMs int, nResendAttempts int) error {
 	record := InProcessingRecord{
-		id, nResendAttempts, 0,time.Now().Unix()+int64(delaySec),
+		id, nResendAttempts, 0,time.Now().UnixNano()+int64(delayMs*1e6),
 	}
 
 	c := db.client.Database(db_name).Collection(collection_name)
@@ -429,20 +429,20 @@ func (db *Mongodb) InsertToInprocessIfNeeded(db_name string, collection_name str
 	if len(extra_param) == 0 {
 		return nil
 	}
-	delaySec, nResendAttempts, err := extractsTwoIntsFromString(extra_param)
+	delayMs, nResendAttempts, err := extractsTwoIntsFromString(extra_param)
 	if err != nil {
 		return err
 	}
 
-	return db.InsertRecordToInprocess(db_name,collection_name,id,delaySec, nResendAttempts)
+	return db.InsertRecordToInprocess(db_name,collection_name,id,delayMs, nResendAttempts)
 
 }
 
 func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(request Request, ignoreTimeout bool) (int, int, error) {
-	var record_ind,  max_ind, delaySec, nResendAttempts int
+	var record_ind,  max_ind, delayMs, nResendAttempts int
 	var err error
 	if len(request.ExtraParam) != 0 {
-		delaySec, nResendAttempts, err = extractsTwoIntsFromString(request.ExtraParam)
+		delayMs, nResendAttempts, err = extractsTwoIntsFromString(request.ExtraParam)
 		if err != nil {
 			return 0, 0, err
 		}
@@ -451,7 +451,7 @@ func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(request Request, ignoreTi
 	}
 	tNow := time.Now().Unix()
 	if (atomic.LoadInt64(&db.lastReadFromInprocess) <= tNow-int64(db.settings.ReadFromInprocessPeriod)) || ignoreTimeout {
-		record_ind, err = db.getUnProcessedId(request.DbName, inprocess_collection_name_prefix+request.GroupId, delaySec,nResendAttempts)
+		record_ind, err = db.getUnProcessedId(request.DbName, inprocess_collection_name_prefix+request.GroupId, delayMs,nResendAttempts)
 		if err != nil {
 			log_str := "error getting unprocessed id " + request.DbName + ", groupid: " + request.GroupId + ":" + err.Error()
 			logger.Debug(log_str)
@@ -508,17 +508,17 @@ func (db *Mongodb) getNextAndMaxIndexes(request Request) (int, int, error) {
 func (db *Mongodb) processLastRecord(request Request, data []byte, err error) ([]byte, error) {
 	var r ServiceRecord
 	err = json.Unmarshal(data, &r)
-	if err != nil || r.Name != finish_substream_keyword {
+	if err != nil || r.Name != finish_stream_keyword {
 		return data, err
 	}
-	var next_substream string
-	next_substream, ok := r.Meta["next_substream"].(string)
+	var next_stream string
+	next_stream, ok := r.Meta["next_stream"].(string)
 	if !ok {
-		next_substream = no_next_substream_keyword
+		next_stream = no_next_stream_keyword
 	}
 
-	answer := encodeAnswer(r.ID, r.ID, next_substream)
-	log_str := "reached end of substream " + request.DbCollectionName + " , next_substream: " + next_substream
+	answer := encodeAnswer(r.ID, r.ID, next_stream)
+	log_str := "reached end of stream " + request.DbCollectionName + " , next_stream: " + next_stream
 	logger.Debug(log_str)
 
 
@@ -622,7 +622,7 @@ func (db *Mongodb) processQueryError(query, dbname string, err error) ([]byte, e
 	return nil, &DBError{utils.StatusNoData, err.Error()}
 }
 
-func (db *Mongodb) queryImages(request Request) ([]byte, error) {
+func (db *Mongodb) queryMessages(request Request) ([]byte, error) {
 	var res []map[string]interface{}
 	q, sort, err := db.BSONFromSQL(request.DbName, request.ExtraParam)
 	if err != nil {
@@ -781,10 +781,10 @@ func (db *Mongodb) getNacks(request Request, min_index, max_index int) ([]int, e
 	return resp[0].Numbers, nil
 }
 
-func (db *Mongodb) getSubstreams(request Request) ([]byte, error) {
-	rec, err := substreams.getSubstreams(db,request.DbName,request.ExtraParam)
+func (db *Mongodb) getStreams(request Request) ([]byte, error) {
+	rec, err := streams.getStreams(db,request.DbName,request.ExtraParam)
 	if err != nil {
-		return db.processQueryError("get substreams", request.DbName, err)
+		return db.processQueryError("get streams", request.DbName, err)
 	}
 	return json.Marshal(&rec)
 }
@@ -808,13 +808,13 @@ func (db *Mongodb) ProcessRequest(request Request) (answer []byte, err error) {
 		return db.getSize(request)
 	case "meta":
 		return db.getMeta(request)
-	case "queryimages":
-		return db.queryImages(request)
-	case "substreams":
-		return db.getSubstreams(request)
-	case "ackimage":
+	case "querymessages":
+		return db.queryMessages(request)
+	case "streams":
+		return db.getStreams(request)
+	case "ackmessage":
 		return db.ackRecord(request)
-	case "negackimage":
+	case "negackmessage":
 		return db.negAckRecord(request)
 	case "nacks":
 		return db.nacks(request)
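Review note: resend parameters still travel in ExtraParam as "<delay>_<attempts>", but the delay is now in milliseconds ("1000_3" in the updated tests), and the stored delayMs field holds an absolute UnixNano deadline. A standalone sketch of the conversion; the real code parses the string via extractsTwoIntsFromString:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// ExtraParam format: "<delay in ms>_<max resend attempts>", e.g. "1000_3".
	parts := strings.SplitN("1000_3", "_", 2)
	delayMs, _ := strconv.Atoi(parts[0])         // 1000
	nResendAttempts, _ := strconv.Atoi(parts[1]) // 3

	// The in-process record stores an absolute deadline: UnixNano now plus
	// the millisecond delay converted to nanoseconds.
	deadline := time.Now().UnixNano() + int64(delayMs)*1e6
	fmt.Println(delayMs, nResendAttempts, deadline)
}
```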
diff --git a/broker/src/asapo_broker/database/mongodb_streams.go b/broker/src/asapo_broker/database/mongodb_streams.go
new file mode 100644
index 0000000000000000000000000000000000000000..fba70330b025ad6ae762c4574324073b3900a655
--- /dev/null
+++ b/broker/src/asapo_broker/database/mongodb_streams.go
@@ -0,0 +1,130 @@
+//+build !test
+
+package database
+
+import (
+	"asapo_common/utils"
+	"context"
+	"errors"
+	"go.mongodb.org/mongo-driver/bson"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+type StreamInfo struct {
+	Name      string `json:"name"`
+	Timestamp int64  `json:"timestampCreated"`
+}
+
+type StreamsRecord struct {
+	Streams []StreamInfo `json:"streams"`
+}
+
+type Streams struct {
+	records     map[string]StreamsRecord
+	lastUpdated int64
+}
+
+var streams = Streams{lastUpdated: 0, records: make(map[string]StreamsRecord, 0)}
+var streamsLock sync.Mutex
+
+func (ss *Streams) tryGetFromCache(db_name string, updatePeriodMs int) (StreamsRecord, error) {
+	if ss.lastUpdated < time.Now().UnixNano()-int64(updatePeriodMs*1000000) {
+		return StreamsRecord{}, errors.New("cache expired")
+	}
+	rec, ok := ss.records[db_name]
+	if !ok {
+		return StreamsRecord{}, errors.New("no records for " + db_name)
+	}
+	return rec, nil
+}
+
+func readStreams(db *Mongodb, db_name string) (StreamsRecord, error) {
+	database := db.client.Database(db_name)
+	result, err := database.ListCollectionNames(context.TODO(), bson.D{})
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+	var rec = StreamsRecord{[]StreamInfo{}}
+	for _, coll := range result {
+		if strings.HasPrefix(coll, data_collection_name_prefix) {
+			si := StreamInfo{Name: strings.TrimPrefix(coll, data_collection_name_prefix)}
+			rec.Streams = append(rec.Streams, si)
+		}
+	}
+	return rec, nil
+}
+
+func updateTimestamps(db *Mongodb, db_name string, rec *StreamsRecord) {
+	ss,dbFound :=streams.records[db_name]
+	currentStreams := []StreamInfo{}
+	if dbFound {
+		// sort streams by name
+		currentStreams=ss.Streams
+		sort.Slice(currentStreams,func(i, j int) bool {
+			return currentStreams[i].Name<currentStreams[j].Name
+		})
+	}
+	for i, record := range rec.Streams {
+		ind := sort.Search(len(currentStreams),func(i int) bool {
+			return currentStreams[i].Name>=record.Name
+		})
+		if ind < len(currentStreams) && currentStreams[ind].Name == record.Name { // record found, just skip it
+			rec.Streams[i].Timestamp = currentStreams[ind].Timestamp
+			continue
+		}
+		res, err := db.getEarliestRecord(db_name, record.Name)
+		if err == nil {
+			ts,ok:=utils.InterfaceToInt64(res["timestamp"])
+			if ok {
+				rec.Streams[i].Timestamp = ts
+			}
+		}
+	}
+}
+
+func sortRecords(rec *StreamsRecord) {
+	sort.Slice(rec.Streams[:], func(i, j int) bool {
+		return rec.Streams[i].Timestamp < rec.Streams[j].Timestamp
+	})
+}
+
+func (ss *Streams) updateFromDb(db *Mongodb, db_name string) (StreamsRecord, error) {
+	rec, err := readStreams(db, db_name)
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+	updateTimestamps(db, db_name, &rec)
+	sortRecords(&rec)
+	if len(rec.Streams)>0 {
+		ss.records[db_name] = rec
+		ss.lastUpdated = time.Now().UnixNano()
+	}
+	return rec, nil
+}
+
+func (ss *Streams) getStreams(db *Mongodb, db_name string, from string) (StreamsRecord, error) {
+	streamsLock.Lock()
+	rec, err := ss.tryGetFromCache(db_name,db.settings.UpdateStreamCachePeriodMs)
+	if err != nil {
+		rec, err = ss.updateFromDb(db, db_name)
+	}
+	streamsLock.Unlock()
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+
+	if from != "" {
+		ind := len(rec.Streams)
+		for i, rec := range rec.Streams {
+			if rec.Name == from {
+				ind = i
+				break
+			}
+		}
+		rec.Streams = rec.Streams[ind:]
+	}
+	return rec, nil
+}
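Review note: a short usage sketch for the new file, assuming it sits in the same database package; listStreamsFrom is a hypothetical helper, not part of this change:

```go
package database

// listStreamsFrom is a hypothetical helper, for illustration only.
func listStreamsFrom(db *Mongodb, dbName, from string) ([]StreamInfo, error) {
	// Served from the cache while it is younger than UpdateStreamCachePeriodMs,
	// otherwise re-read from MongoDB; streams come back sorted by the timestamp
	// of their earliest record. A non-empty from cuts the list so that it
	// starts at the named stream.
	rec, err := streams.getStreams(db, dbName, from)
	if err != nil {
		return nil, err
	}
	return rec.Streams, nil
}
```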
diff --git a/broker/src/asapo_broker/database/mongodb_substreams.go b/broker/src/asapo_broker/database/mongodb_substreams.go
deleted file mode 100644
index 999e6fa17b1c2b07b24db67e2d0166d7291d336d..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/database/mongodb_substreams.go
+++ /dev/null
@@ -1,130 +0,0 @@
-//+build !test
-
-package database
-
-import (
-	"asapo_common/utils"
-	"context"
-	"errors"
-	"go.mongodb.org/mongo-driver/bson"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-type SubstreamInfo struct {
-	Name      string `json:"name"`
-	Timestamp int64  `json:"timestampCreated"`
-}
-
-type SubstreamsRecord struct {
-	Substreams []SubstreamInfo `json:"substreams"`
-}
-
-type Substreams struct {
-	records     map[string]SubstreamsRecord
-	lastUpdated int64
-}
-
-var substreams = Substreams{lastUpdated: 0, records: make(map[string]SubstreamsRecord, 0)}
-var substreamsLock sync.Mutex
-
-func (ss *Substreams) tryGetFromCache(db_name string, updatePeriodMs int) (SubstreamsRecord, error) {
-	if ss.lastUpdated < time.Now().UnixNano()-int64(updatePeriodMs*1000000) {
-		return SubstreamsRecord{}, errors.New("cache expired")
-	}
-	rec, ok := ss.records[db_name]
-	if !ok {
-		return SubstreamsRecord{}, errors.New("no records for " + db_name)
-	}
-	return rec, nil
-}
-
-func readSubstreams(db *Mongodb, db_name string) (SubstreamsRecord, error) {
-	database := db.client.Database(db_name)
-	result, err := database.ListCollectionNames(context.TODO(), bson.D{})
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-	var rec = SubstreamsRecord{[]SubstreamInfo{}}
-	for _, coll := range result {
-		if strings.HasPrefix(coll, data_collection_name_prefix) {
-			si := SubstreamInfo{Name: strings.TrimPrefix(coll, data_collection_name_prefix)}
-			rec.Substreams = append(rec.Substreams, si)
-		}
-	}
-	return rec, nil
-}
-
-func updateTimestamps(db *Mongodb, db_name string, rec *SubstreamsRecord) {
-	ss,dbFound :=substreams.records[db_name]
-	currentSubstreams := []SubstreamInfo{}
-	if dbFound {
-		// sort substreams by name
-		currentSubstreams=ss.Substreams
-		sort.Slice(currentSubstreams,func(i, j int) bool {
-			return currentSubstreams[i].Name>=currentSubstreams[j].Name
-		})
-	}
-	for i, record := range rec.Substreams {
-		ind := sort.Search(len(currentSubstreams),func(i int) bool {
-			return currentSubstreams[i].Name>=record.Name
-		})
-		if ind < len(currentSubstreams) && currentSubstreams[ind].Name == record.Name { // record found, just skip it
-			rec.Substreams[i].Timestamp = currentSubstreams[ind].Timestamp
-			continue
-		}
-		res, err := db.getEarliestRecord(db_name, record.Name)
-		if err == nil {
-			ts,ok:=utils.InterfaceToInt64(res["timestamp"])
-			if ok {
-				rec.Substreams[i].Timestamp = ts
-			}
-		}
-	}
-}
-
-func sortRecords(rec *SubstreamsRecord) {
-	sort.Slice(rec.Substreams[:], func(i, j int) bool {
-		return rec.Substreams[i].Timestamp < rec.Substreams[j].Timestamp
-	})
-}
-
-func (ss *Substreams) updateFromDb(db *Mongodb, db_name string) (SubstreamsRecord, error) {
-	rec, err := readSubstreams(db, db_name)
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-	updateTimestamps(db, db_name, &rec)
-	sortRecords(&rec)
-	if len(rec.Substreams)>0 {
-		ss.records[db_name] = rec
-		ss.lastUpdated = time.Now().UnixNano()
-	}
-	return rec, nil
-}
-
-func (ss *Substreams) getSubstreams(db *Mongodb, db_name string, from string) (SubstreamsRecord, error) {
-	substreamsLock.Lock()
-	rec, err := ss.tryGetFromCache(db_name,db.settings.UpdateSubstreamCachePeriodMs)
-	if err != nil {
-		rec, err = ss.updateFromDb(db, db_name)
-	}
-	substreamsLock.Unlock()
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-
-	if from != "" {
-		ind := len(rec.Substreams)
-		for i, rec := range rec.Substreams {
-			if rec.Name == from {
-				ind = i
-				break
-			}
-		}
-		rec.Substreams = rec.Substreams[ind:]
-	}
-	return rec, nil
-}
diff --git a/broker/src/asapo_broker/database/mongodb_test.go b/broker/src/asapo_broker/database/mongodb_test.go
index 905b2c3613d40273379bd98663f424c7056725ea..dbf379375b64abcdfc3c82244b42f87bc8d4330f 100644
--- a/broker/src/asapo_broker/database/mongodb_test.go
+++ b/broker/src/asapo_broker/database/mongodb_test.go
@@ -22,23 +22,23 @@ type TestRecord struct {
 type TestDataset struct {
 	ID     int64          `bson:"_id" json:"_id"`
 	Size   int64          `bson:"size" json:"size"`
-	Images []TestRecord `bson:"images" json:"images"`
+	Messages []TestRecord `bson:"messages" json:"messages"`
 }
 
 var db Mongodb
 
 const dbname = "12345"
-const collection = "substream"
-const collection2 = "substream2"
+const collection = "stream"
+const collection2 = "stream2"
 const dbaddress = "127.0.0.1:27017"
 const groupId = "bid2a5auidddp1vl71d0"
 const metaID = 0
 const metaID_str = "0"
 
-var empty_next = map[string]string{"next_substream": ""}
+var empty_next = map[string]string{"next_stream": ""}
 
 var rec1 = TestRecord{1, empty_next, "aaa", 0}
-var rec_finished = TestRecord{2, map[string]string{"next_substream": "next1"}, finish_substream_keyword, 0}
+var rec_finished = TestRecord{2, map[string]string{"next_stream": "next1"}, finish_stream_keyword, 0}
 var rec2 = TestRecord{2, empty_next, "bbb", 1}
 var rec3 = TestRecord{3, empty_next, "ccc", 2}
 
@@ -84,8 +84,8 @@ func TestMongoDBGetMetaErrorWhenNotConnected(t *testing.T) {
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
-func TestMongoDBQueryImagesErrorWhenNotConnected(t *testing.T) {
-	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: "0"})
+func TestMongoDBQueryMessagesErrorWhenNotConnected(t *testing.T) {
+	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "querymessages", ExtraParam: "0"})
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
@@ -101,7 +101,7 @@ func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	defer cleanup()
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
@@ -109,7 +109,7 @@ func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	defer cleanup()
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "last"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) {
@@ -118,7 +118,7 @@ func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
@@ -127,7 +127,7 @@ func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec2)
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetNextOK(t *testing.T) {
@@ -149,7 +149,7 @@ func TestMongoDBGetNextErrorOnFinishedStream(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next1\"}", err.(*DBError).Message)
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_stream\":\"next1\"}", err.(*DBError).Message)
 }
 
 func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) {
@@ -160,7 +160,7 @@ func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.(*DBError).Message)
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err.(*DBError).Message)
 }
 
 func TestMongoDBGetNextCorrectOrder(t *testing.T) {
@@ -284,7 +284,7 @@ func TestMongoDBGetNextEmptyAfterErasingDatabase(t *testing.T) {
 
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBgetRecordByID(t *testing.T) {
@@ -303,7 +303,7 @@ func TestMongoDBgetRecordByIDFails(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetRecordNext(t *testing.T) {
@@ -514,7 +514,7 @@ var tests = []struct {
 	{"(meta.counter = 10 OR meta.counter = 11 AND (meta.text = 'bbb' OR meta.text = 'ccc')", []TestRecordMeta{}, false},
 }
 
-func TestMongoDBQueryImagesOK(t *testing.T) {
+func TestMongoDBQueryMessagesOK(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 
@@ -531,7 +531,7 @@ func TestMongoDBQueryImagesOK(t *testing.T) {
 		//			continue
 		//		}
 
-		res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: test.query})
+		res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "querymessages", ExtraParam: test.query})
 		var res []TestRecordMeta
 		json.Unmarshal(res_string, &res)
 		//		fmt.Println(string(res_string))
@@ -546,11 +546,11 @@ func TestMongoDBQueryImagesOK(t *testing.T) {
 
 }
 
-func TestMongoDBQueryImagesOnEmptyDatabase(t *testing.T) {
+func TestMongoDBQueryMessagesOnEmptyDatabase(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	for _, test := range tests {
-		res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: test.query})
+		res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "querymessages", ExtraParam: test.query})
 		var res []TestRecordMeta
 		json.Unmarshal(res_string, &res)
 		assert.Equal(t, 0, len(res))
@@ -733,38 +733,38 @@ func TestMongoDBOkOnIncompleteDatasetID(t *testing.T) {
 
 }
 
-type Substream struct {
+type Stream struct {
 	name    string
 	records []TestRecord
 }
 
-var testsSubstreams = []struct {
+var testsStreams = []struct {
 	from               string
-	substreams         []Substream
-	expectedSubstreams SubstreamsRecord
+	streams         []Stream
+	expectedStreams StreamsRecord
 	test               string
 	ok                 bool
 }{
-	{"", []Substream{}, SubstreamsRecord{[]SubstreamInfo{}}, "no substreams", true},
-	{"", []Substream{{"ss1", []TestRecord{rec2, rec1}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}}}, "one substream", true},
-	{"", []Substream{{"ss1", []TestRecord{rec2, rec1}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}, SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "two substreams", true},
-	{"ss2", []Substream{{"ss1", []TestRecord{rec1, rec2}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "with from", true},
+	{"", []Stream{}, StreamsRecord{[]StreamInfo{}}, "no streams", true},
+	{"", []Stream{{"ss1", []TestRecord{rec2, rec1}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss1", Timestamp: 0}}}, "one stream", true},
+	{"", []Stream{{"ss1", []TestRecord{rec2, rec1}}, {"ss2", []TestRecord{rec2, rec3}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss1", Timestamp: 0}, StreamInfo{Name: "ss2", Timestamp: 1}}}, "two streams", true},
+	{"ss2", []Stream{{"ss1", []TestRecord{rec1, rec2}}, {"ss2", []TestRecord{rec2, rec3}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss2", Timestamp: 1}}}, "with from", true},
 }
 
-func TestMongoDBListSubstreams(t *testing.T) {
-	for _, test := range testsSubstreams {
+func TestMongoDBListStreams(t *testing.T) {
+	for _, test := range testsStreams {
 		db.Connect(dbaddress)
-		for _, substream := range test.substreams {
-			for _, rec := range substream.records {
-				db.insertRecord(dbname, substream.name, &rec)
+		for _, stream := range test.streams {
+			for _, rec := range stream.records {
+				db.insertRecord(dbname, stream.name, &rec)
 			}
 		}
-		var rec_substreams_expect, _ = json.Marshal(test.expectedSubstreams)
+		var rec_streams_expect, _ = json.Marshal(test.expectedStreams)
 
-		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "0", Op: "substreams", ExtraParam: test.from})
+		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "0", Op: "streams", ExtraParam: test.from})
 		if test.ok {
 			assert.Nil(t, err, test.test)
-			assert.Equal(t, string(rec_substreams_expect), string(res), test.test)
+			assert.Equal(t, string(rec_streams_expect), string(res), test.test)
 		} else {
 			assert.NotNil(t, err, test.test)
 		}
@@ -772,14 +772,14 @@ func TestMongoDBListSubstreams(t *testing.T) {
 	}
 }
 
-func TestMongoDBAckImage(t *testing.T) {
+func TestMongoDBAckMessage(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 
 	db.insertRecord(dbname, collection, &rec1)
-	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
+	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
 
-	request := Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackimage", ExtraParam: query_str}
+	request := Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str}
 	res, err := db.ProcessRequest(request)
 	nacks, _ := db.getNacks(request, 1, 1)
 	assert.Nil(t, err)
@@ -815,9 +815,9 @@ func TestMongoDBNacks(t *testing.T) {
 			insertRecords(10)
 		}
 		if test.ackRecords {
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackimage\"}"})
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackimage\"}"})
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackimage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
 		}
 
 		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "nacks", ExtraParam: test.rangeString})
@@ -849,9 +849,9 @@ func TestMongoDBLastAcks(t *testing.T) {
 			insertRecords(10)
 		}
 		if test.ackRecords {
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackimage\"}"})
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackimage\"}"})
-			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackimage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
 		}
 
 		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "lastack"})
@@ -890,7 +890,7 @@ func TestMongoDBGetNextUsesInprocessedNumRetry(t *testing.T) {
 	assert.Nil(t, err1)
 	assert.NotNil(t, err2)
 	if err2 != nil {
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err2.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err2.Error())
 	}
 	assert.Equal(t, string(rec1_expect), string(res))
 	assert.Equal(t, string(rec1_expect), string(res1))
@@ -902,10 +902,10 @@ func TestMongoDBGetNextUsesInprocessedAfterTimeout(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
-	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
+	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	time.Sleep(time.Second)
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
+	res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Nil(t, err2)
@@ -920,10 +920,10 @@ func TestMongoDBGetNextReturnsToNormalAfterUsesInprocessed(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
-	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
+	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	time.Sleep(time.Second)
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"})
+	res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Nil(t, err2)
@@ -969,14 +969,14 @@ func TestMongoDBAckDeletesInprocessed(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
 	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
-	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
+	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
 
-	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackimage", ExtraParam: query_str})
+	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err.Error())
 	}
 }
 
@@ -987,18 +987,18 @@ func TestMongoDBNegAck(t *testing.T) {
 	inputParams := struct {
 		Id     int
 		Params struct {
-			DelaySec int
+			DelayMs int
 		}
 	}{}
 	inputParams.Id = 1
-	inputParams.Params.DelaySec = 0
+	inputParams.Params.DelayMs = 0
 
 	db.insertRecord(dbname, collection, &rec1)
 	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 	bparam, _ := json.Marshal(&inputParams)
 
-	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "negackimage", ExtraParam: string(bparam)})
-	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) // first time image from negack
+	db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "negackmessage", ExtraParam: string(bparam)})
+	res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) // first time message from negack
 	_, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})  // second time nothing
 
 	assert.Nil(t, err)
@@ -1006,7 +1006,7 @@ func TestMongoDBNegAck(t *testing.T) {
 	assert.NotNil(t, err1)
 	if err1 != nil {
 		assert.Equal(t, utils.StatusNoData, err1.(*DBError).Code)
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err1.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err1.Error())
 	}
 }
 
diff --git a/broker/src/asapo_broker/database/streams_test.go b/broker/src/asapo_broker/database/streams_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c172adf5483c2de061f64f55021663f74988a34b
--- /dev/null
+++ b/broker/src/asapo_broker/database/streams_test.go
@@ -0,0 +1,72 @@
+// +build integration_tests
+
+package database
+
+import (
+	"github.com/stretchr/testify/suite"
+	"testing"
+	"time"
+)
+
+type StreamsTestSuite struct {
+	suite.Suite
+}
+
+func (suite *StreamsTestSuite) SetupTest() {
+	db.Connect(dbaddress)
+}
+
+func (suite *StreamsTestSuite) TearDownTest() {
+	cleanup()
+	streams.records = map[string]StreamsRecord{}
+}
+
+func TestStreamsTestSuite(t *testing.T) {
+	suite.Run(t, new(StreamsTestSuite))
+}
+
+func (suite *StreamsTestSuite) TestStreamsEmpty() {
+	rec, err := streams.getStreams(&db, "test", "")
+	suite.Nil(err)
+	suite.Empty(rec.Streams)
+}
+
+func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenEmpty() {
+	db.settings.UpdateStreamCachePeriodMs = 1000
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(1, len(rec.Streams))
+}
+
+func (suite *StreamsTestSuite) TestStreamsUsesCache() {
+	db.settings.UpdateStreamCachePeriodMs = 1000
+	db.insertRecord(dbname, collection, &rec2)
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(int64(1), rec.Streams[0].Timestamp)
+}
+
+func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenExpired() {
+	db.settings.UpdateStreamCachePeriodMs = 10
+	db.insertRecord(dbname, collection, &rec2)
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	time.Sleep(time.Millisecond * 100)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(int64(1), rec.Streams[0].Timestamp)
+}
+
+func (suite *StreamsTestSuite) TestStreamRemovesDatabase() {
+	db.settings.UpdateStreamCachePeriodMs = 0
+	db.insertRecord(dbname, collection, &rec1)
+	streams.getStreams(&db, dbname, "")
+	db.dropDatabase(dbname)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Empty(rec.Streams)
+}
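
The suite above pins down the cache semantics of `streams.getStreams`: a cached record is reused while it is younger than `UpdateStreamCachePeriodMs` and non-empty, and refreshed from the database otherwise. A minimal sketch of that policy (hypothetical type and helper names, not the broker's actual implementation):

```go
package main

import (
	"sync"
	"time"
)

// StreamsRecord stands in for the broker's cached record; the real one
// holds stream infos rather than plain names.
type StreamsRecord struct {
	Streams   []string
	lastCheck time.Time
}

type streamsCache struct {
	mu      sync.Mutex
	records map[string]StreamsRecord // keyed by database name
}

// get returns the cached record while it is fresh and non-empty, otherwise
// re-reads via fetch -- mirroring TestStreamsUsesCache,
// TestStreamsNotUsesCacheWhenEmpty and TestStreamsNotUsesCacheWhenExpired.
func (c *streamsCache) get(dbName string, ttlMs int, fetch func() []string) StreamsRecord {
	c.mu.Lock()
	defer c.mu.Unlock()
	rec, ok := c.records[dbName]
	fresh := ok && time.Since(rec.lastCheck) < time.Duration(ttlMs)*time.Millisecond
	if fresh && len(rec.Streams) > 0 {
		return rec
	}
	rec = StreamsRecord{Streams: fetch(), lastCheck: time.Now()}
	c.records[dbName] = rec
	return rec
}

func main() {
	c := &streamsCache{records: map[string]StreamsRecord{}}
	calls := 0
	fetch := func() []string { calls++; return []string{"stream1"} }
	c.get("db", 1000, fetch)
	c.get("db", 1000, fetch) // second call is served from the cache
	println(calls)           // 1
}
```
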
diff --git a/broker/src/asapo_broker/database/substreams_test.go b/broker/src/asapo_broker/database/substreams_test.go
deleted file mode 100644
index 6c3ed9be0bc70b6058e3c336f035f329369ad8ff..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/database/substreams_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// +build integration_tests
-
-package database
-
-import (
-	"github.com/stretchr/testify/suite"
-	"testing"
-	"time"
-)
-
-type SubstreamsTestSuite struct {
-	suite.Suite
-}
-
-func (suite *SubstreamsTestSuite) SetupTest() {
-	db.Connect(dbaddress)
-}
-
-func (suite *SubstreamsTestSuite) TearDownTest() {
-	cleanup()
-	substreams.records= map[string]SubstreamsRecord{}
-}
-
-func TestSubstreamsTestSuite(t *testing.T) {
-	suite.Run(t, new(SubstreamsTestSuite))
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsEmpty() {
-	rec, err := substreams.getSubstreams(&db, "test", "")
-	suite.Nil(err)
-	suite.Empty(rec.Substreams, 0)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsNotUsesCacheWhenEmpty() {
-	db.settings.UpdateSubstreamCachePeriodMs = 1000
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(1, len(rec.Substreams))
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsUsesCache() {
-	db.settings.UpdateSubstreamCachePeriodMs = 1000
-	db.insertRecord(dbname, collection, &rec2)
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(int64(1), rec.Substreams[0].Timestamp)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsNotUsesCacheWhenExpired() {
-	db.settings.UpdateSubstreamCachePeriodMs = 10
-	db.insertRecord(dbname, collection, &rec2)
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	time.Sleep(time.Millisecond * 100)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(int64(1), rec.Substreams[0].Timestamp)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamRemovesDatabase() {
-	db.settings.UpdateSubstreamCachePeriodMs = 0
-	db.insertRecord(dbname, collection, &rec1)
-	substreams.getSubstreams(&db, dbname, "")
-	db.dropDatabase(dbname)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Empty(rec.Substreams, 0)
-}
diff --git a/broker/src/asapo_broker/server/get_commands_test.go b/broker/src/asapo_broker/server/get_commands_test.go
index 2f2f9fadba8fea7297b6d555c07a28d9c33952f5..e4db0514b53c85393ce47e55c7f7de02e99da6ae 100644
--- a/broker/src/asapo_broker/server/get_commands_test.go
+++ b/broker/src/asapo_broker/server/get_commands_test.go
@@ -34,31 +34,31 @@ func TestGetCommandsTestSuite(t *testing.T) {
 
 var testsGetCommand = []struct {
 	command string
-	substream string
+	stream string
 	groupid string
 	reqString string
 	queryParams string
 	externalParam string
 }{
-	{"last", expectedSubstream, "", expectedSubstream + "/0/last","","0"},
-	{"id", expectedSubstream, "", expectedSubstream + "/0/1","","1"},
+	{"last", expectedStream, "", expectedStream + "/0/last","","0"},
+	{"id", expectedStream, "", expectedStream + "/0/1","","1"},
 	{"meta", "default", "", "default/0/meta/0","","0"},
-	{"nacks", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/nacks","","0_0"},
-	{"next", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/next","",""},
-	{"next", expectedSubstream, expectedGroupID, expectedSubstream + "/" +
-		expectedGroupID + "/next","&resend_nacks=true&delay_sec=10&resend_attempts=3","10_3"},
-	{"size", expectedSubstream, "", expectedSubstream  + "/size","","0"},
-	{"substreams", "0", "", "0/substreams","",""},
-	{"lastack", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/lastack","",""},
+	{"nacks", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/nacks","","0_0"},
+	{"next", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/next","",""},
+	{"next", expectedStream, expectedGroupID, expectedStream + "/" +
+		expectedGroupID + "/next","&resend_nacks=true&delay_ms=10000&resend_attempts=3","10000_3"},
+	{"size", expectedStream, "", expectedStream  + "/size","","0"},
+	{"streams", "0", "", "0/streams","",""},
+	{"lastack", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/lastack","",""},
 
 }
 
 
 func (suite *GetCommandsTestSuite) TestGetCommandsCallsCorrectRoutine() {
 	for _, test := range testsGetCommand {
-		suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: test.substream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
+		suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: test.stream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
 		logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request "+test.command)))
-		w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + test.reqString+correctTokenSuffix+test.queryParams)
+		w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + test.reqString+correctTokenSuffix+test.queryParams)
 		suite.Equal(http.StatusOK, w.Code, test.command+ " OK")
 		suite.Equal("Hello", string(w.Body.Bytes()), test.command+" sends data")
 	}
diff --git a/broker/src/asapo_broker/server/get_meta_test.go b/broker/src/asapo_broker/server/get_meta_test.go
index 4e305ea3e7077135002a2403603c078678a4bd76..4eb0e16547b60e7abb07a60f8667d7aadf9ef1bd 100644
--- a/broker/src/asapo_broker/server/get_meta_test.go
+++ b/broker/src/asapo_broker/server/get_meta_test.go
@@ -33,9 +33,9 @@ func TestGetMetaTestSuite(t *testing.T) {
 }
 
 func (suite *GetMetaTestSuite) TestGetMetaOK() {
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, Op: "meta", ExtraParam: "1"}).Return([]byte(""), nil)
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, Op: "meta", ExtraParam: "1"}).Return([]byte(""), nil)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request meta")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/0/meta"  + "/1" + correctTokenSuffix,"GET")
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/0/meta"  + "/1" + correctTokenSuffix,"GET")
 	suite.Equal(http.StatusOK, w.Code, "meta OK")
 }
 
diff --git a/broker/src/asapo_broker/server/get_next.go b/broker/src/asapo_broker/server/get_next.go
index 9e588fb9a73da679beffef9598aa9aa8d5df61d1..3f051f57e72398d1eeb8222fecbed005de4dc3f0 100644
--- a/broker/src/asapo_broker/server/get_next.go
+++ b/broker/src/asapo_broker/server/get_next.go
@@ -7,11 +7,11 @@ import (
 func extractResend(r *http.Request) (string) {
 	keys := r.URL.Query()
 	resend := keys.Get("resend_nacks")
-	delay_sec := keys.Get("delay_sec")
+	delay_ms := keys.Get("delay_ms")
 	resend_attempts := keys.Get("resend_attempts")
 	resend_params := ""
 	if len(resend)!=0 {
-		resend_params=delay_sec+"_"+resend_attempts
+		resend_params = delay_ms + "_" + resend_attempts
 	}
 	return resend_params
 }
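
For reference, the renamed query parameters round-trip like this (a standalone stdlib-only sketch; the resulting `"10000_3"` matches the expectation in the `get_commands` test table above):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// consumer asks to resend un-acknowledged messages after 10000 ms, 3 attempts
	u, _ := url.Parse("/next?resend_nacks=true&delay_ms=10000&resend_attempts=3")
	keys := u.Query()
	resendParams := ""
	if len(keys.Get("resend_nacks")) != 0 {
		resendParams = keys.Get("delay_ms") + "_" + keys.Get("resend_attempts")
	}
	fmt.Println(resendParams) // 10000_3 -- passed on to the database layer as ExtraParam
}
```
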
diff --git a/broker/src/asapo_broker/server/get_streams.go b/broker/src/asapo_broker/server/get_streams.go
new file mode 100644
index 0000000000000000000000000000000000000000..335f15a6eff8b6698bdc338632d6d360b7891b5a
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_streams.go
@@ -0,0 +1,11 @@
+package server
+
+import (
+	"net/http"
+)
+
+func routeGetStreams(w http.ResponseWriter, r *http.Request) {
+	keys := r.URL.Query()
+	from := keys.Get("from")
+	processRequest(w, r, "streams", from, false)
+}
diff --git a/broker/src/asapo_broker/server/get_substreams.go b/broker/src/asapo_broker/server/get_substreams.go
deleted file mode 100644
index ee4600cd6d0670db8b6f7fd0a8362ffb547e9e76..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/server/get_substreams.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package server
-
-import (
-	"net/http"
-)
-
-func routeGetSubstreams(w http.ResponseWriter, r *http.Request) {
-	keys := r.URL.Query()
-	from := keys.Get("from")
-	processRequest(w, r, "substreams", from, false)
-}
diff --git a/broker/src/asapo_broker/server/listroutes.go b/broker/src/asapo_broker/server/listroutes.go
index f971d71994e0ba718f383dd41d7fb65c351fc331..8d782f58ec83f4351f898c71d7979be495cdc695 100644
--- a/broker/src/asapo_broker/server/listroutes.go
+++ b/broker/src/asapo_broker/server/listroutes.go
@@ -8,49 +8,49 @@ var listRoutes = utils.Routes{
 	utils.Route{
 		"GetNext",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/{groupid}/next",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/next",
 		routeGetNext,
 	},
 	utils.Route{
 		"GetSize",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/size",
+		"/database/{dbname}/{datasource}/{stream}/size",
 		routeGetSize,
 	},
 	utils.Route{
-		"GetSubstreams",
+		"GetStreams",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/substreams",
-		routeGetSubstreams,
+		"/database/{dbname}/{datasource}/{stream}/streams",
+		routeGetStreams,
 	},
 	utils.Route{
 		"GetLast",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/0/last",
+		"/database/{dbname}/{datasource}/{stream}/0/last",
 		routeGetLast,
 	},
 	utils.Route{
 		"GetLastAck",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/{groupid}/lastack",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/lastack",
 		routeGetLastAck,
 	},
 	utils.Route{
 		"GetNacks",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/{groupid}/nacks",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/nacks",
 		routeGetNacks,
 	},
 	utils.Route{
 		"GetID",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/0/{id}",
+		"/database/{dbname}/{datasource}/{stream}/0/{id}",
 		routeGetByID,
 	},
 	utils.Route{
 		"GetMeta",
 		"Get",
-		"/database/{dbname}/{stream}/{substream}/0/meta/{id}",
+		"/database/{dbname}/{datasource}/{stream}/0/meta/{id}",
 		routeGetMeta,
 	},
 	utils.Route{
@@ -60,22 +60,22 @@ var listRoutes = utils.Routes{
 		routeCreateGroupID,
 	},
 	utils.Route{
-		"QueryImages",
+		"QueryMessages",
 		"Post",
-		"/database/{dbname}/{stream}/{substream}/0/queryimages",
-		routeQueryImages,
+		"/database/{dbname}/{datasource}/{stream}/0/querymessages",
+		routeQueryMessages,
 	},
 	utils.Route{
 		"ResetConter",
 		"Post",
-		"/database/{dbname}/{stream}/{substream}/{groupid}/resetcounter",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/resetcounter",
 		routeResetCounter,
 	},
 	utils.Route{
-		"ImageOp",
+		"MessageOp",
 		"Post",
-		"/database/{dbname}/{stream}/{substream}/{groupid}/{id}",
-		routeImageOp,
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/{id}",
+		routeMessageOp,
 	},
 	utils.Route{
 		"Health",
diff --git a/broker/src/asapo_broker/server/post_op_image.go b/broker/src/asapo_broker/server/post_op_image.go
index 0f3b22198f867ead06f380b220b713b1bbd8c048..1440812f56e1a7f2f915233181a1cf9bdcdd176e 100644
--- a/broker/src/asapo_broker/server/post_op_image.go
+++ b/broker/src/asapo_broker/server/post_op_image.go
@@ -7,12 +7,12 @@ import (
 	"strconv"
 )
 
-type ImageOp struct {
+type MessageOp struct {
 	Id int
 	Op string
 	Params map[string]interface{} `json:",omitempty"`
 }
-func routeImageOp(w http.ResponseWriter, r *http.Request) {
+func routeMessageOp(w http.ResponseWriter, r *http.Request) {
 	body, err := ioutil.ReadAll(r.Body)
 	if err != nil {
 		http.Error(w, err.Error(), 500)
@@ -31,7 +31,7 @@ func routeImageOp(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	var  op ImageOp
+	var op MessageOp
 	err = json.Unmarshal(body, &op)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusBadRequest)
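
The renamed op names travel in the POST body that `routeMessageOp` above unmarshals; a self-contained sketch of the payloads used by the handler and the tests that follow (`ackmessage`, and `negackmessage` with its millisecond delay):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// MessageOp mirrors the struct above.
type MessageOp struct {
	Id     int
	Op     string
	Params map[string]interface{} `json:",omitempty"`
}

func main() {
	ack, _ := json.Marshal(MessageOp{Id: 1, Op: "ackmessage"})
	negack, _ := json.Marshal(MessageOp{Id: 1, Op: "negackmessage",
		Params: map[string]interface{}{"DelayMs": 0}})
	fmt.Println(string(ack))    // {"Id":1,"Op":"ackmessage"}
	fmt.Println(string(negack)) // {"Id":1,"Op":"negackmessage","Params":{"DelayMs":0}}
}
```
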
diff --git a/broker/src/asapo_broker/server/post_op_image_test.go b/broker/src/asapo_broker/server/post_op_image_test.go
index 94fdf49f6057390156cd8656feb93ce5ab4ea6f8..259787e41bc9fc41140daf8bdfb844c55f3939d2 100644
--- a/broker/src/asapo_broker/server/post_op_image_test.go
+++ b/broker/src/asapo_broker/server/post_op_image_test.go
@@ -9,12 +9,12 @@ import (
 	"testing"
 )
 
-type ImageOpTestSuite struct {
+type MessageOpTestSuite struct {
 	suite.Suite
 	mock_db *database.MockedDatabase
 }
 
-func (suite *ImageOpTestSuite) SetupTest() {
+func (suite *MessageOpTestSuite) SetupTest() {
 	statistics.Reset()
 	suite.mock_db = new(database.MockedDatabase)
 	db = suite.mock_db
@@ -22,33 +22,33 @@ func (suite *ImageOpTestSuite) SetupTest() {
 	logger.SetMockLog()
 }
 
-func (suite *ImageOpTestSuite) TearDownTest() {
+func (suite *MessageOpTestSuite) TearDownTest() {
 	assertExpectations(suite.T(), suite.mock_db)
 	logger.UnsetMockLog()
 	db = nil
 }
 
-func TestImageOpTestSuite(t *testing.T) {
-	suite.Run(t, new(ImageOpTestSuite))
+func TestMessageOpTestSuite(t *testing.T) {
+	suite.Run(t, new(MessageOpTestSuite))
 }
 
-func (suite *ImageOpTestSuite) TestAckImageOpOK() {
-	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId: expectedGroupID, Op: "ackimage", ExtraParam: query_str}).Return([]byte(""), nil)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request ackimage")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
-	suite.Equal(http.StatusOK, w.Code, "ackimage OK")
+func (suite *MessageOpTestSuite) TestAckMessageOpOK() {
+	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "ackmessage", ExtraParam: query_str}).Return([]byte(""), nil)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request ackmessage")))
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
+	suite.Equal(http.StatusOK, w.Code, "ackmessage OK")
 }
 
 
-func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongOp() {
-	query_str := "\"Id\":1,\"Op\":\"ackimage\"}"
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
-	suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong")
+func (suite *MessageOpTestSuite) TestAckMessageOpErrorWrongOp() {
+	query_str := "\"Id\":1,\"Op\":\"ackmessage\"}"
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
+	suite.Equal(http.StatusBadRequest, w.Code, "ackmessage wrong")
 }
 
-func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongID() {
-	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/bla" + correctTokenSuffix,"POST",query_str)
-	suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong")
+func (suite *MessageOpTestSuite) TestAckMessageOpErrorWrongID() {
+	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/bla" + correctTokenSuffix,"POST",query_str)
+	suite.Equal(http.StatusBadRequest, w.Code, "ackmessage wrong")
 }
diff --git a/broker/src/asapo_broker/server/post_query_images.go b/broker/src/asapo_broker/server/post_query_images.go
index 4d33c23588e4430bfd3154cd035488bd26417c08..87e833b684020191229484d40ea090b67e414503 100644
--- a/broker/src/asapo_broker/server/post_query_images.go
+++ b/broker/src/asapo_broker/server/post_query_images.go
@@ -5,12 +5,12 @@ import (
 	"net/http"
 )
 
-func routeQueryImages(w http.ResponseWriter, r *http.Request) {
+func routeQueryMessages(w http.ResponseWriter, r *http.Request) {
 	body, err := ioutil.ReadAll(r.Body)
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 		return
 	}
 
-	processRequest(w, r, "queryimages", string(body), false)
+	processRequest(w, r, "querymessages", string(body), false)
 }
diff --git a/broker/src/asapo_broker/server/post_query_images_test.go b/broker/src/asapo_broker/server/post_query_images_test.go
index 5ac71bca05115779607015c7c1c604c996c20515..0f2b55c1477c4f27747b3b6ce9effccd9c213a2a 100644
--- a/broker/src/asapo_broker/server/post_query_images_test.go
+++ b/broker/src/asapo_broker/server/post_query_images_test.go
@@ -35,10 +35,10 @@ func TestQueryTestSuite(t *testing.T) {
 func (suite *QueryTestSuite) TestQueryOK() {
 	query_str := "aaaa"
 
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream,Op: "queryimages", ExtraParam: query_str}).Return([]byte("{}"), nil)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request queryimages")))
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, Op: "querymessages", ExtraParam: query_str}).Return([]byte("{}"), nil)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request querymessages")))
 
-	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/0/queryimages"+correctTokenSuffix, "POST", query_str)
+	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/0/querymessages"+correctTokenSuffix, "POST", query_str)
 	suite.Equal(http.StatusOK, w.Code, "Query OK")
 }
 
diff --git a/broker/src/asapo_broker/server/post_reset_counter_test.go b/broker/src/asapo_broker/server/post_reset_counter_test.go
index d35f116a15d063dc6be8264f59ac0468b54c3ee1..37f70e2725294ac6aee3ae440000cb24374b16a4 100644
--- a/broker/src/asapo_broker/server/post_reset_counter_test.go
+++ b/broker/src/asapo_broker/server/post_reset_counter_test.go
@@ -33,11 +33,11 @@ func TestResetCounterTestSuite(t *testing.T) {
 }
 
 func (suite *ResetCounterTestSuite) TestResetCounterOK() {
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), nil)
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request resetcounter")))
 
-	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST")
+	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST")
 	suite.Equal(http.StatusOK, w.Code, "ResetCounter OK")
 }
diff --git a/broker/src/asapo_broker/server/process_request.go b/broker/src/asapo_broker/server/process_request.go
index 4adf102b6d48b8319780b1af186122251a45bdc9..3e937b879ac297a709b962f6f5d96b597bb6d76a 100644
--- a/broker/src/asapo_broker/server/process_request.go
+++ b/broker/src/asapo_broker/server/process_request.go
@@ -13,15 +13,15 @@ func extractRequestParameters(r *http.Request, needGroupID bool) (string, string
 	vars := mux.Vars(r)
 	db_name, ok1 := vars["dbname"]
 
-	stream, ok3 := vars["stream"]
-	substream, ok4 := vars["substream"]
+	datasource, ok3 := vars["datasource"]
+	stream, ok4 := vars["stream"]
 
 	ok2 := true
 	group_id := ""
 	if needGroupID {
 		group_id, ok2 = vars["groupid"]
 	}
-	return db_name, stream, substream, group_id, ok1 && ok2 && ok3 && ok4
+	return db_name, datasource, stream, group_id, ok1 && ok2 && ok3 && ok4
 }
 
 func IsLetterOrNumbers(s string) bool {
@@ -52,7 +52,7 @@ func checkGroupID(w http.ResponseWriter, needGroupID bool, group_id string, db_n
 func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_param string, needGroupID bool) {
 	r.Header.Set("Content-type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	db_name, stream, substream, group_id, ok := extractRequestParameters(r, needGroupID)
+	db_name, datasource, stream, group_id, ok := extractRequestParameters(r, needGroupID)
 	if !ok {
 		w.WriteHeader(http.StatusBadRequest)
 		return
@@ -68,10 +68,10 @@ func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_par
 	}
 
 	request := database.Request{}
-	request.DbName = db_name+"_"+stream
+	request.DbName = db_name+"_"+datasource
 	request.Op = op
 	request.ExtraParam = extra_param
-	request.DbCollectionName = substream
+	request.DbCollectionName = stream
 	request.GroupId = group_id
 	if yes, minSize := datasetRequested(r); yes {
 		request.DatasetOp = true
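
Putting the renaming together, a toy illustration (the local `Request` type mirrors `database.Request`; the values are the test constants from the next hunk) of how the new path segments land in the database request:

```go
package main

import "fmt"

// Request mirrors database.Request for illustration only.
type Request struct {
	DbName           string
	DbCollectionName string
	GroupId          string
	Op               string
}

func main() {
	// GET /database/beamtime_id/datasource/stream/<groupid>/next
	dbName, datasource, stream, groupID := "beamtime_id", "datasource", "stream", "bid2a5auidddp1vl71d0"
	req := Request{
		DbName:           dbName + "_" + datasource, // one Mongo database per beamtime + data source
		DbCollectionName: stream,                    // one collection per stream
		GroupId:          groupID,
		Op:               "next",
	}
	fmt.Printf("%+v\n", req)
}
```
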
diff --git a/broker/src/asapo_broker/server/process_request_test.go b/broker/src/asapo_broker/server/process_request_test.go
index 5aa7b28fc2a622dc88a6ec6f0c02c1951517c233..b4967924720e52f8a431937d207f216687601171 100644
--- a/broker/src/asapo_broker/server/process_request_test.go
+++ b/broker/src/asapo_broker/server/process_request_test.go
@@ -20,12 +20,12 @@ var correctTokenSuffix, wrongTokenSuffix, suffixWithWrongToken, expectedBeamtime
 
 const expectedGroupID = "bid2a5auidddp1vl71d0"
 const wrongGroupID = "_bid2a5auidddp1vl71"
+const expectedSource = "datasource"
 const expectedStream = "stream"
-const expectedSubstream = "substream"
 
 func prepareTestAuth() {
 	expectedBeamtimeId = "beamtime_id"
-	expectedDBName = expectedBeamtimeId + "_" + expectedStream
+	expectedDBName = expectedBeamtimeId + "_" + expectedSource
 	auth = utils.NewHMACAuth("secret")
 	token, err := auth.GenerateToken(&expectedBeamtimeId)
 	if err != nil {
@@ -109,7 +109,7 @@ func TestProcessRequestTestSuite(t *testing.T) {
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong token")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + suffixWithWrongToken)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + suffixWithWrongToken)
 
 	suite.Equal(http.StatusUnauthorized, w.Code, "wrong token")
 }
@@ -117,28 +117,28 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithNoToken() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("cannot extract")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + wrongTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + wrongTokenSuffix)
 
 	suite.Equal(http.StatusUnauthorized, w.Code, "no token")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusNoData, ""})
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 
 	suite.Equal(http.StatusConflict, w.Code, "wrong database name")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusServiceUnavailable, ""})
@@ -147,14 +147,14 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 	ExpectReconnect(suite.mock_db)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	time.Sleep(time.Second)
 	suite.Equal(http.StatusNotFound, w.Code, "data not found")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), errors.New(""))
@@ -162,7 +162,7 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
 
 	ExpectReconnect(suite.mock_db)
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	time.Sleep(time.Second)
 
 	suite.Equal(http.StatusNotFound, w.Code, "internal error")
@@ -170,28 +170,28 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsCounter() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
 
-	doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	suite.Equal(1, statistics.GetCounter(), "ProcessRequest increases counter")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWrongGroupID() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong groupid")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + wrongGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + wrongGroupID + "/next" + correctTokenSuffix)
 	suite.Equal(http.StatusBadRequest, w.Code, "wrong group id")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsDataset() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, DatasetOp:true, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, DatasetOp: true, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
 
-	doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true")
+	doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true")
 }
diff --git a/broker/src/asapo_broker/server/server.go b/broker/src/asapo_broker/server/server.go
index 957b8006ccb4eae9a0bac7d54645cb4a5342e763..01bf25de7195193e46041c12e0d00b95f722bc42 100644
--- a/broker/src/asapo_broker/server/server.go
+++ b/broker/src/asapo_broker/server/server.go
@@ -10,7 +10,7 @@ import (
 )
 
 const  kDefaultresendInterval = 10
-const  kDefaultSubstreamCacheUpdateIntervalMs = 100
+const  kDefaultStreamCacheUpdateIntervalMs = 100
 
 var db database.Agent
 
@@ -24,7 +24,7 @@ type serverSettings struct {
 	LogLevel            string
 	discoveredDbAddress string
 	CheckResendInterval *int
-	SubstreamCacheUpdateIntervalMs *int
+	StreamCacheUpdateIntervalMs *int
 }
 
 func (s *serverSettings) GetResendInterval() int {
@@ -34,11 +34,11 @@ func (s *serverSettings) GetResendInterval() int {
 	return *s.CheckResendInterval
 }
 
-func (s *serverSettings) GetSubstreamCacheUpdateInterval() int {
-	if s.SubstreamCacheUpdateIntervalMs==nil {
-		return kDefaultSubstreamCacheUpdateIntervalMs
+func (s *serverSettings) GetStreamCacheUpdateInterval() int {
+	if s.StreamCacheUpdateIntervalMs == nil {
+		return kDefaultStreamCacheUpdateIntervalMs
 	}
-	return *s.SubstreamCacheUpdateIntervalMs
+	return *s.StreamCacheUpdateIntervalMs
 }
 
 func (s *serverSettings) GetDatabaseServer() string {
@@ -91,7 +91,7 @@ func InitDB(dbAgent database.Agent) (err error) {
 		log.Debug("Got mongodb server: " + settings.discoveredDbAddress)
 	}
 
-	db.SetSettings(database.DBSettings{ReadFromInprocessPeriod: settings.GetResendInterval(),UpdateSubstreamCachePeriodMs: settings.GetSubstreamCacheUpdateInterval()})
+	db.SetSettings(database.DBSettings{ReadFromInprocessPeriod: settings.GetResendInterval(), UpdateStreamCachePeriodMs: settings.GetStreamCacheUpdateInterval()})
 
 	return db.Connect(settings.GetDatabaseServer())
 }
diff --git a/common/cpp/include/asapo/common/data_structs.h b/common/cpp/include/asapo/common/data_structs.h
index c17921eb780068ba8ba2c85c996e62f16457baf8..35b5e49d5b29d2c5726168ca1029a880d9f77695 100644
--- a/common/cpp/include/asapo/common/data_structs.h
+++ b/common/cpp/include/asapo/common/data_structs.h
@@ -1,5 +1,5 @@
-#ifndef ASAPO_FILE_INFO_H
-#define ASAPO_FILE_INFO_H
+#ifndef ASAPO_MESSAGE_META_H
+#define ASAPO_MESSAGE_META_H
 
 #include <cinttypes>
 #include <chrono>
@@ -22,7 +22,7 @@ uint64_t NanosecsEpochFromISODate(std::string date_time);
 
 bool TimeFromJson(const JsonStringParser& parser, const std::string& name, std::chrono::system_clock::time_point* val);
 
-class FileInfo {
+class MessageMeta {
   public:
     std::string name;
     std::chrono::system_clock::time_point timestamp;
@@ -31,6 +31,7 @@ class FileInfo {
     std::string source;
     std::string metadata;
     uint64_t buf_id{0};
+    uint64_t dataset_substream{0};
     std::string Json() const;
     bool SetFromJson(const std::string& json_string);
     std::string FullName(const std::string& base_path) const;
@@ -48,16 +49,16 @@ struct StreamInfo {
 
 using StreamInfos = std::vector<StreamInfo>;
 
-inline bool operator==(const FileInfo& lhs, const FileInfo& rhs) {
+inline bool operator==(const MessageMeta& lhs, const MessageMeta& rhs) {
     return  (lhs.name == rhs.name &&
              lhs.id == rhs.id &&
              lhs.timestamp == rhs.timestamp &&
              lhs.size == rhs.size);
 }
 
-using FileData = std::unique_ptr<uint8_t[]>;
+using MessageData = std::unique_ptr<uint8_t[]>;
 
-using FileInfos = std::vector<FileInfo>;
+using MessageMetas = std::vector<MessageMeta>;
 
 
 using IdList = std::vector<uint64_t>;
@@ -65,7 +66,7 @@ using IdList = std::vector<uint64_t>;
 struct DataSet {
     uint64_t id;
     uint64_t expected_size;
-    FileInfos content;
+    MessageMetas content;
     bool SetFromJson(const std::string& json_string);
 };
 
@@ -80,10 +81,10 @@ Error GetSourceTypeFromString(std::string stype,SourceType *type);
 std::string GetStringFromSourceType(SourceType type);
 
 struct SourceCredentials {
-    SourceCredentials(SourceType type, std::string beamtime, std::string beamline, std::string stream, std::string token):
+    SourceCredentials(SourceType type, std::string beamtime, std::string beamline, std::string data_source, std::string token):
         beamtime_id{std::move(beamtime)},
         beamline{std::move(beamline)},
-        stream{std::move(stream)},
+        data_source{std::move(data_source)},
         user_token{std::move(token)},
         type{type}{};
     SourceCredentials() {};
@@ -92,11 +93,11 @@ struct SourceCredentials {
     static const std::string kDefaultBeamtimeId;
     std::string beamtime_id;
     std::string beamline;
-    std::string stream;
+    std::string data_source;
     std::string user_token;
     SourceType type = SourceType::kProcessed;
     std::string GetString() {
-        return (type==SourceType::kRaw?std::string("raw"):std::string("processed")) + "%"+ beamtime_id + "%" + beamline + "%" + stream + "%" + user_token;
+        return (type==SourceType::kRaw?std::string("raw"):std::string("processed")) + "%"+ beamtime_id + "%" + beamline + "%" + data_source + "%" + user_token;
     };
 };
 
@@ -109,8 +110,5 @@ enum IngestModeFlags : uint64_t {
 
 const uint64_t kDefaultIngestMode = kTransferData | kStoreInFilesystem | kStoreInDatabase;
 
-const std::string kDefaultSubstream = "default";
-
-
 }
-#endif //ASAPO_FILE_INFO_H
+#endif //ASAPO_MESSAGE_META_H
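
`SourceCredentials::GetString()` above produces a "%"-separated string with the renamed `data_source` in the fourth slot; a hypothetical receiver-side split (for illustration only, not code from the repo):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	creds := "processed%beamtime_id%beamline%data_source%token"
	parts := strings.SplitN(creds, "%", 5)
	// parts: type, beamtime_id, beamline, data_source, user_token
	fmt.Println("type:", parts[0], "data_source:", parts[3])
}
```
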
diff --git a/common/cpp/include/asapo/common/networking.h b/common/cpp/include/asapo/common/networking.h
index c4b4eecedcc6c466ff7173fcd290f5260508e33d..bd7f5379f302f0fbd05e423b270495e699958bb0 100644
--- a/common/cpp/include/asapo/common/networking.h
+++ b/common/cpp/include/asapo/common/networking.h
@@ -21,7 +21,7 @@ enum class NetworkConnectionType : uint32_t {
 enum Opcode : uint8_t {
     kOpcodeUnknownOp = 1,
     kOpcodeTransferData,
-    kOpcodeTransferSubsetData,
+    kOpcodeTransferDatasetData,
     kOpcodeStreamInfo,
     kOpcodeLastStream,
     kOpcodeGetBufferData,
@@ -55,16 +55,16 @@ struct GenericRequestHeader {
         op_code = header.op_code, data_id = header.data_id, data_size = header.data_size, meta_size = header.meta_size,
         memcpy(custom_data, header.custom_data, kNCustomParams * sizeof(uint64_t)),
         memcpy(message, header.message, kMaxMessageSize);
-        strncpy(substream, header.substream, kMaxMessageSize);
+        strncpy(stream, header.stream, kMaxMessageSize);
     }
 
     /* Keep in mind that the message here is just strncpy'ed, you can change the message later */
     GenericRequestHeader(Opcode i_op_code = kOpcodeUnknownOp, uint64_t i_data_id = 0,
                          uint64_t i_data_size = 0, uint64_t i_meta_size = 0, const std::string& i_message = "",
-                         const std::string& i_substream = ""):
+                         const std::string& i_stream = ""):
         op_code{i_op_code}, data_id{i_data_id}, data_size{i_data_size}, meta_size{i_meta_size} {
         strncpy(message, i_message.c_str(), kMaxMessageSize);
-        strncpy(substream, i_substream.c_str(), kMaxMessageSize);
+        strncpy(stream, i_stream.c_str(), kMaxMessageSize);
     }
 
     Opcode      op_code;
@@ -73,11 +73,11 @@ struct GenericRequestHeader {
     uint64_t    meta_size;
     CustomRequestData    custom_data;
     char        message[kMaxMessageSize]; /* Can also be a binary message (e.g. MemoryRegionDetails) */
-    char        substream[kMaxMessageSize]; /* Must be a string (strcpy is used) */
+    char        stream[kMaxMessageSize]; /* Must be a string (strncpy is used) */
     std::string Json() {
         std::string s = "{\"id\":" + std::to_string(data_id) + ","
                         "\"buffer\":\"" + std::string(message) + "\"" + ","
-                        "\"substream\":\"" + std::string(substream) + "\""
+                        "\"stream\":\"" + std::string(stream) + "\""
                         + "}";
         return s;
     };
@@ -93,7 +93,7 @@ struct GenericNetworkResponse {
 };
 
 
-struct SendDataResponse :  GenericNetworkResponse {
+struct SendResponse : GenericNetworkResponse {
 };
 
 }
diff --git a/common/cpp/include/asapo/database/database.h b/common/cpp/include/asapo/database/database.h
index d08b0f9bd566285ae767591c4f28590ab274396e..d36b95322ffac117825b6fc21d81099ce5238aac 100644
--- a/common/cpp/include/asapo/database/database.h
+++ b/common/cpp/include/asapo/database/database.h
@@ -15,14 +15,14 @@ constexpr char kDBMetaCollectionName[] = "meta";
 class Database {
   public:
     virtual Error Connect(const std::string& address, const std::string& database) = 0;
-    virtual Error Insert(const std::string& collection, const FileInfo& file, bool ignore_duplicates) const = 0;
+    virtual Error Insert(const std::string& collection, const MessageMeta& file, bool ignore_duplicates) const = 0;
     virtual Error Upsert(const std::string& collection, uint64_t id, const uint8_t* data, uint64_t size) const = 0;
-    virtual Error InsertAsSubset(const std::string& collection, const FileInfo& file, uint64_t subset_id,
-                                 uint64_t subset_size,
+    virtual Error InsertAsDatasetMessage(const std::string& collection, const MessageMeta& file,
+                                 uint64_t dataset_size,
                                  bool ignore_duplicates) const = 0;
 
-    virtual Error GetById(const std::string& collection, uint64_t id, FileInfo* file) const = 0;
-    virtual Error GetDataSetById(const std::string& collection, uint64_t set_id, uint64_t id, FileInfo* file) const = 0;
+    virtual Error GetById(const std::string& collection, uint64_t id, MessageMeta* file) const = 0;
+    virtual Error GetDataSetById(const std::string& collection, uint64_t set_id, uint64_t id, MessageMeta* file) const = 0;
     virtual Error GetStreamInfo(const std::string& collection, StreamInfo* info) const  = 0;
     virtual Error GetLastStream(StreamInfo* info) const  = 0;
     virtual ~Database() = default;
diff --git a/common/cpp/include/asapo/http_client/http_client.h b/common/cpp/include/asapo/http_client/http_client.h
index 1d4a8b0eccbf89a88cadeea24f98ab6b8c802b59..3a41ea96b28013c655b5d51ce4abcee6c80977a4 100644
--- a/common/cpp/include/asapo/http_client/http_client.h
+++ b/common/cpp/include/asapo/http_client/http_client.h
@@ -15,7 +15,7 @@ class HttpClient {
                              HttpCode* response_code,
                              Error* err) const noexcept = 0;
     virtual Error Post(const std::string& uri, const std::string& cookie,
-                       const std::string& input_data, FileData* ouput_data,
+                       const std::string& input_data, MessageData* ouput_data,
                        uint64_t output_data_size,
                        HttpCode* response_code)  const noexcept = 0;
     virtual Error Post(const std::string& uri, const std::string& cookie,
diff --git a/common/cpp/include/asapo/io/io.h b/common/cpp/include/asapo/io/io.h
index 1eb81c619ab78bc6dc88a3ec727bf01c27692a7e..3965506ca4d64ef7ba6b763b648fc9d69e4d0fee 100644
--- a/common/cpp/include/asapo/io/io.h
+++ b/common/cpp/include/asapo/io/io.h
@@ -109,19 +109,19 @@ class IO {
     virtual size_t          Read            (FileDescriptor fd, void* buf, size_t length, Error* err) const = 0;
     virtual size_t          Write           (FileDescriptor fd, const void* buf, size_t length, Error* err) const = 0;
     virtual Error           RemoveFile(const std::string& fname) const = 0;
-    virtual Error          WriteDataToFile  (const std::string& root_folder, const std::string& fname, const FileData& data,
+    virtual Error          WriteDataToFile  (const std::string& root_folder, const std::string& fname, const MessageData& data,
                                              size_t length, bool create_directories, bool allow_ovewrite) const = 0;
     virtual Error          WriteDataToFile  (const std::string& root_folder, const std::string& fname, const uint8_t* data,
                                              size_t length, bool create_directories, bool allow_ovewrite) const = 0;
     virtual Error ReceiveDataToFile(SocketDescriptor socket, const std::string& root_folder, const std::string& fname,
                                     size_t length, bool create_directories, bool allow_ovewrite) const = 0;
     virtual void            CreateNewDirectory      (const std::string& directory_name, Error* err) const = 0;
-    virtual FileData        GetDataFromFile         (const std::string& fname, uint64_t* fsize, Error* err) const = 0;
+    virtual MessageData        GetDataFromFile         (const std::string& fname, uint64_t* fsize, Error* err) const = 0;
     virtual SubDirList      GetSubDirectories(const std::string& path, Error* err) const = 0;
-    virtual std::vector<FileInfo>   FilesInFolder   (const std::string& folder, Error* err) const = 0;
+    virtual std::vector<MessageMeta>   FilesInFolder   (const std::string& folder, Error* err) const = 0;
     virtual std::string     ReadFileToString        (const std::string& fname, Error* err) const = 0;
     virtual Error GetLastError() const = 0;
-    virtual FileInfo        GetFileInfo(const std::string& name, Error* err) const = 0;
+    virtual MessageMeta        GetMessageMeta(const std::string& name, Error* err) const = 0;
 
     virtual ~IO() = default;
 };
diff --git a/common/cpp/include/asapo/unittests/MockDatabase.h b/common/cpp/include/asapo/unittests/MockDatabase.h
index e07a4c51a71a481b904e1bf6687ccdfa4f4b604f..691e39af8f4b24c25216f6b5f99180dc8de4ab4d 100644
--- a/common/cpp/include/asapo/unittests/MockDatabase.h
+++ b/common/cpp/include/asapo/unittests/MockDatabase.h
@@ -15,21 +15,21 @@ class MockDatabase : public Database {
         return Error{Connect_t(address, database)};
 
     }
-    Error Insert(const std::string& collection, const FileInfo& file, bool ignore_duplicates) const override {
+    Error Insert(const std::string& collection, const MessageMeta& file, bool ignore_duplicates) const override {
         return Error{Insert_t(collection, file, ignore_duplicates)};
     }
 
-    Error InsertAsSubset(const std::string& collection, const FileInfo& file, uint64_t subset_id,
-                         uint64_t subset_size, bool ignore_duplicates) const override {
-        return Error{InsertAsSubset_t(collection, file, subset_id, subset_size, ignore_duplicates)};
+    Error InsertAsDatasetMessage(const std::string& collection, const MessageMeta& file,
+                                 uint64_t dataset_size, bool ignore_duplicates) const override {
+        return Error{InsertAsDatasetMessage_t(collection, file, dataset_size, ignore_duplicates)};
     }
 
 
     MOCK_METHOD2(Connect_t, ErrorInterface * (const std::string&, const std::string&));
-    MOCK_CONST_METHOD3(Insert_t, ErrorInterface * (const std::string&, const FileInfo&, bool));
+    MOCK_CONST_METHOD3(Insert_t, ErrorInterface * (const std::string&, const MessageMeta&, bool));
 
 
-    MOCK_CONST_METHOD5(InsertAsSubset_t, ErrorInterface * (const std::string&, const FileInfo&, uint64_t, uint64_t, bool));
+    MOCK_CONST_METHOD4(InsertAsDatasetMessage_t, ErrorInterface * (const std::string&, const MessageMeta&, uint64_t, bool));
 
 
     Error Upsert(const std::string& collection, uint64_t id, const uint8_t* data, uint64_t size) const override {
@@ -38,18 +38,18 @@ class MockDatabase : public Database {
     }
     MOCK_CONST_METHOD4(Upsert_t, ErrorInterface * (const std::string&, uint64_t id, const uint8_t* data, uint64_t size));
 
-    Error GetById(const std::string& collection, uint64_t id, FileInfo* file) const override {
+    Error GetById(const std::string& collection, uint64_t id, MessageMeta* file) const override {
         return Error{GetById_t(collection, id, file)};
     }
 
-    MOCK_CONST_METHOD3(GetById_t, ErrorInterface * (const std::string&, uint64_t id, FileInfo*));
+    MOCK_CONST_METHOD3(GetById_t, ErrorInterface * (const std::string&, uint64_t id, MessageMeta*));
 
 
-    Error GetDataSetById(const std::string& collection, uint64_t set_id, uint64_t id, FileInfo* file) const override {
+    Error GetDataSetById(const std::string& collection, uint64_t set_id, uint64_t id, MessageMeta* file) const override {
         return Error{GetSetById_t(collection, set_id, id, file)};
     }
 
-    MOCK_CONST_METHOD4(GetSetById_t, ErrorInterface * (const std::string&, uint64_t set_id, uint64_t id, FileInfo*));
+    MOCK_CONST_METHOD4(GetSetById_t, ErrorInterface * (const std::string&, uint64_t set_id, uint64_t id, MessageMeta*));
 
 
     Error GetStreamInfo(const std::string& collection, StreamInfo* info) const override {
diff --git a/common/cpp/include/asapo/unittests/MockHttpClient.h b/common/cpp/include/asapo/unittests/MockHttpClient.h
index 34db9077e54d31189ef683c72205685067fc9966..41a5b5d232e2146c7851928ba503792cb959e8f1 100644
--- a/common/cpp/include/asapo/unittests/MockHttpClient.h
+++ b/common/cpp/include/asapo/unittests/MockHttpClient.h
@@ -24,7 +24,7 @@ class MockHttpClient : public HttpClient {
         return response;
     }
 
-    Error Post(const std::string& uri,  const std::string& cookie, const std::string& input_data, FileData* ouput_data,
+    Error Post(const std::string& uri, const std::string& cookie, const std::string& input_data, MessageData* ouput_data,
                uint64_t output_data_size,
                HttpCode* response_code)  const noexcept override {
         return Error{PostReturnArray_t(uri, cookie, input_data, ouput_data, output_data_size, response_code)};
@@ -44,7 +44,7 @@ class MockHttpClient : public HttpClient {
                                    ErrorInterface** err));
     MOCK_CONST_METHOD6(PostReturnArray_t,
                        ErrorInterface * (const std::string& uri, const std::string& cookie, const std::string& input_data,
-                                         FileData* ouput_data, uint64_t output_data_size, HttpCode* code));
+                                         MessageData* ouput_data, uint64_t output_data_size, HttpCode* code));
 
 
 };
diff --git a/common/cpp/include/asapo/unittests/MockIO.h b/common/cpp/include/asapo/unittests/MockIO.h
index 94027b2a8ea56d92aac02b889b3b3230d5734cfb..d6c10de678c3cfa3ad8f4d9c0c1220c96b6e7514 100644
--- a/common/cpp/include/asapo/unittests/MockIO.h
+++ b/common/cpp/include/asapo/unittests/MockIO.h
@@ -207,11 +207,11 @@ class MockIO : public IO {
     }
     MOCK_CONST_METHOD2(CreateNewDirectory_t, void(const std::string& directory_name, ErrorInterface** err));
 
-    FileData GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const override {
+    MessageData GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const override {
         ErrorInterface* error = nullptr;
         auto data = GetDataFromFile_t(fname, fsize, &error);
         err->reset(error);
-        return FileData(data);
+        return MessageData(data);
     }
 
     MOCK_CONST_METHOD3(GetDataFromFile_t, uint8_t* (const std::string& fname, uint64_t* fsize, ErrorInterface** err));
@@ -228,7 +228,7 @@ class MockIO : public IO {
     }
     MOCK_CONST_METHOD3(SendFile_t, ErrorInterface * (SocketDescriptor socket_fd, const std::string& fname, size_t length));
 
-    Error WriteDataToFile(const std::string& root_folder, const std::string& fname, const FileData& data,
+    Error WriteDataToFile(const std::string& root_folder, const std::string& fname, const MessageData& data,
                           size_t length, bool create_directories, bool allow_ovewrite) const override {
         return Error{WriteDataToFile_t(root_folder, fname, data.get(), length, create_directories, allow_ovewrite)};
 
@@ -260,23 +260,23 @@ class MockIO : public IO {
                        const uint8_t* data, size_t fsize, bool create_directories, bool allow_ovewrite));
 
 
-    FileInfo GetFileInfo(const std::string& name, Error* err) const override {
+    MessageMeta GetMessageMeta(const std::string& name, Error* err) const override {
         ErrorInterface* error = nullptr;
-        auto data = GetFileInfo_t(name, &error);
+        auto data = GetMessageMeta_t(name, &error);
         err->reset(error);
         return data;
 
     }
 
-    MOCK_CONST_METHOD2(GetFileInfo_t, FileInfo (const std::string& name, ErrorInterface** err));
+    MOCK_CONST_METHOD2(GetMessageMeta_t, MessageMeta (const std::string& name, ErrorInterface** err));
 
-    std::vector<FileInfo> FilesInFolder(const std::string& folder, Error* err) const override {
+    std::vector<MessageMeta> FilesInFolder(const std::string& folder, Error* err) const override {
         ErrorInterface* error = nullptr;
         auto data = FilesInFolder_t(folder, &error);
         err->reset(error);
         return data;
     }
-    MOCK_CONST_METHOD2(FilesInFolder_t, std::vector<FileInfo>(const std::string& folder, ErrorInterface** err));
+    MOCK_CONST_METHOD2(FilesInFolder_t, std::vector<MessageMeta>(const std::string& folder, ErrorInterface** err));
 
 
     SubDirList GetSubDirectories(const std::string& path, Error* err) const override {
diff --git a/common/cpp/src/data_structs/data_structs.cpp b/common/cpp/src/data_structs/data_structs.cpp
index f92a8210c5f79c245ba1c01689697fce5622ded6..fc7882c29dda4c1a5add7578ca45922fb159a03b 100644
--- a/common/cpp/src/data_structs/data_structs.cpp
+++ b/common/cpp/src/data_structs/data_structs.cpp
@@ -41,7 +41,7 @@ Error GetSourceTypeFromString(std::string stype, SourceType* type) {
     }
 }
 
-std::string FileInfo::Json() const {
+std::string MessageMeta::Json() const {
     auto nanoseconds_from_epoch = NanosecsEpochFromTimePoint(timestamp);
     std::string x = name;
 //todo: change this - use / when sending file from windows
@@ -62,8 +62,8 @@ std::string FileInfo::Json() const {
                                                                                                                 "\"timestamp\":"
         + std::to_string(nanoseconds_from_epoch) + ","
                                                    "\"source\":\"" + source + "\","
-                                                                              "\"buf_id\":" + std::to_string(buf_id_int)
-        + ","
+                                                                              "\"buf_id\":" + std::to_string(buf_id_int) + ","
+         "\"dataset_substream\":" + std::to_string(dataset_substream) + ","
           "\"meta\":" + (metadata.size() == 0 ? std::string("{}") : metadata)
         + "}";
     return s;
@@ -91,7 +91,7 @@ bool DataSet::SetFromJson(const std::string &json_string) {
 
     std::vector<std::string> vec_fi_endcoded;
     Error parse_err;
-    (parse_err = parser.GetArrayRawStrings("images", &vec_fi_endcoded)) ||
+    (parse_err = parser.GetArrayRawStrings("messages", &vec_fi_endcoded)) ||
         (parse_err = parser.GetUInt64("size", &expected_size)) ||
         (parse_err = parser.GetUInt64("_id", &id));
     if (parse_err) {
@@ -99,7 +99,7 @@ bool DataSet::SetFromJson(const std::string &json_string) {
         return false;
     }
     for (auto fi_encoded : vec_fi_endcoded) {
-        FileInfo fi;
+        MessageMeta fi;
         if (!fi.SetFromJson(fi_encoded)) {
             *this = old;
             return false;
@@ -109,7 +109,7 @@ bool DataSet::SetFromJson(const std::string &json_string) {
     return true;
 }
 
-bool FileInfo::SetFromJson(const std::string &json_string) {
+bool MessageMeta::SetFromJson(const std::string &json_string) {
     auto old = *this;
 
     JsonStringParser parser(json_string);
@@ -119,6 +119,7 @@ bool FileInfo::SetFromJson(const std::string &json_string) {
         parser.GetString("name", &name) ||
         parser.GetString("source", &source) ||
         parser.GetUInt64("buf_id", &buf_id) ||
+        parser.GetUInt64("dataset_substream", &dataset_substream) ||
         parser.Embedded("meta").GetRawString(&metadata) ||
         !TimeFromJson(parser, "timestamp", &timestamp)) {
         *this = old;
@@ -130,7 +131,7 @@ bool FileInfo::SetFromJson(const std::string &json_string) {
     return true;
 }
 
-std::string FileInfo::FullName(const std::string &base_path) const {
+std::string MessageMeta::FullName(const std::string &base_path) const {
     std::string full_name;
     full_name = base_path.empty() ? "" : base_path + kPathSeparator;
     return full_name + name;
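
A hedged sketch (Go mirror structs, field names taken from the parser calls above) of the record layout `MessageMeta::Json()` writes and `DataSet::SetFromJson()` reads after the renaming, with the dataset array now called `messages`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// messageMeta and dataSet are illustrative mirrors of the C++ structs.
type messageMeta struct {
	Id               uint64          `json:"_id"`
	Size             uint64          `json:"size"`
	Name             string          `json:"name"`
	Timestamp        uint64          `json:"timestamp"` // nanoseconds since epoch
	Source           string          `json:"source"`
	BufId            int64           `json:"buf_id"` // written as the signed buf_id_int above
	DatasetSubstream uint64          `json:"dataset_substream"`
	Meta             json.RawMessage `json:"meta"`
}

type dataSet struct {
	Id           uint64        `json:"_id"`
	ExpectedSize uint64        `json:"size"`
	Messages     []messageMeta `json:"messages"` // was "images" before the renaming
}

func main() {
	rec := `{"_id":1,"size":3,"messages":[{"_id":1,"size":100,"name":"f1","timestamp":0,"source":"host:1234","buf_id":0,"dataset_substream":2,"meta":{}}]}`
	var ds dataSet
	if err := json.Unmarshal([]byte(rec), &ds); err != nil {
		panic(err)
	}
	fmt.Println(ds.ExpectedSize, ds.Messages[0].DatasetSubstream) // 3 2
}
```
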
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index e562670c279684a687265c8db82441399962e943..442d20b1d27587a500b4aae929323fa8fe8e353e 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -110,7 +110,7 @@ void MongoDBClient::CleanUp() {
     }
 }
 
-bson_p PrepareBsonDocument(const FileInfo &file, Error* err) {
+bson_p PrepareBsonDocument(const MessageMeta &file, Error* err) {
     bson_error_t mongo_err;
     auto s = file.Json();
     auto json = reinterpret_cast<const uint8_t*>(s.c_str());
@@ -172,7 +172,7 @@ Error MongoDBClient::UpdateBsonDocument(uint64_t id, const bson_p &document, boo
     return err;
 }
 
-Error MongoDBClient::Insert(const std::string &collection, const FileInfo &file, bool ignore_duplicates) const {
+Error MongoDBClient::Insert(const std::string &collection, const MessageMeta &file, bool ignore_duplicates) const {
     if (!connected_) {
         return DBErrorTemplates::kNotConnected.Generate();
     }
@@ -237,9 +237,8 @@ Error MongoDBClient::AddBsonDocumentToArray(bson_t* query, bson_t* update, bool
     return err;
 }
 
-Error MongoDBClient::InsertAsSubset(const std::string &collection, const FileInfo &file,
-                                    uint64_t subset_id,
-                                    uint64_t subset_size,
+Error MongoDBClient::InsertAsDatasetMessage(const std::string &collection, const MessageMeta &file,
+                                    uint64_t dataset_size,
                                     bool ignore_duplicates) const {
     if (!connected_) {
         return DBErrorTemplates::kNotConnected.Generate();
@@ -252,14 +251,14 @@ Error MongoDBClient::InsertAsSubset(const std::string &collection, const FileInf
     if (err) {
         return err;
     }
-    auto query = BCON_NEW ("$and", "[", "{", "_id", BCON_INT64(subset_id), "}", "{", "images._id", "{", "$ne",
-                           BCON_INT64(file.id), "}", "}", "]");
+    auto query = BCON_NEW ("$and", "[", "{", "_id", BCON_INT64(file.id), "}", "{", "messages.dataset_substream", "{", "$ne",
+                           BCON_INT64(file.dataset_substream), "}", "}", "]");
     auto update = BCON_NEW ("$setOnInsert", "{",
-                            "size", BCON_INT64(subset_size),
+                            "size", BCON_INT64(dataset_size),
                             "timestamp", BCON_INT64((int64_t) NanosecsEpochFromTimePoint(file.timestamp)),
                             "}",
                             "$addToSet", "{",
-                            "images", BCON_DOCUMENT(document.get()), "}");
+                            "messages", BCON_DOCUMENT(document.get()), "}");
 
     err = AddBsonDocumentToArray(query, update, ignore_duplicates);
 
@@ -322,7 +321,7 @@ Error MongoDBClient::GetRecordFromDb(const std::string &collection, uint64_t id,
     return err;
 }
 
-Error MongoDBClient::GetById(const std::string &collection, uint64_t id, FileInfo* file) const {
+Error MongoDBClient::GetById(const std::string &collection, uint64_t id, MessageMeta* file) const {
     std::string record_str;
     auto err = GetRecordFromDb(collection, id, GetRecordMode::kById, &record_str);
     if (err) {
@@ -335,7 +334,7 @@ Error MongoDBClient::GetById(const std::string &collection, uint64_t id, FileInf
     return nullptr;
 }
 
-Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t id_in_set, uint64_t id, FileInfo* file) const {
+Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t id_in_set, uint64_t id, MessageMeta* file) const {
     std::string record_str;
     auto err = GetRecordFromDb(collection, id, GetRecordMode::kById, &record_str);
     if (err) {
@@ -347,9 +346,9 @@ Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t id_i
         DBErrorTemplates::kJsonParseError.Generate(record_str);
     }
 
-    for (const auto &fileinfo : dataset.content) {
-        if (fileinfo.id == id_in_set) {
-            *file = fileinfo;
+    for (const auto &message_meta : dataset.content) {
+        if (message_meta.dataset_substream == id_in_set) {
+            *file = message_meta;
             return nullptr;
         }
     }
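
With the reworked query, file.id now addresses the dataset document itself (the MongoDB _id), while dataset_substream guarantees uniqueness inside the messages array, so each detector module can insert its share independently. A sketch of that usage, assuming the internal mongodb_client.h header; the endpoint, database and collection names are placeholders:

// Four modules write the same dataset id but distinct substreams; the
// document is created once ($setOnInsert) and the per-module metadata
// accumulates in the "messages" array ($addToSet).
// (uses the internal database/mongodb_client.h header; includes omitted)
asapo::MongoDBClient db;
auto err = db.Connect("localhost:27017", "data_beamtime");  // placeholders
if (err == nullptr) {
    asapo::MessageMeta meta;
    meta.id = 42;                // dataset (multi-image) id -> MongoDB _id
    meta.dataset_substream = 3;  // this module's slot in the dataset
    meta.name = "module3/image42";
    err = db.InsertAsDatasetMessage("detector", meta, /*dataset_size=*/4,
                                    /*ignore_duplicates=*/true);
}
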
diff --git a/common/cpp/src/database/mongodb_client.h b/common/cpp/src/database/mongodb_client.h
index b74691a3205e6c93ec24e061ae2fb3c6074c5a3e..a1b9bb5ef3a306006439170761a60ab86bfc6fed 100644
--- a/common/cpp/src/database/mongodb_client.h
+++ b/common/cpp/src/database/mongodb_client.h
@@ -43,12 +43,12 @@ class MongoDBClient final : public Database {
   public:
     MongoDBClient();
     Error Connect(const std::string& address, const std::string& database) override;
-    Error Insert(const std::string& collection, const FileInfo& file, bool ignore_duplicates) const override;
-    Error InsertAsSubset(const std::string& collection, const FileInfo& file, uint64_t subset_id, uint64_t subset_size,
+    Error Insert(const std::string& collection, const MessageMeta& file, bool ignore_duplicates) const override;
+    Error InsertAsDatasetMessage(const std::string& collection, const MessageMeta& file, uint64_t dataset_size,
                          bool ignore_duplicates) const override;
     Error Upsert(const std::string& collection, uint64_t id, const uint8_t* data, uint64_t size) const override;
-    Error GetById(const std::string& collection, uint64_t id, FileInfo* file) const override;
-    Error GetDataSetById(const std::string& collection, uint64_t id_in_set, uint64_t id, FileInfo* file) const override;
+    Error GetById(const std::string& collection, uint64_t id, MessageMeta* file) const override;
+    Error GetDataSetById(const std::string& collection, uint64_t id_in_set, uint64_t id, MessageMeta* file) const override;
     Error GetStreamInfo(const std::string& collection, StreamInfo* info) const override;
     Error GetLastStream(StreamInfo* info) const override;
     ~MongoDBClient() override;
diff --git a/common/cpp/src/http_client/curl_http_client.cpp b/common/cpp/src/http_client/curl_http_client.cpp
index 7524e4fc1bf58f959c2e132370c194cb34f60523..0bdefda2189197185f2c5e7797dc1b0f1a491ffa 100644
--- a/common/cpp/src/http_client/curl_http_client.cpp
+++ b/common/cpp/src/http_client/curl_http_client.cpp
@@ -108,10 +108,10 @@ Error CurlHttpClient::Command(bool post, CurlDataContainer* data_container, cons
     return ProcessCurlResponse(curl_, res, errbuf, response_code);
 }
 
-FileData AllocateMemory(uint64_t size, Error* err) {
-    FileData data;
+MessageData AllocateMemory(uint64_t size, Error* err) {
+    MessageData data;
     try {
-        data = FileData{new uint8_t[(size_t)size + 1 ]};
+        data = MessageData{new uint8_t[(size_t)size + 1 ]};
     } catch (...) {
         *err = ErrorTemplates::kMemoryAllocationError.Generate();
         return nullptr;
@@ -123,7 +123,7 @@ FileData AllocateMemory(uint64_t size, Error* err) {
 Error CurlHttpClient::Post(const std::string& uri,
                            const std::string& cookie,
                            const std::string& input_data,
-                           FileData* output_data,
+                           MessageData* output_data,
                            uint64_t output_data_size,
                            HttpCode* response_code) const noexcept {
     Error err;
diff --git a/common/cpp/src/http_client/curl_http_client.h b/common/cpp/src/http_client/curl_http_client.h
index 07412c143a0d6805c26ff6b68046f78a2b92f975..cfc2e7626974422a72d87b371660cd7052cc34ea 100644
--- a/common/cpp/src/http_client/curl_http_client.h
+++ b/common/cpp/src/http_client/curl_http_client.h
@@ -19,7 +19,7 @@ enum class CurlDataMode {
 
 struct CurlDataContainer {
     std::string string_buffer;
-    FileData* p_array;
+    MessageData* p_array;
     uint64_t array_size;
     uint64_t bytes_received = 0;
     CurlDataMode mode;
@@ -34,7 +34,7 @@ class CurlHttpClient final : public HttpClient {
     std::string Get(const std::string& uri, HttpCode* response_code, Error* err) const noexcept override;
     std::string Post(const std::string& uri, const std::string& cookie, const std::string& data, HttpCode* response_code,
                      Error* err) const noexcept override;
-    Error Post(const std::string& uri,  const std::string& cookie, const std::string& input_data, FileData* output_data,
+    Error Post(const std::string& uri, const std::string& cookie, const std::string& input_data, MessageData* output_data,
                uint64_t output_data_size,
                HttpCode* response_code)  const noexcept override;
     Error Post(const std::string& uri, const std::string& cookie,
diff --git a/common/cpp/src/system_io/system_io.cpp b/common/cpp/src/system_io/system_io.cpp
index 0c3098be19a2c2f95a89d1a7eaffe7369a5eb503..b15011f379445951910b8fcfa9dfb02ad12a18cd 100644
--- a/common/cpp/src/system_io/system_io.cpp
+++ b/common/cpp/src/system_io/system_io.cpp
@@ -38,21 +38,21 @@ const size_t SystemIO::kReadWriteBufSize = size_t(1024) * 1024 * 50; //50MiByte
 
 // PRIVATE FUNCTIONS - START
 
-void SortFileList(std::vector<FileInfo>* file_list) {
+void SortFileList(std::vector<MessageMeta>* file_list) {
     std::sort(file_list->begin(), file_list->end(),
-    [](FileInfo const & a, FileInfo const & b) {
+    [](MessageMeta const & a, MessageMeta const & b) {
         return a.timestamp < b.timestamp;
     });
 }
 
-void StripBasePath(const std::string& folder, std::vector<FileInfo>* file_list) {
+void StripBasePath(const std::string& folder, std::vector<MessageMeta>* file_list) {
     auto n_erase = folder.size() + 1;
     for (auto& file : *file_list) {
         file.name.erase(0, n_erase);
     }
 }
 
-void AssignIDs(FileInfos* file_list) {
+void AssignIDs(MessageMetas* file_list) {
     int64_t id = 0;
     for (auto& file : *file_list) {
         file.id = ++id;
@@ -86,10 +86,10 @@ uint8_t* SystemIO::AllocateArray(uint64_t fsize, Error* err) const {
 
 // PRIVATE FUNCTIONS - END
 
-FileData SystemIO::GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const {
+MessageData SystemIO::GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const {
 
     if (*fsize == 0 && !fname.empty()) {
-        auto info = GetFileInfo(fname, err);
+        auto info = GetMessageMeta(fname, err);
         if (*err != nullptr) {
             return nullptr;
         }
@@ -115,12 +115,12 @@ FileData SystemIO::GetDataFromFile(const std::string& fname, uint64_t* fsize, Er
     }
 
     Close(fd, err);
-    return FileData{data_array};
+    return MessageData{data_array};
 }
 
-FileInfos SystemIO::FilesInFolder(const std::string& folder, Error* err) const {
-    FileInfos files{};
-    CollectFileInformationRecursively(folder, &files, err);
+MessageMetas SystemIO::FilesInFolder(const std::string& folder, Error* err) const {
+    MessageMetas files{};
+    CollectMessageMetasRecursively(folder, &files, err);
     if (*err != nullptr) {
         return {};
     }
@@ -193,7 +193,7 @@ Error SystemIO::WriteDataToFile(const std::string& root_folder, const std::strin
 
 }
 
-Error SystemIO::WriteDataToFile(const std::string& root_folder, const std::string& fname, const FileData& data,
+Error SystemIO::WriteDataToFile(const std::string& root_folder, const std::string& fname, const MessageData& data,
                                 size_t length, bool create_directories, bool allow_ovewrite) const {
     return WriteDataToFile(root_folder, fname, data.get(), length, create_directories, allow_ovewrite);
 }
diff --git a/common/cpp/src/system_io/system_io.h b/common/cpp/src/system_io/system_io.h
index f964e92e257d20a20bfd485a4f4d3bb35a8b5500..b42cd2b1799f8e8f54c9b5f7eab6ad89c435d178 100644
--- a/common/cpp/src/system_io/system_io.h
+++ b/common/cpp/src/system_io/system_io.h
@@ -46,7 +46,7 @@ class SystemIO final : public IO {
 
     void ApplyNetworkOptions(SocketDescriptor socket_fd, Error* err) const;
 
-    //void CollectFileInformationRecursively(const std::string& path, std::vector<FileInfo>* files, IOErrors* err) const;
+    //void CollectMessageMetasRecursively(const std::string& path, std::vector<MessageMeta>* files, IOErrors* err) const;
     int FileOpenModeToPosixFileOpenMode(int open_flags) const;
 
     short AddressFamilyToPosixFamily      (AddressFamilies address_family) const;
@@ -78,7 +78,7 @@ class SystemIO final : public IO {
     static ssize_t		_recv(SocketDescriptor socket_fd, void* buffer, size_t length);
     static ssize_t      _read(FileDescriptor fd, void* buffer, size_t length);
     static ssize_t      _write(FileDescriptor fd, const void* buffer, size_t count);
-    void            CollectFileInformationRecursively(const std::string& path, std::vector<FileInfo>* files,
+    void            CollectMessageMetasRecursively(const std::string& path, std::vector<MessageMeta>* files,
                                                       Error* err) const;
     void            GetSubDirectoriesRecursively(const std::string& path, SubDirList* subdirs, Error* err) const;
     Error           CreateDirectoryWithParents(const std::string& root_path, const std::string& path) const;
@@ -100,7 +100,7 @@ class SystemIO final : public IO {
 
 
     // this is not standard function - to be implemented differently in windows and linux
-    std::vector<FileInfo>   FilesInFolder(const std::string& folder, Error* err) const override;
+    std::vector<MessageMeta>   FilesInFolder(const std::string& folder, Error* err) const override;
 
     /*
      * Network
@@ -139,8 +139,8 @@ class SystemIO final : public IO {
     size_t          Read(FileDescriptor fd, void* buf, size_t length, Error* err) const override;
     size_t          Write(FileDescriptor fd, const void* buf, size_t length, Error* err) const override;
     void            CreateNewDirectory(const std::string& directory_name, Error* err) const override;
-    FileData        GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const override;
-    Error           WriteDataToFile  (const std::string& root_folder, const std::string& fname, const FileData& data,
+    MessageData     GetDataFromFile(const std::string& fname, uint64_t* fsize, Error* err) const override;
+    Error           WriteDataToFile  (const std::string& root_folder, const std::string& fname, const MessageData& data,
                                       size_t length, bool create_directories, bool allow_ovewrite) const override;
     Error           ReceiveDataToFile(SocketDescriptor socket, const std::string& root_folder, const std::string& fname,
                                       size_t length, bool create_directories, bool allow_ovewrite) const override;
@@ -152,7 +152,7 @@ class SystemIO final : public IO {
     Error           RemoveFile(const std::string& fname) const override;
     Error           GetLastError() const override;
     std::string     AddressFromSocket(SocketDescriptor socket) const noexcept override;
-    FileInfo        GetFileInfo(const std::string& name, Error* err) const override;
+    MessageMeta     GetMessageMeta(const std::string& name, Error* err) const override;
 
 
 };
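
The IO layer keeps its behavior; only the vocabulary moves: GetFileInfo becomes GetMessageMeta, FileData becomes MessageData, and FilesInFolder still returns a timestamp-sorted list with consecutive ids (AssignIDs starts at 1). A usage sketch via the GenerateDefaultIO factory referenced elsewhere in this patch; the paths are placeholders:

#include "asapo/io/io_factory.h"

int main() {
    auto io = asapo::GenerateDefaultIO();
    asapo::Error err;

    // Single file: name/size/timestamp are filled exactly as GetFileInfo did before.
    auto meta = io->GetMessageMeta("/tmp/in/image42", &err);
    if (err == nullptr) {
        auto data = io->GetDataFromFile(meta.name, &meta.size, &err);  // MessageData buffer
    }

    // Whole folder: sorted by timestamp, base path stripped, ids assigned from 1.
    auto metas = io->FilesInFolder("/tmp/in", &err);
    return 0;
}
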
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index 0277406c6e5f723cbd54bf0339918420a1723d10..fd0b7f35ef97f60a680571d13765175946f85e4a 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -82,7 +82,7 @@ bool IsDirectory(const struct dirent* entity) {
            strstr(entity->d_name, ".") == nullptr;
 }
 
-void SetModifyDate(const struct stat& t_stat, FileInfo* file_info) {
+void SetModifyDate(const struct stat& t_stat, MessageMeta* message_meta) {
 #ifdef __APPLE__
 #define st_mtim st_mtimespec
 #endif
@@ -92,16 +92,16 @@ void SetModifyDate(const struct stat& t_stat, FileInfo* file_info) {
 #undef st_mtim
 #endif
 
-    file_info->timestamp = system_clock::time_point
+    message_meta->timestamp = system_clock::time_point
     {std::chrono::duration_cast<system_clock::duration>(d)};
 }
 
-void SetFileSize(const struct stat& t_stat, FileInfo* file_info) {
-    file_info->size = t_stat.st_size;
+void SetFileSize(const struct stat& t_stat, MessageMeta* message_meta) {
+    message_meta->size = t_stat.st_size;
 }
 
-void SetFileName(const string& name, FileInfo* file_info) {
-    file_info->name = name;
+void SetFileName(const string& name, MessageMeta* message_meta) {
+    message_meta->name = name;
 }
 
 struct stat FileStat(const string& fname, Error* err) {
@@ -114,41 +114,41 @@ struct stat FileStat(const string& fname, Error* err) {
     return t_stat;
 }
 
-FileInfo GetFileInfo(const string& name, Error* err) {
-    FileInfo file_info;
+MessageMeta GetMessageMeta(const string& name, Error* err) {
+    MessageMeta message_meta;
 
-    SetFileName(name, &file_info);
+    SetFileName(name, &message_meta);
 
     auto t_stat = FileStat(name, err);
     if (*err != nullptr) {
         (*err)->Append(name);
-        return FileInfo{};
+        return MessageMeta{};
     }
 
-    SetFileSize(t_stat, &file_info);
+    SetFileSize(t_stat, &message_meta);
 
-    SetModifyDate(t_stat, &file_info);
+    SetModifyDate(t_stat, &message_meta);
 
-    return file_info;
+    return message_meta;
 }
 
-FileInfo SystemIO::GetFileInfo(const string& name, Error* err) const {
-    return ::asapo::GetFileInfo(name, err);
+MessageMeta SystemIO::GetMessageMeta(const string& name, Error* err) const {
+    return ::asapo::GetMessageMeta(name, err);
 }
 
 void ProcessFileEntity(const struct dirent* entity, const std::string& path,
-                       FileInfos* files, Error* err) {
+                       MessageMetas* files, Error* err) {
 
     *err = nullptr;
     if (entity->d_type != DT_REG) {
         return;
     }
 
-    FileInfo file_info = GetFileInfo(path + "/" + entity->d_name, err);
+    MessageMeta message_meta = GetMessageMeta(path + "/" + entity->d_name, err);
     if (*err != nullptr) {
         return;
     }
-    files->push_back(file_info);
+    files->push_back(message_meta);
 }
 
 void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList* subdirs, Error* err) const {
@@ -176,8 +176,8 @@ void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList*
     closedir(dir);
 }
 
-void SystemIO::CollectFileInformationRecursively(const std::string& path,
-                                                 FileInfos* files, Error* err) const {
+void SystemIO::CollectMessageMetasRecursively(const std::string& path,
+                                              MessageMetas* files, Error* err) const {
     errno = 0;
     auto dir = opendir((path).c_str());
     if (dir == nullptr) {
@@ -188,7 +188,7 @@ void SystemIO::CollectFileInformationRecursively(const std::string& path,
 
     while (struct dirent* current_entity = readdir(dir)) {
         if (IsDirectory(current_entity)) {
-            CollectFileInformationRecursively(path + "/" + current_entity->d_name,
+            CollectMessageMetasRecursively(path + "/" + current_entity->d_name,
                                               files, err);
         } else {
             ProcessFileEntity(current_entity, path, files, err);
diff --git a/common/cpp/src/system_io/system_io_windows.cpp b/common/cpp/src/system_io/system_io_windows.cpp
index cb9c6d43a20fc772070c2c683f24309b3ea4f90d..9755e2e91123f293f682db88c2ee618ccc1b3517 100644
--- a/common/cpp/src/system_io/system_io_windows.cpp
+++ b/common/cpp/src/system_io/system_io_windows.cpp
@@ -126,10 +126,10 @@ bool IsDirectory(const WIN32_FIND_DATA f) {
            strstr(f.cFileName, ".") == nullptr;
 }
 
-FileInfo GetFileInfo_win(const WIN32_FIND_DATA& f, const string& name, Error* err) {
-    FileInfo file_info;
+MessageMeta GetMessageMeta_win(const WIN32_FIND_DATA& f, const string& name, Error* err) {
+    MessageMeta message_meta;
 
-    file_info.timestamp = FileTime2TimePoint(f.ftLastWriteTime, err);
+    message_meta.timestamp = FileTime2TimePoint(f.ftLastWriteTime, err);
     if (*err) {
         return {};
     }
@@ -138,14 +138,14 @@ FileInfo GetFileInfo_win(const WIN32_FIND_DATA& f, const string& name, Error* er
     fsize.LowPart = f.nFileSizeLow;
     fsize.HighPart = f.nFileSizeHigh;
 
-    file_info.size = fsize.QuadPart;
+    message_meta.size = fsize.QuadPart;
 
-    file_info.name = name + "\\" + f.cFileName;
+    message_meta.name = name + "\\" + f.cFileName;
 
-    return file_info;
+    return message_meta;
 }
 
-FileInfo SystemIO::GetFileInfo(const std::string& name, Error* err) const {
+MessageMeta SystemIO::GetMessageMeta(const std::string& name, Error* err) const {
     WIN32_FIND_DATA f;
 
     auto hFind = FindFirstFile(name.c_str(), &f);
@@ -155,23 +155,23 @@ FileInfo SystemIO::GetFileInfo(const std::string& name, Error* err) const {
         return {};
     }
     FindClose(hFind);
-    return GetFileInfo_win(f, name, err);
+    return GetMessageMeta_win(f, name, err);
 }
 
 void ProcessFileEntity(const WIN32_FIND_DATA& f, const std::string& path,
-                       FileInfos* files, Error* err) {
+                       MessageMetas* files, Error* err) {
 
     *err = nullptr;
     if (f.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
         return;
     }
 
-    auto file_info = GetFileInfo_win(f, path, err);
+    auto message_meta = GetMessageMeta_win(f, path, err);
     if (*err) {
         return;
     }
 
-    files->push_back(file_info);
+    files->push_back(message_meta);
 }
 
 void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList* subdirs, Error* err) const {
@@ -202,8 +202,8 @@ void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList*
     }
 }
 
-void SystemIO::CollectFileInformationRecursively(const std::string& path,
-                                                 FileInfos* files, Error* err) const {
+void SystemIO::CollectMessageMetasRecursively(const std::string& path,
+                                              MessageMetas* files, Error* err) const {
     WIN32_FIND_DATA find_data;
     HANDLE handle = FindFirstFile((path + "\\*.*").c_str(), &find_data);
     if (handle == INVALID_HANDLE_VALUE) {
@@ -214,7 +214,7 @@ void SystemIO::CollectFileInformationRecursively(const std::string& path,
 
     do {
         if (IsDirectory(find_data)) {
-            CollectFileInformationRecursively(path + "\\" + find_data.cFileName, files, err);
+            CollectMessageMetasRecursively(path + "\\" + find_data.cFileName, files, err);
         } else {
             ProcessFileEntity(find_data, path, files, err);
         }
diff --git a/common/cpp/unittests/data_structs/test_data_structs.cpp b/common/cpp/unittests/data_structs/test_data_structs.cpp
index e42481212f23dfd193803ff0c568cd9ab8698aaa..17357f5cbf7bdb2b7404bb4d920edd37813557bd 100644
--- a/common/cpp/unittests/data_structs/test_data_structs.cpp
+++ b/common/cpp/unittests/data_structs/test_data_structs.cpp
@@ -5,7 +5,7 @@
 #include <chrono>
 
 
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using asapo::StreamInfo;
 using asapo::SourceType;
 using asapo::SourceCredentials;
@@ -25,42 +25,44 @@ namespace {
 
 uint64_t big_uint = 18446744073709551615ull;
 
-FileInfo PrepareFileInfo() {
-    FileInfo finfo;
-    finfo.size = 100;
-    finfo.id = 1;
-    finfo.name = std::string("folder") + asapo::kPathSeparator + "test";
-    finfo.source = "host:1234";
-    finfo.buf_id = big_uint;
-    finfo.timestamp = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(1));
-    finfo.metadata =  "{\"bla\":10}";
-    return finfo;
+MessageMeta PrepareMessageMeta() {
+    MessageMeta message_meta;
+    message_meta.size = 100;
+    message_meta.id = 1;
+    message_meta.dataset_substream = 3;
+    message_meta.name = std::string("folder") + asapo::kPathSeparator + "test";
+    message_meta.source = "host:1234";
+    message_meta.buf_id = big_uint;
+    message_meta.timestamp = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(1));
+    message_meta.metadata =  "{\"bla\":10}";
+    return message_meta;
 }
 
-TEST(FileInFo, Defaults) {
-    FileInfo finfo;
+TEST(MessageMetaTests, Defaults) {
+    MessageMeta message_meta;
 
-    ASSERT_THAT(finfo.buf_id, Eq(0));
-    ASSERT_THAT(finfo.id, Eq(0));
+    ASSERT_THAT(message_meta.buf_id, Eq(0));
+    ASSERT_THAT(message_meta.id, Eq(0));
+    ASSERT_THAT(message_meta.dataset_substream, Eq(0));
 }
 
 
-TEST(FileInFo, CorrectConvertToJson) {
-    auto finfo = PrepareFileInfo();
-    std::string json = finfo.Json();
+TEST(MessageMetaTests, CorrectConvertToJson) {
+    auto message_meta = PrepareMessageMeta();
+    std::string json = message_meta.Json();
     if (asapo::kPathSeparator == '/') {
         ASSERT_THAT(json, Eq(
-                        R"({"_id":1,"size":100,"name":"folder/test","timestamp":1000000,"source":"host:1234","buf_id":-1,"meta":{"bla":10}})"));
+                        R"({"_id":1,"size":100,"name":"folder/test","timestamp":1000000,"source":"host:1234","buf_id":-1,"dataset_substream":3,"meta":{"bla":10}})"));
     } else {
         ASSERT_THAT(json, Eq(
-                        R"({"_id":1,"size":100,"name":"folder\\test","timestamp":1000000,"source":"host:1234","buf_id":-1,"meta":{"bla":10}})"));
+                        R"({"_id":1,"size":100,"name":"folder\\test","timestamp":1000000,"source":"host:1234","buf_id":-1,"dataset_substream":3,"meta":{"bla":10}})"));
     }
 }
 
-TEST(FileInFo, CorrectConvertFromJsonReturnsError) {
-    auto finfo = PrepareFileInfo();
+TEST(MessageMetaTests, CorrectConvertFromJsonReturnsError) {
+    auto message_meta = PrepareMessageMeta();
 
-    FileInfo result;
+    MessageMeta result;
     result.id = 10;
 
     std::string json = R"({"_id":2,"foo":"foo","bar":1})";
@@ -72,10 +74,10 @@ TEST(FileInFo, CorrectConvertFromJsonReturnsError) {
 
 }
 
-TEST(FileInFo, CorrectConvertFromJsonReturnsErrorForMetadata) {
-    auto finfo = PrepareFileInfo();
+TEST(MessageMetaTests, CorrectConvertFromJsonReturnsErrorForMetadata) {
+    auto message_meta = PrepareMessageMeta();
 
-    FileInfo result;
+    MessageMeta result;
 
     std::string json = R"({"_id":2,"foo":"foo","bar":1,{"meta":err}})";
 
@@ -87,32 +89,33 @@ TEST(FileInFo, CorrectConvertFromJsonReturnsErrorForMetadata) {
 
 
 
-TEST(FileInFo, CorrectConvertFromJson) {
-    auto finfo = PrepareFileInfo();
-    std::string json = finfo.Json();
+TEST(MessageMetaTests, CorrectConvertFromJson) {
+    auto message_meta = PrepareMessageMeta();
+    std::string json = message_meta.Json();
 
-    FileInfo result;
+    MessageMeta result;
     auto ok = result.SetFromJson(json);
 
     ASSERT_THAT(ok, Eq(true));
 
-    ASSERT_THAT(result.id, Eq(finfo.id));
-    ASSERT_THAT(result.name, Eq(finfo.name));
-    ASSERT_THAT(result.size, Eq(finfo.size));
-    ASSERT_THAT(result.timestamp, Eq(finfo.timestamp));
-    ASSERT_THAT(result.buf_id, Eq(finfo.buf_id));
-    ASSERT_THAT(result.source, Eq(finfo.source));
-    ASSERT_THAT(result.metadata, Eq(finfo.metadata));
+    ASSERT_THAT(result.id, Eq(message_meta.id));
+    ASSERT_THAT(result.name, Eq(message_meta.name));
+    ASSERT_THAT(result.size, Eq(message_meta.size));
+    ASSERT_THAT(result.timestamp, Eq(message_meta.timestamp));
+    ASSERT_THAT(result.buf_id, Eq(message_meta.buf_id));
+    ASSERT_THAT(result.source, Eq(message_meta.source));
+    ASSERT_THAT(result.metadata, Eq(message_meta.metadata));
+    ASSERT_THAT(result.dataset_substream, Eq(message_meta.dataset_substream));
 
 }
 
 
-TEST(FileInFo, CorrectConvertFromJsonEmptyMeta) {
-    auto finfo = PrepareFileInfo();
-    finfo.metadata = "";
-    std::string json = finfo.Json();
+TEST(MessageMetaTests, CorrectConvertFromJsonEmptyMeta) {
+    auto message_meta = PrepareMessageMeta();
+    message_meta.metadata = "";
+    std::string json = message_meta.Json();
 
-    FileInfo result;
+    MessageMeta result;
     auto ok = result.SetFromJson(json);
 
     ASSERT_THAT(ok, Eq(true));
@@ -121,7 +124,7 @@ TEST(FileInFo, CorrectConvertFromJsonEmptyMeta) {
 }
 
 
-TEST(FileInFo, EpochNanosecsFromNow) {
+TEST(MessageMetaTests, EpochNanosecsFromNow) {
     auto ns = asapo::EpochNanosecsFromNow();
     ASSERT_THAT(ns, ::testing::Gt(0));
 }
@@ -143,7 +146,7 @@ StreamInfo PrepareStreamInfo() {
 }
 
 
-TEST(FileInFo, TimeFromNanosec) {
+TEST(MessageMetaTests, TimeFromNanosec) {
     auto tp = asapo::TimePointfromNanosec(1000);
     auto res = asapo::NanosecsEpochFromTimePoint(tp);
     ASSERT_THAT(res, Eq(1000));
@@ -208,9 +211,9 @@ TEST(StreamInfo, ConvertToJsonWithoutID) {
 }
 
 TEST(SourceCredentials, ConvertToString) {
-    auto sc = SourceCredentials{SourceType::kRaw,"beamtime","beamline","stream","token"};
-    std::string expected1= "raw%beamtime%beamline%stream%token";
-    std::string expected2= "processed%beamtime%beamline%stream%token";
+    auto sc = SourceCredentials{SourceType::kRaw,"beamtime","beamline","source","token"};
+    std::string expected1= "raw%beamtime%beamline%source%token";
+    std::string expected2= "processed%beamtime%beamline%source%token";
 
     auto res1 = sc.GetString();
     sc.type = asapo::SourceType::kProcessed;
@@ -249,7 +252,7 @@ auto tests = std::vector<TestEpochFromISODate> {
     TestEpochFromISODate{"1970-12-01T00:00:00.", 0},
 };
 
-TEST(FileInFo, NanosecsEpochFromISODate) {
+TEST(MessageMetaTests, NanosecsEpochFromISODate) {
     for (auto test : tests) {
         auto res = asapo::NanosecsEpochFromISODate(test.iso);
         ASSERT_THAT(res, Eq(test.ns));
@@ -262,7 +265,7 @@ auto tests2 = std::vector<TestEpochFromISODate> {
     TestEpochFromISODate{"2019-07-25T15:38:11.100010002", 1564069091100010002},
 };
 
-TEST(FileInFo, ISODateFromNanosecsEpoch) {
+TEST(MessageMetaTests, ISODateFromNanosecsEpoch) {
     for (auto test : tests2) {
         auto res = asapo::IsoDateFromEpochNanosecs(test.ns);
         ASSERT_THAT(res, Eq(test.iso));
diff --git a/consumer/api/cpp/CMakeLists.txt b/consumer/api/cpp/CMakeLists.txt
index 2b0eb8e928a4eabe9b48147bab7dc0567712fe2d..24661f0da20047c86732a811b657cf9f8d5e24ee 100644
--- a/consumer/api/cpp/CMakeLists.txt
+++ b/consumer/api/cpp/CMakeLists.txt
@@ -1,9 +1,9 @@
 set(TARGET_NAME asapo-consumer)
 
 set(SOURCE_FILES
-        src/data_broker.cpp
-        src/server_data_broker.cpp
-        src/tcp_client.cpp
+        src/consumer.cpp
+        src/consumer_impl.cpp
+        src/tcp_consumer_client.cpp
         src/tcp_connection_pool.cpp
         src/fabric_consumer_client.cpp)
 
@@ -31,8 +31,8 @@ target_link_libraries(${TARGET_NAME} ${CURL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}
 ################################
 set(TEST_SOURCE_FILES
         unittests/test_consumer_api.cpp
-        unittests/test_server_broker.cpp
-        unittests/test_tcp_client.cpp
+        unittests/test_consumer_impl.cpp
+        unittests/test_tcp_consumer_client.cpp
         unittests/test_tcp_connection_pool.cpp
         unittests/test_fabric_consumer_client.cpp
         unittests/test_rds_error_mapper.cpp
diff --git a/consumer/api/cpp/include/asapo/asapo_consumer.h b/consumer/api/cpp/include/asapo/asapo_consumer.h
index 11723a89d99dd81b9d823674aec3d49af9e71fc5..e9dee4e9b5ebe0b003680e268eae48b32e91a32b 100644
--- a/consumer/api/cpp/include/asapo/asapo_consumer.h
+++ b/consumer/api/cpp/include/asapo/asapo_consumer.h
@@ -1,16 +1,9 @@
-/** @defgroup consumer The Consumer Group
- *  This is the consumer group
- *  @{
- */
-
 #ifndef ASAPO_ASAPO_CONSUMER_H
 #define ASAPO_ASAPO_CONSUMER_H
 
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "asapo/consumer/consumer_error.h"
 #include "asapo/common/version.h"
 #include <ostream>
-#endif //ASAPO_ASAPO_CONSUMER_H
 
-
-/** @} */ // end of consumer
+#endif //ASAPO_ASAPO_CONSUMER_H
diff --git a/consumer/api/cpp/include/asapo/consumer/consumer.h b/consumer/api/cpp/include/asapo/consumer/consumer.h
new file mode 100644
index 0000000000000000000000000000000000000000..678c7179b73a53cdace52021d4e2488a951e52a4
--- /dev/null
+++ b/consumer/api/cpp/include/asapo/consumer/consumer.h
@@ -0,0 +1,216 @@
+#ifndef ASAPO_DATASOURCE_H
+#define ASAPO_DATASOURCE_H
+
+#include <memory>
+#include <string>
+#include <vector>
+#include <chrono>
+
+#include "asapo/common/data_structs.h"
+#include "asapo/common/error.h"
+#include "asapo/common/networking.h"
+
+namespace asapo {
+
+class Consumer {
+  public:
+    //! Reset counter for the specific group.
+    /*!
+      \param group_id - group id to use.
+      \param stream - stream to use
+      \return nullptr if the command was successful, otherwise error.
+    */
+    virtual Error ResetLastReadMarker(std::string group_id, std::string stream) = 0;
+
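+    //! Set the read marker for the specific group.
+    /*!
+      \param group_id - group id to use.
+      \param value - new value of the read marker
+      \param stream - stream to use
+      \return nullptr if the command was successful, otherwise error.
+    */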
+    virtual Error SetLastReadMarker(std::string group_id, uint64_t value, std::string stream) = 0;
+
+    //! Acknowledge message for specific group id and stream.
+    /*!
+        \param group_id - group id to use.
+        \param id - message id
+        \param stream - stream to use
+        \return nullptr if the command was successful, otherwise error.
+    */
+    virtual Error Acknowledge(std::string group_id, uint64_t id, std::string stream) = 0;
+
+    //! Negative acknowledge message for specific group id and stream.
+    /*!
+        \param group_id - group id to use.
+        \param id - message id
+        \param delay_ms - message will be redelivered after delay, 0 to redeliver immediately
+        \param stream - stream to use
+        \return nullptr if the command was successful, otherwise error.
+    */
+    virtual Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_ms,
+                                      std::string stream) = 0;
+
+
+    //! Get unacknowledged messages for specific group id and stream.
+    /*!
+        \param group_id - group id to use.
+        \param from_id - return messages with ids greater or equal to from_id (use 0 to disable limit)
+        \param to_id - return messages with ids less or equal to to_id (use 0 to disable limit)
+        \param stream - stream to use
+        \return list of ids, might be empty; error is set to nullptr on success, otherwise to the failure cause.
+    */
+    virtual IdList GetUnacknowledgedMessages(std::string group_id,
+                                             uint64_t from_id,
+                                             uint64_t to_id,
+                                             std::string stream,
+                                             Error* error) = 0;
+
+    //! Set timeout for consumer operations. Default - no timeout
+    virtual void SetTimeout(uint64_t timeout_ms) = 0;
+
+    //! Will disable RDMA.
+    //! If RDMA is disabled, not available or the first connection fails to build up, it will automatically fall back to TCP.
+    //! This will only have an effect if no previous connection attempt was made on this Consumer.
+    virtual void ForceNoRdma() = 0;
+
+    //! Returns the current network connection type
+    /*!
+     * \return current network connection type. If no connection was made, the result is NetworkConnectionType::kUndefined
+     */
+    virtual NetworkConnectionType CurrentConnectionType() const = 0;
+
+    //! Get list of streams, set from to "" to get all streams
+    virtual StreamInfos GetStreamList(std::string from, Error* err) = 0;
+
+    //! Get current number of datasets
+    /*!
+      \param stream - stream to use
+      \param err - set to nullptr if the operation succeeded, error otherwise.
+      \return number of datasets.
+    */
+    virtual uint64_t GetCurrentSize(std::string stream, Error* err) = 0;
+
+    //! Generate new GroupID.
+    /*!
+      \param err - set to nullptr if the operation succeeded, error otherwise.
+      \return group ID.
+    */
+    virtual std::string GenerateNewGroupId(Error* err) = 0;
+
+    //! Get Beamtime metadata.
+    /*!
+      \param err - set to nullptr if the operation succeeded, error otherwise.
+      \return beamtime metadata.
+    */
+    virtual std::string GetBeamtimeMeta(Error* err) = 0;
+
+    //! Receive next available message.
+    /*!
+      \param group_id - group id to use
+      \param info - where to store message metadata. Can be set to nullptr if only message data is needed.
+      \param data - where to store message data. Can be set to nullptr if only message metadata is needed.
+      \param stream - stream to use
+      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
+    */
+    virtual Error GetNext(std::string group_id, MessageMeta* info, MessageData* data, std::string stream) = 0;
+
+    //! Retrieves message using message meta.
+    /*!
+      \param info - message metadata to use, can be updated after operation
+      \param data - where to store message data. Can be set to nullptr if only message metadata is needed.
+      \return Error if data is nullptr or data cannot be read, nullptr otherwise.
+    */
+    virtual Error RetrieveData(MessageMeta* info, MessageData* data) = 0;
+
+
+    //! Receive next available completed dataset.
+    /*!
+      \param err - will be set to an error if data cannot be read, nullptr otherwise.
+      \param group_id - group id to use.
+      \param min_size - wait until dataset has min_size messages (0 for maximum size)
+      \param stream - stream to use
+      \return DataSet - information about the dataset
+    */
+    virtual DataSet GetNextDataset(std::string group_id, uint64_t min_size, std::string stream, Error* err) = 0;
+
+    //! Receive last available dataset which has min_size messages.
+    /*!
+      \param err - will be set to an error if data cannot be read, nullptr otherwise.
+      \param min_size - number of messages in the dataset (0 for maximum size)
+      \param stream - stream to use
+      \return DataSet - information about the dataset
+    */
+    virtual DataSet GetLastDataset(uint64_t min_size, std::string stream, Error* err) = 0;
+
+    //! Receive dataset by id.
+    /*!
+      \param id - dataset id
+      \param err - will be set to an error if data cannot be read or the dataset size is less than min_size, nullptr otherwise.
+      \param min_size - wait until dataset has min_size messages (0 for maximum size)
+      \param stream - stream to use
+      \return DataSet - information about the dataset
+    */
+    virtual DataSet GetDatasetById(uint64_t id, uint64_t min_size, std::string stream, Error* err) = 0;
+
+    //! Receive single message by id.
+    /*!
+      \param id - message id
+      \param info - where to store message metadata. Can be set to nullptr if only message data is needed.
+      \param data - where to store message data. Can be set to nullptr if only message metadata is needed.
+      \param stream - stream to use
+      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
+    */
+    virtual Error GetById(uint64_t id, MessageMeta* info, MessageData* data, std::string stream) = 0;
+
+    //! Receive id of last acknowledged message
+    /*!
+      \param group_id - group id to use.
+      \param stream - stream to use
+      \param error -  will be set in case of error, nullptr otherwise.
+      \return id of the last acknowledged message, 0 if error
+    */
+    virtual uint64_t GetLastAcknowledgedMessage(std::string group_id, std::string stream, Error* error) = 0;
+
+    //! Receive last available message.
+    /*!
+      \param info - where to store message metadata. Can be set to nullptr if only message data is needed.
+      \param data - where to store message data. Can be set to nullptr if only message metadata is needed.
+      \param stream - stream to use
+      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
+    */
+    virtual Error GetLast(MessageMeta* info, MessageData* data, std::string stream) = 0;
+
+    //! Get all messages matching the query.
+    /*!
+      \param query - query string in SQL format; limiting the returned dataset is supported
+      \param stream - stream to use
+      \param err - will be set in case of error, nullptr otherwise
+      \return vector of message metadata matching the specified query. Empty if nothing found or on error
+    */
+    virtual MessageMetas QueryMessages(std::string query, std::string stream, Error* err) = 0;
+
+    //! Configure resending unacknowledged data
+    /*!
+      \param resend - whether to resend
+      \param delay_ms - how many milliseconds to wait before resending
+      \param resend_attempts - how many resend attempts to make
+    */
+    virtual void SetResendNacs(bool resend, uint64_t delay_ms, uint64_t resend_attempts) = 0;
+
+    //! Will try to interrupt current long-running operations (mainly needed to exit a waiting loop in C code called from Python)
+    virtual void InterruptCurrentOperation() = 0;
+
+    virtual ~Consumer() = default; // needed for unique_ptr to delete itself
+};
+
+/*! A class to create a consumer instance. The class's only function CreateConsumer is used for this. */
+class ConsumerFactory {
+  public:
+    static std::unique_ptr<Consumer> CreateConsumer(std::string server_name, std::string source_path,
+                                                    bool has_filesystem, SourceCredentials source, Error* error) noexcept;
+
+};
+
+}
+#endif //ASAPO_DATASOURCE_H
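
Compared to the removed DataBroker interface, the duplicate default-stream overloads are gone: the stream is always passed explicitly and every timeout and delay is in milliseconds. A minimal consumption loop under the new names; the endpoint, path, credentials and stream name are placeholders:

#include "asapo/asapo_consumer.h"

int main() {
    asapo::Error err;
    auto consumer = asapo::ConsumerFactory::CreateConsumer(
        "localhost:8400", "/tmp/data", true,
        asapo::SourceCredentials{asapo::SourceType::kProcessed, "beamtime",
                                 "beamline", "source", "token"},
        &err);
    if (err != nullptr) {
        return 1;
    }
    consumer->SetTimeout(1000);  // milliseconds everywhere now

    auto group_id = consumer->GenerateNewGroupId(&err);
    asapo::MessageMeta meta;
    asapo::MessageData data;
    while (consumer->GetNext(group_id, &meta, &data, "default") == nullptr) {
        // process meta.size bytes of data ...
    }
    return 0;
}
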
diff --git a/consumer/api/cpp/include/asapo/consumer/consumer_error.h b/consumer/api/cpp/include/asapo/consumer/consumer_error.h
index cfed6107f38c61188870e2bd83402be0f965ba1b..7f3990d226fbcc3f663e367992496cc07e0fa3f7 100644
--- a/consumer/api/cpp/include/asapo/consumer/consumer_error.h
+++ b/consumer/api/cpp/include/asapo/consumer/consumer_error.h
@@ -30,7 +30,7 @@ class ConsumerErrorData : public CustomErrorData {
   public:
     uint64_t id;
     uint64_t id_max;
-    std::string next_substream;
+    std::string next_stream;
 };
 
 
@@ -64,7 +64,7 @@ auto const kWrongInput = ConsumerErrorTemplate {
 };
 
 auto const kInterruptedTransaction = ConsumerErrorTemplate {
-    "error from broker server", ConsumerErrorType::kInterruptedTransaction
+    "server error", ConsumerErrorType::kInterruptedTransaction
 };
 
 auto const kUnavailableService = ConsumerErrorTemplate {
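
The renamed next_stream field is what drives the finished-stream handover: when the last message of a closed stream is reached and a follow-up stream was set, the consumer reports kStreamFinished instead of kEndOfStream, and the error payload names the continuation. A handling sketch, reusing consumer, group_id, meta and data from the example above:

std::string stream = "default";  // placeholder
auto err = consumer->GetNext(group_id, &meta, &data, stream);
if (err == asapo::ConsumerErrorTemplates::kStreamFinished) {
    auto error_data =
        static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
    if (error_data != nullptr && !error_data->next_stream.empty()) {
        stream = error_data->next_stream;  // continue with the follow-up stream
    }
}
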
diff --git a/consumer/api/cpp/include/asapo/consumer/data_broker.h b/consumer/api/cpp/include/asapo/consumer/data_broker.h
deleted file mode 100644
index d7392829b3d0e654db3e085411e4e52c2c3ee38c..0000000000000000000000000000000000000000
--- a/consumer/api/cpp/include/asapo/consumer/data_broker.h
+++ /dev/null
@@ -1,214 +0,0 @@
-#ifndef ASAPO_DATASOURCE_H
-#define ASAPO_DATASOURCE_H
-
-#include <memory>
-#include <string>
-#include <vector>
-#include <chrono>
-
-#include "asapo/common/data_structs.h"
-#include "asapo/common/error.h"
-#include "asapo/common/networking.h"
-
-namespace asapo {
-
-class DataBroker {
-  public:
-    //! Reset counter for the specific group.
-    /*!
-      \param group_id - group id to use.
-      \return nullptr of command was successful, otherwise error.
-    */
-    virtual Error ResetLastReadMarker(std::string group_id) = 0;
-    virtual Error ResetLastReadMarker(std::string group_id, std::string substream) = 0;
-
-    virtual Error SetLastReadMarker(uint64_t value, std::string group_id) = 0;
-    virtual Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) = 0;
-
-    //! Acknowledge data tuple for specific group id and substream.
-    /*!
-        \param group_id - group id to use.
-        \param id - data tuple id
-        \param substream (optional) - substream
-        \return nullptr of command was successful, otherwise error.
-    */
-    virtual Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) = 0;
-
-    //! Negative acknowledge data tuple for specific group id and substream.
-    /*!
-        \param group_id - group id to use.
-        \param id - data tuple id
-        \param delay_sec - data tuple will be redelivered after delay, 0 to redeliver immediately
-        \param substream (optional) - substream
-        \return nullptr of command was successful, otherwise error.
-    */
-    virtual Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
-                                      std::string substream = kDefaultSubstream) = 0;
-
-
-    //! Get unacknowledged tuple for specific group id and substream.
-    /*!
-        \param group_id - group id to use.
-        \param substream (optional) - substream
-        \param from_id - return tuples with ids greater or equal to from (use 0 disable limit)
-        \param to_id - return tuples with ids less or equal to to (use 0 to disable limit)
-        \param in (optional) - substream
-        \param err - set to nullptr of operation succeed, error otherwise.
-        \return vector of ids, might be empty
-    */
-    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, std::string substream, uint64_t from_id, uint64_t to_id,
-                                             Error* error) = 0;
-    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) = 0;
-
-    //! Set timeout for broker operations. Default - no timeout
-    virtual void SetTimeout(uint64_t timeout_ms) = 0;
-
-    //! Will disable RDMA.
-    //! If RDMA is disabled, not available or the first connection fails to build up, it will automatically fall back to TCP.
-    //! This will only have an effect if no previous connection attempted was made on this DataBroker.
-    virtual void ForceNoRdma() = 0;
-
-    //! Returns the current network connection type
-    /*!
-     * \return current network connection type. If no connection was made, the result is NetworkConnectionType::kUndefined
-     */
-    virtual NetworkConnectionType CurrentConnectionType() const = 0;
-
-    //! Get list of substreams, set from to "" to get all substreams
-    virtual StreamInfos GetSubstreamList(std::string from, Error* err) = 0;
-
-    //! Get current number of datasets
-    /*!
-      \param err - return nullptr of operation succeed, error otherwise.
-      \return number of datasets.
-    */
-    virtual uint64_t GetCurrentSize(Error* err) = 0;
-    virtual uint64_t GetCurrentSize(std::string substream, Error* err) = 0;
-
-    //! Generate new GroupID.
-    /*!
-      \param err - return nullptr of operation succeed, error otherwise.
-      \return group ID.
-    */
-    virtual std::string GenerateNewGroupId(Error* err) = 0;
-
-    //! Get Beamtime metadata.
-    /*!
-      \param err - return nullptr of operation succeed, error otherwise.
-      \return beamtime metadata.
-    */
-    virtual std::string GetBeamtimeMeta(Error* err) = 0;
-
-    //! Receive next available image.
-    /*!
-      \param info -  where to store image metadata. Can be set to nullptr only image data is needed.
-      \param group_id - group id to use.
-      \param data - where to store image data. Can be set to nullptr only image metadata is needed.
-      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
-    */
-    virtual Error GetNext(FileInfo* info, std::string group_id, FileData* data) = 0;
-    virtual Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) = 0;
-
-    //! Retrieves image using fileinfo.
-    /*!
-      \param info - image metadata to use, can be updated after operation
-      \param data - where to store image data. Can be set to nullptr only image metadata is needed.
-      \return Error if data is nullptr or data cannot be read, nullptr otherwise.
-    */
-    virtual Error RetrieveData(FileInfo* info, FileData* data) = 0;
-
-
-    //! Receive next available completed dataset.
-    /*!
-      \param err -  will be set to error data cannot be read, nullptr otherwise.
-      \param group_id - group id to use.
-      \param substream - substream to use ("" for default).
-      \param min_size - wait until dataset has min_size data tuples (0 for maximum size)
-      \return DataSet - information about the dataset
-
-    */
-    virtual DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) = 0;
-    virtual DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) = 0;
-    //! Receive last available dataset which has min_size data tuples.
-    /*!
-      \param err -  will be set to error data cannot be read, nullptr otherwise.
-      \param substream - substream to use ("" for default).
-      \param min_size - amount of data tuples in dataset (0 for maximum size)
-      \return DataSet - information about the dataset
-    */
-    virtual DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) = 0;
-    virtual DataSet GetLastDataset(uint64_t min_size, Error* err) = 0;
-
-    //! Receive dataset by id.
-    /*!
-      \param id - dataset id
-      \param err -  will be set to error data cannot be read or dataset size less than min_size, nullptr otherwise.
-      \param substream - substream to use ("" for default).
-      \param min_size - wait until dataset has min_size data tuples (0 for maximum size)
-      \return DataSet - information about the dataset
-    */
-    virtual DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) = 0;
-    virtual DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) = 0;
-
-    //! Receive single image by id.
-    /*!
-      \param id - image id
-      \param info -  where to store image metadata. Can be set to nullptr only image data is needed.
-      \param data - where to store image data. Can be set to nullptr only image metadata is needed.
-      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
-    */
-    virtual Error GetById(uint64_t id, FileInfo* info, FileData* data) = 0;
-    virtual Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) = 0;
-
-    //! Receive id of last acknowledged data tuple
-    /*!
-      \param group_id - group id to use.
-      \param substream (optional) - substream
-      \param err -  will be set in case of error, nullptr otherwise.
-      \return id of the last acknowledged image, 0 if error
-    */
-    virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) = 0;
-    virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) = 0;
-
-    //! Receive last available image.
-    /*!
-      \param info -  where to store image metadata. Can be set to nullptr only image data is needed.
-      \param data - where to store image data. Can be set to nullptr only image metadata is needed.
-      \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
-    */
-    virtual Error GetLast(FileInfo* info, FileData* data) = 0;
-    virtual Error GetLast(FileInfo* info, std::string substream, FileData* data) = 0;
-
-    //! Get all images matching the query.
-    /*!
-      \param sql_query -  query string in SQL format. Limit subset is supported
-      \param err - will be set in case of error, nullptr otherwise
-      \return vector of image metadata matchiing to specified query. Empty if nothing found or error
-    */
-    virtual FileInfos QueryImages(std::string query, Error* err) = 0;
-    virtual FileInfos QueryImages(std::string query, std::string substream, Error* err) = 0;
-
-    //! Configure resending nonacknowledged data
-    /*!
-      \param resend -  where to resend
-      \param delay_sec - how many seconds to wait before resending
-      \param resend_attempts - how many resend attempts to make
-    */
-    virtual void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) = 0;
-
-  //! Will try to interrupt current long runnung operations (mainly needed to exit waiting loop in C from Python)
-    virtual void InterruptCurrentOperation() = 0;
-
-    virtual ~DataBroker() = default; // needed for unique_ptr to delete itself
-};
-
-/*! A class to create a data broker instance. The class's only function Create is used for this */
-class DataBrokerFactory {
-  public:
-    static std::unique_ptr<DataBroker> CreateServerBroker(std::string server_name, std::string source_path,
-            bool has_filesystem, SourceCredentials source, Error* error) noexcept;
-
-};
-
-}
-#endif //ASAPO_DATASOURCE_H
diff --git a/consumer/api/cpp/src/consumer.cpp b/consumer/api/cpp/src/consumer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f5424580cbfddc13f23726a5643e20fed3f42a60
--- /dev/null
+++ b/consumer/api/cpp/src/consumer.cpp
@@ -0,0 +1,37 @@
+#include "asapo/common/networking.h"
+#include "asapo/consumer/consumer.h"
+#include "consumer_impl.h"
+#include "asapo/consumer/consumer_error.h"
+
+namespace asapo {
+
+template <typename C, typename ...Args>
+std::unique_ptr<Consumer> Create(const std::string& source_name,
+                                 Error* error,
+                                 Args&& ... args) noexcept {
+    if (source_name.empty()) {
+        *error = ConsumerErrorTemplates::kWrongInput.Generate("Empty Data Source");
+        return nullptr;
+    }
+
+    std::unique_ptr<Consumer> p = nullptr;
+    try {
+        p.reset(new C(source_name, std::forward<Args>(args)...));
+        error->reset(nullptr);
+    } catch (...) {         // we do not test this part
+        error->reset(new SimpleError("Memory error"));
+    }
+
+    return p;
+
+}
+
+std::unique_ptr<Consumer> ConsumerFactory::CreateConsumer(std::string server_name, std::string source_path,
+                                                          bool has_filesystem, SourceCredentials source, Error* error) noexcept {
+    return Create<ConsumerImpl>(std::move(server_name), error, std::move(source_path), has_filesystem,
+                                std::move(source));
+}
+
+
+}
+
diff --git a/consumer/api/cpp/src/server_data_broker.cpp b/consumer/api/cpp/src/consumer_impl.cpp
similarity index 63%
rename from consumer/api/cpp/src/server_data_broker.cpp
rename to consumer/api/cpp/src/consumer_impl.cpp
index 590ffcc55812248869f589ab3e2a5be152e86dee..7a94ecf6e028e1f0444eb5d18e298cd3421ad62b 100644
--- a/consumer/api/cpp/src/server_data_broker.cpp
+++ b/consumer/api/cpp/src/consumer_impl.cpp
@@ -1,12 +1,12 @@
-#include "server_data_broker.h"
-#include "server_data_broker.h"
+#include "consumer_impl.h"
+#include "consumer_impl.h"
 
 #include <chrono>
 
 #include "asapo/json_parser/json_parser.h"
 #include "asapo/io/io_factory.h"
 #include "asapo/http_client/http_error.h"
-#include "tcp_client.h"
+#include "tcp_consumer_client.h"
 
 #include "asapo/asapo_consumer.h"
 #include "fabric_consumer_client.h"
@@ -16,14 +16,14 @@ using std::chrono::system_clock;
 
 namespace asapo {
 
-const std::string ServerDataBroker::kBrokerServiceName = "asapo-broker";
-const std::string ServerDataBroker::kFileTransferServiceName = "asapo-file-transfer";
+const std::string ConsumerImpl::kBrokerServiceName = "asapo-broker";
+const std::string ConsumerImpl::kFileTransferServiceName = "asapo-file-transfer";
 
 Error GetNoDataResponseFromJson(const std::string &json_string, ConsumerErrorData* data) {
     JsonStringParser parser(json_string);
     Error err;
     if ((err = parser.GetUInt64("id", &data->id)) || (err = parser.GetUInt64("id_max", &data->id_max))
-        || (err = parser.GetString("next_substream", &data->next_substream))) {
+        || (err = parser.GetString("next_stream", &data->next_stream))) {
         return err;
     }
     return nullptr;
@@ -63,7 +63,7 @@ Error ConsumerErrorFromNoDataResponse(const std::string &response) {
         }
         Error err;
         if (data.id >= data.id_max) {
-            err = data.next_substream.empty() ? ConsumerErrorTemplates::kEndOfStream.Generate() :
+            err = data.next_stream.empty() ? ConsumerErrorTemplates::kEndOfStream.Generate() :
                   ConsumerErrorTemplates::kStreamFinished.Generate();
         } else {
             err = ConsumerErrorTemplates::kNoData.Generate();
@@ -104,39 +104,39 @@ Error ProcessRequestResponce(const Error &server_err, const RequestOutput* respo
     return ConsumerErrorFromHttpCode(response, code);
 }
 
-ServerDataBroker::ServerDataBroker(std::string server_uri,
-                                   std::string source_path,
-                                   bool has_filesystem,
-                                   SourceCredentials source) :
+ConsumerImpl::ConsumerImpl(std::string server_uri,
+                           std::string source_path,
+                           bool has_filesystem,
+                           SourceCredentials source) :
     io__{GenerateDefaultIO()}, httpclient__{DefaultHttpClient()},
     endpoint_{std::move(server_uri)}, source_path_{std::move(source_path)}, has_filesystem_{has_filesystem},
     source_credentials_(std::move(source)) {
 
     // net_client__ will be lazy initialized
 
-    if (source_credentials_.stream.empty()) {
-        source_credentials_.stream = SourceCredentials::kDefaultStream;
+    if (source_credentials_.data_source.empty()) {
+        source_credentials_.data_source = SourceCredentials::kDefaultStream;
     }
 
 }
 
-void ServerDataBroker::SetTimeout(uint64_t timeout_ms) {
+void ConsumerImpl::SetTimeout(uint64_t timeout_ms) {
     timeout_ms_ = timeout_ms;
 }
 
-void ServerDataBroker::ForceNoRdma() {
+void ConsumerImpl::ForceNoRdma() {
     should_try_rdma_first_ = false;
 }
 
-NetworkConnectionType ServerDataBroker::CurrentConnectionType() const {
+NetworkConnectionType ConsumerImpl::CurrentConnectionType() const {
     return current_connection_type_;
 }
 
-std::string ServerDataBroker::RequestWithToken(std::string uri) {
+std::string ConsumerImpl::RequestWithToken(std::string uri) {
     return std::move(uri) + "?token=" + source_credentials_.user_token;
 }
 
-Error ServerDataBroker::ProcessPostRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
+Error ConsumerImpl::ProcessPostRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
     Error err;
     switch (request.output_mode) {
         case OutputDataMode::string:
@@ -157,14 +157,14 @@ Error ServerDataBroker::ProcessPostRequest(const RequestInfo &request, RequestOu
     return err;
 }
 
-Error ServerDataBroker::ProcessGetRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
+Error ConsumerImpl::ProcessGetRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) {
     Error err;
     response->string_output =
         httpclient__->Get(RequestWithToken(request.host + request.api) + request.extra_params, code, &err);
     return err;
 }
 
-Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInfo &request, std::string* service_uri) {
+Error ConsumerImpl::ProcessRequest(RequestOutput* response, const RequestInfo &request, std::string* service_uri) {
     Error err;
     HttpCode code;
     if (request.post) {
@@ -178,7 +178,7 @@ Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInf
     return ProcessRequestResponce(err, response, code);
 }
 
-Error ServerDataBroker::DiscoverService(const std::string &service_name, std::string* uri_to_set) {
+Error ConsumerImpl::DiscoverService(const std::string &service_name, std::string* uri_to_set) {
     if (!uri_to_set->empty()) {
         return nullptr;
     }
@@ -198,10 +198,10 @@ Error ServerDataBroker::DiscoverService(const std::string &service_name, std::st
     return nullptr;
 }
 
-bool ServerDataBroker::SwitchToGetByIdIfPartialData(Error* err,
-                                                    const std::string &response,
-                                                    std::string* group_id,
-                                                    std::string* redirect_uri) {
+bool ConsumerImpl::SwitchToGetByIdIfPartialData(Error* err,
+                                                const std::string &response,
+                                                std::string* group_id,
+                                                std::string* redirect_uri) {
     if (*err == ConsumerErrorTemplates::kPartialData) {
         auto error_data = static_cast<const PartialErrorData*>((*err)->GetCustomData());
         if (error_data == nullptr) {
@@ -215,7 +215,7 @@ bool ServerDataBroker::SwitchToGetByIdIfPartialData(Error* err,
     return false;
 }
 
-bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string &response,std::string* group_id, std::string* redirect_uri) {
+bool ConsumerImpl::SwitchToGetByIdIfNoData(Error* err, const std::string &response, std::string* group_id, std::string* redirect_uri) {
     if (*err == ConsumerErrorTemplates::kNoData) {
         auto error_data = static_cast<const ConsumerErrorData*>((*err)->GetCustomData());
         if (error_data == nullptr) {
@@ -229,7 +229,7 @@ bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string &re
     return false;
 }
 
-RequestInfo ServerDataBroker::PrepareRequestInfo(std::string api_url, bool dataset, uint64_t min_size) {
+RequestInfo ConsumerImpl::PrepareRequestInfo(std::string api_url, bool dataset, uint64_t min_size) {
     RequestInfo ri;
     ri.host = current_broker_uri_;
     ri.api = std::move(api_url);
@@ -240,14 +240,19 @@ RequestInfo ServerDataBroker::PrepareRequestInfo(std::string api_url, bool datas
     return ri;
 }
 
-Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string group_id, std::string substream,
-                                            GetImageServerOperation op,
-                                            bool dataset, uint64_t min_size) {
+Error ConsumerImpl::GetRecordFromServer(std::string* response, std::string group_id, std::string stream,
+                                        GetMessageServerOperation op,
+                                        bool dataset, uint64_t min_size) {
+
+    if (stream.empty()) {
+        return ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+    }
+
+    interrupt_flag_ = false;
     std::string request_suffix = OpToUriCmd(op);
     std::string request_group = OpToUriCmd(op);
-    std::string request_api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream
-        + "/" + std::move(substream);
+    std::string request_api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source
+        + "/" + std::move(stream);
     uint64_t elapsed_ms = 0;
     Error no_data_error;
     while (true) {
@@ -260,8 +265,8 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g
         if (err == nullptr) {
             auto ri = PrepareRequestInfo(request_api + "/" + group_id + "/" + request_suffix, dataset, min_size);
             if (request_suffix == "next" && resend_) {
-                ri.extra_params = ri.extra_params + "&resend_nacks=true" + "&delay_sec=" +
-                    std::to_string(delay_sec_) + "&resend_attempts=" + std::to_string(resend_attempts_);
+                ri.extra_params = ri.extra_params + "&resend_nacks=true" + "&delay_ms=" +
+                    std::to_string(delay_ms_) + "&resend_attempts=" + std::to_string(resend_attempts_);
             }
             RequestOutput output;
             err = ProcessRequest(&output, ri, &current_broker_uri_);
@@ -294,54 +299,46 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g
     return nullptr;
 }
 
-Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, FileData* data) {
-    return GetNext(info, std::move(group_id), kDefaultSubstream, data);
-}
-
-Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) {
-    return GetImageFromServer(GetImageServerOperation::GetNext,
+Error ConsumerImpl::GetNext(std::string group_id, MessageMeta* info, MessageData* data, std::string stream) {
+    return GetMessageFromServer(GetMessageServerOperation::GetNext,
                               0,
                               std::move(group_id),
-                              std::move(substream),
+                              std::move(stream),
                               info,
                               data);
 }
 
-Error ServerDataBroker::GetLast(FileInfo* info, FileData* data) {
-    return GetLast(info, kDefaultSubstream, data);
-}
-
-Error ServerDataBroker::GetLast(FileInfo* info, std::string substream, FileData* data) {
-    return GetImageFromServer(GetImageServerOperation::GetLast,
+Error ConsumerImpl::GetLast(MessageMeta* info, MessageData* data, std::string stream) {
+    return GetMessageFromServer(GetMessageServerOperation::GetLast,
                               0,
                               "0",
-                              std::move(substream),
+                              std::move(stream),
                               info,
                               data);
 }
 
-std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) {
+std::string ConsumerImpl::OpToUriCmd(GetMessageServerOperation op) {
     switch (op) {
-        case GetImageServerOperation::GetNext:return "next";
-        case GetImageServerOperation::GetLast:return "last";
+        case GetMessageServerOperation::GetNext:return "next";
+        case GetMessageServerOperation::GetLast:return "last";
         default:return "last";
     }
 }
 
-Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id,
-                                           std::string substream,
-                                           FileInfo* info,
-                                           FileData* data) {
+Error ConsumerImpl::GetMessageFromServer(GetMessageServerOperation op, uint64_t id, std::string group_id,
+                                         std::string stream,
+                                         MessageMeta* info,
+                                         MessageData* data) {
     if (info == nullptr) {
         return ConsumerErrorTemplates::kWrongInput.Generate();
     }
 
     Error err;
     std::string response;
-    if (op == GetImageServerOperation::GetID) {
-        err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream));
+    if (op == GetMessageServerOperation::GetID) {
+        err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(stream));
     } else {
-        err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op);
+        err = GetRecordFromServer(&response, std::move(group_id), std::move(stream), op);
     }
     if (err != nullptr) {
         return err;
@@ -353,7 +350,7 @@ Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t
     return GetDataIfNeeded(info, data);
 }
 
-Error ServerDataBroker::GetDataFromFile(FileInfo* info, FileData* data) {
+Error ConsumerImpl::GetDataFromFile(MessageMeta* info, MessageData* data) {
     Error error;
     *data = io__->GetDataFromFile(info->FullName(source_path_), &info->size, &error);
     if (error) {
@@ -362,7 +359,7 @@ Error ServerDataBroker::GetDataFromFile(FileInfo* info, FileData* data) {
     return nullptr;
 }
 
-Error ServerDataBroker::RetrieveData(FileInfo* info, FileData* data) {
+Error ConsumerImpl::RetrieveData(MessageMeta* info, MessageData* data) {
     if (data == nullptr || info == nullptr) {
         return ConsumerErrorTemplates::kWrongInput.Generate("pointers are empty");
     }
@@ -382,7 +379,7 @@ Error ServerDataBroker::RetrieveData(FileInfo* info, FileData* data) {
     return GetDataFromFileTransferService(info, data, false);
 }
 
-Error ServerDataBroker::GetDataIfNeeded(FileInfo* info, FileData* data) {
+Error ConsumerImpl::GetDataIfNeeded(MessageMeta* info, MessageData* data) {
     if (data == nullptr) {
         return nullptr;
     }
@@ -391,11 +388,11 @@ Error ServerDataBroker::GetDataIfNeeded(FileInfo* info, FileData* data) {
 
 }
 
-bool ServerDataBroker::DataCanBeInBuffer(const FileInfo* info) {
+bool ConsumerImpl::DataCanBeInBuffer(const MessageMeta* info) {
     return info->buf_id > 0;
 }
 
-Error ServerDataBroker::CreateNetClientAndTryToGetFile(const FileInfo* info, FileData* data) {
+Error ConsumerImpl::CreateNetClientAndTryToGetFile(const MessageMeta* info, MessageData* data) {
     const std::lock_guard<std::mutex> lock(net_client_mutex__);
     if (net_client__) {
         return nullptr;
@@ -424,13 +421,13 @@ Error ServerDataBroker::CreateNetClientAndTryToGetFile(const FileInfo* info, Fil
     }
 
     // Create regular tcp client
-    net_client__.reset(new TcpClient());
+    net_client__.reset(new TcpConsumerClient());
     current_connection_type_ = NetworkConnectionType::kAsapoTcp;
 
     return net_client__->GetData(info, data);
 }
 
-Error ServerDataBroker::TryGetDataFromBuffer(const FileInfo* info, FileData* data) {
+Error ConsumerImpl::TryGetDataFromBuffer(const MessageMeta* info, MessageData* data) {
     if (!net_client__) {
         return CreateNetClientAndTryToGetFile(info, data);
     }
@@ -438,17 +435,17 @@ Error ServerDataBroker::TryGetDataFromBuffer(const FileInfo* info, FileData* dat
     return net_client__->GetData(info, data);
 }
 
-std::string ServerDataBroker::GenerateNewGroupId(Error* err) {
+std::string ConsumerImpl::GenerateNewGroupId(Error* err) {
     RequestInfo ri;
     ri.api = "/creategroup";
     ri.post = true;
     return BrokerRequestWithTimeout(ri, err);
 }
 
-Error ServerDataBroker::ServiceRequestWithTimeout(const std::string &service_name,
-                                                  std::string* service_uri,
-                                                  RequestInfo request,
-                                                  RequestOutput* response) {
+Error ConsumerImpl::ServiceRequestWithTimeout(const std::string &service_name,
+                                              std::string* service_uri,
+                                              RequestInfo request,
+                                              RequestOutput* response) {
     interrupt_flag_= false;
     uint64_t elapsed_ms = 0;
     Error err;
@@ -472,7 +469,7 @@ Error ServerDataBroker::ServiceRequestWithTimeout(const std::string &service_nam
     return err;
 }
 
-Error ServerDataBroker::FtsSizeRequestWithTimeout(FileInfo* info) {
+Error ConsumerImpl::FtsSizeRequestWithTimeout(MessageMeta* info) {
     RequestInfo ri = CreateFileTransferRequest(info);
     ri.extra_params = "&sizeonly=true";
     ri.output_mode = OutputDataMode::string;
@@ -487,7 +484,7 @@ Error ServerDataBroker::FtsSizeRequestWithTimeout(FileInfo* info) {
     return err;
 }
 
-Error ServerDataBroker::FtsRequestWithTimeout(FileInfo* info, FileData* data) {
+Error ConsumerImpl::FtsRequestWithTimeout(MessageMeta* info, MessageData* data) {
     RequestInfo ri = CreateFileTransferRequest(info);
     RequestOutput response;
     response.data_output_size = info->size;
@@ -499,7 +496,7 @@ Error ServerDataBroker::FtsRequestWithTimeout(FileInfo* info, FileData* data) {
     return nullptr;
 }
 
-RequestInfo ServerDataBroker::CreateFileTransferRequest(const FileInfo* info) const {
+RequestInfo ConsumerImpl::CreateFileTransferRequest(const MessageMeta* info) const {
     RequestInfo ri;
     ri.api = "/transfer";
     ri.post = true;
@@ -509,28 +506,20 @@ RequestInfo ServerDataBroker::CreateFileTransferRequest(const FileInfo* info) co
     return ri;
 }
 
-std::string ServerDataBroker::BrokerRequestWithTimeout(RequestInfo request, Error* err) {
+std::string ConsumerImpl::BrokerRequestWithTimeout(RequestInfo request, Error* err) {
     RequestOutput response;
     *err = ServiceRequestWithTimeout(kBrokerServiceName, &current_broker_uri_, request, &response);
     return std::move(response.string_output);
 }
 
-Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id) {
-    return SetLastReadMarker(value, std::move(group_id), kDefaultSubstream);
-}
-
-Error ServerDataBroker::ResetLastReadMarker(std::string group_id) {
-    return ResetLastReadMarker(std::move(group_id), kDefaultSubstream);
+Error ConsumerImpl::ResetLastReadMarker(std::string group_id, std::string stream) {
+    return SetLastReadMarker(std::move(group_id), 0, std::move(stream));
 }
 
-Error ServerDataBroker::ResetLastReadMarker(std::string group_id, std::string substream) {
-    return SetLastReadMarker(0, group_id, substream);
-}
-
-Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) {
+Error ConsumerImpl::SetLastReadMarker(std::string group_id, uint64_t value, std::string stream) {
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/"
-        + std::move(substream) + "/" + std::move(group_id) + "/resetcounter";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/"
+        + std::move(stream) + "/" + std::move(group_id) + "/resetcounter";
     ri.extra_params = "&value=" + std::to_string(value);
     ri.post = true;
 
@@ -539,10 +528,10 @@ Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id,
     return err;
 }
 
-uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) {
+uint64_t ConsumerImpl::GetCurrentSize(std::string stream, Error* err) {
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) + "/size";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) + "/size";
     auto responce = BrokerRequestWithTimeout(ri, err);
     if (*err) {
         return 0;
@@ -556,27 +545,23 @@ uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) {
     return size;
 }
 
-uint64_t ServerDataBroker::GetCurrentSize(Error* err) {
-    return GetCurrentSize(kDefaultSubstream, err);
-}
-Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, FileData* data) {
+Error ConsumerImpl::GetById(uint64_t id, MessageMeta* info, MessageData* data, std::string stream) {
     if (id == 0) {
         return ConsumerErrorTemplates::kWrongInput.Generate("id should be positive");
     }
-
-    return GetById(id, info, kDefaultSubstream, data);
+    return GetMessageFromServer(GetMessageServerOperation::GetID, id, "0", std::move(stream), info, data);
 }
 
-Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) {
-    return GetImageFromServer(GetImageServerOperation::GetID, id, "0", substream, info, data);
-}
+Error ConsumerImpl::GetRecordFromServerById(uint64_t id, std::string* response, std::string group_id,
+                                            std::string stream,
+                                            bool dataset, uint64_t min_size) {
+    if (stream.empty()) {
+        return ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+    }
 
-Error ServerDataBroker::GetRecordFromServerById(uint64_t id, std::string* response, std::string group_id,
-                                                std::string substream,
-                                                bool dataset, uint64_t min_size) {
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) +
         "/" + std::move(
         group_id) + "/" + std::to_string(id);
     if (dataset) {
@@ -589,9 +574,9 @@ Error ServerDataBroker::GetRecordFromServerById(uint64_t id, std::string* respon
     return err;
 }
 
-std::string ServerDataBroker::GetBeamtimeMeta(Error* err) {
+std::string ConsumerImpl::GetBeamtimeMeta(Error* err) {
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/default/0/meta/0";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/default/0/meta/0";
 
     return BrokerRequestWithTimeout(ri, err);
 }
@@ -600,99 +585,92 @@ DataSet DecodeDatasetFromResponse(std::string response, Error* err) {
     DataSet res;
     if (!res.SetFromJson(std::move(response))) {
         *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response:" + response);
-        return {0,0,FileInfos{}};
+        return {0, 0, MessageMetas{}};
     } else {
         return res;
     }
 }
 
-FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream, Error* err) {
+MessageMetas ConsumerImpl::QueryMessages(std::string query, std::string stream, Error* err) {
+    if (stream.empty()) {
+        *err = ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+        return {};
+    }
+
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        "/" + std::move(substream) + "/0/queryimages";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        "/" + std::move(stream) + "/0/querymessages";
     ri.post = true;
     ri.body = std::move(query);
 
     auto response = BrokerRequestWithTimeout(ri, err);
     if (*err) {
-        return FileInfos{};
+        return MessageMetas{};
     }
 
-    auto dataset = DecodeDatasetFromResponse("{\"_id\":0,\"size\":0, \"images\":" + response + "}", err);
+    auto dataset = DecodeDatasetFromResponse("{\"_id\":0,\"size\":0, \"messages\":" + response + "}", err);
     return dataset.content;
 }
 
-FileInfos ServerDataBroker::QueryImages(std::string query, Error* err) {
-    return QueryImages(std::move(query), kDefaultSubstream, err);
+DataSet ConsumerImpl::GetNextDataset(std::string group_id, uint64_t min_size, std::string stream, Error* err) {
+    return GetDatasetFromServer(GetMessageServerOperation::GetNext, 0, std::move(group_id), std::move(stream), min_size, err);
 }
 
-DataSet ServerDataBroker::GetNextDataset(std::string group_id, uint64_t min_size, Error* err) {
-    return GetNextDataset(std::move(group_id), kDefaultSubstream, min_size, err);
+DataSet ConsumerImpl::GetLastDataset(uint64_t min_size, std::string stream, Error* err) {
+    return GetDatasetFromServer(GetMessageServerOperation::GetLast, 0, "0", std::move(stream), min_size, err);
 }
 
-DataSet ServerDataBroker::GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetNext, 0, std::move(group_id), std::move(substream),min_size, err);
-}
-
-DataSet ServerDataBroker::GetLastDataset(std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetLast, 0, "0", std::move(substream),min_size, err);
-}
-
-DataSet ServerDataBroker::GetLastDataset(uint64_t min_size, Error* err) {
-    return GetLastDataset(kDefaultSubstream, min_size, err);
-}
-
-DataSet ServerDataBroker::GetDatasetFromServer(GetImageServerOperation op,
-                                               uint64_t id,
-                                               std::string group_id, std::string substream,
-                                               uint64_t min_size,
-                                               Error* err) {
-    FileInfos infos;
+DataSet ConsumerImpl::GetDatasetFromServer(GetMessageServerOperation op,
+                                           uint64_t id,
+                                           std::string group_id, std::string stream,
+                                           uint64_t min_size,
+                                           Error* err) {
+    MessageMetas infos;
     std::string response;
-    if (op == GetImageServerOperation::GetID) {
-        *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream), true, min_size);
+    if (op == GetMessageServerOperation::GetID) {
+        *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(stream), true, min_size);
     } else {
-        *err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op, true, min_size);
+        *err = GetRecordFromServer(&response, std::move(group_id), std::move(stream), op, true, min_size);
     }
     if (*err != nullptr && *err!=ConsumerErrorTemplates::kPartialData) {
-        return {0, 0,FileInfos{}};
+        return {0, 0, MessageMetas{}};
     }
     return DecodeDatasetFromResponse(response, err);
 }
 
-DataSet ServerDataBroker::GetDatasetById(uint64_t id, uint64_t min_size, Error* err) {
-    return GetDatasetById(id, kDefaultSubstream, min_size, err);
-}
-
-DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetID, id, "0", std::move(substream), min_size, err);
+DataSet ConsumerImpl::GetDatasetById(uint64_t id, uint64_t min_size, std::string stream, Error* err) {
+    if (id == 0) {
+        *err = ConsumerErrorTemplates::kWrongInput.Generate("id should be positive");
+        return {};
+    }
+    return GetDatasetFromServer(GetMessageServerOperation::GetID, id, "0", std::move(stream), min_size, err);
 }
 
-StreamInfos ParseSubstreamsFromResponse(std::string response, Error* err) {
+StreamInfos ParseStreamsFromResponse(std::string response, Error* err) {
     auto parser = JsonStringParser(std::move(response));
-    std::vector<std::string> substreams_endcoded;
-    StreamInfos substreams;
+    std::vector<std::string> streams_encoded;
+    StreamInfos streams;
     Error parse_err;
-    *err = parser.GetArrayRawStrings("substreams", &substreams_endcoded);
+    *err = parser.GetArrayRawStrings("streams", &streams_encoded);
     if (*err) {
         return StreamInfos{};
     }
-    for (auto substream_encoded : substreams_endcoded) {
+    for (auto stream_encoded : streams_encoded) {
         StreamInfo si;
-        auto ok = si.SetFromJson(substream_encoded, false);
+        auto ok = si.SetFromJson(stream_encoded, false);
         if (!ok) {
-            *err = TextError("cannot parse " + substream_encoded);
+            *err = TextError("cannot parse " + stream_encoded);
             return StreamInfos{};
         }
-        substreams.emplace_back(si);
+        streams.emplace_back(si);
     }
-    return substreams;
+    return streams;
 }
 
-StreamInfos ServerDataBroker::GetSubstreamList(std::string from, Error* err) {
+StreamInfos ConsumerImpl::GetStreamList(std::string from, Error* err) {
 
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/0/substreams";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/0/streams";
     ri.post = false;
     if (!from.empty()) {
         ri.extra_params = "&from=" + from;
@@ -703,10 +681,10 @@ StreamInfos ServerDataBroker::GetSubstreamList(std::string from, Error* err) {
         return StreamInfos{};
     }
 
-    return ParseSubstreamsFromResponse(std::move(response), err);
+    return ParseStreamsFromResponse(std::move(response), err);
 }
 
-Error ServerDataBroker::UpdateFolderTokenIfNeeded(bool ignore_existing) {
+Error ConsumerImpl::UpdateFolderTokenIfNeeded(bool ignore_existing) {
     if (!folder_token_.empty() && !ignore_existing) {
         return nullptr;
     }
@@ -722,7 +700,7 @@ Error ServerDataBroker::UpdateFolderTokenIfNeeded(bool ignore_existing) {
     return nullptr;
 }
 
-RequestInfo ServerDataBroker::CreateFolderTokenRequest() const {
+RequestInfo ConsumerImpl::CreateFolderTokenRequest() const {
     RequestInfo ri;
     ri.host = endpoint_;
     ri.api = "/asapo-authorizer/folder";
@@ -734,8 +712,8 @@ RequestInfo ServerDataBroker::CreateFolderTokenRequest() const {
     return ri;
 }
 
-Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* data,
-                                                       bool retry_with_new_token) {
+Error ConsumerImpl::GetDataFromFileTransferService(MessageMeta* info, MessageData* data,
+                                                   bool retry_with_new_token) {
     auto err = UpdateFolderTokenIfNeeded(retry_with_new_token);
     if (err) {
         return err;
@@ -760,27 +738,34 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData*
     return err;
 }
 
-Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::string substream) {
+Error ConsumerImpl::Acknowledge(std::string group_id, uint64_t id, std::string stream) {
+    if (stream.empty()) {
+        return ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+    }
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
-    ri.body = "{\"Op\":\"ackimage\"}";
+    ri.body = "{\"Op\":\"ackmessage\"}";
 
     Error err;
     BrokerRequestWithTimeout(ri, &err);
     return err;
 }
 
-IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
-                                                   std::string substream,
-                                                   uint64_t from_id,
-                                                   uint64_t to_id,
-                                                   Error* error) {
+IdList ConsumerImpl::GetUnacknowledgedMessages(std::string group_id,
+                                               uint64_t from_id,
+                                               uint64_t to_id,
+                                               std::string stream,
+                                               Error* error) {
+    if (stream.empty()) {
+        *error = ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+        return {};
+    }
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/nacks";
     ri.extra_params = "&from=" + std::to_string(from_id) + "&to=" + std::to_string(to_id);
 
@@ -798,17 +783,14 @@ IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
     return list;
 }
 
-IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
-                                                   uint64_t from_id,
-                                                   uint64_t to_id,
-                                                   Error* error) {
-    return GetUnacknowledgedTupleIds(std::move(group_id), kDefaultSubstream, from_id, to_id, error);
-}
-
-uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) {
+uint64_t ConsumerImpl::GetLastAcknowledgedMessage(std::string group_id, std::string stream, Error* error) {
+    if (stream.empty()) {
+        *error = ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+        return 0;
+    }
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/lastack";
 
     auto json_string = BrokerRequestWithTimeout(ri, error);
@@ -828,32 +810,31 @@ uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std:
     return id;
 }
 
-uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, Error* error) {
-    return GetLastAcknowledgedTulpeId(std::move(group_id), kDefaultSubstream, error);
-}
-
-void ServerDataBroker::SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) {
+void ConsumerImpl::SetResendNacs(bool resend, uint64_t delay_ms, uint64_t resend_attempts) {
     resend_ = resend;
-    delay_sec_ = delay_sec;
+    delay_ms_ = delay_ms;
     resend_attempts_ = resend_attempts;
 }
 
-Error ServerDataBroker::NegativeAcknowledge(std::string group_id,
-                                            uint64_t id,
-                                            uint64_t delay_sec,
-                                            std::string substream) {
+Error ConsumerImpl::NegativeAcknowledge(std::string group_id,
+                                        uint64_t id,
+                                        uint64_t delay_ms,
+                                        std::string stream) {
+    if (stream.empty()) {
+        return ConsumerErrorTemplates::kWrongInput.Generate("empty stream");
+    }
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream +
-        +"/" + std::move(substream) +
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
-    ri.body = R"({"Op":"negackimage","Params":{"DelaySec":)" + std::to_string(delay_sec) + "}}";
+    ri.body = R"({"Op":"negackmessage","Params":{"DelayMs":)" + std::to_string(delay_ms) + "}}";
 
     Error err;
     BrokerRequestWithTimeout(ri, &err);
     return err;
 }
-void ServerDataBroker::InterruptCurrentOperation() {
+void ConsumerImpl::InterruptCurrentOperation() {
     interrupt_flag_= true;
 }
 
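
Reviewer note: with the default-stream overloads removed, every consumer call now names its stream explicitly, and resend delays are in milliseconds. A rough read-loop sketch under the new signatures (group and stream names are made up; real error handling is trimmed):

```cpp
asapo::Error err;
auto group_id = consumer->GenerateNewGroupId(&err);

// Re-deliver unacknowledged messages after 10000 ms, at most 3 times.
consumer->SetResendNacs(true, 10000, 3);

asapo::MessageMeta meta;
asapo::MessageData data;
while (true) {
    err = consumer->GetNext(group_id, &meta, &data, "default");
    if (err == asapo::ConsumerErrorTemplates::kEndOfStream
        || err == asapo::ConsumerErrorTemplates::kStreamFinished) {
        break;  // kStreamFinished means the broker announced a next_stream
    }
    if (err) {
        continue;  // real code would inspect the error here
    }
    // assumes MessageMeta keeps a numeric id field, as GetById suggests
    consumer->Acknowledge(group_id, meta.id, "default");
}
```
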
diff --git a/consumer/api/cpp/src/server_data_broker.h b/consumer/api/cpp/src/consumer_impl.h
similarity index 52%
rename from consumer/api/cpp/src/server_data_broker.h
rename to consumer/api/cpp/src/consumer_impl.h
index 2aa4d30e7424260bf4c2dcf1703a0a09e3dbe150..0697b5f96067597f58fa6bb43ff6461c8b01d109 100644
--- a/consumer/api/cpp/src/server_data_broker.h
+++ b/consumer/api/cpp/src/consumer_impl.h
@@ -1,17 +1,17 @@
-#ifndef ASAPO_SERVER_DATA_BROKER_H
-#define ASAPO_SERVER_DATA_BROKER_H
+#ifndef ASAPO_CONSUMER_IMPL_H
+#define ASAPO_CONSUMER_IMPL_H
 
 #include "asapo/common/networking.h"
 #include <mutex>
 #include <atomic>
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "asapo/io/io.h"
 #include "asapo/http_client/http_client.h"
 #include "net_client.h"
 
 namespace asapo {
 
-enum class GetImageServerOperation {
+enum class GetMessageServerOperation {
     GetNext,
     GetLast,
     GetID
@@ -35,7 +35,7 @@ struct RequestInfo {
 
 struct RequestOutput {
     std::string string_output;
-    FileData data_output;
+    MessageData data_output;
     uint64_t data_output_size;
     const char* to_string() const {
         if (!data_output) {
@@ -51,45 +51,37 @@ Error ConsumerErrorFromNoDataResponse(const std::string& response);
 Error ConsumerErrorFromPartialDataResponse(const std::string& response);
 DataSet DecodeDatasetFromResponse(std::string response, Error* err);
 
-class ServerDataBroker final : public asapo::DataBroker {
+class ConsumerImpl final : public asapo::Consumer {
   public:
-    explicit ServerDataBroker(std::string server_uri, std::string source_path, bool has_filesystem,
-                              SourceCredentials source);
+    explicit ConsumerImpl(std::string server_uri, std::string source_path, bool has_filesystem,
+                          SourceCredentials source);
 
-    Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) override;
-    Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
-                              std::string substream = kDefaultSubstream) override;
+    Error Acknowledge(std::string group_id, uint64_t id, std::string stream) override;
+    Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_ms,
+                              std::string stream) override;
 
-    IdList GetUnacknowledgedTupleIds(std::string group_id,
-                                     std::string substream,
+    IdList GetUnacknowledgedMessages(std::string group_id,
                                      uint64_t from_id,
                                      uint64_t to_id,
+                                     std::string stream,
                                      Error* error) override;
-    IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) override;
 
-    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) override;
-    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) override;
+    uint64_t GetLastAcknowledgedMessage(std::string group_id, std::string stream, Error* error) override;
 
-    Error ResetLastReadMarker(std::string group_id) override;
-    Error ResetLastReadMarker(std::string group_id, std::string substream) override;
+    Error ResetLastReadMarker(std::string group_id, std::string stream) override;
 
-    Error SetLastReadMarker(uint64_t value, std::string group_id) override;
-    Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) override;
+    Error SetLastReadMarker(std::string group_id, uint64_t value, std::string stream) override;
 
-    Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
-    Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
+    Error GetNext(std::string group_id, MessageMeta* info, MessageData* data, std::string stream) override;
 
-    Error GetLast(FileInfo* info, FileData* data) override;
-    Error GetLast(FileInfo* info, std::string substream, FileData* data) override;
+    Error GetLast(MessageMeta* info, MessageData* data, std::string stream) override;
 
     std::string GenerateNewGroupId(Error* err) override;
     std::string GetBeamtimeMeta(Error* err) override;
 
-    uint64_t GetCurrentSize(Error* err) override;
-    uint64_t GetCurrentSize(std::string substream, Error* err) override;
+    uint64_t GetCurrentSize(std::string stream, Error* err) override;
 
-    Error GetById(uint64_t id, FileInfo* info, FileData* data) override;
-    Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) override;
+    Error GetById(uint64_t id, MessageMeta* info, MessageData* data, std::string stream) override;
 
 
     void SetTimeout(uint64_t timeout_ms) override;
@@ -97,22 +89,18 @@ class ServerDataBroker final : public asapo::DataBroker {
 
     NetworkConnectionType CurrentConnectionType() const override;
 
-    FileInfos QueryImages(std::string query, Error* err) override;
-    FileInfos QueryImages(std::string query, std::string substream, Error* err) override;
+    MessageMetas QueryMessages(std::string query, std::string stream, Error* err) override;
 
-    DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) override;
-    DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetNextDataset(std::string group_id, uint64_t min_size, std::string stream, Error* err) override;
 
-    DataSet GetLastDataset(uint64_t min_size, Error* err) override;
-    DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetLastDataset(uint64_t min_size, std::string stream, Error* err) override;
 
-    DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) override;
-    DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetDatasetById(uint64_t id, uint64_t min_size, std::string stream, Error* err) override;
 
-    Error RetrieveData(FileInfo* info, FileData* data) override;
+    Error RetrieveData(MessageMeta* info, MessageData* data) override;
 
-    StreamInfos GetSubstreamList(std::string from, Error* err) override;
-    void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) override;
+    StreamInfos GetStreamList(std::string from, Error* err) override;
+    void SetResendNacs(bool resend, uint64_t delay_ms, uint64_t resend_attempts) override;
 
     virtual void InterruptCurrentOperation() override;
 
@@ -122,37 +110,37 @@ class ServerDataBroker final : public asapo::DataBroker {
     std::unique_ptr<NetClient> net_client__;
     std::mutex net_client_mutex__; // Required for the lazy initialization of net_client
   private:
-    Error GetDataFromFileTransferService(FileInfo* info, FileData* data, bool retry_with_new_token);
-    Error GetDataFromFile(FileInfo* info, FileData* data);
+    Error GetDataFromFileTransferService(MessageMeta* info, MessageData* data, bool retry_with_new_token);
+    Error GetDataFromFile(MessageMeta* info, MessageData* data);
     static const std::string kBrokerServiceName;
     static const std::string kFileTransferServiceName;
     std::string RequestWithToken(std::string uri);
-    Error GetRecordFromServer(std::string* info, std::string group_id, std::string substream, GetImageServerOperation op,
+    Error GetRecordFromServer(std::string* info, std::string group_id, std::string stream, GetMessageServerOperation op,
                               bool dataset = false, uint64_t min_size = 0);
-    Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string substream,
+    Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string stream,
                                   bool dataset = false, uint64_t min_size = 0);
-    Error GetDataIfNeeded(FileInfo* info, FileData* data);
+    Error GetDataIfNeeded(MessageMeta* info, MessageData* data);
     Error DiscoverService(const std::string& service_name, std::string* uri_to_set);
     bool SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri);
     bool SwitchToGetByIdIfPartialData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri);
     Error ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri);
-    Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
-                             FileInfo* info, FileData* data);
-    DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
+    Error GetMessageFromServer(GetMessageServerOperation op, uint64_t id, std::string group_id, std::string stream,
+                               MessageMeta* info, MessageData* data);
+    DataSet GetDatasetFromServer(GetMessageServerOperation op, uint64_t id, std::string group_id, std::string stream,
                                  uint64_t min_size, Error* err);
-    bool DataCanBeInBuffer(const FileInfo* info);
-    Error TryGetDataFromBuffer(const FileInfo* info, FileData* data);
-    Error CreateNetClientAndTryToGetFile(const FileInfo* info, FileData* data);
+    bool DataCanBeInBuffer(const MessageMeta* info);
+    Error TryGetDataFromBuffer(const MessageMeta* info, MessageData* data);
+    Error CreateNetClientAndTryToGetFile(const MessageMeta* info, MessageData* data);
     Error ServiceRequestWithTimeout(const std::string& service_name, std::string* service_uri, RequestInfo request,
                                     RequestOutput* response);
     std::string BrokerRequestWithTimeout(RequestInfo request, Error* err);
-    Error FtsRequestWithTimeout(FileInfo* info, FileData* data);
-    Error FtsSizeRequestWithTimeout(FileInfo* info);
+    Error FtsRequestWithTimeout(MessageMeta* info, MessageData* data);
+    Error FtsSizeRequestWithTimeout(MessageMeta* info);
     Error ProcessPostRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code);
     Error ProcessGetRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code);
 
     RequestInfo PrepareRequestInfo(std::string api_url, bool dataset, uint64_t min_size);
-    std::string OpToUriCmd(GetImageServerOperation op);
+    std::string OpToUriCmd(GetMessageServerOperation op);
     Error UpdateFolderTokenIfNeeded(bool ignore_existing);
     std::string endpoint_;
     std::string current_broker_uri_;
@@ -165,13 +153,13 @@ class ServerDataBroker final : public asapo::DataBroker {
     NetworkConnectionType current_connection_type_ = NetworkConnectionType::kUndefined;
     std::string folder_token_;
     RequestInfo CreateFolderTokenRequest() const;
-    RequestInfo CreateFileTransferRequest(const FileInfo* info) const;
+    RequestInfo CreateFileTransferRequest(const MessageMeta* info) const;
     uint64_t resend_timout_ = 0;
     bool resend_ = false;
-    uint64_t delay_sec_;
+    uint64_t delay_ms_;
     uint64_t resend_attempts_;
     std::atomic<bool> interrupt_flag_{ false};
 };
 
 }
-#endif //ASAPO_SERVER_DATA_BROKER_H
+#endif //ASAPO_CONSUMER_IMPL_H
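
Reviewer note: beyond the renames above, SetLastReadMarker also swapped its argument order and gained an explicit stream parameter. A before/after sketch (the stream name is a placeholder):

```cpp
// before this patch (value first, implicit default substream)
broker->SetLastReadMarker(42, group_id);

// after this patch (group first, explicit stream last)
consumer->SetLastReadMarker(group_id, 42, "default");
consumer->ResetLastReadMarker(group_id, "default");  // same as setting the marker to 0
```
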
diff --git a/consumer/api/cpp/src/data_broker.cpp b/consumer/api/cpp/src/data_broker.cpp
deleted file mode 100644
index 87191c1f6f6aabb1533a83285903a4228811a734..0000000000000000000000000000000000000000
--- a/consumer/api/cpp/src/data_broker.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-#include "asapo/common/networking.h"
-#include "asapo/consumer/data_broker.h"
-#include "server_data_broker.h"
-#include "asapo/consumer/consumer_error.h"
-
-namespace asapo {
-
-template <typename Broker, typename ...Args>
-std::unique_ptr<DataBroker> Create(const std::string& source_name,
-                                   Error* error,
-                                   Args&& ... args) noexcept {
-    if (source_name.empty()) {
-        *error = ConsumerErrorTemplates::kWrongInput.Generate("Empty Data Source");
-        return nullptr;
-    }
-
-    std::unique_ptr<DataBroker> p = nullptr;
-    try {
-        p.reset(new Broker(source_name, std::forward<Args>(args)...));
-        error->reset(nullptr);
-    } catch (...) {         // we do not test this part
-        error->reset(new SimpleError("Memory error"));
-    }
-
-    return p;
-
-}
-
-std::unique_ptr<DataBroker> DataBrokerFactory::CreateServerBroker(std::string server_name, std::string source_path,
-        bool has_filesystem, SourceCredentials source, Error* error) noexcept {
-    return Create<ServerDataBroker>(std::move(server_name), error, std::move(source_path), has_filesystem,
-                                    std::move(source));
-}
-
-
-}
-
diff --git a/consumer/api/cpp/src/fabric_consumer_client.cpp b/consumer/api/cpp/src/fabric_consumer_client.cpp
index 9027a3295254a067e83d7cfcf565be277c2d61ea..8c80895cb4a1048fb10cab4128677e58f7dbe6b8 100644
--- a/consumer/api/cpp/src/fabric_consumer_client.cpp
+++ b/consumer/api/cpp/src/fabric_consumer_client.cpp
@@ -10,7 +10,7 @@ FabricConsumerClient::FabricConsumerClient(): factory__(fabric::GenerateDefaultF
 
 }
 
-Error FabricConsumerClient::GetData(const FileInfo* info, FileData* data) {
+Error FabricConsumerClient::GetData(const MessageMeta* info, MessageData* data) {
     Error err;
     if (!client__) {
         client__ = factory__->CreateClient(&err);
@@ -24,7 +24,7 @@ Error FabricConsumerClient::GetData(const FileInfo* info, FileData* data) {
         return err;
     }
 
-    FileData tempData{new uint8_t[info->size]};
+    MessageData tempData{new uint8_t[info->size]};
 
     /* MemoryRegion will be released when out of scope */
     auto mr = client__->ShareMemoryRegion(tempData.get(), info->size, &err);
@@ -50,7 +50,7 @@ Error FabricConsumerClient::GetData(const FileInfo* info, FileData* data) {
     return nullptr;
 }
 
-fabric::FabricAddress FabricConsumerClient::GetAddressOrConnect(const FileInfo* info, Error* error) {
+fabric::FabricAddress FabricConsumerClient::GetAddressOrConnect(const MessageMeta* info, Error* error) {
     std::lock_guard<std::mutex> lock(mutex_);
     auto tableEntry = known_addresses_.find(info->source);
 
diff --git a/consumer/api/cpp/src/fabric_consumer_client.h b/consumer/api/cpp/src/fabric_consumer_client.h
index 6816625b083eeb339a2cd891a05fcd0b89fbec73..a9f47ac11bfffa50b7109eeeaabb392dfc65952b 100644
--- a/consumer/api/cpp/src/fabric_consumer_client.h
+++ b/consumer/api/cpp/src/fabric_consumer_client.h
@@ -25,9 +25,9 @@ class FabricConsumerClient : public NetClient {
     std::atomic<fabric::FabricMessageId> global_message_id_{0};
 
   public:
-    Error GetData(const FileInfo* info, FileData* data) override;
+    Error GetData(const MessageMeta* info, MessageData* data) override;
   private:
-    fabric::FabricAddress GetAddressOrConnect(const FileInfo* info, Error* error);
+    fabric::FabricAddress GetAddressOrConnect(const MessageMeta* info, Error* error);
 
     void PerformNetworkTransfer(fabric::FabricAddress address, const GenericRequestHeader* request_header,
                                 GenericNetworkResponse* response, Error* err);
diff --git a/consumer/api/cpp/src/net_client.h b/consumer/api/cpp/src/net_client.h
index 800c2ea5c840775b67ed220c4a7f22fe803f70ba..831b9b45677af84e74e9d312769ab82cf1ad3e5a 100644
--- a/consumer/api/cpp/src/net_client.h
+++ b/consumer/api/cpp/src/net_client.h
@@ -8,7 +8,7 @@ namespace asapo {
 
 class NetClient {
   public:
-    virtual Error GetData(const FileInfo* info, FileData* data) = 0;
+    virtual Error GetData(const MessageMeta* info, MessageData* data) = 0;
     virtual ~NetClient() = default;
 
 };
diff --git a/consumer/api/cpp/src/tcp_client.h b/consumer/api/cpp/src/tcp_client.h
deleted file mode 100644
index 7b23a9601a72ec258a1f3b2926d6577db3ff087e..0000000000000000000000000000000000000000
--- a/consumer/api/cpp/src/tcp_client.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef ASAPO_CONSUMER_TCP_CLIENT_H
-#define ASAPO_CONSUMER_TCP_CLIENT_H
-
-#include "net_client.h"
-#include "asapo/io/io.h"
-#include "tcp_connection_pool.h"
-
-namespace asapo {
-
-class TcpClient : public NetClient {
-  public:
-    explicit TcpClient();
-    Error GetData(const FileInfo* info, FileData* data) override;
-    std::unique_ptr<IO> io__;
-    std::unique_ptr<TcpConnectionPool> connection_pool__;
-  private:
-    Error SendGetDataRequest(SocketDescriptor sd, const FileInfo* info) const noexcept;
-    Error ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const FileInfo* info) const noexcept;
-    Error ReceiveResponce(SocketDescriptor sd) const noexcept;
-    Error QueryCacheHasData(SocketDescriptor* sd, const FileInfo* info, bool try_reconnect) const noexcept;
-    Error ReceiveData(SocketDescriptor sd, const FileInfo* info, FileData* data) const noexcept;
-};
-
-}
-
-#endif //ASAPO_CONSUMER_TCP_CLIENT_H
diff --git a/consumer/api/cpp/src/tcp_client.cpp b/consumer/api/cpp/src/tcp_consumer_client.cpp
similarity index 73%
rename from consumer/api/cpp/src/tcp_client.cpp
rename to consumer/api/cpp/src/tcp_consumer_client.cpp
index 977719906b03bc34f97af763c56e594d8ec363ff..a2bcc349cb90152409dfc3a68f808cfeed68a5dd 100644
--- a/consumer/api/cpp/src/tcp_client.cpp
+++ b/consumer/api/cpp/src/tcp_consumer_client.cpp
@@ -1,16 +1,16 @@
-#include "tcp_client.h"
+#include "tcp_consumer_client.h"
 #include "asapo/io/io_factory.h"
 #include "asapo/common/networking.h"
 #include "rds_response_error.h"
 
 namespace asapo {
 
-TcpClient::TcpClient() : io__{GenerateDefaultIO()}, connection_pool__{new TcpConnectionPool()} {
+TcpConsumerClient::TcpConsumerClient() : io__{GenerateDefaultIO()}, connection_pool__{new TcpConnectionPool()} {
 
 }
 
 
-Error TcpClient::SendGetDataRequest(SocketDescriptor sd, const FileInfo* info) const noexcept {
+Error TcpConsumerClient::SendGetDataRequest(SocketDescriptor sd, const MessageMeta* info) const noexcept {
     Error err;
     GenericRequestHeader request_header{kOpcodeGetBufferData, info->buf_id, info->size};
     io__->Send(sd, &request_header, sizeof(request_header), &err);
@@ -21,7 +21,7 @@ Error TcpClient::SendGetDataRequest(SocketDescriptor sd, const FileInfo* info) c
     return err;
 }
 
-Error TcpClient::ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const FileInfo* info) const noexcept {
+Error TcpConsumerClient::ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const MessageMeta* info) const noexcept {
     Error err;
     *sd = connection_pool__->Reconnect(*sd, &err);
     if (err) {
@@ -31,7 +31,7 @@ Error TcpClient::ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const Fi
     }
 }
 
-Error TcpClient::ReceiveResponce(SocketDescriptor sd) const noexcept {
+Error TcpConsumerClient::ReceiveResponce(SocketDescriptor sd) const noexcept {
     Error err;
 
     GenericNetworkResponse response;
@@ -55,7 +55,7 @@ Error TcpClient::ReceiveResponce(SocketDescriptor sd) const noexcept {
     return nullptr;
 }
 
-Error TcpClient::QueryCacheHasData(SocketDescriptor* sd, const FileInfo* info, bool try_reconnect) const noexcept {
+Error TcpConsumerClient::QueryCacheHasData(SocketDescriptor* sd, const MessageMeta* info, bool try_reconnect) const noexcept {
     Error err;
     err = SendGetDataRequest(*sd, info);
     if (err && try_reconnect) {
@@ -68,7 +68,7 @@ Error TcpClient::QueryCacheHasData(SocketDescriptor* sd, const FileInfo* info, b
     return ReceiveResponce(*sd);
 }
 
-Error TcpClient::ReceiveData(SocketDescriptor sd, const FileInfo* info, FileData* data) const noexcept {
+Error TcpConsumerClient::ReceiveData(SocketDescriptor sd, const MessageMeta* info, MessageData* data) const noexcept {
     Error err;
     uint8_t* data_array = nullptr;
     try {
@@ -80,7 +80,7 @@ Error TcpClient::ReceiveData(SocketDescriptor sd, const FileInfo* info, FileData
     io__->Receive(sd, data_array, (size_t)info->size, &err);
     connection_pool__->ReleaseConnection(sd);
     if (!err) {
-        *data = FileData{data_array};
+        *data = MessageData{data_array};
     } else {
         io__->CloseSocket(sd, nullptr);
         delete[] data_array;
@@ -88,7 +88,7 @@ Error TcpClient::ReceiveData(SocketDescriptor sd, const FileInfo* info, FileData
     return err;
 }
 
-Error TcpClient::GetData(const FileInfo* info, FileData* data) {
+Error TcpConsumerClient::GetData(const MessageMeta* info, MessageData* data) {
     Error err;
     bool reused;
     auto sd = connection_pool__->GetFreeConnection(info->source, &reused, &err);
diff --git a/consumer/api/cpp/src/tcp_consumer_client.h b/consumer/api/cpp/src/tcp_consumer_client.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7961ee44ab88af22bc45ab9a87a55a5e1ab8b9e
--- /dev/null
+++ b/consumer/api/cpp/src/tcp_consumer_client.h
@@ -0,0 +1,26 @@
+#ifndef ASAPO_CONSUMER_TCP_CLIENT_H
+#define ASAPO_CONSUMER_TCP_CLIENT_H
+
+#include "net_client.h"
+#include "asapo/io/io.h"
+#include "tcp_connection_pool.h"
+
+namespace asapo {
+
+class TcpConsumerClient : public NetClient {
+  public:
+    explicit TcpConsumerClient();
+    Error GetData(const MessageMeta* info, MessageData* data) override;
+    std::unique_ptr<IO> io__;
+    std::unique_ptr<TcpConnectionPool> connection_pool__;
+  private:
+    Error SendGetDataRequest(SocketDescriptor sd, const MessageMeta* info) const noexcept;
+    Error ReconnectAndResendGetDataRequest(SocketDescriptor* sd, const MessageMeta* info) const noexcept;
+    Error ReceiveResponce(SocketDescriptor sd) const noexcept;
+    Error QueryCacheHasData(SocketDescriptor* sd, const MessageMeta* info, bool try_reconnect) const noexcept;
+    Error ReceiveData(SocketDescriptor sd, const MessageMeta* info, MessageData* data) const noexcept;
+};
+
+}
+
+#endif //ASAPO_CONSUMER_TCP_CLIENT_H
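
Reviewer note: TcpConsumerClient only comes into play when a message is still in the receiver's memory cache (buf_id > 0); otherwise ConsumerImpl falls back to the filesystem or the file-transfer service. A condensed sketch of that decision path, reconstructed from the fragments visible in this diff (the elided parts may differ in detail):

```cpp
// Condensed, non-verbatim sketch of ConsumerImpl::RetrieveData.
Error ConsumerImpl::RetrieveData(MessageMeta* info, MessageData* data) {
    if (data == nullptr || info == nullptr) {
        return ConsumerErrorTemplates::kWrongInput.Generate("pointers are empty");
    }
    if (DataCanBeInBuffer(info)) {                 // buf_id > 0
        if (TryGetDataFromBuffer(info, data) == nullptr) {
            return nullptr;                        // served via the TCP or fabric client
        }
    }
    if (has_filesystem_) {
        return GetDataFromFile(info, data);        // read from the shared filesystem
    }
    return GetDataFromFileTransferService(info, data, false);
}
```
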
diff --git a/consumer/api/cpp/unittests/mocking.h b/consumer/api/cpp/unittests/mocking.h
index 06a1326a43594a9124b36a1043bf6a55cb7c3321..5ded4240f8ff721aae94d699c00d9d1dddd49479 100644
--- a/consumer/api/cpp/unittests/mocking.h
+++ b/consumer/api/cpp/unittests/mocking.h
@@ -12,11 +12,11 @@ namespace asapo {
 class MockNetClient : public asapo::NetClient {
   public:
 
-    Error GetData(const FileInfo* info, FileData* data) override {
+    Error GetData(const MessageMeta* info, MessageData* data) override {
         return Error(GetData_t(info, data));
     }
 
-    MOCK_CONST_METHOD2(GetData_t, ErrorInterface * (const FileInfo* info, FileData* data));
+    MOCK_CONST_METHOD2(GetData_t, ErrorInterface * (const MessageMeta* info, MessageData* data));
 
 };
 
diff --git a/consumer/api/cpp/unittests/test_consumer_api.cpp b/consumer/api/cpp/unittests/test_consumer_api.cpp
index a43e1af591ff88bfbdf3c0f5e8aebc908466d92e..a1747fb551e7f2893d799ac4aefb3b470dc34fd6 100644
--- a/consumer/api/cpp/unittests/test_consumer_api.cpp
+++ b/consumer/api/cpp/unittests/test_consumer_api.cpp
@@ -1,12 +1,12 @@
 #include <gmock/gmock.h>
 
-#include "asapo/consumer/data_broker.h"
-#include "../src/server_data_broker.h"
+#include "asapo/consumer/consumer.h"
+#include "../src/consumer_impl.h"
 #include "asapo/common/error.h"
 
-using asapo::DataBrokerFactory;
-using asapo::DataBroker;
-using asapo::ServerDataBroker;
+using asapo::ConsumerFactory;
+using asapo::Consumer;
+using asapo::ConsumerImpl;
 
 using asapo::Error;
 using ::testing::Eq;
@@ -16,7 +16,7 @@ using ::testing::Test;
 
 namespace {
 
-class DataBrokerFactoryTests : public Test {
+class ConsumerFactoryTests : public Test {
   public:
     Error error;
     void SetUp() override {
@@ -25,12 +25,17 @@ class DataBrokerFactoryTests : public Test {
 };
 
 
-TEST_F(DataBrokerFactoryTests, CreateServerDataSource) {
+TEST_F(ConsumerFactoryTests, CreateServerDataSource) {
 
-    auto data_broker = DataBrokerFactory::CreateServerBroker("server", "path", false, asapo::SourceCredentials{asapo::SourceType::kProcessed,"beamtime_id", "", "", "token"}, &error);
+    auto consumer = ConsumerFactory::CreateConsumer("server",
+                                                       "path",
+                                                       false,
+                                                       asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                "beamtime_id", "", "", "token"},
+                                                       &error);
 
     ASSERT_THAT(error, Eq(nullptr));
-    ASSERT_THAT(dynamic_cast<ServerDataBroker*>(data_broker.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<ConsumerImpl*>(consumer.get()), Ne(nullptr));
 }
 
 
diff --git a/consumer/api/cpp/unittests/test_server_broker.cpp b/consumer/api/cpp/unittests/test_consumer_impl.cpp
similarity index 59%
rename from consumer/api/cpp/unittests/test_server_broker.cpp
rename to consumer/api/cpp/unittests/test_consumer_impl.cpp
index 54d3075ab968ac11b284d0f77be9a53756f94c12..26020e46e30a5551f3bf1eb818fbadc8fd8eaaf0 100644
--- a/consumer/api/cpp/unittests/test_server_broker.cpp
+++ b/consumer/api/cpp/unittests/test_consumer_impl.cpp
@@ -3,24 +3,24 @@
 #include "gtest/gtest.h"
 #include <chrono>
 
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "asapo/consumer/consumer_error.h"
 #include "asapo/io/io.h"
 #include "../../../../common/cpp/src/system_io/system_io.h"
-#include "../src/server_data_broker.h"
+#include "../src/consumer_impl.h"
 #include "../../../../common/cpp/src/http_client/curl_http_client.h"
 #include "asapo/unittests/MockIO.h"
 #include "asapo/unittests/MockHttpClient.h"
 #include "asapo/http_client/http_error.h"
 #include "mocking.h"
-#include "../src/tcp_client.h"
+#include "../src/tcp_consumer_client.h"
 
-using asapo::DataBrokerFactory;
-using asapo::DataBroker;
-using asapo::ServerDataBroker;
+using asapo::ConsumerFactory;
+using asapo::Consumer;
+using asapo::ConsumerImpl;
 using asapo::IO;
-using asapo::FileInfo;
-using asapo::FileData;
+using asapo::MessageMeta;
+using asapo::MessageData;
 using asapo::MockIO;
 using asapo::MockHttpClient;
 using asapo::MockNetClient;
@@ -45,25 +45,25 @@ using ::testing::ElementsAre;
 namespace {
 
 TEST(FolderDataBroker, Constructor) {
-    auto data_broker =
-        std::unique_ptr<ServerDataBroker>{new ServerDataBroker("test", "path", false,
-                                                               asapo::SourceCredentials{asapo::SourceType::kProcessed,
+    auto consumer =
+        std::unique_ptr<ConsumerImpl>{new ConsumerImpl("test", "path", false,
+                                                       asapo::SourceCredentials{asapo::SourceType::kProcessed,
                                                                                         "beamtime_id", "", "", "token"})
         };
-    ASSERT_THAT(dynamic_cast<asapo::SystemIO*>(data_broker->io__.get()), Ne(nullptr));
-    ASSERT_THAT(dynamic_cast<asapo::CurlHttpClient*>(data_broker->httpclient__.get()), Ne(nullptr));
-    ASSERT_THAT(data_broker->net_client__.get(), Eq(nullptr));
+    ASSERT_THAT(dynamic_cast<asapo::SystemIO*>(consumer->io__.get()), Ne(nullptr));
+    ASSERT_THAT(dynamic_cast<asapo::CurlHttpClient*>(consumer->httpclient__.get()), Ne(nullptr));
+    ASSERT_THAT(consumer->net_client__.get(), Eq(nullptr));
 }
 
 const uint8_t expected_value = 1;
 
-class ServerDataBrokerTests : public Test {
+class ConsumerImplTests : public Test {
  public:
-  std::unique_ptr<ServerDataBroker> data_broker, fts_data_broker;
+  std::unique_ptr<ConsumerImpl> consumer, fts_consumer;
   NiceMock<MockIO> mock_io;
   NiceMock<MockHttpClient> mock_http_client;
   NiceMock<MockNetClient> mock_netclient;
-  FileInfo info;
+  MessageMeta info;
   std::string expected_server_uri = "test:8400";
   std::string expected_broker_uri = "asapo-broker:5005";
   std::string expected_fts_uri = "asapo-file-transfer:5008";
@@ -72,51 +72,51 @@ class ServerDataBrokerTests : public Test {
   std::string expected_filename = "filename";
   std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename;
   std::string expected_group_id = "groupid";
+  std::string expected_data_source = "source";
   std::string expected_stream = "stream";
-  std::string expected_substream = "substream";
   std::string expected_metadata = "{\"meta\":1}";
   std::string expected_query_string = "bla";
   std::string expected_folder_token = "folder_token";
   std::string expected_beamtime_id = "beamtime_id";
-  uint64_t expected_image_size = 100;
+  uint64_t expected_message_size = 100;
   uint64_t expected_dataset_id = 1;
   static const uint64_t expected_buf_id = 123;
-  std::string expected_next_substream = "nextsubstream";
+  std::string expected_next_stream = "nextstream";
   std::string expected_fts_query_string = "{\"Folder\":\"" + expected_path + "\",\"FileName\":\"" + expected_filename +
       "\"}";
   std::string expected_cookie = "Authorization=Bearer " + expected_folder_token;
 
   void AssertSingleFileTransfer();
   void SetUp() override {
-      data_broker = std::unique_ptr<ServerDataBroker>{
-          new ServerDataBroker(expected_server_uri,
-                               expected_path,
-                               true,
-                               asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "",
-                                                        expected_stream, expected_token})
+      consumer = std::unique_ptr<ConsumerImpl>{
+          new ConsumerImpl(expected_server_uri,
+                           expected_path,
+                           true,
+                           asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "",
+                                                        expected_data_source, expected_token})
       };
-      fts_data_broker = std::unique_ptr<ServerDataBroker>{
-          new ServerDataBroker(expected_server_uri,
-                               expected_path,
-                               false,
-                               asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "",
-                                                        expected_stream, expected_token})
+      fts_consumer = std::unique_ptr<ConsumerImpl>{
+          new ConsumerImpl(expected_server_uri,
+                           expected_path,
+                           false,
+                           asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "",
+                                                        expected_data_source, expected_token})
       };
-      data_broker->io__ = std::unique_ptr<IO>{&mock_io};
-      data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
-      data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
-      fts_data_broker->io__ = std::unique_ptr<IO>{&mock_io};
-      fts_data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
-      fts_data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
+      consumer->io__ = std::unique_ptr<IO>{&mock_io};
+      consumer->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
+      consumer->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
+      fts_consumer->io__ = std::unique_ptr<IO>{&mock_io};
+      fts_consumer->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
+      fts_consumer->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
 
   }
   void TearDown() override {
-      data_broker->io__.release();
-      data_broker->httpclient__.release();
-      data_broker->net_client__.release();
-      fts_data_broker->io__.release();
-      fts_data_broker->httpclient__.release();
-      fts_data_broker->net_client__.release();
+      consumer->io__.release();
+      consumer->httpclient__.release();
+      consumer->net_client__.release();
+      fts_consumer->io__.release();
+      fts_consumer->httpclient__.release();
+      fts_consumer->net_client__.release();
 
   }
   void MockGet(const std::string &response, asapo::HttpCode return_code = HttpCode::OK) {
@@ -142,7 +142,7 @@ class ServerDataBrokerTests : public Test {
           Return(result)));
   }
 
-  void MockBeforeFTS(FileData* data);
+  void MockBeforeFTS(MessageData* data);
 
   void MockGetFTSUri() {
       MockGetServiceUri("asapo-file-transfer", expected_fts_uri);
@@ -166,9 +166,9 @@ class ServerDataBrokerTests : public Test {
       EXPECT_CALL(mock_io, GetDataFromFile_t(expected_full_path, testing::Pointee(100), _)).Times(times).
           WillRepeatedly(DoAll(SetArgPointee<2>(new asapo::SimpleError{"s"}), testing::Return(nullptr)));
   }
-  FileInfo CreateFI(uint64_t buf_id = expected_buf_id) {
-      FileInfo fi;
-      fi.size = expected_image_size;
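+  // builds the minimal MessageMeta record that the mocked broker responses are generated from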
+  MessageMeta CreateFI(uint64_t buf_id = expected_buf_id) {
+      MessageMeta fi;
+      fi.size = expected_message_size;
       fi.id = 1;
       fi.buf_id = buf_id;
       fi.name = expected_filename;
@@ -177,30 +177,30 @@ class ServerDataBrokerTests : public Test {
   }
 };
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsErrorOnWrongInput) {
-    auto err = data_broker->GetNext(nullptr, "", nullptr);
+TEST_F(ConsumerImplTests, GetMessageReturnsErrorOnWrongInput) {
+    auto err = consumer->GetNext("", nullptr, nullptr, expected_stream);
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
 }
 
-TEST_F(ServerDataBrokerTests, DefaultStreamIsDetector) {
-    data_broker->io__.release();
-    data_broker->httpclient__.release();
-    data_broker->net_client__.release();
-    data_broker = std::unique_ptr<ServerDataBroker>{
-        new ServerDataBroker(expected_server_uri,
-                             expected_path,
-                             false,
-                             asapo::SourceCredentials{asapo::SourceType::kProcessed, "beamtime_id", "", "",
+TEST_F(ConsumerImplTests, DefaultStreamIsDetector) {
+    consumer->io__.release();
+    consumer->httpclient__.release();
+    consumer->net_client__.release();
+    consumer = std::unique_ptr<ConsumerImpl>{
+        new ConsumerImpl(expected_server_uri,
+                         expected_path,
+                         false,
+                         asapo::SourceCredentials{asapo::SourceType::kProcessed, "beamtime_id", "", "",
                                                       expected_token})
     };
-    data_broker->io__ = std::unique_ptr<IO>{&mock_io};
-    data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
-    data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
+    consumer->io__ = std::unique_ptr<IO>{&mock_io};
+    consumer->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client};
+    consumer->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient};
 
     MockGetBrokerUri();
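+    // an empty data-source field in SourceCredentials must fall back to "detector"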
 
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/detector/default/" + expected_group_id
+                Get_t(expected_broker_uri + "/database/beamtime_id/detector/stream/" + expected_group_id
                           +
                               "/next?token="
                           + expected_token, _,
@@ -209,104 +209,91 @@ TEST_F(ServerDataBrokerTests, DefaultStreamIsDetector) {
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetNextUsesCorrectUriWithStream) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/"
-                                            + expected_group_id + "/next?token="
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+                                            expected_stream + "/" + expected_group_id + "/next?token="
                                             + expected_token, _,
                                         _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUriWithSubstream) {
-    MockGetBrokerUri();
-
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-                                            expected_substream + "/" + expected_group_id + "/next?token="
-                                            + expected_token, _,
-                                        _)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return("")));
-    data_broker->GetNext(&info, expected_group_id, expected_substream, nullptr);
-}
-
-TEST_F(ServerDataBrokerTests, GetLastUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetLastUsesCorrectUri) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/last?token="
+                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" + expected_stream + "/0/last?token="
                           + expected_token, _,
                       _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
-    data_broker->GetLast(&info, nullptr);
+    consumer->GetLast(&info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsEndOfStreamFromHttpClient) {
+TEST_F(ConsumerImplTests, GetMessageReturnsEndOfStreamFromHttpClient) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}")));
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream));
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(1));
-    ASSERT_THAT(err_data->next_substream, Eq(""));
+    ASSERT_THAT(err_data->next_stream, Eq(""));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsStreamFinishedFromHttpClient) {
+TEST_F(ConsumerImplTests, GetMessageReturnsStreamFinishedFromHttpClient) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"" + expected_next_substream
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"" + expected_next_stream
                    + "\"}")));
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kStreamFinished));
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(1));
-    ASSERT_THAT(err_data->next_substream, Eq(expected_next_substream));
+    ASSERT_THAT(err_data->next_stream, Eq(expected_next_stream));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataFromHttpClient) {
+TEST_F(ConsumerImplTests, GetMessageReturnsNoDataFromHttpClient) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_stream\":\"""\"}")));
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(2));
-    ASSERT_THAT(err_data->next_substream, Eq(""));
+    ASSERT_THAT(err_data->next_stream, Eq(""));
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsNotAuthorized) {
+TEST_F(ConsumerImplTests, GetMessageReturnsNotAuthorized) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
@@ -314,12 +301,12 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNotAuthorized) {
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsWrongResponseFromHttpClient) {
+TEST_F(ConsumerImplTests, GetMessageReturnsWrongResponseFromHttpClient) {
 
     MockGetBrokerUri();
 
@@ -328,103 +315,103 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsWrongResponseFromHttpClient) {
         SetArgPointee<2>(nullptr),
         Return("id")));
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kInterruptedTransaction));
     ASSERT_THAT(err->Explain(), HasSubstr("malformed"));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerAddressNotFound) {
+TEST_F(ConsumerImplTests, GetMessageReturnsIfBrokerAddressNotFound) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::NotFound),
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    data_broker->SetTimeout(100);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(100);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err->Explain(), AllOf(HasSubstr(expected_server_uri), HasSubstr("unavailable")));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerUriEmpty) {
+TEST_F(ConsumerImplTests, GetMessageReturnsIfBrokerUriEmpty) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    data_broker->SetTimeout(100);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(100);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err->Explain(), AllOf(HasSubstr(expected_server_uri), HasSubstr("unavailable")));
 }
 
-TEST_F(ServerDataBrokerTests, GetDoNotCallBrokerUriIfAlreadyFound) {
+TEST_F(ConsumerImplTests, GetDoNotCallBrokerUriIfAlreadyFound) {
     MockGetBrokerUri();
     MockGet("error_response");
 
-    data_broker->SetTimeout(100);
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(100);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
     Mock::VerifyAndClearExpectations(&mock_http_client);
 
     EXPECT_CALL(mock_http_client,
                 Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asap-broker"), _, _)).Times(0);
     MockGet("error_response");
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetBrokerUriAgainAfterConnectionError) {
+TEST_F(ConsumerImplTests, GetBrokerUriAgainAfterConnectionError) {
     MockGetBrokerUri();
     MockGetError();
 
-    data_broker->SetTimeout(0);
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(0);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
     Mock::VerifyAndClearExpectations(&mock_http_client);
 
     MockGetBrokerUri();
     MockGet("error_response");
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsEofStreamFromHttpClientUntilTimeout) {
+TEST_F(ConsumerImplTests, GetMessageReturnsEofStreamFromHttpClientUntilTimeout) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"""\"}")));
 
-    data_broker->SetTimeout(300);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(300);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataAfterTimeoutEvenIfOtherErrorOccured) {
+TEST_F(ConsumerImplTests, GetMessageReturnsNoDataAfterTimeoutEvenIfOtherErrorOccurred) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(300);
+    consumer->SetTimeout(300);
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
         Return("{\"op\":\"get_record_by_id\",\"id\":" + std::to_string(expected_dataset_id) +
-            ",\"id_max\":2,\"next_substream\":\"""\"}")));
+            ",\"id_max\":2,\"next_stream\":\"""\"}")));
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
                                             + expected_token, _, _)).Times(AtLeast(1)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::NotFound),
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    data_broker->SetTimeout(300);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
 }
 
-TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnTransferError) {
+TEST_F(ConsumerImplTests, GetNextMessageReturnsImmediatelyOnTransferError) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
@@ -432,8 +419,8 @@ TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnTransferError) {
         SetArgPointee<2>(asapo::HttpErrorTemplates::kTransferError.Generate("sss").release()),
         Return("")));
 
-    data_broker->SetTimeout(300);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(300);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kInterruptedTransaction));
     ASSERT_THAT(err->Explain(), HasSubstr("sss"));
@@ -443,7 +430,7 @@ ACTION(AssignArg2) {
     *arg2 = asapo::HttpErrorTemplates::kConnectionError.Generate().release();
 }
 
-TEST_F(ServerDataBrokerTests, GetNextRetriesIfConnectionHttpClientErrorUntilTimeout) {
+TEST_F(ConsumerImplTests, GetNextRetriesIfConnectionHttpClientErrorUntilTimeout) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::OK),
@@ -455,27 +442,27 @@ TEST_F(ServerDataBrokerTests, GetNextRetriesIfConnectionHttpClientErrorUntilTime
         AssignArg2(),
         Return("")));
 
-    data_broker->SetTimeout(300);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(300);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kUnavailableService));
 }
 
-TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnFinshedSubstream) {
+TEST_F(ConsumerImplTests, GetNextMessageReturnsImmediatelyOnFinishedStream) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_stream\":\"next\"}")));
 
-    data_broker->SetTimeout(300);
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetTimeout(300);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kStreamFinished));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsFileInfo) {
+TEST_F(ConsumerImplTests, GetMessageReturnsMessageMeta) {
     MockGetBrokerUri();
 
     auto to_send = CreateFI();
@@ -483,7 +470,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsFileInfo) {
 
     MockGet(json);
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
 
@@ -493,73 +480,73 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsFileInfo) {
     ASSERT_THAT(info.timestamp, Eq(to_send.timestamp));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsParseError) {
+TEST_F(ConsumerImplTests, GetMessageReturnsParseError) {
     MockGetBrokerUri();
     MockGet("error_response");
 
-    auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
+    auto err = consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kInterruptedTransaction));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReturnsIfNoDataNeeded) {
+TEST_F(ConsumerImplTests, GetMessageReturnsIfNoDataNeeded) {
     MockGetBrokerUri();
     MockGet("error_response");
 
     EXPECT_CALL(mock_netclient, GetData_t(_, _)).Times(0);
     EXPECT_CALL(mock_io, GetDataFromFile_t(_, _, _)).Times(0);
 
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageTriesToGetDataFromMemoryCache) {
+TEST_F(ConsumerImplTests, GetMessageTriesToGetDataFromMemoryCache) {
     MockGetBrokerUri();
     auto to_send = CreateFI();
     auto json = to_send.Json();
     MockGet(json);
-    FileData data;
+    MessageData data;
 
     EXPECT_CALL(mock_netclient, GetData_t(&info, &data)).WillOnce(Return(nullptr));
     MockReadDataFromFile(0);
 
-    data_broker->GetNext(&info, expected_group_id, &data);
+    consumer->GetNext(expected_group_id, &info, &data, expected_stream);
 
     ASSERT_THAT(info.buf_id, Eq(expected_buf_id));
 
 }
 
-TEST_F(ServerDataBrokerTests, GetImageCallsReadFromFileIfCannotReadFromCache) {
+TEST_F(ConsumerImplTests, GetMessageCallsReadFromFileIfCannotReadFromCache) {
     MockGetBrokerUri();
     auto to_send = CreateFI();
     auto json = to_send.Json();
     MockGet(json);
 
-    FileData data;
+    MessageData data;
 
     EXPECT_CALL(mock_netclient, GetData_t(&info,
                                           &data)).WillOnce(Return(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()));
     MockReadDataFromFile();
 
-    data_broker->GetNext(&info, expected_group_id, &data);
+    consumer->GetNext(expected_group_id, &info, &data, expected_stream);
     ASSERT_THAT(info.buf_id, Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, GetImageCallsReadFromFileIfZeroBufId) {
+TEST_F(ConsumerImplTests, GetMessageCallsReadFromFileIfZeroBufId) {
     MockGetBrokerUri();
     auto to_send = CreateFI(0);
     auto json = to_send.Json();
     MockGet(json);
 
-    FileData data;
+    MessageData data;
 
     EXPECT_CALL(mock_netclient, GetData_t(_, _)).Times(0);
 
     MockReadDataFromFile();
 
-    data_broker->GetNext(&info, expected_group_id, &data);
+    consumer->GetNext(expected_group_id, &info, &data, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) {
+TEST_F(ConsumerImplTests, GenerateNewGroupIdReturnsErrorCreateGroup) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Post_t(HasSubstr("creategroup"), _, "", _, _)).WillOnce(DoAll(
@@ -567,14 +554,14 @@ TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) {
         SetArgPointee<4>(nullptr),
         Return("")));
 
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto groupid = data_broker->GenerateNewGroupId(&err);
+    auto groupid = consumer->GenerateNewGroupId(&err);
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
     ASSERT_THAT(groupid, Eq(""));
 }
 
-TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsGroupID) {
+TEST_F(ConsumerImplTests, GenerateNewGroupIdReturnsGroupID) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/creategroup?token=" + expected_token, _, "", _,
@@ -583,129 +570,98 @@ TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsGroupID) {
         SetArgPointee<4>(nullptr),
         Return(expected_group_id)));
 
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto groupid = data_broker->GenerateNewGroupId(&err);
+    auto groupid = consumer->GenerateNewGroupId(&err);
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(groupid, Eq(expected_group_id));
 }
 
-TEST_F(ServerDataBrokerTests, ResetCounterByDefaultUsesCorrectUri) {
+TEST_F(ConsumerImplTests, ResetCounterByDefaultUsesCorrectUri) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
     EXPECT_CALL(mock_http_client,
-                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
+                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/" +
                     expected_group_id +
                     "/resetcounter?token=" + expected_token + "&value=0", _, _, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return("")));
-    auto err = data_broker->ResetLastReadMarker(expected_group_id);
-    ASSERT_THAT(err, Eq(nullptr));
-}
-
-TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUri) {
-    MockGetBrokerUri();
-    data_broker->SetTimeout(100);
-
-    EXPECT_CALL(mock_http_client,
-                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
-                    expected_group_id +
-                    "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("")));
-    auto err = data_broker->SetLastReadMarker(10, expected_group_id);
+    auto err = consumer->ResetLastReadMarker(expected_group_id, expected_stream);
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUriWithSubstream) {
+TEST_F(ConsumerImplTests, ResetCounterUsesCorrectUri) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/" +
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/" +
         expected_group_id +
         "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return("")));
-    auto err = data_broker->SetLastReadMarker(10, expected_group_id, expected_substream);
+    auto err = consumer->SetLastReadMarker(expected_group_id, 10, expected_stream);
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetCurrentSizeUsesCorrectUri) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream +
-        "/default/size?token="
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/size?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("{\"size\":10}")));
     asapo::Error err;
-    auto size = data_broker->GetCurrentSize(&err);
+    auto size = consumer->GetCurrentSize(expected_stream, &err);
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(size, Eq(10));
 }
 
-TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUriWithSubstream) {
+TEST_F(ConsumerImplTests, GetCurrentSizeErrorOnWrongResponse) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/size?token="
-                                            + expected_token, _, _)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return("{\"size\":10}")));
-    asapo::Error err;
-    auto size = data_broker->GetCurrentSize(expected_substream, &err);
-    ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(size, Eq(10));
-}
-
-TEST_F(ServerDataBrokerTests, GetCurrentSizeErrorOnWrongResponce) {
-    MockGetBrokerUri();
-    data_broker->SetTimeout(100);
-
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream +
-        "/default/size?token="
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source +
+        "/"+expected_stream+"/size?token="
                                             + expected_token, _, _)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::Unauthorized),
         SetArgPointee<2>(nullptr),
         Return("")));
     asapo::Error err;
-    auto size = data_broker->GetCurrentSize(&err);
+    auto size = consumer->GetCurrentSize(expected_stream, &err);
     ASSERT_THAT(err, Ne(nullptr));
     ASSERT_THAT(size, Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, GetNDataErrorOnWrongParse) {
+TEST_F(ConsumerImplTests, GetNDataErrorOnWrongParse) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream +
-        "/default/size?token="
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source +
+        "/stream/size?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("{\"siz\":10}")));
     asapo::Error err;
-    auto size = data_broker->GetCurrentSize(&err);
+    auto size = consumer->GetCurrentSize(expected_stream, &err);
     ASSERT_THAT(err, Ne(nullptr));
     ASSERT_THAT(size, Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetByIdUsesCorrectUri) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     auto to_send = CreateFI();
     auto json = to_send.Json();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(
                                                 expected_dataset_id) + "?token="
                                             + expected_token, _,
@@ -714,65 +670,65 @@ TEST_F(ServerDataBrokerTests, GetByIdUsesCorrectUri) {
         SetArgPointee<2>(nullptr),
         Return(json)));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
+    auto err = consumer->GetById(expected_dataset_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(info.name, Eq(to_send.name));
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdTimeouts) {
+TEST_F(ConsumerImplTests, GetByIdTimeouts) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(10);
+    consumer->SetTimeout(10);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
+    auto err = consumer->GetById(expected_dataset_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStream) {
+TEST_F(ConsumerImplTests, GetByIdReturnsEndOfStream) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(10);
+    consumer->SetTimeout(10);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"""\"}")));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
+    auto err = consumer->GetById(expected_dataset_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream));
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStreamWhenIdTooLarge) {
+TEST_F(ConsumerImplTests, GetByIdReturnsEndOfStreamWhenIdTooLarge) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(10);
+    consumer->SetTimeout(10);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_stream\":\"""\"}")));
 
-    auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
+    auto err = consumer->GetById(expected_dataset_id, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream));
 }
 
-TEST_F(ServerDataBrokerTests, GetMetaDataOK) {
+TEST_F(ConsumerImplTests, GetMetaDataOK) {
     MockGetBrokerUri();
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream +
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source +
                                             "/default/0/meta/0?token="
                                             + expected_token, _,
                                         _)).WillOnce(DoAll(
@@ -781,47 +737,47 @@ TEST_F(ServerDataBrokerTests, GetMetaDataOK) {
         Return(expected_metadata)));
 
     asapo::Error err;
-    auto res = data_broker->GetBeamtimeMeta(&err);
+    auto res = consumer->GetBeamtimeMeta(&err);
 
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(res, Eq(expected_metadata));
 
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesReturnError) {
+TEST_F(ConsumerImplTests, QueryMessagesReturnError) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("querymessages"), _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::BadRequest),
         SetArgPointee<4>(nullptr),
         Return("error in query")));
 
-    data_broker->SetTimeout(1000);
+    consumer->SetTimeout(1000);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, &err);
+    auto messages = consumer->QueryMessages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
     ASSERT_THAT(err->Explain(), HasSubstr("query"));
-    ASSERT_THAT(images.size(), Eq(0));
+    ASSERT_THAT(messages.size(), Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesReturnEmptyResults) {
+TEST_F(ConsumerImplTests, QueryMessagesReturnEmptyResults) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("querymessages"), _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return("[]")));
 
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, &err);
+    auto messages = consumer->QueryMessages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(images.size(), Eq(0));
+    ASSERT_THAT(messages.size(), Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseArray) {
+TEST_F(ConsumerImplTests, QueryMessagesWrongResponseArray) {
 
     MockGetBrokerUri();
 
@@ -832,41 +788,41 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseArray) {
     auto responce_string = json1 + "," + json2 + "]"; // no [ at the beginning
 
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("querymessages"), _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return(responce_string)));
 
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, &err);
+    auto messages = consumer->QueryMessages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Ne(nullptr));
-    ASSERT_THAT(images.size(), Eq(0));
+    ASSERT_THAT(messages.size(), Eq(0));
     ASSERT_THAT(err->Explain(), HasSubstr("response"));
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseRecorsd) {
+TEST_F(ConsumerImplTests, QueryMessagesWrongResponseRecords) {
 
     MockGetBrokerUri();
 
     auto responce_string = R"([{"bla":1},{"err":}])";
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll(
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr("querymessages"), _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return(responce_string)));
 
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, &err);
+    auto messages = consumer->QueryMessages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Ne(nullptr));
-    ASSERT_THAT(images.size(), Eq(0));
+    ASSERT_THAT(messages.size(), Eq(0));
     ASSERT_THAT(err->Explain(), HasSubstr("response"));
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) {
+TEST_F(ConsumerImplTests, QueryMessagesReturnRecords) {
 
     MockGetBrokerUri();
 
@@ -878,46 +834,27 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) {
     auto responce_string = "[" + json1 + "," + json2 + "]";
 
     EXPECT_CALL(mock_http_client,
-                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0" +
-                    "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
+                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0" +
+                    "/querymessages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return(responce_string)));
 
-    data_broker->SetTimeout(100);
-    asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, &err);
-
-    ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(images.size(), Eq(2));
-
-    ASSERT_THAT(images[0].name, Eq(rec1.name));
-    ASSERT_THAT(images[1].name, Eq(rec2.name));
-}
-
-TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithSubstream) {
-
-    MockGetBrokerUri();
-
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/0" +
-        "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("[]")));
-
-    data_broker->SetTimeout(100);
+    consumer->SetTimeout(100);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, expected_substream, &err);
+    auto messages = consumer->QueryMessages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(messages.size(), Eq(2));
 
+    ASSERT_THAT(messages[0].name, Eq(rec1.name));
+    ASSERT_THAT(messages[1].name, Eq(rec2.name));
 }
 
-TEST_F(ServerDataBrokerTests, GetNextDatasetUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetNextDatasetUsesCorrectUri) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/" +
                                             expected_group_id + "/next?token="
                                             + expected_token + "&dataset=true&minsize=0", _,
                                         _)).WillOnce(DoAll(
@@ -925,10 +862,17 @@ TEST_F(ServerDataBrokerTests, GetNextDatasetUsesCorrectUri) {
         SetArgPointee<2>(nullptr),
         Return("")));
     asapo::Error err;
-    data_broker->GetNextDataset(expected_group_id, 0, &err);
+    consumer->GetNextDataset(expected_group_id, 0, expected_stream, &err);
+}
+
+TEST_F(ConsumerImplTests, GetNextErrorOnEmptyStream) {
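+    // the stream name is now a mandatory argument - an empty one is rejected as wrong input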
+    MessageData data;
+    auto err = consumer->GetNext(expected_group_id, &info, &data, "");
+    ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
 }
 
-TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) {
+TEST_F(ConsumerImplTests, GetDataSetReturnsMessageMetas) {
     asapo::Error err;
     MockGetBrokerUri();
 
@@ -944,12 +888,12 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) {
     auto json = std::string("{") +
         "\"_id\":1," +
         "\"size\":3," +
-        "\"images\":[" + json1 + "," + json2 + "," + json3 + "]" +
+        "\"messages\":[" + json1 + "," + json2 + "," + json3 + "]" +
         "}";
 
     MockGet(json);
 
-    auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err);
+    auto dataset = consumer->GetNextDataset(expected_group_id, 0, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(nullptr));
 
@@ -960,7 +904,7 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) {
     ASSERT_THAT(dataset.content[2].id, Eq(to_send3.id));
 }
 
-TEST_F(ServerDataBrokerTests, GetDataSetReturnsPartialFileInfos) {
+TEST_F(ConsumerImplTests, GetDataSetReturnsPartialMessageMetas) {
     asapo::Error err;
     MockGetBrokerUri();
 
@@ -976,12 +920,12 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsPartialFileInfos) {
     auto json = std::string("{") +
         "\"_id\":1," +
         "\"size\":3," +
-        "\"images\":[" + json1 + "," + json2 + "]" +
+        "\"messages\":[" + json1 + "," + json2 + "]" +
         "}";
 
     MockGet(json, asapo::HttpCode::PartialContent);
 
-    auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err);
+    auto dataset = consumer->GetNextDataset(expected_group_id, 0, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kPartialData));
 
@@ -995,7 +939,7 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsPartialFileInfos) {
     ASSERT_THAT(dataset.content[1].id, Eq(to_send2.id));
 }
 
-TEST_F(ServerDataBrokerTests, GetDataSetByIdReturnsPartialFileInfos) {
+TEST_F(ConsumerImplTests, GetDataSetByIdReturnsPartialMessageMetas) {
     asapo::Error err;
     MockGetBrokerUri();
 
@@ -1011,12 +955,12 @@ TEST_F(ServerDataBrokerTests, GetDataSetByIdReturnsPartialFileInfos) {
     auto json = std::string("{") +
         "\"_id\":1," +
         "\"size\":3," +
-        "\"images\":[" + json1 + "," + json2 + "]" +
+        "\"messages\":[" + json1 + "," + json2 + "]" +
         "}";
 
     MockGet(json, asapo::HttpCode::PartialContent);
 
-    auto dataset = data_broker->GetDatasetById(1, 0, &err);
+    auto dataset = consumer->GetDatasetById(1, 0, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kPartialData));
     auto err_data = static_cast<const asapo::PartialErrorData*>(err->GetCustomData());
@@ -1029,12 +973,12 @@ TEST_F(ServerDataBrokerTests, GetDataSetByIdReturnsPartialFileInfos) {
     ASSERT_THAT(dataset.content[1].id, Eq(to_send2.id));
 }
 
-TEST_F(ServerDataBrokerTests, GetDataSetReturnsParseError) {
+TEST_F(ConsumerImplTests, GetDataSetReturnsParseError) {
     MockGetBrokerUri();
     MockGet("error_response");
 
     asapo::Error err;
-    auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err);
+    auto dataset = consumer->GetNextDataset(expected_group_id, 0, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kInterruptedTransaction));
     ASSERT_THAT(dataset.content.size(), Eq(0));
@@ -1042,38 +986,24 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsParseError) {
 
 }
 
-TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUri) {
-    MockGetBrokerUri();
-
-    EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/last?token="
-                          + expected_token + "&dataset=true&minsize=2", _,
-                      _)).WillOnce(DoAll(
-        SetArgPointee<1>(HttpCode::OK),
-        SetArgPointee<2>(nullptr),
-        Return("")));
-    asapo::Error err;
-    data_broker->GetLastDataset(2, &err);
-}
-
-TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUriWithSubstream) {
+TEST_F(ConsumerImplTests, GetLastDatasetUsesCorrectUri) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-                                            expected_substream + "/0/last?token="
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+                                            expected_stream + "/0/last?token="
                                             + expected_token + "&dataset=true&minsize=1", _,
                                         _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
     asapo::Error err;
-    data_broker->GetLastDataset(expected_substream, 1, &err);
+    consumer->GetLastDataset(1, expected_stream, &err);
 }
 
-TEST_F(ServerDataBrokerTests, GetDatasetByIdUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetDatasetByIdUsesCorrectUri) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
                                             + expected_token + "&dataset=true" + "&minsize=0", _,
                                         _)).WillOnce(DoAll(
@@ -1081,34 +1011,34 @@ TEST_F(ServerDataBrokerTests, GetDatasetByIdUsesCorrectUri) {
         SetArgPointee<2>(nullptr),
         Return("")));
     asapo::Error err;
-    data_broker->GetDatasetById(expected_dataset_id, 0, &err);
+    consumer->GetDatasetById(expected_dataset_id, 0, expected_stream, &err);
 }
 
-TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUri) {
+TEST_F(ConsumerImplTests, GetStreamListUsesCorrectUri) {
     MockGetBrokerUri();
-    std::string return_substreams =
-        R"({"substreams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})";
+    std::string return_streams =
+        R"({"streams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})";
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams"
+                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/streams"
                           + "?token=" + expected_token + "&from=stream_from", _,
                       _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
-        Return(return_substreams)));
+        Return(return_streams)));
 
     asapo::Error err;
-    auto substreams = data_broker->GetSubstreamList("stream_from", &err);
+    auto streams = consumer->GetStreamList("stream_from", &err);
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(substreams.size(), Eq(2));
-    ASSERT_THAT(substreams.size(), 2);
-    ASSERT_THAT(substreams[0].Json(false), R"({"name":"test","timestampCreated":1000000})");
-    ASSERT_THAT(substreams[1].Json(false), R"({"name":"test1","timestampCreated":2000000})");
+    ASSERT_THAT(streams.size(), Eq(2));
+    ASSERT_THAT(streams[0].Json(false), R"({"name":"test","timestampCreated":1000000})");
+    ASSERT_THAT(streams[1].Json(false), R"({"name":"test1","timestampCreated":2000000})");
 }
 
-TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUriWithoutFrom) {
+TEST_F(ConsumerImplTests, GetStreamListUsesCorrectUriWithoutFrom) {
     MockGetBrokerUri();
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams"
+                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/streams"
                           + "?token=" + expected_token, _,
                       _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
@@ -1116,10 +1046,10 @@ TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUriWithoutFrom) {
         Return("")));;
 
     asapo::Error err;
-    auto substreams = data_broker->GetSubstreamList("", &err);
+    auto streams = consumer->GetStreamList("", &err);
 }
 
-void ServerDataBrokerTests::MockBeforeFTS(FileData* data) {
+void ConsumerImplTests::MockBeforeFTS(MessageData* data) {
     auto to_send = CreateFI();
     auto json = to_send.Json();
     MockGet(json);
@@ -1128,7 +1058,7 @@ void ServerDataBrokerTests::MockBeforeFTS(FileData* data) {
                                           data)).WillOnce(Return(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()));
 }
 
-void ServerDataBrokerTests::ExpectFolderToken() {
+void ConsumerImplTests::ExpectFolderToken() {
     std::string expected_folder_query_string = "{\"Folder\":\"" + expected_path + "\",\"BeamtimeId\":\"" +
         expected_beamtime_id
         + "\",\"Token\":\"" + expected_token + "\"}";
@@ -1144,18 +1074,18 @@ void ServerDataBrokerTests::ExpectFolderToken() {
 
 ACTION_P(AssignArg3, assign) {
     if (assign) {
-        asapo::FileData data = asapo::FileData{new uint8_t[1]};
+        asapo::MessageData data = asapo::MessageData{new uint8_t[1]};
         data[0] = expected_value;
         *arg3 = std::move(data);
     }
 }
 
-void ServerDataBrokerTests::ExpectFileTransfer(const asapo::ConsumerErrorTemplate* p_err_template) {
+void ConsumerImplTests::ExpectFileTransfer(const asapo::ConsumerErrorTemplate* p_err_template) {
     EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"),
                                                     expected_cookie,
                                                     expected_fts_query_string,
                                                     _,
-                                                    expected_image_size,
+                                                    expected_message_size,
                                                     _)).WillOnce(DoAll(
         SetArgPointee<5>(HttpCode::OK),
         AssignArg3(p_err_template == nullptr),
@@ -1163,12 +1093,12 @@ void ServerDataBrokerTests::ExpectFileTransfer(const asapo::ConsumerErrorTemplat
     ));
 }
 
-void ServerDataBrokerTests::ExpectRepeatedFileTransfer() {
+void ConsumerImplTests::ExpectRepeatedFileTransfer() {
     EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"),
                                                     expected_cookie,
                                                     expected_fts_query_string,
                                                     _,
-                                                    expected_image_size,
+                                                    expected_message_size,
                                                     _)).
         WillOnce(DoAll(
         SetArgPointee<5>(HttpCode::Unauthorized),
@@ -1179,15 +1109,15 @@ void ServerDataBrokerTests::ExpectRepeatedFileTransfer() {
     ));
 }
 
-void ServerDataBrokerTests::AssertSingleFileTransfer() {
-    asapo::FileData data = asapo::FileData{new uint8_t[1]};
+void ConsumerImplTests::AssertSingleFileTransfer() {
+    asapo::MessageData data = asapo::MessageData{new uint8_t[1]};
     MockGetBrokerUri();
     MockBeforeFTS(&data);
     ExpectFolderToken();
     MockGetFTSUri();
     ExpectFileTransfer(nullptr);
 
-    fts_data_broker->GetNext(&info, expected_group_id, &data);
+    fts_consumer->GetNext(expected_group_id, &info, &data, expected_stream);
 
     ASSERT_THAT(data[0], Eq(expected_value));
     Mock::VerifyAndClearExpectations(&mock_http_client);
@@ -1195,11 +1125,11 @@ void ServerDataBrokerTests::AssertSingleFileTransfer() {
     Mock::VerifyAndClearExpectations(&mock_io);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageUsesFileTransferServiceIfCannotReadFromCache) {
+TEST_F(ConsumerImplTests, GetMessageUsesFileTransferServiceIfCannotReadFromCache) {
     AssertSingleFileTransfer();
 }
 
-TEST_F(ServerDataBrokerTests, FileTransferReadsFileSize) {
+TEST_F(ConsumerImplTests, FileTransferReadsFileSize) {
     AssertSingleFileTransfer();
     EXPECT_CALL(mock_http_client, Post_t(HasSubstr("sizeonly=true"),
                                          expected_cookie, expected_fts_query_string, _, _)).WillOnce(DoAll(
@@ -1220,38 +1150,38 @@ TEST_F(ServerDataBrokerTests, FileTransferReadsFileSize) {
         Return(nullptr)
     ));
 
-    FileData data;
+    MessageData data;
     info.size = 0;
     info.buf_id = 0;
-    auto err = fts_data_broker->RetrieveData(&info, &data);
+    auto err = fts_consumer->RetrieveData(&info, &data);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageReusesTokenAndUri) {
+TEST_F(ConsumerImplTests, GetMessageReusesTokenAndUri) {
     AssertSingleFileTransfer();
 
-    asapo::FileData data = asapo::FileData{new uint8_t[1]};
+    asapo::MessageData data = asapo::MessageData{new uint8_t[1]};
     MockBeforeFTS(&data);
     ExpectFileTransfer(nullptr);
 
-    auto err = fts_data_broker->GetNext(&info, expected_group_id, &data);
+    auto err = fts_consumer->GetNext(expected_group_id, &info, &data, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, GetImageTriesToGetTokenAgainIfTransferFailed) {
+TEST_F(ConsumerImplTests, GetMessageTriesToGetTokenAgainIfTransferFailed) {
     AssertSingleFileTransfer();
 
-    asapo::FileData data;
+    asapo::MessageData data;
     MockBeforeFTS(&data);
     ExpectRepeatedFileTransfer();
     ExpectFolderToken();
 
-    auto err = fts_data_broker->GetNext(&info, expected_group_id, &data);
+    auto err = fts_consumer->GetNext(expected_group_id, &info, &data, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) {
+TEST_F(ConsumerImplTests, AcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
-    auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/" +
+    auto expected_acknowledge_command = "{\"Op\":\"ackmessage\"}";
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/" +
         expected_group_id
                                              + "/" + std::to_string(expected_dataset_id) + "?token="
                                              + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll(
@@ -1259,103 +1189,86 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) {
         SetArgPointee<4>(nullptr),
         Return("")));
 
-    auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id, expected_substream);
-
-    ASSERT_THAT(err, Eq(nullptr));
-}
-
-TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) {
-    MockGetBrokerUri();
-    auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
-    EXPECT_CALL(mock_http_client,
-                Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" +
-                    expected_group_id
-                           + "/" + std::to_string(expected_dataset_id) + "?token="
-                           + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll(
-        SetArgPointee<3>(HttpCode::OK),
-        SetArgPointee<4>(nullptr),
-        Return("")));
-
-    auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id);
+    auto err = consumer->Acknowledge(expected_group_id, expected_dataset_id, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-void ServerDataBrokerTests::ExpectIdList(bool error) {
+void ConsumerImplTests::ExpectIdList(bool error) {
     MockGetBrokerUri();
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/" +
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/" +
         expected_group_id + "/nacks?token=" + expected_token + "&from=1&to=0", _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return(error ? "" : "{\"unacknowledged\":[1,2,3]}")));
 }
 
-TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) {
+TEST_F(ConsumerImplTests, GetUnAcknowledgedListReturnsIds) {
     ExpectIdList(false);
     asapo::Error err;
-    auto list = data_broker->GetUnacknowledgedTupleIds(expected_group_id, expected_substream, 1, 0, &err);
+    auto list = consumer->GetUnacknowledgedMessages(expected_group_id, 1, 0, expected_stream, &err);
 
     ASSERT_THAT(list, ElementsAre(1, 2, 3));
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-void ServerDataBrokerTests::ExpectLastAckId(bool empty_response) {
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/" +
+void ConsumerImplTests::ExpectLastAckId(bool empty_response) {
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/" +
         expected_group_id + "/lastack?token=" + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return(empty_response ? "{\"lastAckId\":0}" : "{\"lastAckId\":1}")));
 }
 
-TEST_F(ServerDataBrokerTests, GetLastAcknowledgeUsesOk) {
+TEST_F(ConsumerImplTests, GetLastAcknowledgeUsesOk) {
     MockGetBrokerUri();
     ExpectLastAckId(false);
 
     asapo::Error err;
-    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_substream, &err);
+    auto ind = consumer->GetLastAcknowledgedMessage(expected_group_id, expected_stream, &err);
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(ind, Eq(1));
 }
 
-TEST_F(ServerDataBrokerTests, GetLastAcknowledgeReturnsNoData) {
+TEST_F(ConsumerImplTests, GetLastAcknowledgeReturnsNoData) {
     MockGetBrokerUri();
     ExpectLastAckId(true);
 
     asapo::Error err;
-    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_substream, &err);
+    auto ind = consumer->GetLastAcknowledgedMessage(expected_group_id, expected_stream, &err);
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
     ASSERT_THAT(ind, Eq(0));
 }
 
-TEST_F(ServerDataBrokerTests, GetByIdErrorsForId0) {
+TEST_F(ConsumerImplTests, GetByIdErrorsForId0) {
 
-    auto err = data_broker->GetById(0, &info, nullptr);
+    auto err = consumer->GetById(0, &info, nullptr, expected_stream);
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput));
 }
 
-TEST_F(ServerDataBrokerTests, ResendNacks) {
+TEST_F(ConsumerImplTests, ResendNacks) {
     MockGetBrokerUri();
 
-    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/"
+    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/stream/"
                                             + expected_group_id + "/next?token="
-                                            + expected_token + "&resend_nacks=true&delay_sec=10&resend_attempts=3", _,
+                                            + expected_token + "&resend_nacks=true&delay_ms=10000&resend_attempts=3", _,
                                         _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
 
-    data_broker->SetResendNacs(true, 10, 3);
-    data_broker->GetNext(&info, expected_group_id, nullptr);
+    consumer->SetResendNacs(true, 10000, 3);
+    consumer->GetNext(expected_group_id, &info, nullptr, expected_stream);
 }
 
-TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) {
+TEST_F(ConsumerImplTests, NegativeAcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
-    auto expected_neg_acknowledge_command = R"({"Op":"negackimage","Params":{"DelaySec":10}})";
-    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" +
-        expected_substream + "/" +
+    auto expected_neg_acknowledge_command = R"({"Op":"negackmessage","Params":{"DelayMs":10000}})";
+    EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
+        expected_stream + "/" +
         expected_group_id
                                              + "/" + std::to_string(expected_dataset_id) + "?token="
                                              + expected_token, _, expected_neg_acknowledge_command, _, _)).WillOnce(
@@ -1364,12 +1277,12 @@ TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) {
             SetArgPointee<4>(nullptr),
             Return("")));
 
-    auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10, expected_substream);
+    auto err = consumer->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10000, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ServerDataBrokerTests, CanInterruptOperation) {
+TEST_F(ConsumerImplTests, CanInterruptOperation) {
     EXPECT_CALL(mock_http_client, Get_t(_, _, _)).Times(AtLeast(1)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::NotFound),
         SetArgPointee<2>(nullptr),
@@ -1378,13 +1291,13 @@ TEST_F(ServerDataBrokerTests, CanInterruptOperation) {
     auto start = std::chrono::system_clock::now();
     asapo::Error err;
     auto exec = [this,&err]() {
-      data_broker->SetTimeout(10000);
-      err = data_broker->GetNext(&info, "", nullptr);
+      consumer->SetTimeout(10000);
+      err = consumer->GetNext("", &info, nullptr, expected_stream);
     };
     auto thread = std::thread(exec);
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
 
-    data_broker->InterruptCurrentOperation();
+    consumer->InterruptCurrentOperation();
 
     thread.join();
 
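Taken together, the tests above pin down the renamed C++ consumer interface: the group id moved to the front, the stream name to the last position, and every delay or timeout is now given in milliseconds. A minimal sketch of one consume/acknowledge round under those signatures (the stream name and the acknowledged message id are placeholders; error handling is reduced to an early return):

```cpp
#include <string>

#include "asapo/asapo_consumer.h"

// Minimal sketch of one consume/acknowledge round with the renamed API.
void ConsumeOne(asapo::Consumer* consumer, const std::string& group_id) {
    consumer->SetTimeout(10000);              // all timeouts/delays are in ms now
    consumer->SetResendNacs(true, 10000, 3);  // resend unacknowledged after 10 s
    asapo::MessageMeta info;                  // was FileInfo
    asapo::MessageData data;                  // was FileData
    auto err = consumer->GetNext(group_id, &info, &data, "stream");
    if (err) {
        return;  // e.g. no data arrived within the timeout
    }
    consumer->Acknowledge(group_id, 1, "stream");  // placeholder message id
}
```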
diff --git a/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp b/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp
index 4db2da2ec917ab05268fc25b692fc507baefc638..9e32cd3842cc11d96003b342ade885a0d8d7e8a7 100644
--- a/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp
+++ b/consumer/api/cpp/unittests/test_fabric_consumer_client.cpp
@@ -25,7 +25,7 @@ TEST(FabricConsumerClient, Constructor) {
     ASSERT_THAT(dynamic_cast<fabric::FabricClient*>(client.client__.get()), Eq(nullptr));
 }
 
-MATCHER_P6(M_CheckSendDataRequest, op_code, buf_id, data_size, mr_addr, mr_length, mr_key,
+MATCHER_P6(M_CheckSendRequest, op_code, buf_id, data_size, mr_addr, mr_length, mr_key,
            "Checks if a valid GenericRequestHeader was Send") {
     auto data = (GenericRequestHeader*) arg;
     auto mr = (fabric::MemoryRegionDetails*) &data->message;
@@ -37,9 +37,9 @@ MATCHER_P6(M_CheckSendDataRequest, op_code, buf_id, data_size, mr_addr, mr_lengt
            && mr->key == uint64_t(mr_key);
 }
 
-ACTION_P(A_WriteSendDataResponse, error_code) {
-    ((asapo::SendDataResponse*)arg2)->op_code = asapo::kOpcodeGetBufferData;
-    ((asapo::SendDataResponse*)arg2)->error_code = error_code;
+ACTION_P(A_WriteSendResponse, error_code) {
+    ((asapo::SendResponse*)arg2)->op_code = asapo::kOpcodeGetBufferData;
+    ((asapo::SendResponse*)arg2)->error_code = error_code;
 }
 
 class FabricConsumerClientTests : public Test {
@@ -98,7 +98,7 @@ void FabricConsumerClientTests::ExpectTransfer(void** outputData, fabric::Fabric
 
 
     Expectation sendCall = EXPECT_CALL(mock_fabric_client, Send_t(serverAddr, messageId,
-                                       M_CheckSendDataRequest(kOpcodeGetBufferData, 78954, 4123, 0x124, 4123, 20),
+                                       M_CheckSendRequest(kOpcodeGetBufferData, 78954, 4123, 0x124, 4123, 20),
                                        sizeof(GenericRequestHeader), _)).After(getDetailsCall)
                            .WillOnce(SetArgPointee<4>(sendOk ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()));
 
@@ -108,7 +108,7 @@ void FabricConsumerClientTests::ExpectTransfer(void** outputData, fabric::Fabric
                                .After(sendCall)
                                .WillOnce(DoAll(
                                              SetArgPointee<4>(recvOk ? nullptr : fabric::FabricErrorTemplates::kInternalError.Generate().release()),
-                                             A_WriteSendDataResponse(serverResponse)
+                                             A_WriteSendResponse(serverResponse)
                                          ));
         EXPECT_CALL(*mr, Destructor()).After(recvCall);
     } else {
@@ -120,10 +120,10 @@ void FabricConsumerClientTests::ExpectTransfer(void** outputData, fabric::Fabric
 TEST_F(FabricConsumerClientTests, GetData_Error_Init) {
     ExpectInit(false);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
 }
@@ -132,15 +132,15 @@ TEST_F(FabricConsumerClientTests, GetData_Error_AddConnection) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", false, -1);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
     ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
 
     // Make sure that the connection was not saved
     ExpectAddedConnection("host:1234", false, -1);
-    err = client.GetData(&expectedInfo, &expectedFileData);
+    err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
 }
@@ -149,8 +149,8 @@ TEST_F(FabricConsumerClientTests, GetData_ShareMemoryRegion_Error) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
 
@@ -160,7 +160,7 @@ TEST_F(FabricConsumerClientTests, GetData_ShareMemoryRegion_Error) {
                   Return(nullptr)
               ));
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(fabric::FabricErrorTemplates::kInternalError));
 }
@@ -169,8 +169,8 @@ TEST_F(FabricConsumerClientTests, GetData_SendFailed) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -178,18 +178,18 @@ TEST_F(FabricConsumerClientTests, GetData_SendFailed) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, false, false, kNetErrorNoError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Ne(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+    ASSERT_THAT(expectedMessageData.get(), Eq(nullptr));
 }
 
 TEST_F(FabricConsumerClientTests, GetData_RecvFailed) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -197,18 +197,18 @@ TEST_F(FabricConsumerClientTests, GetData_RecvFailed) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, true, false, kNetErrorNoError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Ne(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+    ASSERT_THAT(expectedMessageData.get(), Eq(nullptr));
 }
 
 TEST_F(FabricConsumerClientTests, GetData_ServerError) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -216,18 +216,18 @@ TEST_F(FabricConsumerClientTests, GetData_ServerError) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, true, true, kNetErrorInternalServerError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Ne(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(nullptr));
+    ASSERT_THAT(expectedMessageData.get(), Eq(nullptr));
 }
 
 TEST_F(FabricConsumerClientTests, GetData_Ok) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -235,18 +235,18 @@ TEST_F(FabricConsumerClientTests, GetData_Ok) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+    ASSERT_THAT(expectedMessageData.get(), Eq(outData));
 }
 
 TEST_F(FabricConsumerClientTests, GetData_Ok_UsedCahedConnection) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -254,26 +254,26 @@ TEST_F(FabricConsumerClientTests, GetData_Ok_UsedCahedConnection) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+    ASSERT_THAT(expectedMessageData.get(), Eq(outData));
 
     outData = nullptr;
     ExpectTransfer(&outData, 0, 1, true, true, kNetErrorNoError);
 
-    err = client.GetData(&expectedInfo, &expectedFileData);
+    err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+    ASSERT_THAT(expectedMessageData.get(), Eq(outData));
 }
 
 TEST_F(FabricConsumerClientTests, GetData_Ok_SecondConnection) {
     ExpectInit(true);
     ExpectAddedConnection("host:1234", true, 0);
 
-    FileData expectedFileData;
-    FileInfo expectedInfo{};
+    MessageData expectedMessageData;
+    MessageMeta expectedInfo{};
     expectedInfo.source = "host:1234";
     expectedInfo.size = 4123;
     expectedInfo.buf_id = 78954;
@@ -281,10 +281,10 @@ TEST_F(FabricConsumerClientTests, GetData_Ok_SecondConnection) {
     void* outData = nullptr;
     ExpectTransfer(&outData, 0, 0, true, true, kNetErrorNoError);
 
-    Error err = client.GetData(&expectedInfo, &expectedFileData);
+    Error err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+    ASSERT_THAT(expectedMessageData.get(), Eq(outData));
 
     ExpectAddedConnection("host:1235", true, 54);
     expectedInfo.source = "host:1235";
@@ -292,8 +292,8 @@ TEST_F(FabricConsumerClientTests, GetData_Ok_SecondConnection) {
     outData = nullptr;
     ExpectTransfer(&outData, 54, 1, true, true, kNetErrorNoError);
 
-    err = client.GetData(&expectedInfo, &expectedFileData);
+    err = client.GetData(&expectedInfo, &expectedMessageData);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(expectedFileData.get(), Eq(outData));
+    ASSERT_THAT(expectedMessageData.get(), Eq(outData));
 }
diff --git a/consumer/api/cpp/unittests/test_tcp_connection_pool.cpp b/consumer/api/cpp/unittests/test_tcp_connection_pool.cpp
index fcb66a241e93023af7c3419958d5a7f4d8870632..74f3990dff74f074075a62a5a870d4a0bd309bf9 100644
--- a/consumer/api/cpp/unittests/test_tcp_connection_pool.cpp
+++ b/consumer/api/cpp/unittests/test_tcp_connection_pool.cpp
@@ -9,8 +9,8 @@
 
 
 using asapo::IO;
-using asapo::FileInfo;
-using asapo::FileData;
+using asapo::MessageMeta;
+using asapo::MessageData;
 using asapo::MockIO;
 using asapo::SimpleError;
 using asapo::TcpConnectionPool;
@@ -42,7 +42,7 @@ TEST(TcpConnectioPool, Constructor) {
 class TcpConnectioPoolTests : public Test {
   public:
     NiceMock<MockIO> mock_io;
-    FileInfo info;
+    MessageMeta info;
     std::string expected_source = "test:8400";
     TcpConnectionPool pool;
     SocketDescriptor expected_sd = 123;
diff --git a/consumer/api/cpp/unittests/test_tcp_client.cpp b/consumer/api/cpp/unittests/test_tcp_consumer_client.cpp
similarity index 84%
rename from consumer/api/cpp/unittests/test_tcp_client.cpp
rename to consumer/api/cpp/unittests/test_tcp_consumer_client.cpp
index 286072c8473ce3a2ae4eaf79c3c2c948ff36b5b4..b1df9c9db1e6bd84ae1c21aba9968e0bc7c82a36 100644
--- a/consumer/api/cpp/unittests/test_tcp_client.cpp
+++ b/consumer/api/cpp/unittests/test_tcp_consumer_client.cpp
@@ -4,16 +4,16 @@
 #include "asapo/io/io.h"
 #include "asapo/unittests/MockIO.h"
 #include "mocking.h"
-#include "../src/tcp_client.h"
+#include "../src/tcp_consumer_client.h"
 #include "../../../../common/cpp/src/system_io/system_io.h"
 #include "asapo/common/networking.h"
 
 using asapo::IO;
-using asapo::FileInfo;
-using asapo::FileData;
+using asapo::MessageMeta;
+using asapo::MessageData;
 using asapo::MockIO;
 using asapo::SimpleError;
-using asapo::TcpClient;
+using asapo::TcpConsumerClient;
 using asapo::MockTCPConnectionPool;
 
 
@@ -34,12 +34,12 @@ using ::testing::DoAll;
 namespace {
 
 TEST(TcpClient, Constructor) {
-    auto client = std::unique_ptr<TcpClient> {new TcpClient()};
+    auto client = std::unique_ptr<TcpConsumerClient> {new TcpConsumerClient()};
     ASSERT_THAT(dynamic_cast<asapo::SystemIO*>(client->io__.get()), Ne(nullptr));
     ASSERT_THAT(dynamic_cast<asapo::TcpConnectionPool*>(client->connection_pool__.get()), Ne(nullptr));
 }
 
-MATCHER_P4(M_CheckSendDataRequest, op_code, buf_id, data_size, message,
+MATCHER_P4(M_CheckSendRequest, op_code, buf_id, data_size, message,
            "Checks if a valid GenericRequestHeader was Send") {
     return ((asapo::GenericRequestHeader*) arg)->op_code == op_code
            && ((asapo::GenericRequestHeader*) arg)->data_id == uint64_t(buf_id)
@@ -47,22 +47,22 @@ MATCHER_P4(M_CheckSendDataRequest, op_code, buf_id, data_size, message,
            && strcmp(((asapo::GenericRequestHeader*) arg)->message, message) == 0;
 }
 
-ACTION_P(A_WriteSendDataResponse, error_code) {
-    ((asapo::SendDataResponse*)arg1)->op_code = asapo::kOpcodeGetBufferData;
-    ((asapo::SendDataResponse*)arg1)->error_code = error_code;
+ACTION_P(A_WriteSendResponse, error_code) {
+    ((asapo::SendResponse*)arg1)->op_code = asapo::kOpcodeGetBufferData;
+    ((asapo::SendResponse*)arg1)->error_code = error_code;
 }
 
 
 class TcpClientTests : public Test {
   public:
-    std::unique_ptr<TcpClient> client = std::unique_ptr<TcpClient> {new TcpClient()};
+    std::unique_ptr<TcpConsumerClient> client = std::unique_ptr<TcpConsumerClient> {new TcpConsumerClient()};
     NiceMock<MockIO> mock_io;
     NiceMock<MockTCPConnectionPool> mock_connection_pool;
-    FileInfo info;
+    MessageMeta info;
     std::string expected_uri = "test:8400";
     uint64_t expected_buf_id = 123;
     uint64_t expected_size = 1233;
-    FileData data;
+    MessageData data;
     asapo::SocketDescriptor expected_sd = 1;
     void SetUp() override {
         info.source = expected_uri;
@@ -93,8 +93,8 @@ class TcpClientTests : public Test {
         );
     }
 
-    void ExpectSendDataRequest(asapo::SocketDescriptor sd, bool ok = true) {
-        EXPECT_CALL(mock_io, Send_t(sd, M_CheckSendDataRequest(asapo::kOpcodeGetBufferData, expected_buf_id,
+    void ExpectSendRequest(asapo::SocketDescriptor sd, bool ok = true) {
+        EXPECT_CALL(mock_io, Send_t(sd, M_CheckSendRequest(asapo::kOpcodeGetBufferData, expected_buf_id,
                                     expected_size, ""),
                                     sizeof(asapo::GenericRequestHeader), _))
         .WillOnce(
@@ -113,11 +113,11 @@ class TcpClientTests : public Test {
 
     void ExpectGetResponce(asapo::SocketDescriptor sd, bool ok, asapo::NetworkErrorCode responce_code) {
 
-        EXPECT_CALL(mock_io, Receive_t(sd, _, sizeof(asapo::SendDataResponse), _))
+        EXPECT_CALL(mock_io, Receive_t(sd, _, sizeof(asapo::SendResponse), _))
         .WillOnce(
             DoAll(
                 testing::SetArgPointee<3>(ok ? nullptr : asapo::IOErrorTemplates::kConnectionRefused.Generate().release()),
-                A_WriteSendDataResponse(responce_code),
+                A_WriteSendResponse(responce_code),
                 testing::ReturnArg<2>()
             ));
         if (!ok) {
@@ -152,7 +152,7 @@ TEST_F(TcpClientTests, ErrorGetNewConnection) {
 
 TEST_F(TcpClientTests, SendHeaderForNewConnectionReturnsError) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, false);
+    ExpectSendRequest(expected_sd, false);
 
     auto err = client->GetData(&info, &data);
 
@@ -161,7 +161,7 @@ TEST_F(TcpClientTests, SendHeaderForNewConnectionReturnsError) {
 
 TEST_F(TcpClientTests, OnErrorSendHeaderTriesToReconnectAndFails) {
     ExpectNewConnection(true, true);
-    ExpectSendDataRequest(expected_sd, false);
+    ExpectSendRequest(expected_sd, false);
     ExpectReconnect(false);
 
     auto err = client->GetData(&info, &data);
@@ -171,9 +171,9 @@ TEST_F(TcpClientTests, OnErrorSendHeaderTriesToReconnectAndFails) {
 
 TEST_F(TcpClientTests, OnErrorSendHeaderTriesToReconnectAndSendsAnotherRequest) {
     ExpectNewConnection(true, true);
-    ExpectSendDataRequest(expected_sd, false);
+    ExpectSendRequest(expected_sd, false);
     ExpectReconnect(true);
-    ExpectSendDataRequest(expected_sd + 1, false);
+    ExpectSendRequest(expected_sd + 1, false);
 
     auto err = client->GetData(&info, &data);
 
@@ -182,7 +182,7 @@ TEST_F(TcpClientTests, OnErrorSendHeaderTriesToReconnectAndSendsAnotherRequest)
 
 TEST_F(TcpClientTests, GetResponceReturnsError) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, true);
+    ExpectSendRequest(expected_sd, true);
     ExpectGetResponce(expected_sd, false, asapo::kNetErrorNoError);
 
     auto err = client->GetData(&info, &data);
@@ -192,7 +192,7 @@ TEST_F(TcpClientTests, GetResponceReturnsError) {
 
 TEST_F(TcpClientTests, GetResponceReturnsNoData) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, true);
+    ExpectSendRequest(expected_sd, true);
     ExpectGetResponce(expected_sd, true, asapo::kNetErrorNoData);
     EXPECT_CALL(mock_connection_pool, ReleaseConnection(expected_sd));
 
@@ -203,7 +203,7 @@ TEST_F(TcpClientTests, GetResponceReturnsNoData) {
 
 TEST_F(TcpClientTests, GetResponceReturnsWrongRequest) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, true);
+    ExpectSendRequest(expected_sd, true);
     ExpectGetResponce(expected_sd, true, asapo::kNetErrorWrongRequest);
     EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
 
@@ -214,7 +214,7 @@ TEST_F(TcpClientTests, GetResponceReturnsWrongRequest) {
 
 TEST_F(TcpClientTests, ErrorGettingData) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, true);
+    ExpectSendRequest(expected_sd, true);
     ExpectGetResponce(expected_sd, true, asapo::kNetErrorNoError);
     ExpectGetData(expected_sd, false);
 
@@ -225,7 +225,7 @@ TEST_F(TcpClientTests, ErrorGettingData) {
 
 TEST_F(TcpClientTests, OkGettingData) {
     ExpectNewConnection(false, true);
-    ExpectSendDataRequest(expected_sd, true);
+    ExpectSendRequest(expected_sd, true);
     ExpectGetResponce(expected_sd, true, asapo::kNetErrorNoError);
     ExpectGetData(expected_sd, true);
 
@@ -236,9 +236,9 @@ TEST_F(TcpClientTests, OkGettingData) {
 
 TEST_F(TcpClientTests, OkGettingDataWithReconnect) {
     ExpectNewConnection(true, true);
-    ExpectSendDataRequest(expected_sd, false);
+    ExpectSendRequest(expected_sd, false);
     ExpectReconnect(true);
-    ExpectSendDataRequest(expected_sd + 1, true);
+    ExpectSendRequest(expected_sd + 1, true);
     ExpectGetResponce(expected_sd + 1, true, asapo::kNetErrorNoError);
     ExpectGetData(expected_sd + 1, true);
 
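The TCP tests also make the renamed wire structs explicit: a GenericRequestHeader goes out and a SendResponse (formerly SendDataResponse) comes back. A rough sketch of the exchange encoded by M_CheckSendRequest and A_WriteSendResponse above, with field values from the fixture and the actual send/receive calls elided:

```cpp
#include "asapo/common/networking.h"  // same header the test includes

// Shapes of the renamed wire structs; the socket I/O itself is elided,
// so this only illustrates how the header and response are filled.
void SketchBufferRequest() {
    asapo::GenericRequestHeader header{};
    header.op_code = asapo::kOpcodeGetBufferData;  // request data from the receiver buffer
    header.data_id = 123;                          // expected_buf_id in the fixture
    header.data_size = 1233;                       // expected_size in the fixture
    // ... send the header, then receive sizeof(asapo::SendResponse) bytes ...
    asapo::SendResponse response{};                // was SendDataResponse
    if (response.error_code == asapo::kNetErrorNoError) {
        // the payload of data_size bytes follows on the same socket
    }
}
```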
diff --git a/consumer/api/python/asapo_consumer.pxd b/consumer/api/python/asapo_consumer.pxd
index a1182ee75b5608f5d796cfcaf762a930446b7215..e309bd22dfdf54ff4270dc90896971e2a80fd1cb 100644
--- a/consumer/api/python/asapo_consumer.pxd
+++ b/consumer/api/python/asapo_consumer.pxd
@@ -23,27 +23,27 @@ cdef extern from "asapo_wrappers.h" namespace "asapo":
   cdef string GetErrorString(Error* err)
 
 cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
-  cppclass FileData:
+  cppclass MessageData:
     uint8_t[] release()
     pass
 
 cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
-  cppclass FileInfo:
+  cppclass MessageMeta:
     string Json()
     bool SetFromJson(string json_str)
   cppclass IdList:
     vector[uint64_t].iterator begin()
     vector[uint64_t].iterator end()
-  cppclass FileInfos:
-    vector[FileInfo].iterator begin()
-    vector[FileInfo].iterator end()
+  cppclass MessageMetas:
+    vector[MessageMeta].iterator begin()
+    vector[MessageMeta].iterator end()
   struct DataSet:
     uint64_t id
     uint64_t expected_size
-    FileInfos content
+    MessageMetas content
   struct  SourceCredentials:
     string beamtime_id
-    string stream
+    string data_source
     string user_token
   cppclass StreamInfo:
     string Json(bool add_last_id)
@@ -57,36 +57,36 @@ cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
   NetworkConnectionType NetworkConnectionType_kFabric "asapo::NetworkConnectionType::kFabric"
 
 cdef extern from "asapo/asapo_consumer.h" namespace "asapo" nogil:
-    cdef cppclass DataBroker:
-        DataBroker() except +
+    cdef cppclass Consumer:
+        Consumer() except +
         void SetTimeout(uint64_t timeout_ms)
         void ForceNoRdma()
         NetworkConnectionType CurrentConnectionType()
-        Error GetNext(FileInfo* info, string group_id,string substream, FileData* data)
-        Error GetLast(FileInfo* info, string substream, FileData* data)
-        Error GetById(uint64_t id, FileInfo* info, string substream, FileData* data)
-        uint64_t GetCurrentSize(string substream, Error* err)
-        Error SetLastReadMarker(uint64_t value, string group_id, string substream)
-        Error ResetLastReadMarker(string group_id, string substream)
-        Error Acknowledge(string group_id, uint64_t id, string substream)
-        Error NegativeAcknowledge(string group_id, uint64_t id, uint64_t delay_sec, string substream)
-        uint64_t GetLastAcknowledgedTulpeId(string group_id, string substream, Error* error)
-        IdList GetUnacknowledgedTupleIds(string group_id, string substream, uint64_t from_id, uint64_t to_id, Error* error)
+        Error GetNext(string group_id, MessageMeta* info, MessageData* data, string stream)
+        Error GetLast(MessageMeta* info, MessageData* data, string stream)
+        Error GetById(uint64_t id, MessageMeta* info, MessageData* data, string stream)
+        uint64_t GetCurrentSize(string stream, Error* err)
+        Error SetLastReadMarker(string group_id, uint64_t value, string stream)
+        Error ResetLastReadMarker(string group_id, string stream)
+        Error Acknowledge(string group_id, uint64_t id, string stream)
+        Error NegativeAcknowledge(string group_id, uint64_t id, uint64_t delay_ms, string stream)
+        uint64_t GetLastAcknowledgedMessage(string group_id, string stream, Error* error)
+        IdList GetUnacknowledgedMessages(string group_id, uint64_t from_id, uint64_t to_id, string stream, Error* error)
         string GenerateNewGroupId(Error* err)
         string GetBeamtimeMeta(Error* err)
-        FileInfos QueryImages(string query, string substream, Error* err)
-        DataSet GetNextDataset(string group_id, string substream, uint64_t min_size, Error* err)
-        DataSet GetLastDataset(string substream, uint64_t min_size, Error* err)
-        DataSet GetDatasetById(uint64_t id, string substream, uint64_t min_size, Error* err)
-        Error RetrieveData(FileInfo* info, FileData* data)
-        vector[StreamInfo] GetSubstreamList(string from_substream, Error* err)
-        void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts)
+        MessageMetas QueryMessages(string query, string stream, Error* err)
+        DataSet GetNextDataset(string group_id, uint64_t min_size, string stream, Error* err)
+        DataSet GetLastDataset(uint64_t min_size, string stream, Error* err)
+        DataSet GetDatasetById(uint64_t id, uint64_t min_size, string stream, Error* err)
+        Error RetrieveData(MessageMeta* info, MessageData* data)
+        vector[StreamInfo] GetStreamList(string from_stream, Error* err)
+        void SetResendNacs(bool resend, uint64_t delay_ms, uint64_t resend_attempts)
         void InterruptCurrentOperation()
 
 cdef extern from "asapo/asapo_consumer.h" namespace "asapo" nogil:
-    cdef cppclass DataBrokerFactory:
-        DataBrokerFactory() except +
-        unique_ptr[DataBroker] CreateServerBroker(string server_name,string source_path,bool has_filesystem,SourceCredentials source,Error* error)
+    cdef cppclass ConsumerFactory:
+        ConsumerFactory() except +
+        unique_ptr[Consumer] CreateConsumer(string server_name,string source_path,bool has_filesystem,SourceCredentials source,Error* error)
 
 
 cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
@@ -102,4 +102,4 @@ cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
   cdef cppclass ConsumerErrorData:
     uint64_t id
     uint64_t id_max
-    string next_substream
+    string next_stream
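The extern block above doubles as a compact summary of the reworked C++ Consumer interface. A hedged usage sketch of the stream-list and dataset calls (the stream name and min_size value mirror the unit tests; error checks are omitted for brevity):

```cpp
#include <iostream>
#include <string>

#include "asapo/asapo_consumer.h"

// Sketch of the stream- and dataset-level calls declared above.
void ListStreamsAndDatasets(asapo::Consumer* consumer, const std::string& group_id) {
    asapo::Error err;
    for (const auto& stream : consumer->GetStreamList("", &err)) {
        std::cout << stream.Json(false) << std::endl;  // {"name":...,"timestampCreated":...}
    }
    // min_size now comes before the stream name; 0 requests no minimum
    auto dataset = consumer->GetNextDataset(group_id, 0, "stream", &err);
    for (const auto& meta : dataset.content) {  // MessageMetas, was FileInfos
        std::cout << meta.Json() << std::endl;
    }
}
```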
diff --git a/consumer/api/python/asapo_consumer.pyx.in b/consumer/api/python/asapo_consumer.pyx.in
index fc5ea16b1cad54f0611ef4bb4d251c5148bac97e..399b510ed75bf03734ad8435b9c93f11b33770ab 100644
--- a/consumer/api/python/asapo_consumer.pyx.in
+++ b/consumer/api/python/asapo_consumer.pyx.in
@@ -47,10 +47,10 @@ class AsapoLocalIOError(AsapoConsumerError):
   pass
 
 class AsapoStreamFinishedError(AsapoConsumerError):
-  def __init__(self,message,id_max=None,next_substream=None):
+  def __init__(self,message,id_max=None,next_stream=None):
     AsapoConsumerError.__init__(self,message)
     self.id_max = id_max
-    self.next_substream = _str(next_substream)
+    self.next_stream = _str(next_stream)
 
 class AsapoEndOfStreamError(AsapoConsumerError):
   def __init__(self,message,id_max=None):
@@ -80,7 +80,7 @@ cdef throw_exception(Error& err, res = None):
     elif err == kStreamFinished:
             data=<ConsumerErrorData*>err.get().GetCustomData()
             if data != NULL:
-                raise AsapoStreamFinishedError(error_string,data.id_max,data.next_substream)
+                raise AsapoStreamFinishedError(error_string,data.id_max,data.next_stream)
             else:
                 raise AsapoStreamFinishedError(error_string)
     elif err == kNoData:
@@ -102,25 +102,25 @@ cdef throw_exception(Error& err, res = None):
     else:
         raise AsapoConsumerError(error_string)
 
-cdef class PyDataBroker:
-    cdef unique_ptr[DataBroker] c_broker
-    def _op(self, op, group_id, substream, meta_only, uint64_t id):
-        cdef FileInfo info
+cdef class PyConsumer:
+    cdef unique_ptr[Consumer] c_consumer
+    def _op(self, op, group_id, stream, meta_only, uint64_t id):
+        cdef MessageMeta info
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
-        cdef FileData data
-        cdef FileData* p_data =  <FileData*>NULL if meta_only else &data
+        cdef string b_stream = _bytes(stream)
+        cdef MessageData data
+        cdef MessageData* p_data =  <MessageData*>NULL if meta_only else &data
         cdef Error err
         cdef np.npy_intp dims[1]
         if op == "next":
             with nogil:
-                err =  self.c_broker.get().GetNext(&info, b_group_id,b_substream, p_data)
+                err =  self.c_consumer.get().GetNext(b_group_id, &info, p_data, b_stream)
         elif op == "last":
             with nogil:
-                err =  self.c_broker.get().GetLast(&info, b_substream, p_data)
+                err =  self.c_consumer.get().GetLast(&info, p_data, b_stream)
         elif op == "id":
             with nogil:
-                err =  self.c_broker.get().GetById(id, &info, b_substream, p_data)
+                err =  self.c_consumer.get().GetById(id, &info, p_data, b_stream)
         if err:
             throw_exception(err)
         info_str = _str(info.Json())
@@ -132,21 +132,21 @@ cdef class PyDataBroker:
         arr =  np.PyArray_SimpleNewFromData(1, dims, np.NPY_BYTE, ptr)
         PyArray_ENABLEFLAGS(arr,np.NPY_OWNDATA)
         return arr,meta
-    def get_next(self, group_id, substream = "default", meta_only = True):
-        return self._op("next",group_id,substream,meta_only,0)
-    def get_last(self, substream = "default", meta_only = True):
-        return self._op("last","",substream,meta_only,0)
-    def get_by_id(self,uint64_t id,substream = "default",meta_only = True):
-        return self._op("id","",substream,meta_only,id)
+    def get_next(self, group_id, meta_only = True, stream = "default"):
+        return self._op("next",group_id,stream,meta_only,0)
+    def get_last(self, meta_only = True, stream = "default"):
+        return self._op("last","",stream,meta_only,0)
+    def get_by_id(self, uint64_t id, meta_only = True, stream = "default"):
+        return self._op("id","",stream,meta_only,id)
     def retrieve_data(self,meta):
         json_str = json.dumps(meta)
-        cdef FileInfo info
+        cdef MessageMeta info
         if not info.SetFromJson(_bytes(json_str)):
             raise AsapoWrongInputError("wrong metadata")
         cdef Error err
-        cdef FileData data
+        cdef MessageData data
         with nogil:
-            err =  self.c_broker.get().RetrieveData(&info, &data)
+            err =  self.c_consumer.get().RetrieveData(&info, &data)
         if err:
             throw_exception(err)
         cdef np.npy_intp dims[1]
@@ -157,22 +157,22 @@ cdef class PyDataBroker:
         arr =  np.PyArray_SimpleNewFromData(1, dims, np.NPY_BYTE, ptr)
         PyArray_ENABLEFLAGS(arr,np.NPY_OWNDATA)
         return arr
-    def get_current_size(self, substream = "default"):
+    def get_current_size(self, stream = "default"):
         cdef Error err
         cdef uint64_t size
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         with nogil:
-            size =  self.c_broker.get().GetCurrentSize(b_substream,&err)
+            size =  self.c_consumer.get().GetCurrentSize(b_stream,&err)
         err_str = _str(GetErrorString(&err))
         if err:
             throw_exception(err)
         return size
     def set_timeout(self,timeout):
-        self.c_broker.get().SetTimeout(timeout)
+        self.c_consumer.get().SetTimeout(timeout)
     def force_no_rdma(self):
-        self.c_broker.get().ForceNoRdma()
+        self.c_consumer.get().ForceNoRdma()
     def current_connection_type(self):
-        cdef NetworkConnectionType connection_type = self.c_broker.get().CurrentConnectionType()
+        cdef NetworkConnectionType connection_type = self.c_consumer.get().CurrentConnectionType()
         cdef int cased = <int>connection_type
         cdef string result = "Unknown"
         if cased == <int>NetworkConnectionType_kUndefined:
@@ -182,22 +182,22 @@ cdef class PyDataBroker:
         elif cased == <int>NetworkConnectionType_kFabric:
             result = "Fabric"
         return result.decode('utf-8')
-    def set_lastread_marker(self,value,group_id, substream = "default"):
+    def set_lastread_marker(self, group_id, value, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         cdef uint64_t id = value
         with nogil:
-            err =  self.c_broker.get().SetLastReadMarker(id,b_group_id,b_substream)
+            err =  self.c_consumer.get().SetLastReadMarker(b_group_id, id, b_stream)
         if err:
             throw_exception(err)
         return
-    def reset_lastread_marker(self,group_id, substream = "default"):
+    def reset_lastread_marker(self, group_id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err =  self.c_broker.get().ResetLastReadMarker(b_group_id,b_substream)
+            err =  self.c_consumer.get().ResetLastReadMarker(b_group_id,b_stream)
         if err:
             throw_exception(err)
         return
@@ -205,60 +205,60 @@ cdef class PyDataBroker:
         cdef Error err
         cdef string group_id
         with nogil:
-            group_id = self.c_broker.get().GenerateNewGroupId(&err)
+            group_id = self.c_consumer.get().GenerateNewGroupId(&err)
         if err:
             throw_exception(err)
         return _str(group_id)
-    def get_substream_list(self, from_substream = ""):
+    def get_stream_list(self, from_stream = ""):
         cdef Error err
-        cdef vector[StreamInfo] substreams
-        cdef string b_from_substream = _bytes(from_substream)
+        cdef vector[StreamInfo] streams
+        cdef string b_from_stream = _bytes(from_stream)
         with nogil:
-            substreams = self.c_broker.get().GetSubstreamList(b_from_substream,&err)
+            streams = self.c_consumer.get().GetStreamList(b_from_stream,&err)
         if err:
             throw_exception(err)
         list = []
-        for substream in substreams:
-            list.append(json.loads(_str(substream.Json(False))))
+        for stream in streams:
+            list.append(json.loads(_str(stream.Json(False))))
         return list
-    def acknowledge(self, group_id, uint64_t id, substream = "default"):
+    def acknowledge(self, group_id, uint64_t id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err = self.c_broker.get().Acknowledge(b_group_id,id,b_substream)
+            err = self.c_consumer.get().Acknowledge(b_group_id,id,b_stream)
         if err:
             throw_exception(err)
-    def neg_acknowledge(self, group_id, uint64_t id, uint64_t delay_sec, substream = "default"):
+    def neg_acknowledge(self, group_id, uint64_t id, uint64_t delay_ms = 0, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err = self.c_broker.get().NegativeAcknowledge(b_group_id,id,delay_sec,b_substream)
+            err = self.c_consumer.get().NegativeAcknowledge(b_group_id,id,delay_ms,b_stream)
         if err:
             throw_exception(err)
-    def set_resend_nacs(self,bool resend, uint64_t delay_sec, uint64_t resend_attempts):
+    def set_resend_nacs(self, bool resend, uint64_t delay_ms, uint64_t resend_attempts):
         with nogil:
-            self.c_broker.get().SetResendNacs(resend,delay_sec,resend_attempts)
+            self.c_consumer.get().SetResendNacs(resend,delay_ms,resend_attempts)
 
-    def get_last_acknowledged_tuple_id(self, group_id, substream = "default"):
+    def get_last_acknowledged_message(self, group_id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         cdef uint64_t id
         with nogil:
-            id = self.c_broker.get().GetLastAcknowledgedTulpeId(b_group_id,b_substream,&err)
+            id = self.c_consumer.get().GetLastAcknowledgedMessage(b_group_id,b_stream,&err)
         if err:
             throw_exception(err)
         return id
 
-    def get_unacknowledged_tuple_ids(self, group_id, substream = "default", uint64_t from_id = 0, uint64_t to_id = 0):
+    def get_unacknowledged_messages(self, group_id, uint64_t from_id = 0, uint64_t to_id = 0, stream = "default"):
         cdef Error err
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef IdList ids
         with nogil:
-            ids = self.c_broker.get().GetUnacknowledgedTupleIds(b_group_id, b_substream, from_id, to_id, &err)
+            ids = self.c_consumer.get().GetUnacknowledgedMessages(b_group_id, from_id, to_id, b_stream, &err)
         if err:
             throw_exception(err)
         list = []
@@ -266,34 +266,34 @@ cdef class PyDataBroker:
             list.append(id)
         return list
 
-    def query_images(self,query, substream = "default"):
+    def query_messages(self, query, stream = "default"):
         cdef string b_query = _bytes(query)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
-        cdef FileInfos file_infos
+        cdef MessageMetas message_metas
         with nogil:
-            file_infos = self.c_broker.get().QueryImages(b_query,b_substream,&err)
+            message_metas = self.c_consumer.get().QueryMessages(b_query,b_stream,&err)
         if err:
             throw_exception(err)
         json_list = []
-        for fi in file_infos:
+        for fi in message_metas:
             json_list.append(json.loads(_str(fi.Json())))
         return json_list
-    def _op_dataset(self, op, group_id, substream, uint64_t min_size, uint64_t id):
+    def _op_dataset(self, op, group_id, stream, uint64_t min_size, uint64_t id):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
-        cdef FileInfos file_infos
+        cdef string b_stream = _bytes(stream)
+        cdef MessageMetas message_metas
         cdef DataSet dataset
         cdef Error err
         if op == "next":
             with nogil:
-                dataset = self.c_broker.get().GetNextDataset(b_group_id,b_substream, min_size, &err)
+                dataset = self.c_consumer.get().GetNextDataset(b_group_id, min_size, b_stream, &err)
         elif op == "last":
             with nogil:
-                dataset = self.c_broker.get().GetLastDataset(b_substream, min_size, &err)
+                dataset = self.c_consumer.get().GetLastDataset(min_size, b_stream, &err)
         elif op == "id":
             with nogil:
-                dataset = self.c_broker.get().GetDatasetById(id, b_substream, min_size, &err)
+                dataset = self.c_consumer.get().GetDatasetById(id, min_size, b_stream, &err)
         json_list = []
         for fi in dataset.content:
             json_list.append(json.loads(_str(fi.Json())))
@@ -301,47 +301,47 @@ cdef class PyDataBroker:
         if err:
             throw_exception(err,res)
         return res
-    def get_next_dataset(self, group_id, substream = "default", min_size = 0):
-        return self._op_dataset("next",group_id,substream,min_size,0)
-    def get_last_dataset(self, substream = "default", min_size = 0):
-        return self._op_dataset("last","0",substream,min_size,0)
-    def get_dataset_by_id(self, uint64_t id, substream = "default", min_size = 0):
-        return self._op_dataset("id","0",substream,min_size,id)
+    def get_next_dataset(self, group_id, min_size = 0, stream = "default"):
+        return self._op_dataset("next",group_id,stream,min_size,0)
+    def get_last_dataset(self, min_size = 0, stream = "default"):
+        return self._op_dataset("last","0",stream,min_size,0)
+    def get_dataset_by_id(self, uint64_t id, min_size = 0, stream = "default"):
+        return self._op_dataset("id","0",stream,min_size,id)
     def get_beamtime_meta(self):
         cdef Error err
         cdef string meta_str
         with nogil:
-            meta_str = self.c_broker.get().GetBeamtimeMeta(&err)
+            meta_str = self.c_consumer.get().GetBeamtimeMeta(&err)
         if err:
             throw_exception(err)
         meta = json.loads(_str(meta_str))
         del meta['_id']
         return meta
     def interrupt_current_operation(self):
-        self.c_broker.get().InterruptCurrentOperation()
-cdef class __PyDataBrokerFactory:
-    cdef DataBrokerFactory c_factory
+        self.c_consumer.get().InterruptCurrentOperation()
+cdef class __PyConsumerFactory:
+    cdef ConsumerFactory c_factory
     def __cinit__(self):
         with nogil:
-            self.c_factory = DataBrokerFactory()
-    def create_server_broker(self,server_name,source_path,has_filesystem,beamtime_id,stream,token,timeout):
+            self.c_factory = ConsumerFactory()
+    def create_consumer(self,server_name,source_path,has_filesystem,beamtime_id,data_source,token,timeout):
         cdef string b_server_name = _bytes(server_name)
         cdef string b_source_path = _bytes(source_path)
         cdef bool b_has_filesystem = has_filesystem
         cdef SourceCredentials source
         source.beamtime_id = _bytes(beamtime_id)
         source.user_token = _bytes(token)
-        source.stream = _bytes(stream)
+        source.data_source = _bytes(data_source)
         cdef Error err
-        broker = PyDataBroker()
+        consumer = PyConsumer()
         with nogil:
-            broker.c_broker = self.c_factory.CreateServerBroker(b_server_name,b_source_path,b_has_filesystem,source,&err)
+            consumer.c_consumer = self.c_factory.CreateConsumer(b_server_name,b_source_path,b_has_filesystem,source,&err)
         if err:
             throw_exception(err)
-        broker.c_broker.get().SetTimeout(timeout)
-        return broker
+        consumer.c_consumer.get().SetTimeout(timeout)
+        return consumer
 
-def create_server_broker(server_name,source_path,has_filesystem,beamtime_id,stream,token,timeout_ms):
+def create_consumer(server_name,source_path,has_filesystem,beamtime_id,data_source,token,timeout_ms):
     """
       :param server_name: Server endpoint (hostname:port)
       :type server_name: string
@@ -349,11 +349,11 @@ def create_server_broker(server_name,source_path,has_filesystem,beamtime_id,stre
       :type source_path: string
       :param has_filesystem: True if the source_path is accessible locally, otherwise will use file transfer service to get data
       :type has_filesystem: bool
-      :return: Broker object and error. (None,err) if case of error, (broker, None) if success
-      :rtype: Tuple with broker object and error.
+      :return: Consumer object and error. (None,err) in case of error, (consumer, None) if success
+      :rtype: Tuple with consumer object and error.
 	"""
-    factory = __PyDataBrokerFactory()
-    return factory.create_server_broker(server_name,source_path,has_filesystem, beamtime_id,stream,token,timeout_ms)
+    factory = __PyConsumerFactory()
+    return factory.create_consumer(server_name,source_path,has_filesystem, beamtime_id,data_source,token,timeout_ms)
 
 
 __version__ = "@PYTHON_ASAPO_VERSION@@ASAPO_VERSION_COMMIT@"
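
For reference, a minimal sketch of the renamed Python consumer API introduced above; the endpoint, beamtime and token values are placeholders, and iterating over the returned dataset's 'content' field is an assumption based on the fields this binding works with:

import asapo_consumer

consumer = asapo_consumer.create_consumer(
    "localhost:8400",  # server_name
    "/tmp/data",       # source_path
    True,              # has_filesystem
    "test_run",        # beamtime_id
    "detector",        # data_source (formerly "stream")
    "<token>",         # token
    60000)             # timeout_ms (timeouts are now in milliseconds)

group_id = consumer.generate_group_id()
# min_size now comes before the (renamed) stream argument:
dataset = consumer.get_next_dataset(group_id, min_size=0, stream="default")
for meta in dataset['content']:  # assumed dict layout
    print(meta['name'])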
diff --git a/consumer/tools/folder_to_db/src/folder_db_importer.cpp b/consumer/tools/folder_to_db/src/folder_db_importer.cpp
index 4d3520186eacdaf78ba760d97727ba1284eff129..833ade112f4e6aa09a6b19f4a732a7b6ac889230 100644
--- a/consumer/tools/folder_to_db/src/folder_db_importer.cpp
+++ b/consumer/tools/folder_to_db/src/folder_db_importer.cpp
@@ -20,12 +20,12 @@ Error FolderToDbImporter::ConnectToDb(const std::unique_ptr<asapo::Database>& db
 }
 
 Error FolderToDbImporter::ImportSingleFile(const std::unique_ptr<asapo::Database>& db,
-                                           const FileInfo& file) const {
+                                           const MessageMeta& file) const {
     return db->Insert(std::string(kDBDataCollectionNamePrefix) + "_default", file, ignore_duplicates_);
 }
 
 Error FolderToDbImporter::ImportFilelistChunk(const std::unique_ptr<asapo::Database>& db,
-                                              const FileInfos& file_list, uint64_t begin, uint64_t end) const {
+                                              const MessageMetas& file_list, uint64_t begin, uint64_t end) const {
     for (auto i = begin; i < end; i++) {
         auto err = ImportSingleFile(db, file_list[(size_t)i]);
         if (err != nullptr) {
@@ -35,7 +35,7 @@ Error FolderToDbImporter::ImportFilelistChunk(const std::unique_ptr<asapo::Datab
     return nullptr;
 }
 
-Error FolderToDbImporter::PerformParallelTask(const FileInfos& file_list, uint64_t begin,
+Error FolderToDbImporter::PerformParallelTask(const MessageMetas& file_list, uint64_t begin,
                                               uint64_t end) const {
     Error err;
     auto db = CreateDbClient(&err);
@@ -66,14 +66,14 @@ Error WaitParallelTasks(std::vector<std::future<Error>>* res) {
 }
 
 
-TaskSplitParameters ComputeSplitParameters(const FileInfos& file_list, int ntasks) {
+TaskSplitParameters ComputeSplitParameters(const MessageMetas& file_list, int ntasks) {
     TaskSplitParameters parameters;
     parameters.chunk = file_list.size() / ntasks;
     parameters.remainder = file_list.size() % ntasks;
     return parameters;
 }
 
-void FolderToDbImporter::ProcessNextChunk(const FileInfos& file_list,
+void FolderToDbImporter::ProcessNextChunk(const MessageMetas& file_list,
                                           std::vector<std::future<Error>>* res,
                                           TaskSplitParameters* p) const {
     p->next_chunk_size = p->chunk + (p->remainder ? 1 : 0);
@@ -88,7 +88,7 @@ void FolderToDbImporter::ProcessNextChunk(const FileInfos& file_list,
     if (p->remainder) p->remainder -= 1;
 }
 
-Error FolderToDbImporter::ImportFilelist(const FileInfos& file_list) const {
+Error FolderToDbImporter::ImportFilelist(const MessageMetas& file_list) const {
     auto split_parameters = ComputeSplitParameters(file_list, n_tasks_);
 
     std::vector<std::future<Error>>res;
@@ -100,7 +100,7 @@ Error FolderToDbImporter::ImportFilelist(const FileInfos& file_list) const {
 }
 
 
-FileInfos FolderToDbImporter::GetFilesInFolder(const std::string& folder, Error* err) const {
+MessageMetas FolderToDbImporter::GetFilesInFolder(const std::string& folder, Error* err) const {
     auto file_list = io__->FilesInFolder(folder, err);
     return file_list;
 }
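
A note on the (unchanged) split arithmetic above: each of the ntasks tasks receives file_list.size() / ntasks items, and the first file_list.size() % ntasks tasks receive one extra, so the chunks cover the list exactly. A standalone sketch of the same logic in plain Python, with hypothetical names, for illustration only:

def split(n_items, n_tasks):
    # Mirrors ComputeSplitParameters + ProcessNextChunk: chunk items per
    # task, with the remainder handed out one-per-task from the front.
    chunk, remainder = divmod(n_items, n_tasks)
    begin = 0
    for _ in range(n_tasks):
        size = chunk + (1 if remainder else 0)
        if remainder:
            remainder -= 1
        yield begin, begin + size
        begin += size

print(list(split(10, 3)))  # [(0, 4), (4, 7), (7, 10)]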
diff --git a/consumer/tools/folder_to_db/src/folder_db_importer.h b/consumer/tools/folder_to_db/src/folder_db_importer.h
index 046cf4f10343e61fa8cee3dd199ec8ceb14411f1..9087b4a763914223125d25d3c91465a9c0434fa1 100644
--- a/consumer/tools/folder_to_db/src/folder_db_importer.h
+++ b/consumer/tools/folder_to_db/src/folder_db_importer.h
@@ -57,17 +57,17 @@ class FolderToDbImporter {
     mutable std::string db_uri_ ;
     mutable std::string db_name_;
     Error ConnectToDb(const std::unique_ptr<asapo::Database>& db) const;
-    FileInfos GetFilesInFolder(const std::string& folder, Error* err) const;
-    Error ImportFilelist(const FileInfos& file_list) const;
-    Error PerformParallelTask(const FileInfos& file_list, uint64_t begin,
+    MessageMetas GetFilesInFolder(const std::string& folder, Error* err) const;
+    Error ImportFilelist(const MessageMetas& file_list) const;
+    Error PerformParallelTask(const MessageMetas& file_list, uint64_t begin,
                               uint64_t end) const;
     Error ImportSingleFile(const std::unique_ptr<asapo::Database>& db,
-                           const FileInfo& file) const;
+                           const MessageMeta& file) const;
     Error ImportFilelistChunk(const std::unique_ptr<asapo::Database>& db,
-                              const FileInfos& file_list, uint64_t begin, uint64_t end) const;
+                              const MessageMetas& file_list, uint64_t begin, uint64_t end) const;
 
     std::unique_ptr<Database> CreateDbClient(Error* err) const;
-    void ProcessNextChunk(const FileInfos& file_list, std::vector<std::future<Error>>* res,
+    void ProcessNextChunk(const MessageMetas& file_list, std::vector<std::future<Error>>* res,
                           TaskSplitParameters* p) const;
 
 };
diff --git a/consumer/tools/folder_to_db/unittests/test_folder_to_db.cpp b/consumer/tools/folder_to_db/unittests/test_folder_to_db.cpp
index 1347106a2666660f0829df8d07a3fbf6c1d5f26a..f14a72dd418cfac576bf864ed3b6434ce72a4442 100644
--- a/consumer/tools/folder_to_db/unittests/test_folder_to_db.cpp
+++ b/consumer/tools/folder_to_db/unittests/test_folder_to_db.cpp
@@ -95,17 +95,17 @@ class FakeDatabaseFactory : public DatabaseFactory {
     }
 };
 
-FileInfos CreateTestFileInfos() {
-    FileInfos file_infos;
-    FileInfo fi;
+MessageMetas CreateTestMessageMetas() {
+    MessageMetas message_metas;
+    MessageMeta fi;
     fi.size = 100;
     fi.name = "1";
-    file_infos.push_back(fi);
+    message_metas.push_back(fi);
     fi.name = "2";
-    file_infos.push_back(fi);
+    message_metas.push_back(fi);
     fi.name = "3";
-    file_infos.push_back(fi);
-    return file_infos;
+    message_metas.push_back(fi);
+    return message_metas;
 }
 
 class FolderDBConverterTests : public Test {
@@ -114,20 +114,20 @@ class FolderDBConverterTests : public Test {
     NiceMock<MockIO> mock_io;
     std::string expected_collection_name = std::string(kDBDataCollectionNamePrefix) + "_default";
     MockDatabaseFactory* mock_dbf;
-    FileInfos file_infos;
+    MessageMetas message_metas;
     std::string folder, uri, db_name;
     void SetUp() override {
         converter.io__ = std::unique_ptr<IO> {&mock_io};
         mock_dbf = new MockDatabaseFactory;
         mock_dbf->CreateDBs(3);
         converter.db_factory__ = std::unique_ptr<DatabaseFactory> {mock_dbf};
-        file_infos = CreateTestFileInfos();
+        message_metas = CreateTestMessageMetas();
         folder = "folder";
         db_name = "db_name";
         uri = "db_address";
         ON_CALL(mock_io, FilesInFolder_t(_, _)).
         WillByDefault(DoAll(testing::SetArgPointee<1>(nullptr),
-                            testing::Return(file_infos)));
+                            testing::Return(message_metas)));
     }
     void TearDown() override {
         converter.io__.release();
@@ -167,7 +167,7 @@ TEST_F(FolderDBConverterTests, ErrorWhenCannotGetFileList) {
 
     EXPECT_CALL(mock_io, FilesInFolder_t(folder, _)).
     WillOnce(DoAll(testing::SetArgPointee<1>(new asapo::SimpleError("err")),
-                   testing::Return(FileInfos {})));
+                   testing::Return(MessageMetas {})));
 
     auto error = converter.Convert(uri, folder, db_name);
     ASSERT_THAT(error, Ne(nullptr));
@@ -193,9 +193,9 @@ TEST_F(FolderDBConverterTests, ErrorWhenCannotImportFileListToDb) {
     ASSERT_THAT(error, Ne(nullptr));
 
 }
-// a matcher to compare file_infos (size and basename only) for testing purposes
-// (we do not want to create an == operator for FileInfo)
-MATCHER_P(CompareFileInfo, file, "") {
+// a matcher to compare message_metas (size and basename only) for testing purposes
+// (we do not want to create an == operator for MessageMeta)
+MATCHER_P(CompareMessageMeta, file, "") {
     if (arg.size != file.size) return false;
     if (arg.name != file.name) return false;
     return true;
@@ -204,8 +204,8 @@ MATCHER_P(CompareFileInfo, file, "") {
 
 TEST_F(FolderDBConverterTests, PassesFileListToInsert) {
 
-    for (auto& file : file_infos) {
-        EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareFileInfo(file), _)).
+    for (auto& file : message_metas) {
+        EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareMessageMeta(file), _)).
         WillOnce(testing::Return(nullptr));
     }
 
@@ -216,11 +216,11 @@ TEST_F(FolderDBConverterTests, PassesFileListToInsert) {
 
 TEST_F(FolderDBConverterTests, PassesFileListToInsertInParallel3by3) {
 
-    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[0]), _)).
+    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[0]), _)).
     WillOnce(testing::Return(nullptr));
-    EXPECT_CALL(*(mock_dbf->db[1]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[1]), _)).
+    EXPECT_CALL(*(mock_dbf->db[1]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[1]), _)).
     WillOnce(testing::Return(nullptr));
-    EXPECT_CALL(*(mock_dbf->db[2]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[2]), _)).
+    EXPECT_CALL(*(mock_dbf->db[2]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[2]), _)).
     WillOnce(testing::Return(nullptr));
 
     converter.SetNParallelTasks(3, false);
@@ -230,11 +230,11 @@ TEST_F(FolderDBConverterTests, PassesFileListToInsertInParallel3by3) {
 
 TEST_F(FolderDBConverterTests, PassesFileListToInsertInParallel3by2) {
 
-    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[0]), _)).
+    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[0]), _)).
     WillOnce(testing::Return(nullptr));
-    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[1]), _)).
+    EXPECT_CALL(*(mock_dbf->db[0]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[1]), _)).
     WillOnce(testing::Return(nullptr));
-    EXPECT_CALL(*(mock_dbf->db[1]), Insert_t(expected_collection_name, CompareFileInfo(file_infos[2]), _)).
+    EXPECT_CALL(*(mock_dbf->db[1]), Insert_t(expected_collection_name, CompareMessageMeta(message_metas[2]), _)).
     WillOnce(testing::Return(nullptr));
 
     converter.SetNParallelTasks(2, false);
@@ -245,7 +245,7 @@ TEST_F(FolderDBConverterTests, PassesFileListToInsertInParallel3by2) {
 TEST_F(FolderDBConverterTests, ComputesStatistics) {
 
     EXPECT_CALL(*mock_dbf->db[0], Insert_t(_, _, false)).
-    Times(file_infos.size()).
+    Times(message_metas.size()).
     WillRepeatedly(testing::Return(nullptr));
 
     asapo::FolderImportStatistics statistics;
@@ -256,7 +256,7 @@ TEST_F(FolderDBConverterTests, ComputesStatistics) {
     auto error = converter.Convert(uri, folder, db_name, &statistics);
 
     ASSERT_THAT(error, Eq(nullptr));
-    ASSERT_THAT(statistics.n_files_converted, Eq(file_infos.size()));
+    ASSERT_THAT(statistics.n_files_converted, Eq(message_metas.size()));
     ASSERT_THAT(statistics.time_read_folder.count(), Ge(0));
     ASSERT_THAT(statistics.time_import_files.count(), Ge(0));
 }
diff --git a/examples/consumer/CMakeLists.txt b/examples/consumer/CMakeLists.txt
index 1bb3170d2e8508175cba23266b4a1d37c0005a52..3d5359f708d9f0ed2a0a64f8edd5c207be82b261 100644
--- a/examples/consumer/CMakeLists.txt
+++ b/examples/consumer/CMakeLists.txt
@@ -1,9 +1,9 @@
 find_package(Threads)
 
-add_subdirectory(getnext_broker)
+add_subdirectory(getnext)
 
 if(BUILD_EXAMPLES AND BUILD_PYTHON)
-        add_subdirectory(getnext_broker_python)
+        add_subdirectory(getnext_python)
 endif()
 
 
diff --git a/examples/consumer/getnext_broker/CMakeLists.txt b/examples/consumer/getnext/CMakeLists.txt
similarity index 93%
rename from examples/consumer/getnext_broker/CMakeLists.txt
rename to examples/consumer/getnext/CMakeLists.txt
index a830bf5cb3cd8bd34955769a8fe974ffde909454..1e7bd4ca74068e668977e62b76a03d58628fe4ff 100644
--- a/examples/consumer/getnext_broker/CMakeLists.txt
+++ b/examples/consumer/getnext/CMakeLists.txt
@@ -1,5 +1,5 @@
-set(TARGET_NAME getnext_broker)
-set(SOURCE_FILES getnext_broker.cpp)
+set(TARGET_NAME getnext)
+set(SOURCE_FILES getnext.cpp)
 
 if (BUILD_EXAMPLES)
 
diff --git a/examples/consumer/getnext_broker/CMakeLists_separate.in b/examples/consumer/getnext/CMakeLists_separate.in
similarity index 100%
rename from examples/consumer/getnext_broker/CMakeLists_separate.in
rename to examples/consumer/getnext/CMakeLists_separate.in
diff --git a/examples/consumer/getnext_broker/Makefile.in b/examples/consumer/getnext/Makefile.in
similarity index 100%
rename from examples/consumer/getnext_broker/Makefile.in
rename to examples/consumer/getnext/Makefile.in
diff --git a/examples/consumer/getnext_broker/check_linux.sh b/examples/consumer/getnext/check_linux.sh
similarity index 78%
rename from examples/consumer/getnext_broker/check_linux.sh
rename to examples/consumer/getnext/check_linux.sh
index 18950aacd287a15eb2393143f42c7a80d2efd57c..21c2bf6842cd9a06edd94e70c5d02a826b99e4f6 100644
--- a/examples/consumer/getnext_broker/check_linux.sh
+++ b/examples/consumer/getnext/check_linux.sh
@@ -2,8 +2,8 @@
 
 source_path=dummy
 beamtime_id=test_run
-stream=detector
-database_name=${beamtime_id}_${stream}
+data_source=detector
+database_name=${beamtime_id}_${data_source}
 token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 set -e
@@ -25,7 +25,7 @@ nomad run broker.nmd
 
 for i in `seq 1 3`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 sleep 1
diff --git a/examples/consumer/getnext_broker/check_windows.bat b/examples/consumer/getnext/check_windows.bat
similarity index 78%
rename from examples/consumer/getnext_broker/check_windows.bat
rename to examples/consumer/getnext/check_windows.bat
index 260fcdd28c10b8da0cf370a385d89dea1ccce223..62b2600c6aaa80f1b4f4e794f1fbc5e7fedc04f0 100644
--- a/examples/consumer/getnext_broker/check_windows.bat
+++ b/examples/consumer/getnext/check_windows.bat
@@ -1,15 +1,15 @@
 SET source_path=dummy
 
 SET beamtime_id=test_run
-SET stream=detector
-SET database_name=%beamtime_id%_%stream%
+SET data_source=detector
+SET database_name=%beamtime_id%_%data_source%
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 call start_services.bat
 
-for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
 
 "%1" 127.0.0.1:8400 %source_path% %beamtime_id% 1 %token_test_run% 12000 1 | findstr /c:"Processed 3 file" || goto :error
diff --git a/examples/consumer/getnext_broker/getnext_broker.cpp b/examples/consumer/getnext/getnext.cpp
similarity index 88%
rename from examples/consumer/getnext_broker/getnext_broker.cpp
rename to examples/consumer/getnext/getnext.cpp
index def0c807738ca2c3a7c584db4099dc27ca0b9d32..5e90a3885be448262e12bbd099278deb62eb6f8b 100644
--- a/examples/consumer/getnext_broker/getnext_broker.cpp
+++ b/examples/consumer/getnext/getnext.cpp
@@ -35,7 +35,7 @@ struct Args {
     std::string server;
     std::string file_path;
     std::string beamtime_id;
-    std::string stream;
+    std::string data_source;
     std::string token;
     int timeout_ms;
     int nthreads;
@@ -95,22 +95,27 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
              std::vector<int>* nfiles_total, std::vector<asapo::NetworkConnectionType>* connection_type,
              LatchedTimer* timer) {
     auto exec_next = [&params, nfiles, errors, nbuf, nfiles_total, connection_type, timer](int i) {
-        asapo::FileInfo fi;
+        asapo::MessageMeta fi;
         Error err;
-        auto broker = asapo::DataBrokerFactory::CreateServerBroker(params.server, params.file_path, true,
-                      asapo::SourceCredentials{asapo::SourceType::kProcessed,params.beamtime_id, "", params.stream, params.token}, &err);
+        auto consumer = asapo::ConsumerFactory::CreateConsumer(params.server,
+                                                             params.file_path,
+                                                             true,
+                                                             asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                      params.beamtime_id, "",
+                                                                                      params.data_source, params.token},
+                                                             &err);
         if (err) {
-            std::cout << "Error CreateServerBroker: " << err << std::endl;
+            std::cout << "Error CreateConsumer: " << err << std::endl;
             exit(EXIT_FAILURE);
         }
-        //broker->ForceNoRdma();
+        //consumer->ForceNoRdma();
 
-        broker->SetTimeout((uint64_t) params.timeout_ms);
-        asapo::FileData data;
+        consumer->SetTimeout((uint64_t) params.timeout_ms);
+        asapo::MessageData data;
 
         lock.lock();
         if (group_id.empty()) {
-            group_id = broker->GenerateNewGroupId(&err);
+            group_id = consumer->GenerateNewGroupId(&err);
             if (err) {
                 (*errors)[i] += ProcessError(err);
                 lock.unlock();
@@ -121,7 +126,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
         lock.unlock();
 
         if (i == 0) {
-            auto meta = broker->GetBeamtimeMeta(&err);
+            auto meta = consumer->GetBeamtimeMeta(&err);
             if (err == nullptr) {
                 std::cout << meta << std::endl;
             } else {
@@ -132,7 +137,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
         bool isFirstFile = true;
         while (true) {
             if (params.datasets) {
-                auto dataset = broker->GetNextDataset(group_id, 0, &err);
+                auto dataset = consumer->GetNextDataset(group_id, 0, "default", &err);
                 if (err == nullptr) {
                     for (auto& fi : dataset.content) {
                         (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1;
@@ -140,7 +145,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
                     }
                 }
             } else {
-                err = broker->GetNext(&fi, group_id, params.read_data ? &data : nullptr);
+                err = consumer->GetNext(group_id, &fi, params.read_data ? &data : nullptr, "default");
                 if (isFirstFile) {
                     isFirstFile = false;
                     timer->count_down_and_wait();
@@ -167,7 +172,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
             (*nfiles)[i]++;
         }
 
-        (*connection_type)[i] = broker->CurrentConnectionType();
+        (*connection_type)[i] = consumer->CurrentConnectionType();
     };
 
     std::vector<std::thread> threads;
@@ -180,7 +185,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err
 int ReadAllData(const Args& params, uint64_t* duration_ms, uint64_t* duration_without_first_ms, int* nerrors, int* nbuf,
                 int* nfiles_total,
                 asapo::NetworkConnectionType* connection_type) {
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     std::chrono::system_clock::time_point t1 = std::chrono::system_clock::now();
 
     std::vector<int> nfiles(params.nthreads, 0);
@@ -251,14 +256,14 @@ void TryGetStream(Args* args) {
     }
     if (seglist.size() > 1) {
         args->beamtime_id = seglist[0];
-        args->stream = seglist[1];
+        args->data_source = seglist[1];
     }
     return;
 
 }
 
 int main(int argc, char* argv[]) {
-    asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
+    asapo::ExitAfterPrintVersionIfNeeded("GetNext consumer Example", argc, argv);
     Args params;
     params.datasets = false;
     if (argc != 8 && argc != 9) {
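
Note the flipped argument order in the calls above: GetNext and GetNextDataset now take the group id first and the stream name last. For comparison, the corresponding consume loop through the Python bindings, continuing the create_consumer sketch shown earlier and using a hypothetical handle() step:

while True:
    try:
        data, meta = consumer.get_next(group_id, meta_only=False)
    except asapo_consumer.AsapoEndOfStreamError:
        break
    handle(meta, data)  # hypothetical processing step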
diff --git a/examples/consumer/getnext_broker_python/CMakeLists.txt b/examples/consumer/getnext_python/CMakeLists.txt
similarity index 88%
rename from examples/consumer/getnext_broker_python/CMakeLists.txt
rename to examples/consumer/getnext_python/CMakeLists.txt
index 22bd7bea66e7ecd8a7d0fd2b54273152351e9493..a551c090300c7d24cd0a72dbf8b8602ae9806308 100644
--- a/examples/consumer/getnext_broker_python/CMakeLists.txt
+++ b/examples/consumer/getnext_python/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(TARGET_NAME getnext_broker_python)
+set(TARGET_NAME getnext_python)
 
 
 prepare_asapo()
diff --git a/examples/consumer/getnext_broker_python/check_linux.sh b/examples/consumer/getnext_python/check_linux.sh
similarity index 85%
rename from examples/consumer/getnext_broker_python/check_linux.sh
rename to examples/consumer/getnext_python/check_linux.sh
index 76c8b61b181e80c343221fe027bb9eda57b0cc49..a600692a552c9aa11c75ba0030af91dcaec41e1a 100644
--- a/examples/consumer/getnext_broker_python/check_linux.sh
+++ b/examples/consumer/getnext_python/check_linux.sh
@@ -2,8 +2,8 @@
 
 source_path=dummy
 beamtime_id=test_run
-stream=detector
-database_name=${beamtime_id}_${stream}
+data_source=detector
+database_name=${beamtime_id}_${data_source}
 token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 group_id=bif31l2uiddd4r0q6b40
 set -e
@@ -25,7 +25,7 @@ nomad run broker.nmd
 
 for i in `seq 1 3`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 echo 'db.meta.insert({"_id":0,"meta_test":"test"})' | mongo ${database_name}
diff --git a/examples/consumer/getnext_broker_python/check_windows.bat b/examples/consumer/getnext_python/check_windows.bat
similarity index 89%
rename from examples/consumer/getnext_broker_python/check_windows.bat
rename to examples/consumer/getnext_python/check_windows.bat
index 8d01bee647fdb604c510c1c680aeddf8cd18e14f..c546f6108686ab0f4ca55632d599f2711e222e3f 100644
--- a/examples/consumer/getnext_broker_python/check_windows.bat
+++ b/examples/consumer/getnext_python/check_windows.bat
@@ -1,7 +1,7 @@
 SET source_path=dummy
 SET beamtime_id=test_run
-SET stream=detector
-SET database_name=%beamtime_id%_%stream%
+SET data_source=detector
+SET database_name=%beamtime_id%_%data_source%
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
@@ -9,7 +9,7 @@ set group_id=bif31l2uiddd4r0q6b40
 
 call start_services.bat
 
-for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
 
 echo db.meta.insert({"_id":0,"meta_test":"test"}) | %mongo_exe% %database_name%  || goto :error
diff --git a/examples/consumer/getnext_broker_python/getnext.py b/examples/consumer/getnext_python/getnext.py
similarity index 68%
rename from examples/consumer/getnext_broker_python/getnext.py
rename to examples/consumer/getnext_python/getnext.py
index 10d6517cdedc885ff9ddf5ec79828d5908fb8d3c..0aec93df4bdfff79a2b6733e1cb225fa955e28b2 100644
--- a/examples/consumer/getnext_broker_python/getnext.py
+++ b/examples/consumer/getnext_python/getnext.py
@@ -6,21 +6,21 @@ import sys
 
 source, path, beamtime, token, group_id = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(source,path,True, beamtime,"",token,60000)
+consumer = asapo_consumer.create_consumer(source,path,True, beamtime,"",token,60000)
 
 
 if group_id == "new":
-    group_id_new = broker.generate_group_id()
+    group_id_new = consumer.generate_group_id()
     print ('generated group id: ', group_id_new)
 else:
     group_id_new = group_id
 
-_, meta = broker.get_next(group_id_new, meta_only=True)
+_, meta = consumer.get_next(group_id_new, meta_only=True)
 print ('filename: ', meta['name'])
 print ('meta: ', json.dumps(meta, indent=4, sort_keys=True))
 
 try:
-    beamtime_meta = broker.get_beamtime_meta()
+    beamtime_meta = consumer.get_beamtime_meta()
     print ('beamtime meta: ', json.dumps(beamtime_meta, indent=4, sort_keys=True))
 except asapo_consumer.AsapoError as err:
     print ('error getting beamtime meta: ', err)
diff --git a/examples/consumer/simple-consumer/consume.cpp b/examples/consumer/simple-consumer/consume.cpp
index 8df37f0ce6d3f7f81837c25ba62766ea3c3bfaba..501fd2af5113052bf978372ff50183bc4b1fe4f7 100644
--- a/examples/consumer/simple-consumer/consume.cpp
+++ b/examples/consumer/simple-consumer/consume.cpp
@@ -14,17 +14,17 @@ int main(int argc, char* argv[]) {
     auto beamtime = "asapo_test";
     auto token = "KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc=";
 
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(endpoint, "", true, asapo::SourceCredentials{beamtime, "", "", token}, &err);
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, "", true, asapo::SourceCredentials{beamtime, "", "", token}, &err);
     exit_if_error("Cannot create consumer", err);
-    broker->SetTimeout((uint64_t) 1000);
+    consumer->SetTimeout((uint64_t) 1000);
 
-    auto group_id = broker->GenerateNewGroupId(&err);
+    auto group_id = consumer->GenerateNewGroupId(&err);
     exit_if_error("Cannot create group id", err);
 
-    asapo::FileInfo fi;
-    asapo::FileData data;
+    asapo::MessageMeta fi;
+    asapo::MessageData data;
 
-    err = broker->GetLast(&fi, group_id, &data);
+    err = consumer->GetLast(&fi, group_id, &data);
     exit_if_error("Cannot get next record", err);
 
     std::cout << "id: " << fi.id << std::endl;
diff --git a/examples/pipeline/in_to_out/check_linux.sh b/examples/pipeline/in_to_out/check_linux.sh
index 8e6398d0546d80a0c4c180c800fbb8c5051c7c22..8540d411d14f789d3902a71f2ace9a24c4c5b1d8 100644
--- a/examples/pipeline/in_to_out/check_linux.sh
+++ b/examples/pipeline/in_to_out/check_linux.sh
@@ -2,13 +2,13 @@
 
 source_path=.
 beamtime_id=asapo_test
-stream_in=detector
-stream_out=stream
-stream_out2=stream2
+data_source_in=detector
+data_source_out=data_source
+data_source_out2=data_source2
 
-indatabase_name=${beamtime_id}_${stream_in}
-outdatabase_name=${beamtime_id}_${stream_out}
-outdatabase_name2=${beamtime_id}_${stream_out2}
+indatabase_name=${beamtime_id}_${data_source_in}
+outdatabase_name=${beamtime_id}_${data_source_out}
+outdatabase_name2=${beamtime_id}_${data_source_out2}
 
 token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
@@ -55,23 +55,23 @@ echo hello3 > processed/file3
 
 for i in `seq 1 3`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'processed/file$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${indatabase_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'processed/file$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${indatabase_name}
 done
 
 sleep 1
 
-$1 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out $token 2 1000 25000 1  > out
+$1 127.0.0.1:8400 $source_path $beamtime_id $data_source_in $data_source_out $token 2 1000 25000 1  > out
 cat out
 cat out | grep "Processed 3 file(s)"
 cat out | grep "Sent 3 file(s)"
 
-echo "db.data_default.find({"_id":1})" | mongo ${outdatabase_name} | tee /dev/stderr | grep file1_${stream_out}
+echo "db.data_default.find({"_id":1})" | mongo ${outdatabase_name} | tee /dev/stderr | grep file1_${data_source_out}
 
-cat ${receiver_folder}/processed/file1_${stream_out} | grep hello1
-cat ${receiver_folder}/processed/file2_${stream_out} | grep hello2
-cat ${receiver_folder}/processed/file3_${stream_out} | grep hello3
+cat ${receiver_folder}/processed/file1_${data_source_out} | grep hello1
+cat ${receiver_folder}/processed/file2_${data_source_out} | grep hello2
+cat ${receiver_folder}/processed/file3_${data_source_out} | grep hello3
 
-$1 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out2 $token 2 1000 25000 0  > out2
+$1 127.0.0.1:8400 $source_path $beamtime_id $data_source_in $data_source_out2 $token 2 1000 25000 0  > out2
 cat out2
-test ! -f ${receiver_folder}/processed/file1_${stream_out2}
+test ! -f ${receiver_folder}/processed/file1_${data_source_out2}
 echo "db.data_default.find({"_id":1})" | mongo ${outdatabase_name2} | tee /dev/stderr | grep processed/file1
diff --git a/examples/pipeline/in_to_out/check_windows.bat b/examples/pipeline/in_to_out/check_windows.bat
index 674d48463280846ae78776415eac3a2e0aac95a5..5b0f9bb514ae45e2f867a6e88debf5e3ab4c2856 100644
--- a/examples/pipeline/in_to_out/check_windows.bat
+++ b/examples/pipeline/in_to_out/check_windows.bat
@@ -1,12 +1,12 @@
 SET source_path=.
 SET beamtime_id=asapo_test
-SET stream_in=detector
-SET stream_out=stream
-SET stream_out2=stream2
+SET data_source_in=detector
+SET data_source_out=data_source
+SET data_source_out2=data_source2
 
-SET indatabase_name=%beamtime_id%_%stream_in%
-SET outdatabase_name=%beamtime_id%_%stream_out%
-SET outdatabase_name2=%beamtime_id%_%stream_out2%
+SET indatabase_name=%beamtime_id%_%data_source_in%
+SET outdatabase_name=%beamtime_id%_%data_source_out%
+SET outdatabase_name2=%beamtime_id%_%data_source_out2%
 
 SET token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
@@ -20,7 +20,7 @@ SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
 call start_services.bat
 
-for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"processed\\file%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
+for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"processed\\file%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
 
 mkdir %receiver_folder%
 
@@ -30,19 +30,19 @@ echo hello2 > processed\file2
 echo hello3 > processed\file3
 
 
-"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %stream_in% %stream_out% %token% 2 1000 25000 1 > out
+"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %data_source_in% %data_source_out% %token% 2 1000 25000 1 > out
 type out
 findstr /I /L /C:"Processed 3 file(s)" out || goto :error
 findstr /I /L /C:"Sent 3 file(s)" out || goto :error
 
-echo db.data_default.find({"_id":1}) | %mongo_exe% %outdatabase_name% | findstr  /c:"file1_%stream_out%"  || goto :error
+echo db.data_default.find({"_id":1}) | %mongo_exe% %outdatabase_name% | findstr  /c:"file1_%data_source_out%"  || goto :error
 
-findstr /I /L /C:"hello1" %receiver_folder%\processed\file1_%stream_out% || goto :error
-findstr /I /L /C:"hello2" %receiver_folder%\processed\file2_%stream_out% || goto :error
-findstr /I /L /C:"hello3" %receiver_folder%\processed\file3_%stream_out% || goto :error
+findstr /I /L /C:"hello1" %receiver_folder%\processed\file1_%data_source_out% || goto :error
+findstr /I /L /C:"hello2" %receiver_folder%\processed\file2_%data_source_out% || goto :error
+findstr /I /L /C:"hello3" %receiver_folder%\processed\file3_%data_source_out% || goto :error
 
 
-"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %stream_in% %stream_out2% %token% 2 1000 25000 0 > out2
+"%1" 127.0.0.1:8400 %source_path% %beamtime_id%  %data_source_in% %data_source_out2% %token% 2 1000 25000 0 > out2
 type out2
 findstr /I /L /C:"Processed 3 file(s)" out2 || goto :error
 findstr /I /L /C:"Sent 3 file(s)" out2 || goto :error
diff --git a/examples/pipeline/in_to_out/in_to_out.cpp b/examples/pipeline/in_to_out/in_to_out.cpp
index bcdd041f7a2f0cc8fe3b272229d425e75630018c..801d9f32a0653776f4a2093a2a3d545bcad19dc3 100644
--- a/examples/pipeline/in_to_out/in_to_out.cpp
+++ b/examples/pipeline/in_to_out/in_to_out.cpp
@@ -16,7 +16,7 @@
 
 using std::chrono::system_clock;
 using asapo::Error;
-using BrokerPtr = std::unique_ptr<asapo::DataBroker>;
+using ConsumerPtr = std::unique_ptr<asapo::Consumer>;
 using ProducerPtr = std::unique_ptr<asapo::Producer>;
 std::string group_id = "";
 std::mutex lock_in, lock_out;
@@ -27,16 +27,16 @@ system_clock::time_point streamout_start;
 system_clock::time_point streamout_finish;
 
 struct Args {
-    std::string server;
-    std::string file_path;
-    std::string beamtime_id;
-    std::string stream_in;
-    std::string stream_out;
-    std::string token;
-    int timeout_ms;
-    int timeout_ms_producer;
-    int nthreads;
-    bool transfer_data;
+  std::string server;
+  std::string file_path;
+  std::string beamtime_id;
+  std::string stream_in;
+  std::string stream_out;
+  std::string token;
+  int timeout_ms;
+  int timeout_ms_producer;
+  int nthreads;
+  bool transfer_data;
 };
 
 void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
@@ -51,44 +51,46 @@ void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
 
 }
 
-
 void WaitConsumerThreadsFinished(std::vector<std::thread>* threads) {
-    for (auto& thread : *threads) {
+    for (auto &thread : *threads) {
         thread.join();
     }
 }
 
-int ProcessError(const Error& err) {
+int ProcessError(const Error &err) {
     if (err == nullptr) return 0;
     std::cout << err->Explain() << std::endl;
     return err == asapo::ConsumerErrorTemplates::kEndOfStream ? 0 : 1;
 }
 
-BrokerPtr CreateBrokerAndGroup(const Args& args, Error* err) {
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, args.file_path, true,
-                  asapo::SourceCredentials{asapo::SourceType::kProcessed,args.beamtime_id, "", args.stream_in, args.token}, err);
+ConsumerPtr CreateConsumerAndGroup(const Args &args, Error* err) {
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(args.server, args.file_path, true,
+                                                           asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                    args.beamtime_id, "",
+                                                                                    args.stream_in,
+                                                                                    args.token}, err);
     if (*err) {
         return nullptr;
     }
 
-    broker->SetTimeout((uint64_t) args.timeout_ms);
+    consumer->SetTimeout((uint64_t) args.timeout_ms);
 
     lock_in.lock();
 
     if (group_id.empty()) {
-        group_id = broker->GenerateNewGroupId(err);
+        group_id = consumer->GenerateNewGroupId(err);
         if (*err) {
             lock_in.unlock();
             return nullptr;
         }
     }
     lock_in.unlock();
-    return broker;
+    return consumer;
 }
 
-void GetBeamtimeMeta(const BrokerPtr& broker) {
+void GetBeamtimeMeta(const ConsumerPtr &consumer) {
     Error err;
-    auto meta = broker->GetBeamtimeMeta(&err);
+    auto meta = consumer->GetBeamtimeMeta(&err);
     if (err == nullptr) {
         std::cout << meta << std::endl;
     } else {
@@ -96,16 +98,17 @@ void GetBeamtimeMeta(const BrokerPtr& broker) {
     }
 }
 
-void SendDataDownstreamThePipeline(const Args& args, const asapo::FileInfo& fi, asapo::FileData data,
-                                   const ProducerPtr& producer) {
-    asapo::EventHeader header{fi.id, fi.size, fi.name, fi.metadata};
+void SendDownstreamThePipeline(const Args &args, const asapo::MessageMeta &fi, asapo::MessageData data,
+                               const ProducerPtr &producer) {
+    asapo::MessageHeader header{fi.id, fi.size, fi.name, fi.metadata};
     Error err_send;
     if (args.transfer_data) {
         header.file_name += "_" + args.stream_out;
-        err_send = producer->SendData(header, std::move(data), asapo::kDefaultIngestMode, ProcessAfterSend);
+        err_send = producer->Send(header, std::move(data), asapo::kDefaultIngestMode, "default", ProcessAfterSend);
     } else {
         header.file_name = args.file_path + asapo::kPathSeparator + header.file_name;
-        err_send = producer->SendData(header, nullptr, asapo::IngestModeFlags::kTransferMetaDataOnly, ProcessAfterSend);
+        err_send =
+            producer->Send(header, nullptr, asapo::IngestModeFlags::kTransferMetaDataOnly, "default", ProcessAfterSend);
         std::cout << err_send << std::endl;
     }
 
@@ -121,42 +124,43 @@ void SendDataDownstreamThePipeline(const Args& args, const asapo::FileInfo& fi,
     }
 }
 
-Error ProcessNextEvent(const Args& args, const BrokerPtr& broker, const ProducerPtr& producer) {
-    asapo::FileData data;
-    asapo::FileInfo fi;
+Error ProcessNextEvent(const Args &args, const ConsumerPtr &consumer, const ProducerPtr &producer) {
+    asapo::MessageData data;
+    asapo::MessageMeta fi;
 
-    auto err = broker->GetNext(&fi, group_id, args.transfer_data ? &data : nullptr);
+    auto err = consumer->GetNext(group_id, &fi, args.transfer_data ? &data : nullptr, "default");
     if (err) {
         return err;
     }
 
-    SendDataDownstreamThePipeline(args, fi, std::move(data), producer);
+    SendDownstreamThePipeline(args, fi, std::move(data), producer);
 
     return nullptr;
 }
 
-std::vector<std::thread> StartConsumerThreads(const Args& args, const ProducerPtr& producer,
+std::vector<std::thread> StartConsumerThreads(const Args &args, const ProducerPtr &producer,
                                               std::vector<int>* nfiles,
                                               std::vector<int>* errors) {
-    auto exec_next = [&args, nfiles, errors, &producer ](int i) {
-        asapo::FileInfo fi;
-        Error err;
-        auto broker = CreateBrokerAndGroup(args, &err);
-        if (err) {
-            (*errors)[i] += ProcessError(err);
-            return;
-        }
-
-        while (true) {
-            auto err = ProcessNextEvent(args, broker, producer);
-            if (err) {
-                (*errors)[i] += ProcessError(err);
-                if (err == asapo::ConsumerErrorTemplates::kEndOfStream || err == asapo::ConsumerErrorTemplates::kWrongInput) {
-                    break;
-                }
-            }
-            (*nfiles)[i]++;
-        }
+    auto exec_next = [&args, nfiles, errors, &producer](int i) {
+      asapo::MessageMeta fi;
+      Error err;
+      auto consumer = CreateConsumerAndGroup(args, &err);
+      if (err) {
+          (*errors)[i] += ProcessError(err);
+          return;
+      }
+
+      while (true) {
+          auto err = ProcessNextEvent(args, consumer, producer);
+          if (err) {
+              (*errors)[i] += ProcessError(err);
+              if (err == asapo::ConsumerErrorTemplates::kEndOfStream
+                  || err == asapo::ConsumerErrorTemplates::kWrongInput) {
+                  break;
+              }
+          }
+          (*nfiles)[i]++;
+      }
     };
 
     std::vector<std::thread> threads;
@@ -166,8 +170,8 @@ std::vector<std::thread> StartConsumerThreads(const Args& args, const ProducerPt
     return threads;
 }
 
-int ProcessAllData(const Args& args, const ProducerPtr& producer, uint64_t* duration_ms, int* nerrors) {
-    asapo::FileInfo fi;
+int ProcessAllData(const Args &args, const ProducerPtr &producer, uint64_t* duration_ms, int* nerrors) {
+    asapo::MessageMeta fi;
     system_clock::time_point t1 = system_clock::now();
 
     std::vector<int> nfiles(args.nthreads, 0);
@@ -185,12 +189,13 @@ int ProcessAllData(const Args& args, const ProducerPtr& producer, uint64_t* dura
     return n_total;
 }
 
-std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
+std::unique_ptr<asapo::Producer> CreateProducer(const Args &args) {
     asapo::Error err;
     auto producer = asapo::Producer::Create(args.server, args.nthreads,
                                             asapo::RequestHandlerType::kTcp,
-                                            asapo::SourceCredentials{asapo::SourceType::kProcessed,args.beamtime_id, "", args.stream_out, args.token }, 60, &err);
-    if(err) {
+                                            asapo::SourceCredentials{asapo::SourceType::kProcessed, args.beamtime_id,
+                                                                     "", args.stream_out, args.token}, 60000, &err);
+    if (err) {
         std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
         exit(EXIT_FAILURE);
     }
@@ -201,11 +206,11 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
 }
 
 int main(int argc, char* argv[]) {
-    asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
+    asapo::ExitAfterPrintVersionIfNeeded("GetNext consumer Example", argc, argv);
     Args args;
     if (argc != 11) {
         std::cout << "Usage: " + std::string{argv[0]}
-                  + " <server> <files_path> <beamtime_id> <stream_in> <stream_out> <nthreads> <token> <timeout ms>  <timeout ms producer> <transfer data>"
+            + " <server> <files_path> <beamtime_id> <stream_in> <stream_out> <nthreads> <token> <timeout ms>  <timeout ms producer> <transfer data>"
                   <<
                   std::endl;
         exit(EXIT_FAILURE);
@@ -230,25 +235,23 @@ int main(int argc, char* argv[]) {
     auto nfiles = ProcessAllData(args, producer, &duration_ms, &nerrors);
 
     if (producer->WaitRequestsFinished(args.timeout_ms_producer) != nullptr) {
-        std::cerr << "Stream out exit on timeout " << std::endl;
+        std::cerr << "Data source out exit on timeout " << std::endl;
     }
     auto duration_streamout = std::chrono::duration_cast<std::chrono::milliseconds>(streamout_finish - streamout_start);
 
-    std::cout << "Stream in " << std::endl;
+    std::cout << "Data source in " << std::endl;
     std::cout << "  Processed " << nfiles << " file(s)" << std::endl;
     std::cout << "  Successfully: " << nfiles - nerrors << std::endl;
     std::cout << "  Errors : " << nerrors << std::endl;
     std::cout << "  Elapsed : " << duration_ms - args.timeout_ms << "ms" << std::endl;
     std::cout << "  Rate : " << 1000.0f * nfiles / (duration_ms - args.timeout_ms) << std::endl;
 
-    std::cout << "Stream out " << std::endl;
+    std::cout << "Data source out " << std::endl;
     std::cout << "  Sent " << files_sent << " file(s)" << std::endl;
     std::cout << "  Elapsed : " << duration_streamout.count() << "ms" << std::endl;
     std::cout << "  Rate : " << 1000.0f * files_sent / (duration_streamout.count()) << std::endl;
 
-
     std::this_thread::sleep_for(std::chrono::milliseconds(1000));
 
-
     return (nerrors == 0) && (files_sent == nfiles) ? 0 : 1;
 }
diff --git a/examples/pipeline/in_to_out_python/check_linux.sh b/examples/pipeline/in_to_out_python/check_linux.sh
index 2c794a0a390d05c13720c97520e202ef430cbe0b..444c6ceeac2c5783d5611cd4490eb768b9702ad8 100644
--- a/examples/pipeline/in_to_out_python/check_linux.sh
+++ b/examples/pipeline/in_to_out_python/check_linux.sh
@@ -2,15 +2,15 @@
 
 source_path=.
 beamtime_id=asapo_test
-stream_in=detector
-stream_out=stream
+data_source_in=detector
+data_source_out=data_source
 
 timeout=15
 timeout_producer=25
 nthreads=4
 
-indatabase_name=${beamtime_id}_${stream_in}
-outdatabase_name=${beamtime_id}_${stream_out}
+indatabase_name=${beamtime_id}_${data_source_in}
+outdatabase_name=${beamtime_id}_${data_source_out}
 
 token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
@@ -57,7 +57,7 @@ echo hello3 > processed/file3
 
 for i in `seq 1 3`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'processed/file$i'","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${indatabase_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'processed/file$i'","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${indatabase_name}
 done
 
 sleep 1
@@ -65,13 +65,13 @@ sleep 1
 export PYTHONPATH=$2:$3:${PYTHONPATH}
 
 
-$1 $4 127.0.0.1:8400 $source_path $beamtime_id $stream_in $stream_out $token $timeout $timeout_producer $nthreads 1  > out
+$1 $4 127.0.0.1:8400 $source_path $beamtime_id $data_source_in $data_source_out $token $timeout $timeout_producer $nthreads 1  > out
 cat out
 cat out | grep "Processed 3 file(s)"
 cat out | grep "Sent 3 file(s)"
 
-echo "db.data_default.find({"_id":1})" | mongo ${outdatabase_name} | tee /dev/stderr | grep "file1_${stream_out}"
+echo "db.data_default.find({"_id":1})" | mongo ${outdatabase_name} | tee /dev/stderr | grep "file1_${data_source_out}"
 
-cat ${receiver_folder}/processed/file1_${stream_out} | grep hello1
-cat ${receiver_folder}/processed/file2_${stream_out} | grep hello2
-cat ${receiver_folder}/processed/file3_${stream_out} | grep hello3
+cat ${receiver_folder}/processed/file1_${data_source_out} | grep hello1
+cat ${receiver_folder}/processed/file2_${data_source_out} | grep hello2
+cat ${receiver_folder}/processed/file3_${data_source_out} | grep hello3
diff --git a/examples/pipeline/in_to_out_python/check_windows.bat b/examples/pipeline/in_to_out_python/check_windows.bat
index 14c125bbc79414b19792e60b573b24b95eda92f7..3160af194c3d56c15a3c943700d2541e1bb2c896 100644
--- a/examples/pipeline/in_to_out_python/check_windows.bat
+++ b/examples/pipeline/in_to_out_python/check_windows.bat
@@ -1,10 +1,10 @@
 SET source_path=.
 SET beamtime_id=asapo_test
-SET stream_in=detector
-SET stream_out=stream
+SET data_source_in=detector
+SET data_source_out=simulation
 
-SET indatabase_name=%beamtime_id%_%stream_in%
-SET outdatabase_name=%beamtime_id%_%stream_out%
+SET indatabase_name=%beamtime_id%_%data_source_in%
+SET outdatabase_name=%beamtime_id%_%data_source_out%
 
 SET token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
@@ -22,7 +22,7 @@ SET nthreads=4
 
 call start_services.bat
 
-for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"processed\\file%%x","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
+for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"processed\\file%%x","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
 
 mkdir %receiver_folder%
 mkdir processed
@@ -33,17 +33,17 @@ echo hello3 > processed\file3
 
 set PYTHONPATH=%2;%3
 
-"%1" "%4" 127.0.0.1:8400 %source_path% %beamtime_id% %stream_in% %stream_out% %token% %timeout% %timeout_producer% %nthreads% 1  > out
+"%1" "%4" 127.0.0.1:8400 %source_path% %beamtime_id% %data_source_in% %data_source_out% %token% %timeout% %timeout_producer% %nthreads% 1  > out
 
 type out
 findstr /I /L /C:"Processed 3 file(s)" out || goto :error
 findstr /I /L /C:"Sent 3 file(s)" out || goto :error
 
-echo db.data_default.find({"_id":1}) | %mongo_exe% %outdatabase_name% | findstr  /c:"file1_%stream_out%"  || goto :error
+echo db.data_default.find({"_id":1}) | %mongo_exe% %outdatabase_name% | findstr  /c:"file1_%data_source_out%"  || goto :error
 
-findstr /I /L /C:"hello1" %receiver_folder%\processed\file1_%stream_out% || goto :error
-findstr /I /L /C:"hello2" %receiver_folder%\processed\file2_%stream_out% || goto :error
-findstr /I /L /C:"hello3" %receiver_folder%\processed\file3_%stream_out% || goto :error
+findstr /I /L /C:"hello1" %receiver_folder%\processed\file1_%data_source_out% || goto :error
+findstr /I /L /C:"hello2" %receiver_folder%\processed\file2_%data_source_out% || goto :error
+findstr /I /L /C:"hello3" %receiver_folder%\processed\file3_%data_source_out% || goto :error
 
 
 goto :clean
diff --git a/examples/pipeline/in_to_out_python/in_to_out.py b/examples/pipeline/in_to_out_python/in_to_out.py
index e2b096337c563fd46921e67369effe712882da8f..0e58c1b0a0daf249ca960344e3088b177de570dd 100644
--- a/examples/pipeline/in_to_out_python/in_to_out.py
+++ b/examples/pipeline/in_to_out_python/in_to_out.py
@@ -26,11 +26,11 @@ timeout_s_producer=int(timeout_s_producer)
 nthreads=int(nthreads)
 transfer_data=int(transfer_data)>0
 
-broker = asapo_consumer.create_server_broker(source,path, True,beamtime,stream_in,token,timeout_s*1000)
+consumer = asapo_consumer.create_consumer(source,path, True,beamtime,stream_in,token,timeout_s*1000)
 
-producer  = asapo_producer.create_producer(source,'processed',beamtime,'auto', stream_out, token, nthreads, 600)
+producer  = asapo_producer.create_producer(source,'processed',beamtime,'auto', stream_out, token, nthreads, 600000)
 
-group_id  = broker.generate_group_id()
+group_id  = consumer.generate_group_id()
 
 n_recv = 0
 
@@ -41,10 +41,10 @@ else:
 
 while True:
     try:
-        data, meta = broker.get_next(group_id, meta_only=not transfer_data)
+        data, meta = consumer.get_next(group_id, meta_only=not transfer_data)
         print ("received: ",meta)
         n_recv = n_recv + 1
-        producer.send_data(meta['_id'],meta['name']+"_"+stream_out ,data,
+        producer.send(meta['_id'], meta['name']+"_"+stream_out, data,
                              ingest_mode = ingest_mode, callback = callback)
     except  asapo_consumer.AsapoEndOfStreamError:
         break
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index fb2ae997d7ddb6c24b4bf5725b9128869bb0c38a..d983f01e2d419a5b5ca0356e746d4a64d16b54be 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -18,14 +18,14 @@ int iterations_remained;
 struct Args {
     std::string discovery_service_endpoint;
     std::string beamtime_id;
-    std::string stream;
+    std::string data_source;
     std::string token;
     size_t number_of_bytes;
     uint64_t iterations;
     uint64_t nthreads;
     uint64_t mode;
-    uint64_t timeout_sec;
-    uint64_t images_in_set;
+    uint64_t timeout_ms;
+    uint64_t messages_in_set;
 };
 
 void PrintCommandArguments(const Args& args) {
@@ -38,12 +38,12 @@ void PrintCommandArguments(const Args& args) {
               << "Write files: " << ((args.mode %100) / 10 == 1) << std::endl
               << "Tcp mode: " << ((args.mode % 10) ==0 ) << std::endl
               << "Raw: " << (args.mode / 100 == 1)<< std::endl
-              << "timeout: " << args.timeout_sec << std::endl
-              << "images in set: " << args.images_in_set << std::endl
+              << "timeout: " << args.timeout_ms << std::endl
+              << "messages in set: " << args.messages_in_set << std::endl
               << std::endl;
 }
 
-void TryGetStreamAndToken(Args* args) {
+void TryGetDataSourceAndToken(Args* args) {
     std::stringstream test(args->beamtime_id);
     std::string segment;
     std::vector<std::string> seglist;
@@ -56,7 +56,7 @@ void TryGetStreamAndToken(Args* args) {
     }
     if (seglist.size() > 1) {
         args->beamtime_id = seglist[0];
-        args->stream = seglist[1];
+        args->data_source = seglist[1];
     }
     if (seglist.size() > 2) {
         args->token = seglist[2];
@@ -73,24 +73,24 @@ void ProcessCommandArguments(int argc, char* argv[], Args* args) {
     if (argc != 8 && argc != 9) {
         std::cout <<
                   "Usage: " << argv[0] <<
-                  " <destination> <beamtime_id[%<stream>%<token>]> <number_of_kbyte> <iterations> <nthreads>"
-                  " <mode 0xx - processed source type, 1xx - raw source type, xx0 -t tcp, xx1 - filesystem, x0x - write files, x1x - do not write files> <timeout (sec)> [n images in set (default 1)]"
+                  " <destination> <beamtime_id[%<data_source>%<token>]> <number_of_kbyte> <iterations> <nthreads>"
+                  " <mode 0xx - processed source type, 1xx - raw source type, xx0 -t tcp, xx1 - filesystem, x0x - write files, x1x - do not write files> <timeout (sec)> [n messages in set (default 1)]"
                   << std::endl;
         exit(EXIT_FAILURE);
     }
     try {
         args->discovery_service_endpoint = argv[1];
         args->beamtime_id = argv[2];
-        TryGetStreamAndToken(args);
+        TryGetDataSourceAndToken(args);
         args->number_of_bytes = std::stoull(argv[3]) * 1000;
         args->iterations = std::stoull(argv[4]);
         args->nthreads = std::stoull(argv[5]);
         args->mode = std::stoull(argv[6]);
-        args->timeout_sec = std::stoull(argv[7]);
+        args->timeout_ms = std::stoull(argv[7])*1000;
         if (argc == 9) {
-            args->images_in_set = std::stoull(argv[8]);
+            args->messages_in_set = std::stoull(argv[8]);
         } else {
-            args->images_in_set = 1;
+            args->messages_in_set = 1;
         }
         PrintCommandArguments(*args);
         return;
@@ -124,56 +124,65 @@ void ProcessAfterMetaDataSend(asapo::RequestCallbackPayload payload, asapo::Erro
     return;
 }
 
-asapo::FileData CreateMemoryBuffer(size_t size) {
-    return asapo::FileData(new uint8_t[size]);
+asapo::MessageData CreateMemoryBuffer(size_t size) {
+    return asapo::MessageData(new uint8_t[size]);
 }
 
 
-bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations, uint64_t images_in_set,
-                   const std::string& stream, bool write_files, asapo::SourceType type) {
+bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t iterations, uint64_t messages_in_set,
+                   const std::string& data_source, bool write_files, asapo::SourceType type) {
 
     asapo::Error err;
     if (iterations == 0) {
-        err = producer->SendMetaData("{\"dummy_meta\":\"test\"}", &ProcessAfterMetaDataSend);
+        err = producer->SendMetadata("{\"dummy_meta\":\"test\"}", &ProcessAfterMetaDataSend);
         if (err) {
             std::cerr << "Cannot send metadata: " << err << std::endl;
             return false;
         }
     }
 
-    std::string image_folder = GetStringFromSourceType(type)+asapo::kPathSeparator;
+    std::string message_folder = GetStringFromSourceType(type)+asapo::kPathSeparator;
 
 
     for (uint64_t i = 0; i < iterations; i++) {
         auto buffer = CreateMemoryBuffer(number_of_byte);
-        asapo::EventHeader event_header{i + 1, number_of_byte, std::to_string(i + 1)};
+        asapo::MessageHeader message_header{i + 1, number_of_byte, std::to_string(i + 1)};
         std::string meta = "{\"user_meta\":\"test" + std::to_string(i + 1) + "\"}";
-        if (!stream.empty()) {
-            event_header.file_name = stream + "/" + event_header.file_name;
+        if (!data_source.empty()) {
+            message_header.file_name = data_source + "/" + message_header.file_name;
         }
-        event_header.file_name = image_folder+event_header.file_name;
-        event_header.user_metadata = std::move(meta);
-        if (images_in_set == 1) {
-            auto err = producer->SendData(event_header, std::move(buffer), write_files ? asapo::kDefaultIngestMode :
-                                          asapo::kTransferData, &ProcessAfterSend);
+        message_header.file_name = message_folder+message_header.file_name;
+        message_header.user_metadata = std::move(meta);
+        if (messages_in_set == 1) {
+            auto err = producer->Send(message_header,
+                                      std::move(buffer),
+                                      write_files ? asapo::kDefaultIngestMode :
+                                      asapo::kTransferData,
+                                      "default",
+                                      &ProcessAfterSend);
             if (err) {
                 std::cerr << "Cannot send file: " << err << std::endl;
                 return false;
             }
         } else {
-            for (uint64_t id = 0; id < images_in_set; id++) {
+            for (uint64_t id = 0; id < messages_in_set; id++) {
                 auto buffer = CreateMemoryBuffer(number_of_byte);
-                event_header.id_in_subset = id + 1;
-                event_header.subset_size = images_in_set;
-                event_header.file_id = i + 1;
-                event_header.file_name = std::to_string(i + 1) + "_" + std::to_string(id + 1);
-                if (!stream.empty()) {
-                    event_header.file_name = stream + "/" + event_header.file_name;
+                message_header.dataset_substream = id + 1;
+                message_header.dataset_size = messages_in_set;
+                message_header.message_id = i + 1;
+                message_header.file_name = std::to_string(i + 1) + "_" + std::to_string(id + 1);
+                if (!data_source.empty()) {
+                    message_header.file_name = data_source + "/" + message_header.file_name;
                 }
-                event_header.file_name = image_folder + event_header.file_name;
-                event_header.user_metadata = meta;
-                auto err = producer->SendData(event_header, std::move(buffer), write_files ? asapo::kDefaultIngestMode :
-                                              asapo::kTransferData, &ProcessAfterSend);
+                message_header.file_name = message_folder + message_header.file_name;
+                message_header.user_metadata = meta;
+                auto err =
+                    producer->Send(message_header,
+                                   std::move(buffer),
+                                   write_files ? asapo::kDefaultIngestMode :
+                                   asapo::kTransferData,
+                                   "default",
+                                   &ProcessAfterSend);
                 if (err) {
                     std::cerr << "Cannot send file: " << err << std::endl;
                     return false;
@@ -188,7 +197,7 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
     asapo::Error err;
     auto producer = asapo::Producer::Create(args.discovery_service_endpoint, args.nthreads,
                                             args.mode % 10 == 0 ? asapo::RequestHandlerType::kTcp : asapo::RequestHandlerType::kFilesystem,
-                                            asapo::SourceCredentials{args.mode / 100 == 0 ?asapo::SourceType::kProcessed:asapo::SourceType::kRaw,args.beamtime_id, "", args.stream, args.token }, 3600, &err);
+                                            asapo::SourceCredentials{args.mode / 100 == 0 ? asapo::SourceType::kProcessed : asapo::SourceType::kRaw, args.beamtime_id, "", args.data_source, args.token}, 3600000, &err);
     if(err) {
         std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
         exit(EXIT_FAILURE);
@@ -218,17 +227,17 @@ int main (int argc, char* argv[]) {
     if (args.iterations == 0) {
         iterations_remained = 1; // metadata
     } else {
-        iterations_remained = args.iterations * args.images_in_set;
+        iterations_remained = args.iterations * args.messages_in_set;
     }
 
     system_clock::time_point start_time = system_clock::now();
 
-    if(!SendDummyData(producer.get(), args.number_of_bytes, args.iterations, args.images_in_set, args.stream,
+    if(!SendDummyData(producer.get(), args.number_of_bytes, args.iterations, args.messages_in_set, args.data_source,
                       (args.mode %100) / 10 == 0,args.mode / 100 == 0 ?asapo::SourceType::kProcessed:asapo::SourceType::kRaw)) {
         return EXIT_FAILURE;
     }
 
-    auto err = producer->WaitRequestsFinished(args.timeout_sec * 1000);
+    auto err = producer->WaitRequestsFinished(args.timeout_ms);
     if (err) {
         std::cerr << "Producer exit on timeout " << std::endl;
         exit(EXIT_FAILURE);
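For reference, the <mode> argument above packs three independent switches into decimal digits. A minimal decoding sketch (struct and helper names are illustrative, not part of the patch):

    // Digit layout taken from the usage string: 1xx raw / 0xx processed source,
    // x1x skip writing files, xx1 filesystem handler / xx0 tcp.
    struct ModeFlags {
        bool raw_source;
        bool write_files;
        bool use_filesystem;
    };

    ModeFlags DecodeMode(uint64_t mode) {
        return ModeFlags{mode / 100 == 1,        // source type
                         (mode % 100) / 10 == 0, // write files to disk
                         mode % 10 == 1};        // request handler
    }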
diff --git a/examples/producer/simple-producer/produce.cpp b/examples/producer/simple-producer/produce.cpp
index 7986c4bfa0b5d53f5dbeedabcd8ac0446b84b2a3..57c8b0fd68435876c35521b06efbb2980b6b194e 100644
--- a/examples/producer/simple-producer/produce.cpp
+++ b/examples/producer/simple-producer/produce.cpp
@@ -26,16 +26,16 @@ int main(int argc, char* argv[]) {
     auto beamtime = "asapo_test";
 
     auto producer = asapo::Producer::Create(source, 1, asapo::RequestHandlerType::kTcp,
-                                            asapo::SourceCredentials{beamtime, "", "", ""}, 60, &err);
+                                            asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "", ""}, 60000, &err);
     exit_if_error("Cannot start producer", err);
 
     std::string to_send = "hello";
     auto send_size = to_send.size() + 1;
-    auto buffer =  asapo::FileData(new uint8_t[send_size]);
+    auto buffer =  asapo::MessageData(new uint8_t[send_size]);
     memcpy(buffer.get(), to_send.c_str(), send_size);
 
-    asapo::EventHeader event_header{1, send_size, "processed"+asapo::kPathseparator +"test_file"};
-    err = producer->SendData(event_header, std::move(buffer), asapo::kDefaultIngestMode, &ProcessAfterSend);
+    asapo::MessageHeader message_header{1, send_size, std::string("processed") + asapo::kPathSeparator + "test_file"};
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
     exit_if_error("Cannot send file", err);
 
     err = producer->WaitRequestsFinished(1000);
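Put together, a complete minimal producer against the renamed API looks roughly as follows (a sketch assuming the declarations in this patch; endpoint and credentials are placeholders, error handling trimmed):

    #include <cstring>
    #include <string>
    #include "asapo/asapo_producer.h"

    int main() {
        asapo::Error err;
        auto producer = asapo::Producer::Create(
            "localhost:8400", 1, asapo::RequestHandlerType::kTcp,
            asapo::SourceCredentials{asapo::SourceType::kProcessed, "asapo_test", "", "", ""},
            60000 /* ms, was seconds before this change */, &err);

        std::string to_send = "hello";
        auto send_size = to_send.size() + 1;
        asapo::MessageData buffer{new uint8_t[send_size]};
        memcpy(buffer.get(), to_send.c_str(), send_size);

        asapo::MessageHeader message_header{1, send_size, "processed/test_file"};
        // the stream is now an explicit argument of Send
        err = producer->Send(message_header, std::move(buffer),
                             asapo::kDefaultIngestMode, "default", nullptr);
        return producer->WaitRequestsFinished(1000) == nullptr ? 0 : 1;
    }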
diff --git a/producer/api/cpp/include/asapo/asapo_producer.h b/producer/api/cpp/include/asapo/asapo_producer.h
index 83c074692c0b76ad3c952ac7b6a75bf22382ef1d..1e5ea176cd474cfb25e98e0523ea16ee0ac0418b 100644
--- a/producer/api/cpp/include/asapo/asapo_producer.h
+++ b/producer/api/cpp/include/asapo/asapo_producer.h
@@ -1,14 +1,3 @@
-/** @defgroup producer The Producer Group
- *  This is the producer group
- *  @{
- */
-/** @} */ // end of producer
-
-/**
-* @file asapo_producer.h
-* @ingroup producer
-*/
-
 #ifndef ASAPO_ASAPO_PRODUCER_H
 #define ASAPO_ASAPO_PRODUCER_H
 
diff --git a/producer/api/cpp/include/asapo/producer/common.h b/producer/api/cpp/include/asapo/producer/common.h
index b1612c8cfe7ae742cc6f53b6cf5aca4f3286e95e..5b2da89b06ec02a429928c484e516f1be3c1025b 100644
--- a/producer/api/cpp/include/asapo/producer/common.h
+++ b/producer/api/cpp/include/asapo/producer/common.h
@@ -14,7 +14,7 @@ const uint8_t kMaxProcessingThreads = 32;
 
 struct RequestCallbackPayload {
     GenericRequestHeader original_header;
-    FileData data;
+    MessageData data;
     std::string response;
 };
 
@@ -26,23 +26,23 @@ enum class RequestHandlerType {
 };
 
 
-struct EventHeader {
-    EventHeader() {};
-    EventHeader(uint64_t file_id_i, uint64_t file_size_i, std::string file_name_i,
-                std::string user_metadata_i = "",
-                uint64_t id_in_subset_i = 0,
-                uint64_t subset_size_i = 0 ):
-        file_id{file_id_i}, file_size{file_size_i},
+struct MessageHeader {
+    MessageHeader() {};
+    MessageHeader(uint64_t message_id_i, uint64_t data_size_i, std::string file_name_i,
+                  std::string user_metadata_i = "",
+                  uint64_t dataset_substream_i = 0,
+                  uint64_t dataset_size_i = 0 ):
+        message_id{message_id_i}, data_size{data_size_i},
         file_name{std::move(file_name_i)},
         user_metadata{std::move(user_metadata_i)},
-        id_in_subset{id_in_subset_i},
-        subset_size{subset_size_i} {};
-    uint64_t file_id = 0;
-    uint64_t file_size = 0;
+        dataset_substream{dataset_substream_i},
+        dataset_size{dataset_size_i} {};
+    uint64_t message_id = 0;
+    uint64_t data_size = 0;
     std::string file_name;
     std::string user_metadata;
-    uint64_t id_in_subset = 0;
-    uint64_t subset_size = 0;
+    uint64_t dataset_substream = 0;
+    uint64_t dataset_size = 0;
 };
 
 }
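As a usage sketch of the renamed struct (values are illustrative): a plain message needs only the first three fields, while one message of a multi-part dataset additionally sets the substream and dataset size:

    // Plain message: id, data size in bytes, file name.
    asapo::MessageHeader single{1, 1024, "processed/image_1"};

    // Message 3 of a 4-part dataset: dataset_substream identifies the part
    // (e.g. the module number of a multi-module detector), dataset_size the
    // number of parts that make up the dataset.
    asapo::MessageHeader part{1, 1024, "processed/image_1_3",
                              "{\"user_meta\":\"test\"}", 3, 4};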
diff --git a/producer/api/cpp/include/asapo/producer/producer.h b/producer/api/cpp/include/asapo/producer/producer.h
index 7a9f0e7f88546b7f77bbdd1deb3353afdf87548a..c8de7d637ff86f09f2f976e52f792e23a54cc816 100644
--- a/producer/api/cpp/include/asapo/producer/producer.h
+++ b/producer/api/cpp/include/asapo/producer/producer.h
@@ -10,7 +10,6 @@
 
 namespace asapo {
 
-/** @ingroup producer */
 class Producer {
   public:
     //! Creates a new producer
@@ -19,89 +18,71 @@ class Producer {
      */
     static std::unique_ptr<Producer> Create(const std::string& endpoint, uint8_t n_processing_threads,
                                             asapo::RequestHandlerType type, SourceCredentials source_cred,
-                                            uint64_t timeout_sec,
+                                            uint64_t timeout_ms,
                                             Error* err);
 
     virtual ~Producer() = default;
 
-    //! Get substream information from receiver
+    //! Get stream information from receiver
     /*!
-      \param substream (optional) - substream
-      \param timeout_sec - operation timeout in seconds
-      \return StreamInfo - a structure with substream information
+      \param stream - stream to get information about
+      \param timeout_ms - operation timeout in milliseconds
+      \return StreamInfo - a structure with stream information
     */
-    virtual StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const = 0;
-    virtual StreamInfo GetStreamInfo(uint64_t timeout_sec, Error* err) const = 0;
+    virtual StreamInfo GetStreamInfo(std::string stream, uint64_t timeout_ms, Error* err) const = 0;
 
-  //! Get substream that has the newest ingested data
+  //! Get stream that has the newest ingested data
   /*!
-    \param timeout_ms - operation timeout in seconds
-    \return StreamInfo - a structure with substream information
+    \param timeout_ms - operation timeout in milliseconds
+    \return StreamInfo - a structure with stream information
   */
-    virtual StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err) const = 0;
+    virtual StreamInfo GetLastStream(uint64_t timeout_ms, Error* err) const = 0;
 
-
-    //! Sends data to the receiver
+    //! Sends message to the receiver
     /*!
-      \param event_header - A stucture with the meta information (file name, size, a string with user metadata (JSON format)).
-      \param data - A pointer to the data to send
+      \param message_header - A structure with the meta information (file name, size, a string with user metadata (JSON format)).
+      \param data - A smart pointer to the message data to send, can be nullptr
       \return Error - Will be nullptr on success
     */
-    virtual Error SendData(const EventHeader& event_header, FileData data, uint64_t ingest_mode,
-                           RequestCallback callback) = 0;
+    virtual Error Send(const MessageHeader &message_header,
+                       MessageData data,
+                       uint64_t ingest_mode,
+                       std::string stream,
+                       RequestCallback callback) = 0;
 
 
-    //! Sends data to the receiver - same as SendData - memory should not be freed until send is finished
+    //! Sends data to the receiver - same as Send - memory should not be freed until send is finished
     //! used e.g. for Python bindings
-    virtual Error SendData__(const EventHeader& event_header, void* data, uint64_t ingest_mode,
-                             RequestCallback callback) = 0;
-
-    //! Sends data to the receiver
-    /*!
-      \param event_header - A stucture with the meta information (file name, size, a string with user metadata (JSON format)).
-      \param data - A pointer to the data to send
-      \return Error - Will be nullptr on success
-    */
-    virtual Error SendData(const EventHeader& event_header, std::string substream, FileData data, uint64_t ingest_mode,
-                           RequestCallback callback) = 0;
-
-
-    //! Sends data to the receiver - same as SendData - memory should not be freed until send is finished
-    //! used e.g. for Python bindings
-    virtual Error SendData__(const EventHeader& event_header, std::string substream, void* data, uint64_t ingest_mode,
-                             RequestCallback callback) = 0;
+    virtual Error Send__(const MessageHeader &message_header,
+                         void* data,
+                         uint64_t ingest_mode,
+                         std::string stream,
+                         RequestCallback callback) = 0;
 
     //! Stop processing threads
     //! used e.g. for Python bindings
     virtual void StopThreads__() = 0;
 
-    //! Sends files to the default substream
-    /*!
-      \param event_header - A stucture with the meta information (file name, size is ignored).
-      \param full_path - A full path of the file to send
-      \return Error - Will be nullptr on success
-    */
-    virtual Error SendFile(const EventHeader& event_header, std::string full_path, uint64_t ingest_mode,
-                           RequestCallback callback) = 0;
-
-    //! Sends files to the substream
+    //! Sends message from a file to a stream
     /*!
-      \param event_header - A stucture with the meta information (file name, size is ignored).
-      \param full_path - A full path of the file to send
+      \param message_header - A structure with the meta information (file name, size is ignored).
+      \param file_to_send - A full path of the file to send
       \return Error - Will be nullptr on success
     */
-    virtual Error SendFile(const EventHeader& event_header, std::string substream, std::string full_path,
+    virtual Error SendFile(const MessageHeader &message_header,
+                           std::string file_to_send,
                            uint64_t ingest_mode,
+                           std::string stream,
                            RequestCallback callback) = 0;
 
-    //! Marks substream finished
+    //! Marks stream finished
     /*!
-      \param substream - Name of the substream to makr finished
-      \param last_id - ID of the last image in substream
-      \param next_substream - Name of the next substream (empty if not set)
+      \param stream - Name of the stream to mark finished
+      \param last_id - ID of the last message in stream
+      \param next_stream - Name of the next stream (empty if not set)
       \return Error - Will be nullptr on success
     */
-    virtual Error SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+    virtual Error SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                             RequestCallback callback) = 0;
 
 
@@ -111,7 +92,7 @@ class Producer {
       \param callback - callback function
       \return Error - will be nullptr on success
     */
-    virtual Error SendMetaData(const std::string& metadata, RequestCallback callback) = 0;
+    virtual Error SendMetadata(const std::string& metadata, RequestCallback callback) = 0;
 
     //! Set internal log level
     virtual void SetLogLevel(LogLevel level) = 0;
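A short sketch of the reworked interface (assuming a valid producer instance): every call now takes the stream explicitly, and all timeouts are in milliseconds:

    asapo::MessageHeader header{1, 0, "processed/file1"};
    // for SendFile the size in the header is ignored and read from the file
    auto err = producer->SendFile(header, "/tmp/file1",
                                  asapo::kDefaultIngestMode, "scan1", nullptr);

    // close "scan1" after its last message and point consumers to "scan2"
    err = producer->SendStreamFinishedFlag("scan1", 1, "scan2", nullptr);

    asapo::Error info_err;
    auto info = producer->GetStreamInfo("scan1", 5000 /* ms */, &info_err);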
diff --git a/producer/api/cpp/src/producer.cpp b/producer/api/cpp/src/producer.cpp
index 63a2d488baff067f4cce2862ad0475ba11bf4fee..cb94f8d0c08d1cdd8abb4deefcde65ca1f780f7b 100644
--- a/producer/api/cpp/src/producer.cpp
+++ b/producer/api/cpp/src/producer.cpp
@@ -3,7 +3,7 @@
 #include "asapo/producer/producer_error.h"
 
 std::unique_ptr<asapo::Producer> asapo::Producer::Create(const std::string& endpoint, uint8_t n_processing_threads,
-        asapo::RequestHandlerType type, SourceCredentials source_cred, uint64_t timeout_sec, Error* err) {
+                                                         asapo::RequestHandlerType type, SourceCredentials source_cred, uint64_t timeout_ms, Error* err) {
 
     if (n_processing_threads > kMaxProcessingThreads || n_processing_threads == 0) {
         *err = ProducerErrorTemplates::kWrongInput.Generate("Set number of processing threads > 0 and <= " + std::to_string(
@@ -13,7 +13,7 @@ std::unique_ptr<asapo::Producer> asapo::Producer::Create(const std::string& endp
 
     std::unique_ptr<asapo::Producer> producer;
     try {
-        producer.reset(new ProducerImpl(endpoint, n_processing_threads, timeout_sec, type));
+        producer.reset(new ProducerImpl(endpoint, n_processing_threads, timeout_ms, type));
     } catch (const std::exception& ex) {
         *err = TextError(ex.what());
         return nullptr;
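Since the factory function now takes milliseconds, callers migrating from the previous release have to scale their timeout argument; the thread-count validation above behaves as in this sketch:

    asapo::Error err;
    // n_processing_threads must be in (0, kMaxProcessingThreads]; otherwise
    // Create returns nullptr and err == ProducerErrorTemplates::kWrongInput.
    auto producer = asapo::Producer::Create(
        "host:8400", 0, asapo::RequestHandlerType::kTcp,
        asapo::SourceCredentials{asapo::SourceType::kRaw, "bt", "", "", ""},
        3600000 /* 1 h, in ms */, &err);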
diff --git a/producer/api/cpp/src/producer_impl.cpp b/producer/api/cpp/src/producer_impl.cpp
index 9399cd327647cc6976d9ae9f485ec69fc5f55fcb..0ed76a1b699e4a3335eba9812ed8019b7306c802 100644
--- a/producer/api/cpp/src/producer_impl.cpp
+++ b/producer/api/cpp/src/producer_impl.cpp
@@ -15,13 +15,13 @@
 namespace  asapo {
 
 const size_t ProducerImpl::kDiscoveryServiceUpdateFrequencyMs = 10000; // 10s
-const std::string ProducerImpl::kFinishSubStreamKeyword = "asapo_finish_substream";
-const std::string ProducerImpl::kNoNextSubStreamKeyword = "asapo_no_next";
+const std::string ProducerImpl::kFinishStreamKeyword = "asapo_finish_stream";
+const std::string ProducerImpl::kNoNextStreamKeyword = "asapo_no_next";
 
 
-ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_sec,
+ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_ms,
                            asapo::RequestHandlerType type):
-    log__{GetDefaultProducerLogger()}, timeout_sec_{timeout_sec} {
+    log__{GetDefaultProducerLogger()}, timeout_ms_{timeout_ms} {
     switch (type) {
     case RequestHandlerType::kTcp:
         discovery_service_.reset(new ReceiverDiscoveryService{endpoint, ProducerImpl::kDiscoveryServiceUpdateFrequencyMs});
@@ -34,14 +34,14 @@ ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, u
     request_pool__.reset(new RequestPool{n_processing_threads, request_handler_factory_.get(), log__});
 }
 
-GenericRequestHeader ProducerImpl::GenerateNextSendRequest(const EventHeader& event_header, std::string substream,
-        uint64_t ingest_mode) {
-    GenericRequestHeader request{kOpcodeTransferData, event_header.file_id, event_header.file_size,
-                                 event_header.user_metadata.size(), event_header.file_name, substream};
-    if (event_header.id_in_subset != 0) {
-        request.op_code = kOpcodeTransferSubsetData;
-        request.custom_data[kPosDataSetId] = event_header.id_in_subset;
-        request.custom_data[kPosDataSetSize] = event_header.subset_size;
+GenericRequestHeader ProducerImpl::GenerateNextSendRequest(const MessageHeader& message_header, std::string stream,
+                                                           uint64_t ingest_mode) {
+    GenericRequestHeader request{kOpcodeTransferData, message_header.message_id, message_header.data_size,
+                                 message_header.user_metadata.size(), message_header.file_name, stream};
+    if (message_header.dataset_substream != 0) {
+        request.op_code = kOpcodeTransferDatasetData;
+        request.custom_data[kPosDataSetId] = message_header.dataset_substream;
+        request.custom_data[kPosDataSetSize] = message_header.dataset_size;
     }
     request.custom_data[kPosIngestMode] = ingest_mode;
     return request;
@@ -71,34 +71,39 @@ Error CheckIngestMode(uint64_t ingest_mode) {
     return nullptr;
 }
 
-Error CheckProducerRequest(const EventHeader& event_header, uint64_t ingest_mode) {
-    if (event_header.file_name.size() > kMaxMessageSize) {
+Error CheckProducerRequest(const MessageHeader& message_header, uint64_t ingest_mode, const std::string& stream) {
+
+    if (stream.empty()) {
+        return ProducerErrorTemplates::kWrongInput.Generate("stream empty");
+    }
+
+    if (message_header.file_name.size() > kMaxMessageSize) {
         return ProducerErrorTemplates::kWrongInput.Generate("too long filename");
     }
 
-    if (event_header.file_name.empty() ) {
+    if (message_header.file_name.empty() ) {
         return ProducerErrorTemplates::kWrongInput.Generate("empty filename");
     }
 
-    if (event_header.id_in_subset > 0 && event_header.subset_size == 0) {
-        return ProducerErrorTemplates::kWrongInput.Generate("subset dimensions");
+    if (message_header.dataset_substream > 0 && message_header.dataset_size == 0) {
+        return ProducerErrorTemplates::kWrongInput.Generate("dataset dimensions");
     }
 
-    if (event_header.file_id == 0) {
-        return ProducerErrorTemplates::kWrongInput.Generate("data tuple id should be positive");
+    if (message_header.message_id == 0) {
+        return ProducerErrorTemplates::kWrongInput.Generate("message id should be positive");
     }
 
     return CheckIngestMode(ingest_mode);
 }
 
-Error ProducerImpl::Send(const EventHeader& event_header,
-                         std::string substream,
-                         FileData data,
+Error ProducerImpl::Send(const MessageHeader& message_header,
+                         std::string stream,
+                         MessageData data,
                          std::string full_path,
                          uint64_t ingest_mode,
                          RequestCallback callback,
                          bool manage_data_memory) {
-    auto err = CheckProducerRequest(event_header, ingest_mode);
+    auto err = CheckProducerRequest(message_header, ingest_mode, stream);
     if (err) {
         if (!manage_data_memory) {
             data.release();
@@ -107,10 +112,10 @@ Error ProducerImpl::Send(const EventHeader& event_header,
         return err;
     }
 
-    auto request_header = GenerateNextSendRequest(event_header, std::move(substream), ingest_mode);
+    auto request_header = GenerateNextSendRequest(message_header, std::move(stream), ingest_mode);
 
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(request_header),
-                std::move(data), std::move(event_header.user_metadata), std::move(full_path), callback, manage_data_memory, timeout_sec_ * 1000}
+                std::move(data), std::move(message_header.user_metadata), std::move(full_path), callback, manage_data_memory, timeout_ms_}
     });
 
 }
@@ -119,54 +124,43 @@ bool WandTransferData(uint64_t ingest_mode) {
     return ingest_mode & IngestModeFlags::kTransferData;
 }
 
-Error CheckData(uint64_t ingest_mode, const EventHeader& event_header, const FileData* data) {
+Error CheckData(uint64_t ingest_mode, const MessageHeader& message_header, const MessageData* data) {
     if (WandTransferData(ingest_mode)) {
         if (*data == nullptr) {
             return ProducerErrorTemplates::kWrongInput.Generate("need data for this ingest mode");
         }
-        if (event_header.file_size == 0) {
+        if (message_header.data_size == 0) {
             return ProducerErrorTemplates::kWrongInput.Generate("zero data size");
         }
     }
     return nullptr;
 }
 
-Error ProducerImpl::SendData(const EventHeader& event_header, FileData data,
-                             uint64_t ingest_mode, RequestCallback callback) {
-    return SendData(event_header, kDefaultSubstream, std::move(data), ingest_mode, callback);
-}
-
-Error ProducerImpl::SendData(const EventHeader& event_header,
-                             std::string substream,
-                             FileData data,
-                             uint64_t ingest_mode,
-                             RequestCallback callback) {
-    if (auto err = CheckData(ingest_mode, event_header, &data)) {
+Error ProducerImpl::Send(const MessageHeader &message_header,
+                         MessageData data,
+                         uint64_t ingest_mode,
+                         std::string stream,
+                         RequestCallback callback) {
+    if (auto err = CheckData(ingest_mode, message_header, &data)) {
         return err;
     }
-    return Send(event_header, std::move(substream), std::move(data), "", ingest_mode, callback, true);
+    return Send(message_header, std::move(stream), std::move(data), "", ingest_mode, callback, true);
 
 }
 
-Error ProducerImpl::SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+Error ProducerImpl::SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                               RequestCallback callback) {
-    EventHeader event_header;
-    event_header.file_name = kFinishSubStreamKeyword;
-    event_header.file_size = 0;
-    event_header.file_id = last_id + 1;
-    if (next_substream.empty()) {
-        next_substream = kNoNextSubStreamKeyword;
+    MessageHeader message_header;
+    message_header.file_name = kFinishStreamKeyword;
+    message_header.data_size = 0;
+    message_header.message_id = last_id + 1;
+    if (next_stream.empty()) {
+        next_stream = kNoNextStreamKeyword;
     }
-    event_header.user_metadata =  std::string("{\"next_substream\":") + "\"" + next_substream + "\"}";
-    return Send(event_header, std::move(substream), nullptr, "", IngestModeFlags::kTransferMetaDataOnly, callback, true);
+    message_header.user_metadata =  std::string("{\"next_stream\":") + "\"" + next_stream + "\"}";
+    return Send(message_header, std::move(stream), nullptr, "", IngestModeFlags::kTransferMetaDataOnly, callback, true);
 }
 
-Error ProducerImpl::SendFile(const EventHeader& event_header, std::string full_path, uint64_t ingest_mode,
-                             RequestCallback callback) {
-    return SendFile(event_header, kDefaultSubstream, std::move(full_path), ingest_mode, callback);
-}
-
-
 void ProducerImpl::SetLogLevel(LogLevel level) {
     log__->SetLogLevel(level);
 }
@@ -186,8 +180,8 @@ Error ProducerImpl::SetCredentials(SourceCredentials source_cred) {
         return ProducerErrorTemplates::kWrongInput.Generate("credentials already set");
     }
 
-    if (source_cred.stream.empty()) {
-        source_cred.stream = SourceCredentials::kDefaultStream;
+    if (source_cred.data_source.empty()) {
+        source_cred.data_source = SourceCredentials::kDefaultStream;
     }
 
     if (source_cred.beamline.empty()) {
@@ -215,36 +209,29 @@ Error ProducerImpl::SetCredentials(SourceCredentials source_cred) {
     return nullptr;
 }
 
-Error ProducerImpl::SendMetaData(const std::string& metadata, RequestCallback callback) {
+Error ProducerImpl::SendMetadata(const std::string& metadata, RequestCallback callback) {
     GenericRequestHeader request_header{kOpcodeTransferMetaData, 0, metadata.size(), 0, "beamtime_global.meta"};
     request_header.custom_data[kPosIngestMode] = asapo::IngestModeFlags::kTransferData | asapo::IngestModeFlags::kStoreInDatabase;
-    FileData data{new uint8_t[metadata.size()]};
+    MessageData data{new uint8_t[metadata.size()]};
     strncpy((char*)data.get(), metadata.c_str(), metadata.size());
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(request_header),
-                std::move(data), "", "", callback, true, timeout_sec_}
+                std::move(data), "", "", callback, true, timeout_ms_}
     });
 }
 
-Error ProducerImpl::SendData__(const EventHeader& event_header,
-                               std::string substream,
-                               void* data,
-                               uint64_t ingest_mode,
-                               RequestCallback callback) {
-    FileData data_wrapped = FileData{(uint8_t*)data};
+Error ProducerImpl::Send__(const MessageHeader &message_header,
+                           void* data,
+                           uint64_t ingest_mode,
+                           std::string stream,
+                           RequestCallback callback) {
+    MessageData data_wrapped = MessageData{(uint8_t*)data};
 
-    if (auto err = CheckData(ingest_mode, event_header, &data_wrapped)) {
+    if (auto err = CheckData(ingest_mode, message_header, &data_wrapped)) {
         data_wrapped.release();
         return err;
     }
 
-    return Send(std::move(event_header), std::move(substream), std::move(data_wrapped), "", ingest_mode, callback, false);
-}
-
-Error ProducerImpl::SendData__(const EventHeader& event_header,
-                               void* data,
-                               uint64_t ingest_mode,
-                               RequestCallback callback) {
-    return SendData__(event_header, kDefaultSubstream, data, ingest_mode, callback);
+    return Send(std::move(message_header), std::move(stream), std::move(data_wrapped), "", ingest_mode, callback, false);
 }
 
 uint64_t  ProducerImpl::GetRequestsQueueSize() {
@@ -262,16 +249,16 @@ Error ProducerImpl::WaitRequestsFinished(uint64_t timeout_ms) {
 void ProducerImpl::StopThreads__() {
     request_pool__->StopThreads();
 }
-Error ProducerImpl::SendFile(const EventHeader& event_header,
-                             std::string substream,
+Error ProducerImpl::SendFile(const MessageHeader &message_header,
                              std::string full_path,
                              uint64_t ingest_mode,
+                             std::string stream,
                              RequestCallback callback) {
     if (full_path.empty()) {
         return ProducerErrorTemplates::kWrongInput.Generate("empty filename");
     }
 
-    return Send(event_header, std::move(substream), nullptr, std::move(full_path), ingest_mode, callback, true);
+    return Send(message_header, std::move(stream), nullptr, std::move(full_path), ingest_mode, callback, true);
 
 }
 
@@ -302,9 +289,9 @@ void ActivatePromise(std::shared_ptr<std::promise<StreamInfoResult>> promise, Re
     } catch(...) {}
 }
 
-StreamInfo GetInfoFromCallback(std::future<StreamInfoResult>* promiseResult, uint64_t timeout_sec, Error* err) {
+StreamInfo GetInfoFromCallback(std::future<StreamInfoResult>* promiseResult, uint64_t timeout_ms, Error* err) {
     try {
-        auto status = promiseResult->wait_for(std::chrono::milliseconds(timeout_sec * 1000));
+        auto status = promiseResult->wait_for(std::chrono::milliseconds(timeout_ms));
         if (status == std::future_status::ready) {
             auto res = promiseResult->get();
             if (res.err == nullptr) {
@@ -321,42 +308,42 @@ StreamInfo GetInfoFromCallback(std::future<StreamInfoResult>* promiseResult, uin
 }
 
 
-GenericRequestHeader CreateRequestHeaderFromOp(StreamRequestOp op,std::string substream) {
+GenericRequestHeader CreateRequestHeaderFromOp(StreamRequestOp op,std::string stream) {
     switch (op) {
         case StreamRequestOp::kStreamInfo:
-            return GenericRequestHeader{kOpcodeStreamInfo, 0, 0, 0, "", substream};
+            return GenericRequestHeader{kOpcodeStreamInfo, 0, 0, 0, "", stream};
         case StreamRequestOp::kLastStream:
             return GenericRequestHeader{kOpcodeLastStream, 0, 0, 0, "", ""};
     }
 }
 
-StreamInfo ProducerImpl::StreamRequest(StreamRequestOp op,std::string substream, uint64_t timeout_sec, Error* err) const {
-    auto header = CreateRequestHeaderFromOp(op,substream);
+StreamInfo ProducerImpl::StreamRequest(StreamRequestOp op,std::string stream, uint64_t timeout_ms, Error* err) const {
+    auto header = CreateRequestHeaderFromOp(op,stream);
     std::unique_ptr<std::promise<StreamInfoResult>> promise {new std::promise<StreamInfoResult>};
     std::future<StreamInfoResult> promiseResult = promise->get_future();
 
     *err = request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(header),
                                                                                             nullptr, "", "",
                                                                                             unwrap_callback(ActivatePromise, std::move(promise)), true,
-                                                                                            timeout_sec * 1000}
+                                                                                            timeout_ms}
     }, true);
     if (*err) {
         return StreamInfo{};
     }
-    return GetInfoFromCallback(&promiseResult, timeout_sec + 2,
+    return GetInfoFromCallback(&promiseResult, timeout_ms + 2000,
                                err); // we give two more sec for request to exit by timeout
 }
 
-StreamInfo ProducerImpl::GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const {
-    return StreamRequest(StreamRequestOp::kStreamInfo,substream,timeout_sec,err);
-}
-
-StreamInfo ProducerImpl::GetStreamInfo(uint64_t timeout_sec, Error* err) const {
-    return GetStreamInfo(kDefaultSubstream, timeout_sec, err);
+StreamInfo ProducerImpl::GetStreamInfo(std::string stream, uint64_t timeout_ms, Error* err) const {
+    if (stream.empty()) {
+        *err = ProducerErrorTemplates::kWrongInput.Generate("stream empty");
+        return {};
+    }
+    return StreamRequest(StreamRequestOp::kStreamInfo,stream,timeout_ms,err);
 }
 
-StreamInfo ProducerImpl::GetLastSubstream(uint64_t timeout_sec, Error* err) const {
-    return StreamRequest(StreamRequestOp::kLastStream,"",timeout_sec,err);
+StreamInfo ProducerImpl::GetLastStream(uint64_t timeout_ms, Error* err) const {
+    return StreamRequest(StreamRequestOp::kLastStream,"",timeout_ms,err);
 }
 
 uint64_t ProducerImpl::GetRequestsQueueVolumeMb() {
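The finish-flag implementation above reduces to an ordinary metadata-only Send; the message it emits is equivalent to the following sketch (keyword constants as defined at the top of the file; last_id and next_stream stand for the caller's arguments):

    asapo::MessageHeader message_header;
    message_header.file_name = "asapo_finish_stream";  // kFinishStreamKeyword
    message_header.data_size = 0;
    message_header.message_id = last_id + 1;           // one past the last real message
    // an empty next_stream falls back to "asapo_no_next" (kNoNextStreamKeyword)
    message_header.user_metadata = "{\"next_stream\":\"" + next_stream + "\"}";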
diff --git a/producer/api/cpp/src/producer_impl.h b/producer/api/cpp/src/producer_impl.h
index d5a4dd435bdf6e0e9642183b58834c01a58e6217..9108a20dad3bdbbc6f622c268a0a74e930dbe66f 100644
--- a/producer/api/cpp/src/producer_impl.h
+++ b/producer/api/cpp/src/producer_impl.h
@@ -24,38 +24,37 @@ class ProducerImpl : public Producer {
   std::unique_ptr<RequestHandlerFactory> request_handler_factory_;
  public:
   static const size_t kDiscoveryServiceUpdateFrequencyMs;
-  static const std::string kFinishSubStreamKeyword;
-  static const std::string kNoNextSubStreamKeyword;
+  static const std::string kFinishStreamKeyword;
+  static const std::string kNoNextStreamKeyword;
 
-  explicit ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_sec,
+  explicit ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_ms,
                         asapo::RequestHandlerType type);
   ProducerImpl(const ProducerImpl &) = delete;
   ProducerImpl &operator=(const ProducerImpl &) = delete;
 
-  StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const override;
-  StreamInfo GetStreamInfo(uint64_t timeout_sec, Error* err) const override;
-  StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err) const override;
+  StreamInfo GetStreamInfo(std::string stream, uint64_t timeout_ms, Error* err) const override;
+  StreamInfo GetLastStream(uint64_t timeout_ms, Error* err) const override;
 
   void SetLogLevel(LogLevel level) override;
   void EnableLocalLog(bool enable) override;
   void EnableRemoteLog(bool enable) override;
-  Error SendData(const EventHeader &event_header,
-                 FileData data,
-                 uint64_t ingest_mode,
-                 RequestCallback callback) override;
-  Error SendData__(const EventHeader &event_header, void* data, uint64_t ingest_mode,
-                   RequestCallback callback) override;
-  Error SendData(const EventHeader &event_header, std::string substream, FileData data, uint64_t ingest_mode,
-                 RequestCallback callback) override;
-  Error SendData__(const EventHeader &event_header, std::string substream, void* data, uint64_t ingest_mode,
-                   RequestCallback callback) override;
+  Error Send(const MessageHeader &message_header,
+             MessageData data,
+             uint64_t ingest_mode,
+             std::string stream,
+             RequestCallback callback) override;
+  Error Send__(const MessageHeader &message_header,
+               void* data,
+               uint64_t ingest_mode,
+               std::string stream,
+               RequestCallback callback) override;
   void StopThreads__() override;
-  Error SendFile(const EventHeader &event_header, std::string full_path, uint64_t ingest_mode,
-                 RequestCallback callback) override;
-  Error SendFile(const EventHeader &event_header, std::string substream, std::string full_path, uint64_t ingest_mode,
+  Error SendFile(const MessageHeader &message_header,
+                 std::string full_path,
+                 uint64_t ingest_mode,
+                 std::string stream,
                  RequestCallback callback) override;
-
-  Error SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+  Error SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                   RequestCallback callback) override;
 
   AbstractLogger* log__;
@@ -63,20 +62,20 @@ class ProducerImpl : public Producer {
 
   Error SetCredentials(SourceCredentials source_cred) override;
 
-  Error SendMetaData(const std::string &metadata, RequestCallback callback) override;
+  Error SendMetadata(const std::string &metadata, RequestCallback callback) override;
   uint64_t GetRequestsQueueSize() override;
   Error WaitRequestsFinished(uint64_t timeout_ms) override;
   uint64_t GetRequestsQueueVolumeMb() override;
   void SetRequestsQueueLimits(uint64_t size, uint64_t volume) override;
  private:
-  StreamInfo StreamRequest(StreamRequestOp op, std::string substream, uint64_t timeout_sec, Error* err) const;
-  Error Send(const EventHeader &event_header, std::string substream, FileData data, std::string full_path,
+  StreamInfo StreamRequest(StreamRequestOp op, std::string stream, uint64_t timeout_ms, Error* err) const;
+  Error Send(const MessageHeader &message_header, std::string stream, MessageData data, std::string full_path,
              uint64_t ingest_mode,
              RequestCallback callback, bool manage_data_memory);
-  GenericRequestHeader GenerateNextSendRequest(const EventHeader &event_header, std::string substream,
+  GenericRequestHeader GenerateNextSendRequest(const MessageHeader &message_header, std::string stream,
                                                uint64_t ingest_mode);
   std::string source_cred_string_;
-  uint64_t timeout_sec_;
+  uint64_t timeout_ms_;
 };
 
 struct StreamInfoResult {
diff --git a/producer/api/cpp/src/producer_request.cpp b/producer/api/cpp/src/producer_request.cpp
index 5011a4b8ceb7be8d04b5e64e8f22eae2c9b14bd6..73a64c2a61052aae9d9cc68f4c717b77591e99ef 100644
--- a/producer/api/cpp/src/producer_request.cpp
+++ b/producer/api/cpp/src/producer_request.cpp
@@ -4,7 +4,7 @@
 namespace asapo {
 
 bool ProducerRequest::DataFromFile() const {
-    if (data != nullptr || original_filepath.empty() || !NeedSendData()) {
+    if (data != nullptr || original_filepath.empty() || !NeedSend()) {
         return false;
     }
     return true;
@@ -12,7 +12,7 @@ bool ProducerRequest::DataFromFile() const {
 
 ProducerRequest::ProducerRequest(std::string source_credentials,
                                  GenericRequestHeader h,
-                                 FileData data,
+                                 MessageData data,
                                  std::string metadata,
                                  std::string original_filepath,
                                  RequestCallback callback,
@@ -26,8 +26,8 @@ ProducerRequest::ProducerRequest(std::string source_credentials,
     manage_data_memory{manage_data_memory} {
 }
 
-bool ProducerRequest::NeedSendData() const {
-    if (header.op_code == kOpcodeTransferData || header.op_code == kOpcodeTransferSubsetData) {
+bool ProducerRequest::NeedSend() const {
+    if (header.op_code == kOpcodeTransferData || header.op_code == kOpcodeTransferDatasetData) {
         return header.custom_data[kPosIngestMode] & IngestModeFlags::kTransferData;
     }
     return true;
@@ -49,11 +49,11 @@ Error ProducerRequest::UpdateDataSizeFromFileIfNeeded(const IO* io) {
     }
 
     Error err;
-    auto finfo = io->GetFileInfo(original_filepath, &err);
+    auto message_meta = io->GetMessageMeta(original_filepath, &err);
     if (err) {
         return ProducerErrorTemplates::kLocalIOError.Generate(err->Explain());
     }
-    header.data_size = finfo.size;
+    header.data_size = message_meta.size;
     return nullptr;
 }
 
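The two renamed predicates drive where the payload comes from; condensed, the decision logic for a ProducerRequest* request reads (a sketch, not code from the patch):

    // Data requests honour the kTransferData ingest flag; metadata and
    // stream operations are always sent.
    bool need_send = request->NeedSend();
    // Read from disk only when no in-memory buffer was supplied.
    bool from_file = need_send && request->data == nullptr
                     && !request->original_filepath.empty();
    bool from_memory = need_send && !from_file;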
diff --git a/producer/api/cpp/src/producer_request.h b/producer/api/cpp/src/producer_request.h
index 47a435efb583f49157dd1558f126a3003d8e8623..4d0b73fc1eec36fcb270221f62a570d36f31230d 100644
--- a/producer/api/cpp/src/producer_request.h
+++ b/producer/api/cpp/src/producer_request.h
@@ -12,7 +12,7 @@ namespace asapo {
 class ProducerRequest : public GenericRequest {
   public:
     ~ProducerRequest();
-    ProducerRequest(std::string source_credentials, GenericRequestHeader header, FileData data,
+    ProducerRequest(std::string source_credentials, GenericRequestHeader header, MessageData data,
                     std::string metadata,
                     std::string original_filepath,
                     RequestCallback callback,
@@ -20,12 +20,12 @@ class ProducerRequest : public GenericRequest {
                     uint64_t timeout_ms);
     std::string source_credentials;
     std::string metadata;
-    FileData data;
+    MessageData data;
     std::string original_filepath;
     RequestCallback callback;
     bool manage_data_memory;
     bool DataFromFile() const;
-    bool NeedSendData() const;
+    bool NeedSend() const;
     bool NeedSendMetaData() const;
     Error UpdateDataSizeFromFileIfNeeded(const IO* io);
 
diff --git a/producer/api/cpp/src/request_handler_filesystem.cpp b/producer/api/cpp/src/request_handler_filesystem.cpp
index 3d2dacc23bde4653653daa1af58e789fb6702121..39a8c3d6935471e656d8a65ca8fecbb233c457e0 100644
--- a/producer/api/cpp/src/request_handler_filesystem.cpp
+++ b/producer/api/cpp/src/request_handler_filesystem.cpp
@@ -39,8 +39,8 @@ bool RequestHandlerFilesystem::ProcessRequestUnlocked(GenericRequest* request, b
 }
 
 void RequestHandlerFilesystem::ProcessRequestTimeout(GenericRequest* request) {
-    log__->Error("request timeout, id:" + std::to_string(request->header.data_id) + " to " + request->header.substream +
-                 " substream");
+    log__->Error("request timeout, id:" + std::to_string(request->header.data_id) + " to " + request->header.stream +
+                 " stream");
 }
 
 }
diff --git a/producer/api/cpp/src/request_handler_tcp.cpp b/producer/api/cpp/src/request_handler_tcp.cpp
index eb9356121cead25edaf9997ed4011a5b3fb441a8..157c1212e6b24089d7ed8ace19d7de3f2fc26da5 100644
--- a/producer/api/cpp/src/request_handler_tcp.cpp
+++ b/producer/api/cpp/src/request_handler_tcp.cpp
@@ -61,7 +61,7 @@ Error RequestHandlerTcp::SendRequestContent(const ProducerRequest* request) {
         }
     }
 
-    if (request->NeedSendData()) {
+    if (request->NeedSend()) {
         if (request->DataFromFile()) {
             io_error = io__->SendFile(sd_,  request->original_filepath, (size_t)request->header.data_size);
         } else {
@@ -77,7 +77,7 @@ Error RequestHandlerTcp::SendRequestContent(const ProducerRequest* request) {
 
 Error RequestHandlerTcp::ReceiveResponse(const GenericRequestHeader& request_header, std::string* response) {
     Error err;
-    SendDataResponse sendDataResponse;
+    SendResponse sendDataResponse;
     io__->Receive(sd_, &sendDataResponse, sizeof(sendDataResponse), &err);
     if(err != nullptr) {
         return err;
@@ -222,7 +222,7 @@ void RequestHandlerTcp::ProcessRequestCallback(Error err, ProducerRequest* reque
 }
 
 
-bool RequestHandlerTcp::SendDataToOneOfTheReceivers(ProducerRequest* request, bool* retry) {
+bool RequestHandlerTcp::SendToOneOfTheReceivers(ProducerRequest* request, bool* retry) {
     for (auto receiver_uri : receivers_list_) {
         if (Disconnected()) {
             auto err = ConnectToReceiver(request->source_credentials, receiver_uri);
@@ -269,7 +269,7 @@ bool RequestHandlerTcp::ProcessRequestUnlocked(GenericRequest* request, bool* re
     if (NeedRebalance()) {
         CloseConnectionToPeformRebalance();
     }
-    return SendDataToOneOfTheReceivers(producer_request, retry);
+    return SendToOneOfTheReceivers(producer_request, retry);
 }
 
 bool RequestHandlerTcp::Connected() {
@@ -297,8 +297,8 @@ void RequestHandlerTcp::TearDownProcessingRequestLocked(bool request_processed_s
 void RequestHandlerTcp::ProcessRequestTimeout(GenericRequest* request) {
     auto producer_request = static_cast<ProducerRequest*>(request);
     auto err_string = "request id:" + std::to_string(request->header.data_id) + ", opcode: " + std::to_string(
-                          request->header.op_code) + " for " + request->header.substream +
-                      " substream";
+                          request->header.op_code) + " for " + request->header.stream +
+                      " stream";
     log__->Error("timeout " + err_string);
 
     auto err = ProducerErrorTemplates::kTimeout.Generate(err_string);
diff --git a/producer/api/cpp/src/request_handler_tcp.h b/producer/api/cpp/src/request_handler_tcp.h
index 0de896610fb9324b29cf873cb5442d841a5b1447..a891a81d5e139da8c60f00d98997cb8c70e2ea1e 100644
--- a/producer/api/cpp/src/request_handler_tcp.h
+++ b/producer/api/cpp/src/request_handler_tcp.h
@@ -32,7 +32,7 @@ class RequestHandlerTcp: public RequestHandler {
   private:
     Error Authorize(const std::string& source_credentials);
     Error ConnectToReceiver(const std::string& source_credentials, const std::string& receiver_address);
-    bool SendDataToOneOfTheReceivers(ProducerRequest* request, bool* retry);
+    bool SendToOneOfTheReceivers(ProducerRequest* request, bool* retry);
     Error SendRequestContent(const ProducerRequest* request);
     Error ReceiveResponse(const GenericRequestHeader& request_header, std::string* response);
     Error TrySendToReceiver(const ProducerRequest* request, std::string* response);
diff --git a/producer/api/cpp/unittests/test_producer.cpp b/producer/api/cpp/unittests/test_producer.cpp
index da219ba91417e7b98928bb36abb93567b697c2a3..984f10225e06f008a3df5161d32206d85e446d15 100644
--- a/producer/api/cpp/unittests/test_producer.cpp
+++ b/producer/api/cpp/unittests/test_producer.cpp
@@ -15,7 +15,7 @@ namespace {
 TEST(CreateProducer, TcpProducer) {
     asapo::Error err;
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("endpoint", 4, asapo::RequestHandlerType::kTcp,
-                                                SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600, &err);
+                                                SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600000, &err);
     ASSERT_THAT(dynamic_cast<asapo::ProducerImpl*>(producer.get()), Ne(nullptr));
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -24,7 +24,7 @@ TEST(CreateProducer, ErrorBeamtime) {
     asapo::Error err;
     std::string expected_beamtimeid(asapo::kMaxMessageSize * 10, 'a');
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("endpoint", 4, asapo::RequestHandlerType::kTcp,
-                                                SourceCredentials{asapo::SourceType::kRaw,expected_beamtimeid, "", "", ""}, 3600, &err);
+                                                SourceCredentials{asapo::SourceType::kRaw,expected_beamtimeid, "", "", ""}, 3600000, &err);
     ASSERT_THAT(producer, Eq(nullptr));
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
@@ -33,7 +33,7 @@ TEST(CreateProducer, ErrorOnBothAutoBeamlineBeamtime) {
     asapo::SourceCredentials creds{asapo::SourceType::kRaw,"auto", "auto", "subname", "token"};
     asapo::Error err;
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("endpoint", 4, asapo::RequestHandlerType::kTcp,
-                                                creds, 3600, &err);
+                                                creds, 3600000, &err);
     ASSERT_THAT(producer, Eq(nullptr));
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
@@ -41,7 +41,7 @@ TEST(CreateProducer, ErrorOnBothAutoBeamlineBeamtime) {
 TEST(CreateProducer, TooManyThreads) {
     asapo::Error err;
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("", asapo::kMaxProcessingThreads + 1,
-                                                asapo::RequestHandlerType::kTcp, SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600, &err);
+                                                asapo::RequestHandlerType::kTcp, SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600000, &err);
     ASSERT_THAT(producer, Eq(nullptr));
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
@@ -50,7 +50,7 @@ TEST(CreateProducer, TooManyThreads) {
 TEST(CreateProducer, ZeroThreads) {
     asapo::Error err;
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("", 0,
-                                                asapo::RequestHandlerType::kTcp, SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600, &err);
+                                                asapo::RequestHandlerType::kTcp, SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600000, &err);
     ASSERT_THAT(producer, Eq(nullptr));
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
@@ -59,11 +59,11 @@ TEST(CreateProducer, ZeroThreads) {
 TEST(Producer, SimpleWorkflowWihoutConnection) {
     asapo::Error err;
     std::unique_ptr<asapo::Producer> producer = asapo::Producer::Create("hello", 5, asapo::RequestHandlerType::kTcp,
-                                                SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600,
+                                                SourceCredentials{asapo::SourceType::kRaw,"bt", "", "", ""}, 3600000,
                                                 &err);
 
-    asapo::EventHeader event_header{1, 1, "test"};
-    auto err_send = producer->SendData(event_header, nullptr, asapo::kTransferMetaDataOnly, nullptr);
+    asapo::MessageHeader message_header{1, 1, "test"};
+    auto err_send = producer->Send(message_header, nullptr, asapo::kTransferMetaDataOnly, "stream", nullptr);
 
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
     ASSERT_THAT(producer, Ne(nullptr));
diff --git a/producer/api/cpp/unittests/test_producer_impl.cpp b/producer/api/cpp/unittests/test_producer_impl.cpp
index 3947db397e8d7578f38e2f2b9628179e7f164ba6..e9ea117ee49a9dbadcf1dff210de7655688d2c6d 100644
--- a/producer/api/cpp/unittests/test_producer_impl.cpp
+++ b/producer/api/cpp/unittests/test_producer_impl.cpp
@@ -30,10 +30,10 @@ using ::testing::HasSubstr;
 using asapo::RequestPool;
 using asapo::ProducerRequest;
 
-MATCHER_P10(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_id, file_size, message, substream,
+MATCHER_P10(M_CheckSendRequest, op_code, source_credentials, metadata, file_id, file_size, message, stream,
             ingest_mode,
-            subset_id,
-            subset_size,
+            dataset_id,
+            dataset_size,
             "Checks if a valid GenericRequestHeader was Send") {
     auto request = static_cast<ProducerRequest*>(arg);
     return ((asapo::GenericRequestHeader) (arg->header)).op_code == op_code
@@ -42,17 +42,17 @@ MATCHER_P10(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_
         && request->manage_data_memory == true
         && request->source_credentials == source_credentials
         && request->metadata == metadata
-        && (op_code == asapo::kOpcodeTransferSubsetData ? ((asapo::GenericRequestHeader) (arg->header)).custom_data[1]
-            == uint64_t(subset_id) : true)
-        && (op_code == asapo::kOpcodeTransferSubsetData ? ((asapo::GenericRequestHeader) (arg->header)).custom_data[2]
-            == uint64_t(subset_size) : true)
+        && (op_code == asapo::kOpcodeTransferDatasetData ? ((asapo::GenericRequestHeader) (arg->header)).custom_data[1]
+            == uint64_t(dataset_id) : true)
+        && (op_code == asapo::kOpcodeTransferDatasetData ? ((asapo::GenericRequestHeader) (arg->header)).custom_data[2]
+            == uint64_t(dataset_size) : true)
         && ((asapo::GenericRequestHeader) (arg->header)).custom_data[asapo::kPosIngestMode] == uint64_t(ingest_mode)
         && strcmp(((asapo::GenericRequestHeader) (arg->header)).message, message) == 0
-        && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader) (arg->header)).stream, stream) == 0;
 }
 
 TEST(ProducerImpl, Constructor) {
-    asapo::ProducerImpl producer{"", 4, 3600, asapo::RequestHandlerType::kTcp};
+    asapo::ProducerImpl producer{"", 4, 3600000, asapo::RequestHandlerType::kTcp};
     ASSERT_THAT(dynamic_cast<asapo::AbstractLogger*>(producer.log__), Ne(nullptr));
     ASSERT_THAT(dynamic_cast<asapo::RequestPool*>(producer.request_pool__.get()), Ne(nullptr));
 }
@@ -63,16 +63,16 @@ class ProducerImplTests : public testing::Test {
   asapo::ProducerRequestHandlerFactory factory{&service};
   testing::NiceMock<asapo::MockLogger> mock_logger;
   testing::NiceMock<MockRequestPull> mock_pull{&factory, &mock_logger};
-  asapo::ProducerImpl producer{"", 1, 3600, asapo::RequestHandlerType::kTcp};
+  asapo::ProducerImpl producer{"", 1, 3600000, asapo::RequestHandlerType::kTcp};
   uint64_t expected_size = 100;
   uint64_t expected_id = 10;
-  uint64_t expected_subset_id = 100;
-  uint64_t expected_subset_size = 4;
+  uint64_t expected_dataset_id = 100;
+  uint64_t expected_dataset_size = 4;
   uint64_t expected_ingest_mode = asapo::IngestModeFlags::kTransferMetaDataOnly;
 
   char expected_name[asapo::kMaxMessageSize] = "test_name";
-  char expected_substream[asapo::kMaxMessageSize] = "test_substream";
-  std::string expected_next_substream = "next_substream";
+  char expected_stream[asapo::kMaxMessageSize] = "test_stream";
+  std::string expected_next_stream = "next_stream";
 
   asapo::SourceCredentials expected_credentials{asapo::SourceType::kRaw, "beamtime_id", "beamline", "subname", "token"
   };
@@ -99,200 +99,172 @@ class ProducerImplTests : public testing::Test {
 TEST_F(ProducerImplTests, SendReturnsError) {
     EXPECT_CALL(mock_pull, AddRequest_t(_, false)).WillOnce(Return(
         asapo::ProducerErrorTemplates::kRequestPoolIsFull.Generate().release()));
-    asapo::EventHeader event_header{1, 1, "test"};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{1, 1, "test"};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kRequestPoolIsFull));
 }
 
 TEST_F(ProducerImplTests, ErrorIfFileNameTooLong) {
     std::string long_string(asapo::kMaxMessageSize + 100, 'a');
-    asapo::EventHeader event_header{1, 1, long_string};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{1, 1, long_string};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
+TEST_F(ProducerImplTests, ErrorIfStreamEmpty) {
+    asapo::MessageHeader message_header{1, 100, expected_fullpath};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, "", nullptr);
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
+}
+
 TEST_F(ProducerImplTests, ErrorIfFileEmpty) {
     std::string long_string(asapo::kMaxMessageSize + 100, 'a');
-    asapo::EventHeader event_header{1, 1, ""};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{1, 1, ""};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
-TEST_F(ProducerImplTests, ErrorIfSubsetSizeNotDefined) {
-    EXPECT_CALL(mock_logger, Error(testing::HasSubstr("subset dimensions")));
-    asapo::EventHeader event_header{1, 1000, "test", "", 1};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
+TEST_F(ProducerImplTests, ErrorIfDatasetSizeNotDefined) {
+    EXPECT_CALL(mock_logger, Error(testing::HasSubstr("dataset dimensions")));
+    asapo::MessageHeader message_header{1, 1000, "test", "", 1};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
 TEST_F(ProducerImplTests, ErrorIfZeroDataSize) {
-    asapo::FileData data = asapo::FileData{new uint8_t[100]};
-    asapo::EventHeader event_header{1, 0, expected_fullpath};
-    auto err = producer.SendData(event_header, std::move(data), asapo::kDefaultIngestMode, nullptr);
+    asapo::MessageData data = asapo::MessageData{new uint8_t[100]};
+    asapo::MessageHeader message_header{1, 0, expected_fullpath};
+    auto err = producer.Send(message_header, std::move(data), asapo::kDefaultIngestMode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
 TEST_F(ProducerImplTests, ErrorIfNoData) {
-    asapo::EventHeader event_header{1, 100, expected_fullpath};
-    auto err = producer.SendData(event_header, nullptr, asapo::kDefaultIngestMode, nullptr);
+    asapo::MessageHeader message_header{1, 100, expected_fullpath};
+    auto err = producer.Send(message_header, nullptr, asapo::kDefaultIngestMode, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
 TEST_F(ProducerImplTests, ErrorIfNoDataSend_) {
-    asapo::EventHeader event_header{1, 100, expected_fullpath};
-    auto err = producer.SendData__(event_header, nullptr, asapo::kDefaultIngestMode, nullptr);
+    asapo::MessageHeader message_header{1, 100, expected_fullpath};
+    auto err = producer.Send__(message_header, nullptr, asapo::kDefaultIngestMode, expected_stream, nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
 TEST_F(ProducerImplTests, ErrorIfSendingDataWithZeroId) {
-    asapo::EventHeader event_header{0, 100, expected_fullpath};
-    auto err = producer.SendData(event_header, nullptr, asapo::kTransferMetaDataOnly, nullptr);
+    asapo::MessageHeader message_header{0, 100, expected_fullpath};
+    auto err = producer.Send(message_header, nullptr, asapo::kTransferMetaDataOnly, "default", nullptr);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 }
 
 TEST_F(ProducerImplTests, OkIfNoDataWithTransferMetadataOnlyMode) {
-    asapo::EventHeader event_header{1, 100, expected_fullpath};
-    auto err = producer.SendData(event_header, nullptr, asapo::kTransferMetaDataOnly, nullptr);
+    asapo::MessageHeader message_header{1, 100, expected_fullpath};
+    auto err = producer.Send(message_header, nullptr, asapo::kTransferMetaDataOnly, "default", nullptr);
     ASSERT_THAT(err, Eq(nullptr));
 }
 
 TEST_F(ProducerImplTests, OkIfZeroSizeWithTransferMetadataOnlyMode) {
-    asapo::FileData data = asapo::FileData{new uint8_t[100]};
-    asapo::EventHeader event_header{1, 0, expected_fullpath};
-    auto err = producer.SendData(event_header, std::move(data), asapo::kTransferMetaDataOnly, nullptr);
-    ASSERT_THAT(err, Eq(nullptr));
-}
-
-TEST_F(ProducerImplTests, UsesDefaultStream) {
-    producer.SetCredentials(expected_default_credentials);
-
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
-                                                               expected_default_credentials_str,
-                                                               expected_metadata,
-                                                               expected_id,
-                                                               expected_size,
-                                                               expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
-                                                               expected_ingest_mode,
-                                                               0,
-                                                               0), false)).WillOnce(Return(nullptr));
-
-    asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
-
-    ASSERT_THAT(err, Eq(nullptr));
-}
-
-TEST_F(ProducerImplTests, OKSendingSendDataRequest) {
-    producer.SetCredentials(expected_credentials);
-
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
-                                                               expected_credentials_str,
-                                                               expected_metadata,
-                                                               expected_id,
-                                                               expected_size,
-                                                               expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
-                                                               expected_ingest_mode,
-                                                               0,
-                                                               0
-    ), false)).WillOnce(Return(
-        nullptr));
-
-    asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
-
+    asapo::MessageData data = asapo::MessageData{new uint8_t[100]};
+    asapo::MessageHeader message_header{1, 0, expected_fullpath};
+    auto err = producer.Send(message_header, std::move(data), asapo::kTransferMetaDataOnly, "default", nullptr);
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSendDataRequestWithSubstream) {
+TEST_F(ProducerImplTests, OKSendingSendRequestWithStream) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferData,
                                                                expected_credentials_str,
                                                                expected_metadata,
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               expected_substream,
+                                                               expected_stream,
                                                                expected_ingest_mode,
                                                                0,
                                                                0
     ), false)).WillOnce(Return(
         nullptr));
 
-    asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
-    auto err = producer.SendData(event_header, expected_substream, nullptr, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{expected_id, expected_size, expected_name, expected_metadata};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, expected_stream, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSubstreamFinish) {
+TEST_F(ProducerImplTests, OKSendingStreamFinish) {
     producer.SetCredentials(expected_credentials);
 
-    std::string next_stream_meta = std::string("{\"next_substream\":") + "\"" + expected_next_substream + "\"}";
+    std::string next_stream_meta = std::string("{\"next_stream\":") + "\"" + expected_next_stream + "\"}";
 
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferData,
                                                                expected_credentials_str,
                                                                next_stream_meta.c_str(),
                                                                expected_id + 1,
                                                                0,
-                                                               asapo::ProducerImpl::kFinishSubStreamKeyword.c_str(),
-                                                               expected_substream,
+                                                               asapo::ProducerImpl::kFinishStreamKeyword.c_str(),
+                                                               expected_stream,
                                                                asapo::IngestModeFlags::kTransferMetaDataOnly,
                                                                0,
                                                                0
     ), false)).WillOnce(Return(
         nullptr));
 
-    auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, expected_next_substream, nullptr);
+    auto err = producer.SendStreamFinishedFlag(expected_stream, expected_id, expected_next_stream, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSubstreamFinishWithNoNextStream) {
+TEST_F(ProducerImplTests, ErrorSendingStreamFinishWithEmptyStream) {
+    producer.SetCredentials(expected_credentials);
+    auto err = producer.SendStreamFinishedFlag("", expected_id, expected_next_stream, nullptr);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
+}
+
+TEST_F(ProducerImplTests, OKSendingStreamFinishWithNoNextStream) {
     producer.SetCredentials(expected_credentials);
 
     std::string
-        next_stream_meta = std::string("{\"next_substream\":") + "\"" + asapo::ProducerImpl::kNoNextSubStreamKeyword
+        next_stream_meta = std::string("{\"next_stream\":") + "\"" + asapo::ProducerImpl::kNoNextStreamKeyword
         + "\"}";
 
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferData,
                                                                expected_credentials_str,
                                                                next_stream_meta.c_str(),
                                                                expected_id + 1,
                                                                0,
-                                                               asapo::ProducerImpl::kFinishSubStreamKeyword.c_str(),
-                                                               expected_substream,
+                                                               asapo::ProducerImpl::kFinishStreamKeyword.c_str(),
+                                                               expected_stream,
                                                                asapo::IngestModeFlags::kTransferMetaDataOnly,
                                                                0,
                                                                0
     ), false)).WillOnce(Return(
         nullptr));
 
-    auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, "", nullptr);
+    auto err = producer.SendStreamFinishedFlag(expected_stream, expected_id, "", nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSendSubsetDataRequest) {
+TEST_F(ProducerImplTests, OKSendingSendDatasetDataRequest) {
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferSubsetData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferDatasetData,
                                                                expected_credentials_str,
                                                                expected_metadata,
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
+                                                               expected_stream,
                                                                expected_ingest_mode,
-                                                               expected_subset_id,
-                                                               expected_subset_size), false)).WillOnce(
+                                                               expected_dataset_id,
+                                                               expected_dataset_size), false)).WillOnce(
         Return(
             nullptr));
 
-    asapo::EventHeader event_header
-        {expected_id, expected_size, expected_name, expected_metadata, expected_subset_id, expected_subset_size};
-    auto err = producer.SendData(event_header, nullptr, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header
+        {expected_id, expected_size, expected_name, expected_metadata, expected_dataset_id, expected_dataset_size};
+    auto err = producer.Send(message_header, nullptr, expected_ingest_mode, expected_stream, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -304,7 +276,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
     expected_ingest_mode = asapo::IngestModeFlags::kTransferData | asapo::IngestModeFlags::kStoreInDatabase ;
 
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferMetaData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferMetaData,
                                                                expected_credentials_str,
                                                                "",
                                                                expected_id,
@@ -316,7 +288,7 @@ TEST_F(ProducerImplTests, OKAddingSendMetaDataRequest) {
                                                                10), false)).WillOnce(Return(
         nullptr));
 
-    auto err = producer.SendMetaData(expected_metadata, nullptr);
+    auto err = producer.SendMetadata(expected_metadata, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -326,8 +298,8 @@ TEST_F(ProducerImplTests, ErrorSendingEmptyFileName) {
 
     EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
-    asapo::EventHeader event_header{expected_id, 0, expected_name};
-    auto err = producer.SendFile(event_header, "", expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{expected_id, 0, expected_name};
+    auto err = producer.SendFile(message_header, "", expected_ingest_mode, expected_stream, nullptr);
 
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 
@@ -338,51 +310,43 @@ TEST_F(ProducerImplTests, ErrorSendingEmptyRelativeFileName) {
 
     EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
-    asapo::EventHeader event_header{expected_id, 0, ""};
-    auto err = producer.SendFile(event_header, expected_fullpath, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{expected_id, 0, ""};
+    auto err = producer.SendFile(message_header, expected_fullpath, expected_ingest_mode, expected_stream, nullptr);
 
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 
 }
 
-TEST_F(ProducerImplTests, OKSendingSendFileRequest) {
+TEST_F(ProducerImplTests, ErrorSendingFileToEmptyStream) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
-                                                               expected_credentials_str,
-                                                               "",
-                                                               expected_id,
-                                                               0,
-                                                               expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
-                                                               expected_ingest_mode,
-                                                               0,
-                                                               0), false)).WillOnce(Return(
-        nullptr));
+    EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
-    asapo::EventHeader event_header{expected_id, 0, expected_name};
-    auto err = producer.SendFile(event_header, expected_fullpath, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{expected_id, 0, expected_name};
+    auto err = producer.SendFile(message_header, expected_fullpath, expected_ingest_mode, "", nullptr);
+
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
 
-    ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSendFileRequestWithSubstream) {
+TEST_F(ProducerImplTests, OKSendingSendFileRequestWithStream) {
     producer.SetCredentials(expected_credentials);
 
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendRequest(asapo::kOpcodeTransferData,
                                                                expected_credentials_str,
                                                                "",
                                                                expected_id,
                                                                0,
                                                                expected_name,
-                                                               expected_substream,
+                                                               expected_stream,
                                                                expected_ingest_mode,
                                                                0,
                                                                0), false)).WillOnce(Return(
         nullptr));
 
-    asapo::EventHeader event_header{expected_id, 0, expected_name};
-    auto err = producer.SendFile(event_header, expected_substream, expected_fullpath, expected_ingest_mode, nullptr);
+    asapo::MessageHeader message_header{expected_id, 0, expected_name};
+    auto err =
+        producer.SendFile(message_header, expected_fullpath, expected_ingest_mode, expected_stream, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -408,7 +372,7 @@ TEST_F(ProducerImplTests, ErrorSettingSecondTime) {
 
 TEST_F(ProducerImplTests, ErrorSendingWrongIngestMode) {
     producer.SetCredentials(expected_credentials);
-    asapo::EventHeader event_header{expected_id, 0, expected_name};
+    asapo::MessageHeader message_header{expected_id, 0, expected_name};
     uint64_t ingest_modes[] = {0, asapo::IngestModeFlags::kTransferMetaDataOnly | asapo::IngestModeFlags::kTransferData,
                                asapo::IngestModeFlags::kTransferData,
                                asapo::IngestModeFlags::kStoreInDatabase,
@@ -420,7 +384,7 @@ TEST_F(ProducerImplTests, ErrorSendingWrongIngestMode) {
     EXPECT_CALL(mock_pull, AddRequest_t(_, _)).Times(0);
 
     for (auto ingest_mode : ingest_modes) {
-        auto err = producer.SendFile(event_header, expected_fullpath, ingest_mode, nullptr);
+        auto err = producer.SendFile(message_header, expected_fullpath, ingest_mode, expected_stream, nullptr);
         ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
     }
 
@@ -460,30 +424,37 @@ TEST_F(ProducerImplTests, WaitRequestsFinished) {
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
-MATCHER_P3(M_CheckGetSubstreamInfoRequest, op_code, source_credentials, substream,
+MATCHER_P3(M_CheckGetStreamInfoRequest, op_code, source_credentials, stream,
            "Checks if a valid GenericRequestHeader was Send") {
     auto request = static_cast<ProducerRequest*>(arg);
     return ((asapo::GenericRequestHeader) (arg->header)).op_code == op_code
         && request->source_credentials == source_credentials
-        && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader) (arg->header)).stream, stream) == 0;
 }
 
-TEST_F(ProducerImplTests, GetStreamInfoMakesCorerctRequest) {
+TEST_F(ProducerImplTests, GetStreamInfoMakesCorrectRequest) {
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetSubstreamInfoRequest(asapo::kOpcodeStreamInfo,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetStreamInfoRequest(asapo::kOpcodeStreamInfo,
                                                                        expected_credentials_str,
-                                                                       expected_substream), true)).WillOnce(
+                                                                       expected_stream), true)).WillOnce(
         Return(nullptr));
 
     asapo::Error err;
-    producer.GetStreamInfo(expected_substream, 1, &err);
+    producer.GetStreamInfo(expected_stream, 1000, &err);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
+TEST_F(ProducerImplTests, GetStreamInfoErrorOnEmptyStream) {
+    producer.SetCredentials(expected_credentials);
+    asapo::Error err;
+    producer.GetStreamInfo("", 1000, &err);
+    ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kWrongInput));
+}
+
 TEST(GetStreamInfoTest, GetStreamInfoTimeout) {
-    asapo::ProducerImpl producer1{"", 1, 10, asapo::RequestHandlerType::kTcp};
+    asapo::ProducerImpl producer1{"", 1, 10000, asapo::RequestHandlerType::kTcp};
     asapo::Error err;
-    auto sinfo = producer1.GetStreamInfo(5, &err);
+    auto sinfo = producer1.GetStreamInfo("stream", 5000, &err);
 
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
     ASSERT_THAT(err->Explain(), HasSubstr("opcode: 4"));
@@ -491,13 +462,13 @@ TEST(GetStreamInfoTest, GetStreamInfoTimeout) {
 
-TEST_F(ProducerImplTests, GetLastStreamMakesCorerctRequest) {
+TEST_F(ProducerImplTests, GetLastStreamMakesCorrectRequest) {
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetSubstreamInfoRequest(asapo::kOpcodeLastStream,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetStreamInfoRequest(asapo::kOpcodeLastStream,
                                                                        expected_credentials_str,
                                                                        ""), true)).WillOnce(
         Return(nullptr));
 
     asapo::Error err;
-    producer.GetLastSubstream(1, &err);
+    producer.GetLastStream(1000, &err);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
diff --git a/producer/api/cpp/unittests/test_producer_request.cpp b/producer/api/cpp/unittests/test_producer_request.cpp
index 6bb12223cac5d7351bc90a56ce1f6d17a2dccf55..eb087cfa3b3a4dec67b92d1626c7b0bb58d47ce8 100644
--- a/producer/api/cpp/unittests/test_producer_request.cpp
+++ b/producer/api/cpp/unittests/test_producer_request.cpp
@@ -60,7 +60,7 @@ TEST(ProducerRequest, Constructor) {
 TEST(ProducerRequest, Destructor) {
 // fails with data corruption if done wrong
     char data_[100];
-    asapo::FileData data{(uint8_t*)data_};
+    asapo::MessageData data{(uint8_t*)data_};
     asapo::GenericRequestHeader header{asapo::kOpcodeTransferData, 1, 1, 1, ""};
     asapo::ProducerRequest* request = new asapo::ProducerRequest{"", std::move(header), std::move(data), "", "", nullptr, false, 0};
 
diff --git a/producer/api/cpp/unittests/test_request_handler_filesystem.cpp b/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
index 1c95e5126ae092d52576934e8f0dda59a5b545e8..9c666a0561cac488cd3d0826e8b773bc52df4994 100644
--- a/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
+++ b/producer/api/cpp/unittests/test_request_handler_filesystem.cpp
@@ -86,12 +86,12 @@ class RequestHandlerFilesystemTests : public testing::Test {
     }
 };
 
-ACTION_P(A_WriteSendDataResponse, error_code) {
-    ((asapo::SendDataResponse*)arg1)->op_code = asapo::kOpcodeTransferData;
-    ((asapo::SendDataResponse*)arg1)->error_code = error_code;
+ACTION_P(A_WriteSendResponse, error_code) {
+    ((asapo::SendResponse*)arg1)->op_code = asapo::kOpcodeTransferData;
+    ((asapo::SendResponse*)arg1)->error_code = error_code;
 }
 
-MATCHER_P2(M_CheckSendDataRequest, file_id, file_size,
+MATCHER_P2(M_CheckSendRequest, file_id, file_size,
            "Checks if a valid GenericRequestHeader was Send") {
     return ((asapo::GenericRequestHeader*)arg)->op_code == asapo::kOpcodeTransferData
            && ((asapo::GenericRequestHeader*)arg)->data_id == file_id
diff --git a/producer/api/cpp/unittests/test_request_handler_tcp.cpp b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
index 4257e6049871550673ca1b423627c5b8339fd850..25a88248bb4b2b3ed107704870670260d9a1699e 100644
--- a/producer/api/cpp/unittests/test_request_handler_tcp.cpp
+++ b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
@@ -60,27 +60,27 @@ class RequestHandlerTcpTests : public testing::Test {
 
   char expected_file_name[asapo::kMaxMessageSize] = "test_name";
   char expected_beamtime_id[asapo::kMaxMessageSize] = "test_beamtime_id";
-  char expected_substream[asapo::kMaxMessageSize] = "test_substream";
+  char expected_stream[asapo::kMaxMessageSize] = "test_stream";
 
   uint64_t expected_thread_id = 2;
 
   asapo::Error callback_err;
   asapo::GenericRequestHeader header{expected_op_code, expected_file_id, expected_file_size,
-                                     expected_meta_size, expected_file_name, expected_substream};
+                                     expected_meta_size, expected_file_name, expected_stream};
   asapo::GenericRequestHeader header_fromfile{expected_op_code, expected_file_id, 0, expected_meta_size,
-                                              expected_file_name, expected_substream};
+                                              expected_file_name, expected_stream};
   bool callback_called = false;
   asapo::GenericRequestHeader callback_header;
   std::string callback_response;
   uint8_t expected_callback_data = 2;
-  asapo::FileData expected_data{[this]() {
+  asapo::MessageData expected_data{[this]() {
     auto a = new uint8_t[expected_file_size];
     for (auto i = 0; i < expected_file_size; i++) {
         a[i] = expected_callback_data;
     }
     return a;
   }()};
-  asapo::FileData callback_data;
+  asapo::MessageData callback_data;
 
   asapo::ProducerRequest request{expected_beamtime_id, header, std::move(expected_data), expected_metadata, "",
                                  [this](asapo::RequestCallbackPayload payload, asapo::Error err) {
@@ -124,14 +124,14 @@ class RequestHandlerTcpTests : public testing::Test {
   void ExpectOKAuthorize(bool only_once = false);
   void ExpectFailSendHeader(bool only_once = false);
   void ExpectFailSend(uint64_t expected_size, bool only_once);
-  void ExpectFailSendData(bool only_once = false);
+  void ExpectFailSend(bool only_once = false);
   void ExpectFailSendMetaData(bool only_once = false);
   void ExpectOKConnect(bool only_once = false);
   void ExpectOKSendHeader(bool only_once = false, asapo::Opcode code = expected_op_code);
   void ExpectOKSend(uint64_t expected_size, bool only_once);
   void ExpectOKSendAll(bool only_once);
   void ExpectGetFileSize(bool ok);
-  void ExpectOKSendData(bool only_once = false);
+  void ExpectOKSend(bool only_once = false);
   void ExpectOKSendFile(bool only_once = false);
   void ExpectFailSendFile(const asapo::ProducerErrorTemplate &err_template, bool client_error = false);
   void ExpectOKSendMetaData(bool only_once = false);
@@ -155,19 +155,19 @@ class RequestHandlerTcpTests : public testing::Test {
   }
 };
 
-ACTION_P2(A_WriteSendDataResponse, error_code, message) {
-    ((asapo::SendDataResponse*) arg1)->op_code = asapo::kOpcodeTransferData;
-    ((asapo::SendDataResponse*) arg1)->error_code = error_code;
-    strcpy(((asapo::SendDataResponse*) arg1)->message, message.c_str());
+ACTION_P2(A_WriteSendResponse, error_code, message) {
+    ((asapo::SendResponse*) arg1)->op_code = asapo::kOpcodeTransferData;
+    ((asapo::SendResponse*) arg1)->error_code = error_code;
+    strcpy(((asapo::SendResponse*) arg1)->message, message.c_str());
 }
 
-MATCHER_P5(M_CheckSendDataRequest, op_code, file_id, file_size, message, substream,
+MATCHER_P5(M_CheckSendRequest, op_code, file_id, file_size, message, stream,
            "Checks if a valid GenericRequestHeader was Send") {
     return ((asapo::GenericRequestHeader*) arg)->op_code == op_code
         && ((asapo::GenericRequestHeader*) arg)->data_id == uint64_t(file_id)
         && ((asapo::GenericRequestHeader*) arg)->data_size == uint64_t(file_size)
         && strcmp(((asapo::GenericRequestHeader*) arg)->message, message) == 0
-        && strcmp(((asapo::GenericRequestHeader*) arg)->substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader*) arg)->stream, stream) == 0;
 
 }
 
@@ -195,7 +195,7 @@ void RequestHandlerTcpTests::ExpectFailAuthorize(bool only_once) {
     int i = 0;
     for (auto expected_sd : expected_sds) {
         EXPECT_CALL(mock_io,
-                    Send_t(expected_sd, M_CheckSendDataRequest(asapo::kOpcodeAuthorize, 0, 0, expected_beamtime_id,
+                    Send_t(expected_sd, M_CheckSendRequest(asapo::kOpcodeAuthorize, 0, 0, expected_beamtime_id,
                                                                ""),
                            sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
@@ -204,12 +204,12 @@ void RequestHandlerTcpTests::ExpectFailAuthorize(bool only_once) {
                     Return(sizeof(asapo::GenericRequestHeader))
                 ));
 
-        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendResponse), _))
             .InSequence(seq_receive[i])
             .WillOnce(
                 DoAll(
                     testing::SetArgPointee<3>(nullptr),
-                    A_WriteSendDataResponse(asapo::kNetAuthorizationError, expected_auth_message),
+                    A_WriteSendResponse(asapo::kNetAuthorizationError, expected_auth_message),
                     testing::ReturnArg<2>()
                 ));
         EXPECT_CALL(mock_io, CloseSocket_t(expected_sd, _));
@@ -236,7 +236,7 @@ void RequestHandlerTcpTests::ExpectOKAuthorize(bool only_once) {
     int i = 0;
     for (auto expected_sd : expected_sds) {
         EXPECT_CALL(mock_io,
-                    Send_t(expected_sd, M_CheckSendDataRequest(asapo::kOpcodeAuthorize, 0, 0, expected_beamtime_id,
+                    Send_t(expected_sd, M_CheckSendRequest(asapo::kOpcodeAuthorize, 0, 0, expected_beamtime_id,
                                                                ""),
                            sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
@@ -245,12 +245,12 @@ void RequestHandlerTcpTests::ExpectOKAuthorize(bool only_once) {
                     Return(sizeof(asapo::GenericRequestHeader))
                 ));
 
-        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendResponse), _))
             .InSequence(seq_receive[i])
             .WillOnce(
                 DoAll(
                     testing::SetArgPointee<3>(nullptr),
-                    A_WriteSendDataResponse(asapo::kNetErrorNoError, expected_auth_message),
+                    A_WriteSendResponse(asapo::kNetErrorNoError, expected_auth_message),
                     testing::ReturnArg<2>()
                 ));
         if (only_once) {
@@ -269,11 +269,11 @@ void RequestHandlerTcpTests::ExpectOKAuthorize(bool only_once) {
 void RequestHandlerTcpTests::ExpectFailSendHeader(bool only_once) {
     int i = 0;
     for (auto expected_sd : expected_sds) {
-        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendDataRequest(expected_op_code,
+        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendRequest(expected_op_code,
                                                                         expected_file_id,
                                                                         expected_file_size,
                                                                         expected_file_name,
-                                                                        expected_substream),
+                                                                        expected_stream),
                                     sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
                 DoAll(
@@ -366,7 +366,7 @@ void RequestHandlerTcpTests::ExpectFailSend(uint64_t expected_size, bool only_on
     if (only_once) EXPECT_CALL(mock_logger, Warning(HasSubstr("put back")));
 }
 
-void RequestHandlerTcpTests::ExpectFailSendData(bool only_once) {
+void RequestHandlerTcpTests::ExpectFailSend(bool only_once) {
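+    // note: after the rename this is an overload of ExpectFailSend(uint64_t, bool);
+    // the call below resolves to that two-argument overload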
     ExpectFailSend(expected_file_size, only_once);
 }
 
@@ -377,7 +377,7 @@ void RequestHandlerTcpTests::ExpectFailSendMetaData(bool only_once) {
 void RequestHandlerTcpTests::ExpectFailReceive(bool only_once) {
     int i = 0;
     for (auto expected_sd : expected_sds) {
-        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendResponse), _))
             .InSequence(seq_receive[i])
             .WillOnce(
                 DoAll(
@@ -406,7 +406,7 @@ void RequestHandlerTcpTests::ExpectFailReceive(bool only_once) {
 void RequestHandlerTcpTests::ExpectOKSendAll(bool only_once) {
     ExpectOKSendHeader(only_once);
     ExpectOKSendMetaData(only_once);
-    ExpectOKSendData(only_once);
+    ExpectOKSend(only_once);
 }
 
 void RequestHandlerTcpTests::ExpectOKSend(uint64_t expected_size, bool only_once) {
@@ -426,7 +426,7 @@ void RequestHandlerTcpTests::ExpectOKSendMetaData(bool only_once) {
     ExpectOKSend(expected_meta_size, only_once);
 }
 
-void RequestHandlerTcpTests::ExpectOKSendData(bool only_once) {
+void RequestHandlerTcpTests::ExpectOKSend(bool only_once) {
     ExpectOKSend(expected_file_size, only_once);
 }
 
@@ -441,11 +441,11 @@ void RequestHandlerTcpTests::ExpectOKSendFile(bool only_once) {
 
 void RequestHandlerTcpTests::ExpectOKSendHeader(bool only_once, asapo::Opcode opcode) {
     for (auto expected_sd : expected_sds) {
-        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendDataRequest(opcode,
+        EXPECT_CALL(mock_io, Send_t(expected_sd, M_CheckSendRequest(opcode,
                                                                         expected_file_id,
                                                                         expected_file_size,
                                                                         expected_file_name,
-                                                                        expected_substream),
+                                                                        expected_stream),
                                     sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
                 DoAll(
@@ -481,12 +481,12 @@ void RequestHandlerTcpTests::ExpectOKConnect(bool only_once) {
 void RequestHandlerTcpTests::ExpectOKReceive(bool only_once, asapo::NetworkErrorCode code, std::string message) {
     int i = 0;
     for (auto expected_sd : expected_sds) {
-        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendDataResponse), _))
+        EXPECT_CALL(mock_io, Receive_t(expected_sd, _, sizeof(asapo::SendResponse), _))
             .InSequence(seq_receive[i])
             .WillOnce(
                 DoAll(
                     testing::SetArgPointee<3>(nullptr),
-                    A_WriteSendDataResponse(code, message),
+                    A_WriteSendResponse(code, message),
                     testing::ReturnArg<2>()
                 ));
         if (only_once) {
@@ -656,12 +656,12 @@ TEST_F(RequestHandlerTcpTests, ErrorWhenCannotSendHeader) {
 
 }
 
-TEST_F(RequestHandlerTcpTests, ErrorWhenCannotSendData) {
+TEST_F(RequestHandlerTcpTests, ErrorWhenCannotSend) {
     ExpectOKConnect();
     ExpectOKAuthorize();
     ExpectOKSendHeader();
     ExpectOKSendMetaData();
-    ExpectFailSendData();
+    ExpectFailSend();
 
     request_handler.PrepareProcessingRequestLocked();
     auto success = request_handler.ProcessRequestUnlocked(&request, &retry);
@@ -708,12 +708,12 @@ void RequestHandlerTcpTests::AssertImmediatelyCallBack(asapo::NetworkErrorCode e
     ExpectOKAuthorize(true);
     ExpectOKSendAll(true);
 
-    EXPECT_CALL(mock_io, Receive_t(expected_sds[0], _, sizeof(asapo::SendDataResponse), _))
+    EXPECT_CALL(mock_io, Receive_t(expected_sds[0], _, sizeof(asapo::SendResponse), _))
         .InSequence(seq_receive[0])
         .WillOnce(
             DoAll(
                 testing::SetArgPointee<3>(nullptr),
-                A_WriteSendDataResponse(error_code, expected_auth_message),
+                A_WriteSendResponse(error_code, expected_auth_message),
                 testing::ReturnArg<2>()
             ));
     EXPECT_CALL(mock_logger, Debug(AllOf(
@@ -738,12 +738,12 @@ void RequestHandlerTcpTests::AssertImmediatelyCallBack(asapo::NetworkErrorCode e
 }
 
 void RequestHandlerTcpTests::ExpectGetFileSize(bool ok) {
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     if (ok) {
         fi.size = expected_file_size;
     }
 
-    EXPECT_CALL(mock_io, GetFileInfo_t(expected_origin_fullpath, _)).WillOnce(
+    EXPECT_CALL(mock_io, GetMessageMeta_t(expected_origin_fullpath, _)).WillOnce(
         DoAll(
             testing::SetArgPointee<1>(ok ? nullptr : asapo::IOErrorTemplates::kFileNotFound.Generate().release()),
             testing::Return(fi)
@@ -883,7 +883,7 @@ TEST_F(RequestHandlerTcpTests, SendMetadataIgnoresIngestMode) {
     ExpectOKConnect(true);
     ExpectOKAuthorize(true);
     ExpectOKSendHeader(true, asapo::kOpcodeTransferMetaData);
-    ExpectOKSendData(true);
+    ExpectOKSend(true);
     ExpectOKSendMetaData(true);
     ExpectOKReceive();
 
@@ -928,7 +928,7 @@ TEST_F(RequestHandlerTcpTests, SendMetaOnlyForFileReadOK) {
     request_handler.PrepareProcessingRequestLocked();
 
     EXPECT_CALL(mock_io, SendFile_t(_, _, _)).Times(0);
-    EXPECT_CALL(mock_io, GetFileInfo_t(_, _)).Times(0);
+    EXPECT_CALL(mock_io, GetMessageMeta_t(_, _)).Times(0);
     auto ingest_mode = asapo::IngestModeFlags::kTransferMetaDataOnly;
 
     request_filesend.header.custom_data[asapo::kPosIngestMode] = ingest_mode;
@@ -941,7 +941,7 @@ TEST_F(RequestHandlerTcpTests, SendMetaOnlyForFileReadOK) {
 TEST_F(RequestHandlerTcpTests, TimeoutCallsCallback) {
     EXPECT_CALL(mock_logger, Error(AllOf(
         HasSubstr("timeout"),
-        HasSubstr("substream"))
+        HasSubstr("stream"))
     ));
 
     request_handler.ProcessRequestTimeout(&request);
diff --git a/producer/api/python/asapo_producer.pxd b/producer/api/python/asapo_producer.pxd
index f1b400c10cf70eaf222f9ddb420f48bc4c3ab433..c387fe773f2a4f277bcef12ce96dee4b4fa8dcd6 100644
--- a/producer/api/python/asapo_producer.pxd
+++ b/producer/api/python/asapo_producer.pxd
@@ -25,7 +25,7 @@ cdef extern from "asapo/asapo_producer.h" namespace "asapo":
 
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
-  cppclass FileData:
+  cppclass MessageData:
     uint8_t[] release()
     uint8_t[] get()
   cppclass StreamInfo:
@@ -55,34 +55,25 @@ cdef extern from "asapo/asapo_producer.h" namespace "asapo":
   struct  SourceCredentials:
     string beamtime_id
     string beamline
-    string stream
+    string data_source
     string user_token
     SourceType type
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
-  struct  EventHeader:
-    uint64_t file_id
-    uint64_t file_size
+  struct  MessageHeader:
+    uint64_t message_id
+    uint64_t data_size
     string file_name
     string user_metadata
-    uint64_t id_in_subset
-    uint64_t subset_size
-
-cdef extern from "asapo/asapo_producer.h" namespace "asapo":
-  struct  EventHeader:
-    uint64_t file_id
-    uint64_t file_size
-    string file_name
-    string user_metadata
-    uint64_t id_in_subset
-    uint64_t subset_size
+    uint64_t dataset_substream
+    uint64_t dataset_size
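+    # illustrative note: for a multi-module detector, dataset_substream can be
+    # the module number within one multi-part message and dataset_size the number
+    # of modules; message_id is then shared by all parts of the dataset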
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
   struct  GenericRequestHeader:
     string Json()
   struct RequestCallbackPayload:
     GenericRequestHeader original_header
-    FileData data
+    MessageData data
     string response
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
@@ -103,16 +94,16 @@ cdef extern from "asapo_wrappers.h" namespace "asapo":
 cdef extern from "asapo/asapo_producer.h" namespace "asapo" nogil:
     cppclass Producer:
         @staticmethod
-        unique_ptr[Producer] Create(string endpoint,uint8_t nthreads,RequestHandlerType type, SourceCredentials source,uint64_t timeout_sec, Error* error)
-        Error SendFile(const EventHeader& event_header, string substream, string full_path, uint64_t ingest_mode,RequestCallback callback)
-        Error SendData__(const EventHeader& event_header, string substream, void* data, uint64_t ingest_mode,RequestCallback callback)
+        unique_ptr[Producer] Create(string endpoint,uint8_t nthreads,RequestHandlerType type, SourceCredentials source,uint64_t timeout_ms, Error* error)
+        Error SendFile(const MessageHeader& message_header, string file_to_send, uint64_t ingest_mode, string stream, RequestCallback callback)
+        Error Send__(const MessageHeader& message_header, void* data, uint64_t ingest_mode, string stream, RequestCallback callback)
         void StopThreads__()
         void SetLogLevel(LogLevel level)
         uint64_t  GetRequestsQueueSize()
         Error WaitRequestsFinished(uint64_t timeout_ms)
-        Error SendSubstreamFinishedFlag(string substream, uint64_t last_id, string next_substream, RequestCallback callback)
-        StreamInfo GetStreamInfo(string substream, uint64_t timeout_sec, Error* err)
-        StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err)
+        Error SendStreamFinishedFlag(string stream, uint64_t last_id, string next_stream, RequestCallback callback)
+        StreamInfo GetStreamInfo(string stream, uint64_t timeout_ms, Error* err)
+        StreamInfo GetLastStream(uint64_t timeout_ms, Error* err)
 
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
diff --git a/producer/api/python/asapo_producer.pyx.in b/producer/api/python/asapo_producer.pyx.in
index 1040a6fb22a2389428d6d740973388f382f3c916..67bb53dc169f1dd38806a6860dd2b8e85feb7ba5 100644
--- a/producer/api/python/asapo_producer.pyx.in
+++ b/producer/api/python/asapo_producer.pyx.in
@@ -107,13 +107,13 @@ cdef class PyProducer:
             return
          self.c_producer.get().SetLogLevel(log_level)
 
-    def __send_np_array(self, id, exposed_path,data, user_meta=None,subset=None,substream="default",ingest_mode = DEFAULT_INGEST_MODE,callback=None):
-        cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
+    def __send_np_array(self, id, exposed_path,data, user_meta=None,dataset=None,stream="default",ingest_mode = DEFAULT_INGEST_MODE,callback=None):
+        cdef MessageHeader message_header = self.create_message_header(id,exposed_path,user_meta,dataset,ingest_mode)
         if data is None:
-            event_header.file_size = 0
+            message_header.data_size = 0
         else:
-            event_header.file_size = data.nbytes
-        err = self.c_producer.get().SendData__(event_header, _bytes(substream), data_pointer_nparray(data),ingest_mode,
+            message_header.data_size = data.nbytes
+        err = self.c_producer.get().Send__(message_header, data_pointer_nparray(data),ingest_mode,_bytes(stream),
             unwrap_callback_with_memory(<RequestCallbackCythonMemory>self.c_callback_ndarr,
              <void*>self,<void*>callback, <void*>data))
         if err:
@@ -126,23 +126,23 @@ cdef class PyProducer:
         if callback != None:
             Py_XINCREF(<PyObject*>callback)
         return
-    cdef EventHeader create_event_header(self,uint64_t id, exposed_path,user_meta,subset,ingest_mode):
-        cdef EventHeader event_header
-        event_header.file_id = id
-        event_header.file_name = _bytes(exposed_path)
-        event_header.user_metadata = _bytes(user_meta) if user_meta!=None else ""
-        if subset == None:
-            event_header.id_in_subset = 0
-            event_header.subset_size = 0
+    cdef MessageHeader create_message_header(self,uint64_t id, exposed_path,user_meta,dataset,ingest_mode):
+        cdef MessageHeader message_header
+        message_header.message_id = id
+        message_header.file_name = _bytes(exposed_path)
+        message_header.user_metadata = _bytes(user_meta) if user_meta!=None else ""
+        if dataset == None:
+            message_header.dataset_substream = 0
+            message_header.dataset_size = 0
         else:
-            event_header.id_in_subset = subset[0]
-            event_header.subset_size = subset[1]
-        return event_header
-
-    def __send_bytes(self, id, exposed_path,data, user_meta=None,subset=None, substream="default", ingest_mode = DEFAULT_INGEST_MODE,callback=None):
-        cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
-        event_header.file_size = len(data)
-        err = self.c_producer.get().SendData__(event_header,_bytes(substream),  data_pointer_bytes(data), ingest_mode,
+            message_header.dataset_substream = dataset[0]
+            message_header.dataset_size = dataset[1]
+        return message_header
+
+    def __send_bytes(self, id, exposed_path,data, user_meta=None,dataset=None, stream="default", ingest_mode = DEFAULT_INGEST_MODE,callback=None):
+        cdef MessageHeader message_header = self.create_message_header(id,exposed_path,user_meta,dataset,ingest_mode)
+        message_header.data_size = len(data)
+        err = self.c_producer.get().Send__(message_header, data_pointer_bytes(data), ingest_mode, _bytes(stream),
             unwrap_callback_with_memory(<RequestCallbackCythonMemory>self.c_callback_bytesaddr,
              <void*>self,<void*>callback, <void*>data))
         if err:
@@ -152,7 +152,7 @@ cdef class PyProducer:
             Py_XINCREF(<PyObject*>callback)
         return
 
-    def send_data(self, uint64_t id, exposed_path, data, user_meta=None, subset=None, substream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
+    def send(self, uint64_t id, exposed_path, data, user_meta=None, dataset=None, ingest_mode = DEFAULT_INGEST_MODE, stream = "default", callback=None):
         """
          :param id: unique data id
          :type id: int
@@ -162,12 +162,12 @@ cdef class PyProducer:
          :type data: contiguous numpy or bytes array, can be None for INGEST_MODE_TRANSFER_METADATA_ONLY ingest mode
          :param user_meta: user metadata, default None
          :type user_meta: JSON string
-         :param subset: a tuple with two int values (id in subset, subset size), default None
-         :type subset: tuple
-         :param substream: substream name, default "default"
-         :type substream: string
+         :param dataset: a tuple with two int values (dataset substream id, number of dataset substreams), default None
+         :type dataset: tuple
          :param ingest_mode: ingest mode flag
          :type ingest_mode: int
+         :param stream: stream name, default "default"
+         :type stream: string
          :param callback: callback function, default None
          :type callback: callback(info,err), where info - json string with event header that was used to send data and response, err - error string or None
          :raises:
@@ -175,34 +175,34 @@ cdef class PyProducer:
             AsapoProducerError: actually should not happen
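+
+         Example (illustrative sketch, assuming an existing ``producer`` and a numpy array ``data``; names are placeholders):
+
+         >>> producer.send(1, "processed/image1.nxs", data, stream="scan1")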
         """
         if type(data) == np.ndarray or data == None:
-            self.__send_np_array(id,exposed_path,data,user_meta,subset,substream,ingest_mode,callback)
+            self.__send_np_array(id,exposed_path,data,user_meta,dataset,stream,ingest_mode,callback)
         elif type(data) == bytes:
-            self.__send_bytes(id,exposed_path,data,user_meta,subset,substream,ingest_mode,callback)
+            self.__send_bytes(id,exposed_path,data,user_meta,dataset,stream,ingest_mode,callback)
         else:
             raise(AsapoProducerError("wrong data type: " + str(type(data))))
-    def send_substream_finished_flag(self, substream, uint64_t last_id, next_substream = None, callback = None):
+    def send_stream_finished_flag(self, stream, uint64_t last_id, next_stream = None, callback = None):
         """
-         :param substream: substream name
-         :type substream: string
+         :param stream: stream name
+         :type stream: string
-         :param id: id of the last record
+         :param last_id: id of the last record
+         :type last_id: int
-         :param next_substream: name of the next substream or None
-         :type substream: string
+         :param next_stream: name of the next stream or None
+         :type next_stream: string
          :param callback: callback function, default None
          :type callback: callback(info,err), where info - json string with event header that was used to send data and response, err - error string or None
          :raises:
             AsapoWrongInputError: wrong input (authorization, meta, ...)
             AsapoProducerError: actually should not happen
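+
+         Example (illustrative, with placeholder stream names; closes "scan1" after message 100 and points consumers to "scan2"):
+
+         >>> producer.send_stream_finished_flag("scan1", 100, next_stream="scan2")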
         """
-        err = self.c_producer.get().SendSubstreamFinishedFlag(_bytes(substream), last_id,_bytes(next_substream) if next_substream != None else "",
+        err = self.c_producer.get().SendStreamFinishedFlag(_bytes(stream), last_id,_bytes(next_stream) if next_stream != None else "",
         unwrap_callback(<RequestCallbackCython>self.c_callback, <void*>self,<void*>callback if callback != None else NULL))
         if err:
             throw_exception(err)
 
-    def stream_info(self, substream = 'default', uint64_t timeout_sec = 1):
+    def stream_info(self, stream = 'default', uint64_t timeout_ms = 1000):
         """
-         :param substream: substream name
-         :type substream: string
-         :param timeout_sec: timeout in seconds
+         :param stream: stream name
+         :type stream: string
+         :param timeout_ms: timeout in milliseconds
          :type timeout_ms: int
          :raises:
             AsapoWrongInputError: wrong input (authorization, ...)
@@ -211,14 +211,14 @@ cdef class PyProducer:
         """
         cdef Error err
         cdef StreamInfo info
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         with nogil:
-            info = self.c_producer.get().GetStreamInfo(b_substream,timeout_sec,&err)
+            info = self.c_producer.get().GetStreamInfo(b_stream,timeout_ms,&err)
         if err:
             throw_exception(err)
         return json.loads(_str(info.Json(True)))
 
-    def last_stream(self, uint64_t timeout_sec = 1):
+    def last_stream(self, uint64_t timeout_ms = 1000):
         """
-         :param timeout_ms: timeout in seconds
+         :param timeout_ms: timeout in milliseconds
          :type timeout_ms: int
@@ -230,11 +230,11 @@ cdef class PyProducer:
         cdef Error err
         cdef StreamInfo info
         with nogil:
-            info = self.c_producer.get().GetLastSubstream(timeout_sec,&err)
+            info = self.c_producer.get().GetLastStream(timeout_ms,&err)
         if err:
             throw_exception(err)
         return json.loads(_str(info.Json(True)))
-    def send_file(self, uint64_t id, local_path, exposed_path, user_meta=None, subset=None, substream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
+    def send_file(self, uint64_t id, local_path, exposed_path, user_meta=None, dataset=None, ingest_mode = DEFAULT_INGEST_MODE, stream = "default", callback=None):
         """
          :param id: unique data id
          :type id: int
@@ -244,12 +244,12 @@ cdef class PyProducer:
          :type exposed_path: string
          :param user_meta: user metadata, default None
          :type user_meta: JSON string
-         :param subset: a tuple with two int values (subset id, subset size), default None
-         :type subset: tuple
-         :param substream: substream name, default "default"
-         :type substream: string
+         :param dataset: a tuple with two int values (dataset id, dataset size), default None
+         :type dataset: tuple
          :param ingest_mode: ingest mode flag
          :type ingest_mode: int
+         :param stream: stream name, default "default"
+         :type stream: string
          :param callback: callback function, default None
          :type callback: callback(info,err), where info - json string with event header that was used to send data and response, err - error string or None
          :raises:
@@ -258,9 +258,9 @@ cdef class PyProducer:
             AsapoProducerError: actually should not happen
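+
+         Example (illustrative; paths and stream name are placeholders):
+
+         >>> producer.send_file(1, "/tmp/file1.nxs", "processed/file1.nxs", stream="scan1")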
         """
 
-        cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
-        event_header.file_size = 0
-        err = self.c_producer.get().SendFile(event_header, _bytes(substream), _bytes(local_path), ingest_mode,
+        cdef MessageHeader message_header = self.create_message_header(id,exposed_path,user_meta,dataset,ingest_mode)
+        message_header.data_size = 0
+        err = self.c_producer.get().SendFile(message_header, _bytes(local_path), ingest_mode, _bytes(stream),
             unwrap_callback(<RequestCallbackCython>self.c_callback, <void*>self,<void*>callback if callback != None else NULL))
         if err:
             throw_exception(err)
@@ -320,7 +320,7 @@ cdef class PyProducer:
             if self.c_producer.get() is not NULL:
                 self.c_producer.get().StopThreads__()
     @staticmethod
-    def __create_producer(endpoint,type,beamtime_id,beamline,stream,token,nthreads,timeout_sec):
+    def __create_producer(endpoint,type,beamtime_id,beamline,data_source,token,nthreads,timeout_ms):
         pyProd = PyProducer()
         cdef Error err
         cdef SourceType source_type
@@ -331,14 +331,14 @@ cdef class PyProducer:
         source.beamtime_id = beamtime_id
         source.beamline = beamline
         source.user_token = token
-        source.stream = stream
+        source.data_source = data_source
         source.type = source_type
-        pyProd.c_producer = Producer.Create(endpoint,nthreads,RequestHandlerType_Tcp,source,timeout_sec,&err)
+        pyProd.c_producer = Producer.Create(endpoint,nthreads,RequestHandlerType_Tcp,source,timeout_ms,&err)
         if err:
             throw_exception(err)
         return pyProd
 
-def create_producer(endpoint,type,beamtime_id,beamline,stream,token,nthreads,timeout_sec):
+def create_producer(endpoint,type,beamtime_id,beamline,data_source,token,nthreads,timeout_ms):
     """
          :param endpoint: server endpoint (url:port)
          :type endpoint: string
@@ -348,19 +348,19 @@ def create_producer(endpoint,type,beamtime_id,beamline,stream,token,nthreads,tim
          :type beamtime_id: string
          :param beamline: beamline name, can be "auto" if beamtime_id is given
          :type beamline: string
-         :param stream: stream to producer data to
-         :type stream: string
+         :param data_source: name of the data source that produces data
+         :type data_source: string
          :param token: authorization token
          :type token: string
          :param nthreads: number of threads for sending data
          :type nthreads: int
-         :param timeout_sec: send requests timeout
-         :type timeout_sec: int
+         :param timeout_ms: send requests timeout in milliseconds
+         :type timeout_ms: int
          :raises:
            AsapoWrongInputError: wrong input (number of threads, ...)
             AsapoProducerError: actually should not happen
     """
-    return PyProducer.__create_producer(_bytes(endpoint),_bytes(type),_bytes(beamtime_id),_bytes(beamline),_bytes(stream),_bytes(token),nthreads,timeout_sec)
+    return PyProducer.__create_producer(_bytes(endpoint),_bytes(type),_bytes(beamtime_id),_bytes(beamline),_bytes(data_source),_bytes(token),nthreads,timeout_ms)
 
 
 __version__ = "@PYTHON_ASAPO_VERSION@@ASAPO_VERSION_COMMIT@"
diff --git a/producer/event_monitor_producer/src/event_detector.h b/producer/event_monitor_producer/src/event_detector.h
index 610b8aaa478b5968a11aec96b999c2d0beee12c2..b38c3294a452af2f55e46ee15131185b9d6ee863 100644
--- a/producer/event_monitor_producer/src/event_detector.h
+++ b/producer/event_monitor_producer/src/event_detector.h
@@ -8,7 +8,7 @@ namespace asapo {
 
 class AbstractEventDetector {
   public:
-    virtual Error GetNextEvent(EventHeader* event_header) = 0;
+    virtual Error GetNextEvent(MessageHeader* message_header) = 0;
     virtual Error StartMonitoring() = 0;
     virtual ~AbstractEventDetector() = default;
 };
diff --git a/producer/event_monitor_producer/src/eventmon_config.cpp b/producer/event_monitor_producer/src/eventmon_config.cpp
index d9ede0beed1b2793e7fbc931152ce8852aa776f0..d3d403950319b4713b90194c3276ad60b96839cb 100644
--- a/producer/event_monitor_producer/src/eventmon_config.cpp
+++ b/producer/event_monitor_producer/src/eventmon_config.cpp
@@ -11,35 +11,35 @@ EventMonConfigFactory::EventMonConfigFactory() : io__{GenerateDefaultIO()} {
 
 }
 
-Error SubsetModeToEnum(const std::string& mode_str, SubSetMode* mode) {
+Error DatasetModeToEnum(const std::string& mode_str, DatasetMode* mode) {
     if (mode_str == "batch") {
-        *mode = SubSetMode::kBatch;
+        *mode = DatasetMode::kBatch;
         return nullptr;
     }
 
     if (mode_str == "none") {
-        *mode = SubSetMode::kNone;
+        *mode = DatasetMode::kNone;
         return nullptr;
     }
 
     if (mode_str == "multisource") {
-        *mode = SubSetMode::kMultiSource;
+        *mode = DatasetMode::kMultiSource;
         return nullptr;
     }
 
 
-    return TextError("Wrone subset mode:" + mode_str);
+    return TextError("Wrone dataset mode:" + mode_str);
 }
 
 Error EventMonConfigFactory::ParseConfigFile(std::string file_name) {
     JsonFileParser parser(file_name, &io__);
     Error err = nullptr;
-    std::string subset_mode;
+    std::string dataset_mode;
 
     (err = parser.GetString("AsapoEndpoint", &config.asapo_endpoint)) ||
     (err = parser.GetString("Tag", &config.tag)) ||
     (err = parser.GetString("BeamtimeID", &config.beamtime_id)) ||
-    (err = parser.GetString("Stream", &config.stream)) ||
+    (err = parser.GetString("DataSource", &config.data_source)) ||
     (err = parser.GetString("Mode", &config.mode_str)) ||
     (err = parser.GetUInt64("NThreads", &config.nthreads)) ||
     (err = parser.GetString("RootMonitoredFolder", &config.root_monitored_folder)) ||
@@ -48,19 +48,19 @@ Error EventMonConfigFactory::ParseConfigFile(std::string file_name) {
     (err = parser.GetArrayString("MonitoredSubFolders", &config.monitored_subfolders)) ||
     (err = parser.GetArrayString("IgnoreExtensions", &config.ignored_extensions)) ||
     (err = parser.GetArrayString("WhitelistExtensions", &config.whitelisted_extensions)) ||
-    (err = parser.Embedded("Subset").GetString("Mode", &subset_mode)) ||
-    (err = SubsetModeToEnum(subset_mode, &config.subset_mode));
+    (err = parser.Embedded("Dataset").GetString("Mode", &dataset_mode)) ||
+    (err = DatasetModeToEnum(dataset_mode, &config.dataset_mode));
     if (err) {
         return err;
     }
 
-    if (config.subset_mode == SubSetMode::kBatch) {
-        err = parser.Embedded("Subset").GetUInt64("BatchSize", &config.subset_batch_size);
+    if (config.dataset_mode == DatasetMode::kBatch) {
+        err = parser.Embedded("Dataset").GetUInt64("BatchSize", &config.dataset_batch_size);
     }
 
-    if (config.subset_mode == SubSetMode::kMultiSource) {
-        err = parser.Embedded("Subset").GetUInt64("NSources", &config.subset_multisource_nsources);
-        err = parser.Embedded("Subset").GetUInt64("SourceId", &config.subset_multisource_sourceid);
+    if (config.dataset_mode == DatasetMode::kMultiSource) {
+        err = parser.Embedded("Dataset").GetUInt64("NSources", &config.dataset_multisource_nsources);
+        err = parser.Embedded("Dataset").GetUInt64("SourceId", &config.dataset_multisource_sourceid);
     }
 
 
@@ -73,7 +73,7 @@ Error EventMonConfigFactory::CheckConfig() {
     (err = CheckMode()) ||
     (err = CheckLogLevel()) ||
     (err = CheckNThreads()) ||
-    (err = CheckSubsets());
+    (err = CheckDatasets());
 
 //todo: check monitored folders exist?
     return err;
@@ -113,13 +113,13 @@ Error EventMonConfigFactory::CheckNThreads() {
     return nullptr;
 }
 
-Error EventMonConfigFactory::CheckSubsets() {
-    if (config.subset_mode == SubSetMode::kBatch && config.subset_batch_size < 1) {
+Error EventMonConfigFactory::CheckDatasets() {
+    if (config.dataset_mode == DatasetMode::kBatch && config.dataset_batch_size < 1) {
         return  TextError("Batch size should > 0");
     }
 
 
-    if (config.subset_mode == SubSetMode::kMultiSource && config.subset_multisource_nsources < 1) {
+    if (config.dataset_mode == DatasetMode::kMultiSource && config.dataset_multisource_nsources < 1) {
         return  TextError("Number of sources size should be > 0");
     }
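
For reference, a configuration sketch matching ParseConfigFile above. Only the keys shown being parsed are taken from the diff; all values, and any keys elided by the hunk boundaries (e.g. "LogLevel", "RemoveAfterSend"), are illustrative. In "multisource" mode the embedded object carries "NSources" and "SourceId" instead of "BatchSize":

    {
      "AsapoEndpoint": "localhost:8400",
      "Tag": "tag1",
      "BeamtimeID": "my_beamtime",
      "DataSource": "detector1",
      "Mode": "tcp",
      "NThreads": 4,
      "RootMonitoredFolder": "/tmp/data",
      "MonitoredSubFolders": ["scan1"],
      "IgnoreExtensions": ["tmp"],
      "WhitelistExtensions": [],
      "Dataset": {
        "Mode": "batch",
        "BatchSize": 3
      }
    }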
 
diff --git a/producer/event_monitor_producer/src/eventmon_config.h b/producer/event_monitor_producer/src/eventmon_config.h
index f4395970afe836a78c0dd3d2bacb2187f7d65220..3f404ed36b7bf5abf58969f15acfe1ebbe033958 100644
--- a/producer/event_monitor_producer/src/eventmon_config.h
+++ b/producer/event_monitor_producer/src/eventmon_config.h
@@ -9,7 +9,7 @@
 
 namespace asapo {
 
-enum class SubSetMode {
+enum class DatasetMode {
     kNone,
     kBatch,
     kMultiSource
@@ -27,11 +27,11 @@ struct EventMonConfig {
     std::vector<std::string> ignored_extensions;
     std::vector<std::string> whitelisted_extensions;
     bool remove_after_send = false;
-    SubSetMode subset_mode = SubSetMode::kNone;
-    uint64_t subset_batch_size = 1;
-    uint64_t subset_multisource_nsources = 1;
-    uint64_t subset_multisource_sourceid = 1;
-    std::string stream;
+    DatasetMode dataset_mode = DatasetMode::kNone;
+    uint64_t dataset_batch_size = 1;
+    uint64_t dataset_multisource_nsources = 1;
+    uint64_t dataset_multisource_sourceid = 1;
+    std::string data_source;
   private:
     std::string log_level_str;
     std::string mode_str;
diff --git a/producer/event_monitor_producer/src/eventmon_config_factory.h b/producer/event_monitor_producer/src/eventmon_config_factory.h
index 0c50df1965acfe00353357dae8602ba5b6d79395..7697238fb7822321ca87c1cdb25e168eb92b7dcf 100644
--- a/producer/event_monitor_producer/src/eventmon_config_factory.h
+++ b/producer/event_monitor_producer/src/eventmon_config_factory.h
@@ -16,7 +16,7 @@ class EventMonConfigFactory {
     Error ParseConfigFile(std::string file_name);
     Error CheckMode();
     Error CheckLogLevel();
-    Error CheckSubsets();
+    Error CheckDatasets();
     Error CheckNThreads();
     Error CheckConfig();
 };
diff --git a/producer/event_monitor_producer/src/folder_event_detector.cpp b/producer/event_monitor_producer/src/folder_event_detector.cpp
index 6ed45b9923396e5f10d69fdaddfcd14707a63d0d..e108144ba696211143e033c46e1d98d5a897bddf 100644
--- a/producer/event_monitor_producer/src/folder_event_detector.cpp
+++ b/producer/event_monitor_producer/src/folder_event_detector.cpp
@@ -49,7 +49,7 @@ Error FolderEventDetector::UpdateEventsBuffer() {
 
     for (auto& file : files) {
         if (!IgnoreFile(file) && FileInWhiteList(file) ) {
-            events_buffer_.emplace_back(EventHeader{0, 0, file});
+            events_buffer_.emplace_back(MessageHeader{0, 0, file});
         }
     }
 
@@ -57,7 +57,7 @@ Error FolderEventDetector::UpdateEventsBuffer() {
 }
 
 
-Error FolderEventDetector::GetNextEvent(EventHeader* event_header) {
+Error FolderEventDetector::GetNextEvent(MessageHeader* message_header) {
     if (!monitoring_started_) {
         auto err = TextError("monitoring is not started yet");
         return err;
@@ -69,7 +69,7 @@ Error FolderEventDetector::GetNextEvent(EventHeader* event_header) {
         }
     }
 
-    return GetHeaderFromBuffer(event_header);
+    return GetHeaderFromBuffer(message_header);
 }
 
 bool FolderEventDetector::BufferIsEmpty() const {
@@ -90,11 +90,11 @@ Error FolderEventDetector::StartMonitoring() {
     return nullptr;
 }
 
-Error FolderEventDetector::GetHeaderFromBuffer(EventHeader* event_header) {
+Error FolderEventDetector::GetHeaderFromBuffer(MessageHeader* message_header) {
     if (events_buffer_.size() == 0) {
         return EventMonitorErrorTemplates::kNoNewEvent.Generate();
     }
-    *event_header = std::move(events_buffer_.front());
+    *message_header = std::move(events_buffer_.front());
     events_buffer_.pop_front();
     return nullptr;
 }
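
A minimal polling sketch for the detector above, assuming an already-filled EventMonConfig named config; kNoNewEvent is not fatal, it just means nothing is buffered yet:

    asapo::FolderEventDetector detector{&config};
    auto start_err = detector.StartMonitoring();  // must succeed before GetNextEvent
    asapo::MessageHeader header;
    auto err = detector.GetNextEvent(&header);
    if (err == asapo::EventMonitorErrorTemplates::kNoNewEvent) {
        // buffer empty and no new filesystem events - poll again later
    }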
diff --git a/producer/event_monitor_producer/src/folder_event_detector.h b/producer/event_monitor_producer/src/folder_event_detector.h
index 75874ee414a53546c11ec4fa143450d8bc0632ef..f2381a8198beedc18f9c0736f0c2f5d61cd3779a 100644
--- a/producer/event_monitor_producer/src/folder_event_detector.h
+++ b/producer/event_monitor_producer/src/folder_event_detector.h
@@ -14,16 +14,16 @@ namespace asapo {
 
 class FolderEventDetector : public AbstractEventDetector {
   public:
-    Error GetNextEvent(EventHeader* event_header) override;
+    Error GetNextEvent(MessageHeader* message_header) override;
     Error StartMonitoring() override;
     FolderEventDetector(const EventMonConfig* config);
     std::unique_ptr<SystemFolderWatch> system_folder_watch__;
   private:
     const EventMonConfig* config_;
     bool monitoring_started_ = false;
-    std::deque<EventHeader> events_buffer_;
+    std::deque<MessageHeader> events_buffer_;
     Error UpdateEventsBuffer();
-    Error GetHeaderFromBuffer(EventHeader* event_header);
+    Error GetHeaderFromBuffer(MessageHeader* message_header);
     bool IgnoreFile(const std::string& event);
     bool FileInWhiteList(const std::string& file);
     bool BufferIsEmpty() const;
diff --git a/producer/event_monitor_producer/src/main_eventmon.cpp b/producer/event_monitor_producer/src/main_eventmon.cpp
index c0841c39fc96402416005e80ba267b5e91cd0bae..d30e479a8bf6e8af518b818ee0aa4cc8accaf040 100644
--- a/producer/event_monitor_producer/src/main_eventmon.cpp
+++ b/producer/event_monitor_producer/src/main_eventmon.cpp
@@ -39,7 +39,7 @@ std::unique_ptr<Producer> CreateProducer() {
 
     Error err;
     auto producer = Producer::Create(config->asapo_endpoint, (uint8_t) config->nthreads,
-                                     config->mode, asapo::SourceCredentials{asapo::SourceType::kProcessed,config->beamtime_id, "", config->stream, ""}, 3600, &err);
+                                     config->mode, asapo::SourceCredentials{asapo::SourceType::kProcessed,config->beamtime_id, "", config->data_source, ""}, 3600000, &err);
     if(err) {
         std::cerr << "cannot create producer: " << err << std::endl;
         exit(EXIT_FAILURE);
@@ -76,18 +76,18 @@ void SignalHandler(int signal) {
 }
 
 
-void HandleSubsets(asapo::EventHeader* header) {
-    switch (GetEventMonConfig()->subset_mode) {
-    case asapo::SubSetMode::kNone:
+void HandleDatasets(asapo::MessageHeader* header) {
+    switch (GetEventMonConfig()->dataset_mode) {
+    case asapo::DatasetMode::kNone:
         return;
-    case asapo::SubSetMode::kBatch:
-        header->subset_size = GetEventMonConfig()->subset_batch_size;
-        header->id_in_subset = (header->file_id - 1) % header->subset_size + 1;
-        header->file_id = (header->file_id - 1) / header->subset_size + 1;
+    case asapo::DatasetMode::kBatch:
+        header->dataset_size = GetEventMonConfig()->dataset_batch_size;
+        header->dataset_substream = (header->message_id - 1) % header->dataset_size + 1;
+        header->message_id = (header->message_id - 1) / header->dataset_size + 1;
         break;
-    case asapo::SubSetMode::kMultiSource:
-        header->subset_size = GetEventMonConfig()->subset_multisource_nsources;
-        header->id_in_subset = GetEventMonConfig()->subset_multisource_sourceid;
+    case asapo::DatasetMode::kMultiSource:
+        header->dataset_size = GetEventMonConfig()->dataset_multisource_nsources;
+        header->dataset_substream = GetEventMonConfig()->dataset_multisource_sourceid;
         break;
     }
 }
@@ -124,8 +124,8 @@ int main (int argc, char* argv[]) {
 
     int i = 0;
     while (true) {
-        asapo::EventHeader event_header;
-        auto err = event_detector->GetNextEvent(&event_header);
+        asapo::MessageHeader message_header;
+        auto err = event_detector->GetNextEvent(&message_header);
         if (stop_signal) {
             break; // we check it here because a signal can interrupt the system call (read by inotify) and result in incomplete event data
         }
@@ -135,10 +135,10 @@ int main (int argc, char* argv[]) {
             }
             continue;
         }
-        event_header.file_id = ++i;
-        HandleSubsets(&event_header);
-        producer->SendFile(event_header, GetEventMonConfig()->root_monitored_folder + asapo::kPathSeparator +
-                           event_header.file_name, asapo::kDefaultIngestMode, ProcessAfterSend);
+        message_header.message_id = ++i;
+        HandleDatasets(&message_header);
+        producer->SendFile(message_header, GetEventMonConfig()->root_monitored_folder + asapo::kPathSeparator +
+            message_header.file_name, asapo::kDefaultIngestMode, "default", ProcessAfterSend);
     }
 
     logger->Info("Producer exit. Processed " + std::to_string(i) + " files");
diff --git a/producer/event_monitor_producer/unittests/mock_eventmon_config.cpp b/producer/event_monitor_producer/unittests/mock_eventmon_config.cpp
index d842c9e080f001d41bdc0d383aed2512fb1c78ed..4d0a68286c6231d3fcee575dd96272994b0dbefb 100644
--- a/producer/event_monitor_producer/unittests/mock_eventmon_config.cpp
+++ b/producer/event_monitor_producer/unittests/mock_eventmon_config.cpp
@@ -49,30 +49,30 @@ Error SetFolderMonConfig (const EventMonConfig& config) {
     config_string += "," + std::string("\"NThreads\":") + std::to_string(config.nthreads);
     config_string += "," + std::string("\"LogLevel\":") + "\"" + log_level + "\"";
     config_string += "," + std::string("\"RemoveAfterSend\":") + (config.remove_after_send ? "true" : "false");
-    config_string += "," + std::string("\"Stream\":") + "\"" + config.stream + "\"";
+    config_string += "," + std::string("\"DataSource\":") + "\"" + config.data_source + "\"";
 
-    std::string subset_mode;
-    switch (config.subset_mode) {
-    case SubSetMode::kBatch:
-        subset_mode = "batch";
+    std::string dataset_mode;
+    switch (config.dataset_mode) {
+    case DatasetMode::kBatch:
+        dataset_mode = "batch";
         break;
-    case SubSetMode::kMultiSource:
-        subset_mode = "multisource";
+    case DatasetMode::kMultiSource:
+        dataset_mode = "multisource";
         break;
 
-    case SubSetMode::kNone:
-        subset_mode = "none";
+    case DatasetMode::kNone:
+        dataset_mode = "none";
         break;
 
     }
-    config_string += "," + std::string("\"Subset\":{");
-    config_string += std::string("\"Mode\":") + "\"" + subset_mode + "\"";
-    if (config.subset_mode == SubSetMode::kBatch) {
-        config_string += "," + std::string("\"BatchSize\":") + std::to_string(config.subset_batch_size);
+    config_string += "," + std::string("\"Dataset\":{");
+    config_string += std::string("\"Mode\":") + "\"" + dataset_mode + "\"";
+    if (config.dataset_mode == DatasetMode::kBatch) {
+        config_string += "," + std::string("\"BatchSize\":") + std::to_string(config.dataset_batch_size);
     }
-    if (config.subset_mode == SubSetMode::kMultiSource) {
-        config_string += "," + std::string("\"SourceId\":") + std::to_string(config.subset_multisource_sourceid);
-        config_string += "," + std::string("\"NSources\":") + std::to_string(config.subset_multisource_nsources);
+    if (config.dataset_mode == DatasetMode::kMultiSource) {
+        config_string += "," + std::string("\"SourceId\":") + std::to_string(config.dataset_multisource_sourceid);
+        config_string += "," + std::string("\"NSources\":") + std::to_string(config.dataset_multisource_nsources);
     }
 
     config_string += "}";
diff --git a/producer/event_monitor_producer/unittests/test_eventmon_config.cpp b/producer/event_monitor_producer/unittests/test_eventmon_config.cpp
index 3cad38b49fac6e9ac71fde88d6faba1d763363ef..f9369e4f4480b902c27d5b69ee40f6bda6b58470 100644
--- a/producer/event_monitor_producer/unittests/test_eventmon_config.cpp
+++ b/producer/event_monitor_producer/unittests/test_eventmon_config.cpp
@@ -31,7 +31,7 @@ using ::asapo::MockIO;
 using ::asapo::EventMonConfigFactory;
 using asapo::EventMonConfig;
 
-using asapo::SubSetMode;
+using asapo::DatasetMode;
 
 namespace {
 
@@ -62,9 +62,9 @@ TEST_F(ConfigTests, ReadSettingsOK) {
     test_config.monitored_subfolders = {"test1", "test2"};
     test_config.ignored_extensions = {"tmp", "test"};
     test_config.remove_after_send = true;
-    test_config.subset_mode = SubSetMode::kBatch;
-    test_config.subset_batch_size = 9;
-    test_config.stream = "stream";
+    test_config.dataset_mode = DatasetMode::kBatch;
+    test_config.dataset_batch_size = 9;
+    test_config.data_source = "source";
     test_config.whitelisted_extensions =  {"bla"};
 
     auto err = asapo::SetFolderMonConfig(test_config);
@@ -82,9 +82,9 @@ TEST_F(ConfigTests, ReadSettingsOK) {
     ASSERT_THAT(config->root_monitored_folder, Eq("tmp"));
     ASSERT_THAT(config->ignored_extensions, ElementsAre("tmp", "test"));
     ASSERT_THAT(config->remove_after_send, Eq(true));
-    ASSERT_THAT(config->subset_mode, Eq(SubSetMode::kBatch));
-    ASSERT_THAT(config->subset_batch_size, Eq(9));
-    ASSERT_THAT(config->stream, Eq("stream"));
+    ASSERT_THAT(config->dataset_mode, Eq(DatasetMode::kBatch));
+    ASSERT_THAT(config->dataset_batch_size, Eq(9));
+    ASSERT_THAT(config->data_source, Eq("source"));
 }
 
 
@@ -103,17 +103,17 @@ TEST_F(ConfigTests, ReadSettingsWhiteListOK) {
 
 TEST_F(ConfigTests, ReadSettingsMultiSourceOK) {
     asapo::EventMonConfig test_config;
-    test_config.subset_mode = SubSetMode::kMultiSource;
-    test_config.subset_multisource_nsources = 2;
-    test_config.subset_multisource_sourceid = 12;
+    test_config.dataset_mode = DatasetMode::kMultiSource;
+    test_config.dataset_multisource_nsources = 2;
+    test_config.dataset_multisource_sourceid = 12;
     auto err = asapo::SetFolderMonConfig(test_config);
 
     auto config = asapo::GetEventMonConfig();
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(config->subset_mode, Eq(SubSetMode::kMultiSource));
-    ASSERT_THAT(config->subset_multisource_nsources, Eq(2));
-    ASSERT_THAT(config->subset_multisource_sourceid, Eq(12));
+    ASSERT_THAT(config->dataset_mode, Eq(DatasetMode::kMultiSource));
+    ASSERT_THAT(config->dataset_multisource_nsources, Eq(2));
+    ASSERT_THAT(config->dataset_multisource_sourceid, Eq(12));
 
 }
 
@@ -135,16 +135,16 @@ TEST_F(ConfigTests, ReadSettingsChecksNthreads) {
 
 }
 
-TEST_F(ConfigTests, ReadSettingsChecksSubsets) {
+TEST_F(ConfigTests, ReadSettingsChecksDatasets) {
     asapo::EventMonConfig test_config;
-    test_config.subset_mode = SubSetMode::kBatch;
-    test_config.subset_batch_size = 0;
+    test_config.dataset_mode = DatasetMode::kBatch;
+    test_config.dataset_batch_size = 0;
 
     auto err = asapo::SetFolderMonConfig(test_config);
     ASSERT_THAT(err, Ne(nullptr));
 
-    test_config.subset_mode = SubSetMode::kMultiSource;
-    test_config.subset_multisource_nsources = 0;
+    test_config.dataset_mode = DatasetMode::kMultiSource;
+    test_config.dataset_multisource_nsources = 0;
 
     err = asapo::SetFolderMonConfig(test_config);
     ASSERT_THAT(err, Ne(nullptr));
@@ -152,9 +152,9 @@ TEST_F(ConfigTests, ReadSettingsChecksSubsets) {
 
 }
 
-TEST_F(ConfigTests, ReadSettingsDoesnotChecksSubsetsIfNoSubsets) {
+TEST_F(ConfigTests, ReadSettingsDoesnotChecksDatasetsIfNoDatasets) {
     asapo::EventMonConfig test_config;
-    test_config.subset_batch_size = 0;
+    test_config.dataset_batch_size = 0;
 
     auto err = asapo::SetFolderMonConfig(test_config);
     ASSERT_THAT(err, Eq(nullptr));
diff --git a/producer/event_monitor_producer/unittests/test_folder_event_detector.cpp b/producer/event_monitor_producer/unittests/test_folder_event_detector.cpp
index 7221bbf1322ea6faccd7181a7257cc11cf21fc79..1ce260a12174a0944ba5bae9fa6e535a5854684f 100644
--- a/producer/event_monitor_producer/unittests/test_folder_event_detector.cpp
+++ b/producer/event_monitor_producer/unittests/test_folder_event_detector.cpp
@@ -65,7 +65,7 @@ class FolderEventDetectorTests : public testing::Test {
     }
     void MockStartMonitoring();
     void MockGetEvents();
-    asapo::EventHeader InitiateAndReadSingleEvent();
+    asapo::MessageHeader InitiateAndReadSingleEvent();
 };
 
 void FolderEventDetectorTests::MockStartMonitoring() {
@@ -120,13 +120,13 @@ TEST_F(FolderEventDetectorTests, GetNextReturnsErrorIfMonitoringNotStarted) {
 
 TEST_F(FolderEventDetectorTests, GetNextCallsSystemGetNextFirstTimeNoEvents) {
     MockStartMonitoring();
-    asapo::EventHeader event_header;
+    asapo::MessageHeader message_header;
     EXPECT_CALL(mock_system_folder_watch, GetFileEventList_t(_));
 
 
     detector.StartMonitoring();
 
-    auto err = detector.GetNextEvent(&event_header);
+    auto err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(asapo::EventMonitorErrorTemplates::kNoNewEvent));
 }
 
@@ -140,8 +140,8 @@ TEST_F(FolderEventDetectorTests, GetNextEventError) {
 
     detector.StartMonitoring();
 
-    asapo::EventHeader event_header;
-    auto err = detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    auto err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(asapo::EventMonitorErrorTemplates::kSystemError));
 }
 
@@ -153,14 +153,14 @@ void FolderEventDetectorTests::MockGetEvents() {
         ));
 }
 
-asapo::EventHeader FolderEventDetectorTests::InitiateAndReadSingleEvent() {
+asapo::MessageHeader FolderEventDetectorTests::InitiateAndReadSingleEvent() {
     MockStartMonitoring();
     MockGetEvents();
     detector.StartMonitoring();
-    asapo::EventHeader event_header;
-    detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    detector.GetNextEvent(&message_header);
     Mock::VerifyAndClearExpectations(&mock_system_folder_watch);
-    return event_header;
+    return message_header;
 };
 
 
@@ -170,11 +170,11 @@ TEST_F(FolderEventDetectorTests, GetNextEventOK) {
 
     detector.StartMonitoring();
 
-    asapo::EventHeader event_header;
-    auto err = detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    auto err = detector.GetNextEvent(&message_header);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(event_header.file_name, Eq("test1.dat"));
+    ASSERT_THAT(message_header.file_name, Eq("test1.dat"));
 }
 
 
@@ -185,11 +185,11 @@ TEST_F(FolderEventDetectorTests, GetNextEventDoesDoSystemCallIfListNotEmpty) {
     EXPECT_CALL(mock_system_folder_watch, GetFileEventList_t(_)).Times(0);
 
 
-    asapo::EventHeader event_header;
-    auto err = detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    auto err = detector.GetNextEvent(&message_header);
 
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(event_header.file_name, Eq("test2.dat"));
+    ASSERT_THAT(message_header.file_name, Eq("test2.dat"));
 }
 
 
@@ -198,36 +198,36 @@ TEST_F(FolderEventDetectorTests, GetNextEventDoesSystemCallIfListEmpty) {
     EXPECT_CALL(mock_system_folder_watch, GetFileEventList_t(_)).Times(1);
 
 // read events 2 to 4
-    asapo::EventHeader event_header;
-    err = detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(nullptr));
-    err = detector.GetNextEvent(&event_header);
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(nullptr));
-    err = detector.GetNextEvent(&event_header);
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(nullptr));
 // read events - should initiate system call since the buffer is empty now
-    err = detector.GetNextEvent(&event_header);
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(asapo::EventMonitorErrorTemplates::kNoNewEvent));
 }
 
 TEST_F(FolderEventDetectorTests, GetNextIgnoresTmpFiles) {
     test_config.ignored_extensions = {"tmp"};
     InitiateAndReadSingleEvent();
-    asapo::EventHeader event_header;
-    err = detector.GetNextEvent(&event_header);
+    asapo::MessageHeader message_header;
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(event_header.file_name, Eq("test2.dat"));
+    ASSERT_THAT(message_header.file_name, Eq("test2.dat"));
 
// try to read event 3 - test3.tmp should be ignored
-    err = detector.GetNextEvent(&event_header);
+    err = detector.GetNextEvent(&message_header);
     ASSERT_THAT(err, Eq(asapo::EventMonitorErrorTemplates::kNoNewEvent));
 }
 
 TEST_F(FolderEventDetectorTests, GetNextRespectsWhiteList) {
     test_config.whitelisted_extensions = {"tmp"};
-    auto event_header = InitiateAndReadSingleEvent();
+    auto message_header = InitiateAndReadSingleEvent();
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(event_header.file_name, Eq("test3.tmp"));
+    ASSERT_THAT(message_header.file_name, Eq("test3.tmp"));
 }
 
 }
diff --git a/producer/event_monitor_producer/unittests/test_single_folder_watch_windows.cpp b/producer/event_monitor_producer/unittests/test_single_folder_watch_windows.cpp
index 2e83f0f8f4e28bbd826ec41d6f887877ca6e94b6..c6aa2473071b9ad66a38f429b7bc4923bd836005 100644
--- a/producer/event_monitor_producer/unittests/test_single_folder_watch_windows.cpp
+++ b/producer/event_monitor_producer/unittests/test_single_folder_watch_windows.cpp
@@ -31,8 +31,8 @@ using ::asapo::Error;
 using ::asapo::ErrorInterface;
 using asapo::FilesToSend;
 using asapo::SingleFolderWatch;
-using asapo::FileInfos;
-using asapo::FileInfo;
+using asapo::MessageMetas;
+using asapo::MessageMeta;
 
 namespace {
 
diff --git a/producer/event_monitor_producer/unittests/test_system_folder_watch_linux.cpp b/producer/event_monitor_producer/unittests/test_system_folder_watch_linux.cpp
index 7e1d3fd8eb688fcb8257c28db34f85b852909667..d2b1f50e4eedcc644756b961f1a69720707e45fb 100644
--- a/producer/event_monitor_producer/unittests/test_system_folder_watch_linux.cpp
+++ b/producer/event_monitor_producer/unittests/test_system_folder_watch_linux.cpp
@@ -27,8 +27,8 @@ using ::asapo::Error;
 using ::asapo::ErrorInterface;
 using asapo::FilesToSend;
 using asapo::SystemFolderWatch;
-using asapo::FileInfos;
-using asapo::FileInfo;
+using asapo::MessageMetas;
+using asapo::MessageMeta;
 
 namespace {
 
@@ -39,15 +39,15 @@ TEST(SystemFolderWatch, Constructor) {
     ASSERT_THAT(dynamic_cast<asapo::Inotify*>(watch.inotify__.get()), Ne(nullptr));
 }
 
-FileInfos CreateTestFileInfos() {
-    FileInfos file_infos;
-    FileInfo fi;
+MessageMetas CreateTestMessageMetas() {
+    MessageMetas message_metas;
+    MessageMeta fi;
     fi.size = 100;
     fi.name = "file1";
-    file_infos.push_back(fi);
+    message_metas.push_back(fi);
     fi.name = "subfolder/file2";
-    file_infos.push_back(fi);
-    return file_infos;
+    message_metas.push_back(fi);
+    return message_metas;
 }
 
 
@@ -64,7 +64,7 @@ class SystemFolderWatchTests : public testing::Test {
     std::vector<std::string> expected_watches{"/tmp/test1", "/tmp/test2", "/tmp/test1/sub11", "/tmp/test2/sub21", "/tmp/test2/sub22", "/tmp/test2/sub21/sub211"};
     std::string expected_filename1{"file1"};
     std::string expected_filename2{"file2"};
-    FileInfos expected_fileinfos = CreateTestFileInfos();
+    MessageMetas expected_message_metas = CreateTestMessageMetas();
     int expected_wd = 10;
     std::vector<int>expected_fds = {1, 2, 3, 4, 5, 6};
     void MockStartMonitoring();
@@ -287,11 +287,11 @@ void SystemFolderWatchTests::ExpectCreateFolder(std::string folder, bool with_fi
     if (with_files) {
         ON_CALL(mock_io, FilesInFolder_t(newfolder, _)).
         WillByDefault(DoAll(testing::SetArgPointee<1>(nullptr),
-                            testing::Return(expected_fileinfos)));
+                            testing::Return(expected_message_metas)));
     } else {
         ON_CALL(mock_io, FilesInFolder_t(newfolder, _)).
         WillByDefault(DoAll(testing::SetArgPointee<1>(nullptr),
-                            testing::Return(FileInfos{})));
+                            testing::Return(MessageMetas{})));
     }
 }
 
diff --git a/producer/event_monitor_producer/unittests/test_system_folder_watch_windows.cpp b/producer/event_monitor_producer/unittests/test_system_folder_watch_windows.cpp
index 3329022a2dcd720a06ef8f4d0d05717f5e023d2d..24faffcc9c07c77bee6f8e8aec77aedd8b742865 100644
--- a/producer/event_monitor_producer/unittests/test_system_folder_watch_windows.cpp
+++ b/producer/event_monitor_producer/unittests/test_system_folder_watch_windows.cpp
@@ -26,8 +26,8 @@ using ::asapo::Error;
 using ::asapo::ErrorInterface;
 using asapo::FilesToSend;
 using asapo::SystemFolderWatch;
-using asapo::FileInfos;
-using asapo::FileInfo;
+using asapo::MessageMetas;
+using asapo::MessageMeta;
 
 namespace {
 
@@ -37,15 +37,15 @@ TEST(SystemFolderWatch, Constructor) {
     ASSERT_THAT(dynamic_cast<asapo::IO*>(watch.io__.get()), Ne(nullptr));
 }
 
-FileInfos CreateTestFileInfos() {
-    FileInfos file_infos;
-    FileInfo fi;
+MessageMetas CreateTestMessageMetas() {
+    MessageMetas message_metas;
+    MessageMeta fi;
     fi.size = 100;
     fi.name = "file1";
-    file_infos.push_back(fi);
+    message_metas.push_back(fi);
     fi.name = "subfolder\\file2";
-    file_infos.push_back(fi);
-    return file_infos;
+    message_metas.push_back(fi);
+    return message_metas;
 }
 
 
diff --git a/receiver/src/request.cpp b/receiver/src/request.cpp
index c07c249fb79d4141c504988614860d0e64d0fc1b..5bf9c1ab2cbdd38f9c0dd8339e0a86912bc163b6 100644
--- a/receiver/src/request.cpp
+++ b/receiver/src/request.cpp
@@ -85,8 +85,8 @@ std::string Request::GetFileName() const {
     return orig_name;
 }
 
-std::string Request::GetSubstream() const {
-    return request_header_.substream;
+std::string Request::GetStream() const {
+    return request_header_.stream;
 }
 
 const std::string& Request::GetOriginUri() const {
@@ -129,11 +129,11 @@ const std::string& Request::GetMetaData() const {
 const CustomRequestData& Request::GetCustomData() const {
     return request_header_.custom_data;
 }
-const std::string& Request::GetStream() const {
-    return stream_;
+const std::string& Request::GetDataSource() const {
+    return data_source_;
 }
-void Request::SetStream(std::string stream) {
-    stream_ = std::move(stream);
+void Request::SetDataSource(std::string data_source) {
+    data_source_ = std::move(data_source);
 }
 
 void Request::UnlockDataBufferIfNeeded() {
diff --git a/receiver/src/request.h b/receiver/src/request.h
index 2bd6d5796a9f4eb74a1b83618751d7c511d07ebb..f24ad10fcc1927e69e80a2edf91a64efbc73cc26 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -43,7 +43,7 @@ class Request {
     VIRTUAL uint64_t GetMetaDataSize() const;
     VIRTUAL uint64_t GetDataID() const;
     VIRTUAL std::string GetFileName() const;
-    VIRTUAL std::string GetSubstream() const;
+    VIRTUAL std::string GetStream() const;
     VIRTUAL void* GetData() const;
     VIRTUAL Opcode GetOpCode() const;
     VIRTUAL const char* GetMessage() const;
@@ -57,8 +57,8 @@ class Request {
     VIRTUAL void SetSourceType(SourceType);
     VIRTUAL SourceType GetSourceType() const;
 
-    VIRTUAL const std::string& GetStream() const;
-    VIRTUAL void SetStream(std::string stream);
+    VIRTUAL const std::string& GetDataSource() const;
+    VIRTUAL void SetDataSource(std::string data_source);
     VIRTUAL void SetMetadata(std::string metadata);
 
     VIRTUAL void SetOnlinePath(std::string facility);
@@ -83,12 +83,12 @@ class Request {
   private:
     const GenericRequestHeader request_header_;
     const SocketDescriptor socket_fd_;
-    FileData data_buffer_;
+    MessageData data_buffer_;
     void* data_ptr;
     RequestHandlerList handlers_;
     std::string origin_uri_;
     std::string beamtime_id_;
-    std::string stream_;
+    std::string data_source_;
     std::string beamline_;
     std::string offline_path_;
     std::string online_path_;
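
Since Request now exposes both notions under easily confused names, a short disambiguation of the accessors changed above:

    // GetStream()     - stream name taken from the request header (pre-rename: "substream");
    //                   used below to pick the collection within a database
    // GetDataSource() - data source set during authorization (pre-rename: "stream");
    //                   becomes part of the database name (see request_handler_db.cpp)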
diff --git a/receiver/src/request_handler/request_factory.cpp b/receiver/src/request_handler/request_factory.cpp
index c3372ca1305a5bacab4068cac24b5899b69e87b0..fdacdd94a3c4eed52152fddc0489786f85b76818 100644
--- a/receiver/src/request_handler/request_factory.cpp
+++ b/receiver/src/request_handler/request_factory.cpp
@@ -51,7 +51,7 @@ Error RequestFactory::AddHandlersToRequest(std::unique_ptr<Request> &request,
 
     switch (request_header.op_code) {
         case Opcode::kOpcodeTransferData:
-        case Opcode::kOpcodeTransferSubsetData: {
+        case Opcode::kOpcodeTransferDatasetData: {
             request->AddHandler(&request_handler_receive_metadata_);
             auto err = AddReceiveWriteHandlers(request, request_header);
             if (err) {
diff --git a/receiver/src/request_handler/request_handler_authorize.cpp b/receiver/src/request_handler/request_handler_authorize.cpp
index fa321e304b037acebed1c5fd351d920bc2738711..f13fc1ae28a41cedcd44ba348544e4c1e6c9d4fa 100644
--- a/receiver/src/request_handler/request_handler_authorize.cpp
+++ b/receiver/src/request_handler/request_handler_authorize.cpp
@@ -45,7 +45,7 @@ Error RequestHandlerAuthorize::Authorize(Request* request, const char* source_cr
 
     JsonStringParser parser{response};
     (err = parser.GetString("beamtimeId", &beamtime_id_)) ||
-    (err = parser.GetString("stream", &stream_)) ||
+    (err = parser.GetString("dataSource", &data_source_)) ||
     (err = parser.GetString("core-path", &offline_path_)) ||
     (err = parser.GetString("beamline-path", &online_path_)) ||
     (err = parser.GetString("source-type", &stype)) ||
@@ -55,7 +55,7 @@ Error RequestHandlerAuthorize::Authorize(Request* request, const char* source_cr
         return ErrorFromAuthorizationServerResponse(err, code);
     } else {
         log__->Debug(std::string("authorized connection from ") + request->GetOriginUri() +"source type: "+stype+ " beamline: " +
-                     beamline_ + ", beamtime id: " + beamtime_id_ + ", stream: " + stream_);
+                     beamline_ + ", beamtime id: " + beamtime_id_ + ", data source: " + data_source_);
     }
 
     last_updated_ = system_clock::now();
@@ -106,7 +106,7 @@ Error RequestHandlerAuthorize::ProcessOtherRequest(Request* request) const {
     }
     request->SetBeamtimeId(beamtime_id_);
     request->SetBeamline(beamline_);
-    request->SetStream(stream_);
+    request->SetDataSource(data_source_);
     request->SetOfflinePath(offline_path_);
     request->SetOnlinePath(online_path_);
     request->SetSourceType(source_type_);
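
A response sketch that the parsing above accepts. Field values are illustrative, and the "beamline" key is an assumption inferred from the log output, since the hunk cuts off before the corresponding parser call:

    {
      "beamtimeId": "my_beamtime",
      "dataSource": "detector1",
      "core-path": "/gpfs/offline/my_beamtime",
      "beamline-path": "/beamline/p01/current",
      "source-type": "processed",
      "beamline": "p01"
    }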
diff --git a/receiver/src/request_handler/request_handler_authorize.h b/receiver/src/request_handler/request_handler_authorize.h
index 481927cf0a904c63c10cba11d439725108334786..1798ea8fb31de1e8d0e99459bcf447b2763085ea 100644
--- a/receiver/src/request_handler/request_handler_authorize.h
+++ b/receiver/src/request_handler/request_handler_authorize.h
@@ -21,7 +21,7 @@ class RequestHandlerAuthorize final: public ReceiverRequestHandler {
     std::unique_ptr<HttpClient>http_client__;
   private:
     mutable std::string beamtime_id_;
-    mutable std::string stream_;
+    mutable std::string data_source_;
     mutable std::string beamline_;
     mutable std::string offline_path_;
     mutable std::string online_path_;
diff --git a/receiver/src/request_handler/request_handler_db.cpp b/receiver/src/request_handler/request_handler_db.cpp
index c26ea6e286145dba691d7b453032a2f4eb588e67..821f0d770a551e27e2a1c0737e4902ce46e5519e 100644
--- a/receiver/src/request_handler/request_handler_db.cpp
+++ b/receiver/src/request_handler/request_handler_db.cpp
@@ -8,8 +8,8 @@ namespace asapo {
 Error RequestHandlerDb::ProcessRequest(Request* request) const {
     if (db_name_.empty()) {
         db_name_ = request->GetBeamtimeId();
-        auto stream = request->GetStream();
-        db_name_ += "_" + stream;
+        auto data_source = request->GetDataSource();
+        db_name_ += "_" + data_source;
     }
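
The practical effect of the rename on database naming, as a one-line example (names illustrative):

    // beamtime "my_beamtime" + data source "detector1" -> database "my_beamtime_detector1"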
 
 
diff --git a/receiver/src/request_handler/request_handler_db_check_request.cpp b/receiver/src/request_handler/request_handler_db_check_request.cpp
index 94911d3b028047db55230ca59036227a4ba75316..b43347fee946e7bc5a48cbb31296afcb65487271 100644
--- a/receiver/src/request_handler/request_handler_db_check_request.cpp
+++ b/receiver/src/request_handler/request_handler_db_check_request.cpp
@@ -16,10 +16,10 @@ RequestHandlerDbCheckRequest::RequestHandlerDbCheckRequest(std::string collectio
 
 }
 
-Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, FileInfo* record ) const {
+Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, MessageMeta* record ) const {
     auto op_code = request->GetOpCode();
     auto id = request->GetDataID();
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     Error err;
     if (op_code == Opcode::kOpcodeTransferData) {
         err =  db_client__->GetById(col_name, id, record);
@@ -32,7 +32,7 @@ Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, File
         auto id_in_set = request->GetCustomData()[1];
         err = db_client__->GetDataSetById(col_name, id_in_set, id, record);
         if (!err) {
-            log__->Debug(std::string{"get subset record id "} + std::to_string(id) + " from " + col_name + " in " +
+            log__->Debug(std::string{"get dataset record id "} + std::to_string(id) + " from " + col_name + " in " +
                          db_name_ + " at " + GetReceiverConfig()->database_uri);
         }
         return err;
@@ -40,7 +40,7 @@ Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, File
 }
 
 
-bool RequestHandlerDbCheckRequest::SameRequestInRecord(const Request* request, const FileInfo& record) const {
+bool RequestHandlerDbCheckRequest::SameRequestInRecord(const Request* request, const MessageMeta& record) const {
     std::string meta = request->GetMetaData();
     if (meta.size() == 0) { // so it is stored in database
         meta = "{}";
@@ -55,7 +55,7 @@ Error RequestHandlerDbCheckRequest::ProcessRequest(Request* request) const {
         return err;
     }
 
-    FileInfo record;
+    MessageMeta record;
     auto  err = GetRecordFromDb(request, &record);
     if (err) {
         return err == DBErrorTemplates::kNoRecord ? nullptr : std::move(err);
diff --git a/receiver/src/request_handler/request_handler_db_check_request.h b/receiver/src/request_handler/request_handler_db_check_request.h
index 9b50673acc3b142f7b25964444869d04b648c4fe..d28dc7dffc3f98b749ca6448a20a54862e7bce62 100644
--- a/receiver/src/request_handler/request_handler_db_check_request.h
+++ b/receiver/src/request_handler/request_handler_db_check_request.h
@@ -14,8 +14,8 @@ class RequestHandlerDbCheckRequest FINAL : public RequestHandlerDb {
     RequestHandlerDbCheckRequest(std::string collection_name_prefix);
     Error ProcessRequest(Request* request) const override;
   private:
-    Error GetRecordFromDb(const Request* request, FileInfo* record) const;
-    bool SameRequestInRecord(const Request* request, const FileInfo& record) const;
+    Error GetRecordFromDb(const Request* request, MessageMeta* record) const;
+    bool SameRequestInRecord(const Request* request, const MessageMeta& record) const;
 
 };
 
diff --git a/receiver/src/request_handler/request_handler_db_stream_info.cpp b/receiver/src/request_handler/request_handler_db_stream_info.cpp
index 82689f7d65870d7dbba2099f31e1aaed2774204b..20221ba8c3babb5466046cde16baee96f145bfe0 100644
--- a/receiver/src/request_handler/request_handler_db_stream_info.cpp
+++ b/receiver/src/request_handler/request_handler_db_stream_info.cpp
@@ -14,13 +14,13 @@ Error RequestHandlerDbStreamInfo::ProcessRequest(Request* request) const {
         return err;
     }
 
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     StreamInfo info;
     auto err =  db_client__->GetStreamInfo(col_name, &info);
     if (!err) {
         log__->Debug(std::string{"get stream info from "} + col_name + " in " +
                      db_name_ + " at " + GetReceiverConfig()->database_uri);
-        info.name = request->GetSubstream();
+        info.name = request->GetStream();
         request->SetResponseMessage(info.Json(true), ResponseMessageType::kInfo);
     }
     return err;
diff --git a/receiver/src/request_handler/request_handler_db_write.cpp b/receiver/src/request_handler/request_handler_db_write.cpp
index 94241848659a0dd6dfa63f85b9456a2aa6a28055..d0113286aab69657d27e4ed7c69fdd272b411855 100644
--- a/receiver/src/request_handler/request_handler_db_write.cpp
+++ b/receiver/src/request_handler/request_handler_db_write.cpp
@@ -52,26 +52,25 @@ Error RequestHandlerDbWrite::ProcessDuplicateRecordSituation(Request* request) c
 
 
 Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
-    auto file_info = PrepareFileInfo(request);
+    auto message_meta = PrepareMessageMeta(request);
 
     auto op_code = request->GetOpCode();
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     Error err;
     if (op_code == Opcode::kOpcodeTransferData) {
-        err =  db_client__->Insert(col_name, file_info, false);
+        err =  db_client__->Insert(col_name, message_meta, false);
         if (!err) {
-            log__->Debug(std::string{"insert record id "} + std::to_string(file_info.id) + " to " + col_name + " in " +
+            log__->Debug(std::string{"insert record id "} + std::to_string(message_meta.id) + " to " + col_name + " in " +
                          db_name_ +
                          " at " + GetReceiverConfig()->database_uri);
         }
     } else {
-        auto subset_id = file_info.id;
-        file_info.id = request->GetCustomData()[1];
-        auto subset_size = request->GetCustomData()[2];
-        err =  db_client__->InsertAsSubset(col_name, file_info, subset_id, subset_size, false);
+        message_meta.dataset_substream = request->GetCustomData()[1];
+        auto dataset_size = request->GetCustomData()[2];
+        err =  db_client__->InsertAsDatasetMessage(col_name, message_meta, dataset_size, false);
         if (!err) {
-            log__->Debug(std::string{"insert record as subset id "} + std::to_string(file_info.id) + ", id in subset: " +
-                         std::to_string(subset_id) + " to " + col_name + " in " +
+            log__->Debug(std::string{"insert record to substream "} + std::to_string(message_meta.dataset_substream) + ", id: " +
+                         std::to_string(message_meta.id) + " to " + col_name + " in " +
                          db_name_ +
                          " at " + GetReceiverConfig()->database_uri);
         }
@@ -79,16 +78,16 @@ Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
     return err;
 }
 
-FileInfo RequestHandlerDbWrite::PrepareFileInfo(const Request* request) const {
-    FileInfo file_info;
-    file_info.name = request->GetFileName();
-    file_info.size = request->GetDataSize();
-    file_info.id = request->GetDataID();
-    file_info.buf_id = request->GetSlotId();
-    file_info.source = GetReceiverConfig()->dataserver.advertise_uri;
-    file_info.metadata = request->GetMetaData();
-    file_info.timestamp = std::chrono::system_clock::now();
-    return file_info;
+MessageMeta RequestHandlerDbWrite::PrepareMessageMeta(const Request* request) const {
+    MessageMeta message_meta;
+    message_meta.name = request->GetFileName();
+    message_meta.size = request->GetDataSize();
+    message_meta.id = request->GetDataID();
+    message_meta.buf_id = request->GetSlotId();
+    message_meta.source = GetReceiverConfig()->dataserver.advertise_uri;
+    message_meta.metadata = request->GetMetaData();
+    message_meta.timestamp = std::chrono::system_clock::now();
+    return message_meta;
 }
 
 RequestHandlerDbWrite::RequestHandlerDbWrite(std::string collection_name_prefix) : RequestHandlerDb(std::move(
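
For dataset transfers the handlers above agree on the custom-data layout; index 0 is unused here, and the same layout appears as expected_custom_data in the tests further down:

    // GenericRequestHeader::custom_data for kOpcodeTransferDatasetData:
    //   custom_data[1] -> dataset_substream (e.g. module number of a multi-module detector)
    //   custom_data[2] -> dataset size (number of messages forming one dataset)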
diff --git a/receiver/src/request_handler/request_handler_db_write.h b/receiver/src/request_handler/request_handler_db_write.h
index 089f1b59c2c2aa51ac6a073f48c54f0d26055afa..bb3e78a99599d14ba3c3319b6614e63520d50939 100644
--- a/receiver/src/request_handler/request_handler_db_write.h
+++ b/receiver/src/request_handler/request_handler_db_write.h
@@ -14,7 +14,7 @@ class RequestHandlerDbWrite final: public RequestHandlerDb {
     Error ProcessRequest(Request* request) const override;
     RequestHandlerDbWrite(std::string collection_name_prefix);
   private:
-    FileInfo PrepareFileInfo(const Request* request) const;
+    MessageMeta PrepareMessageMeta(const Request* request) const;
     Error InsertRecordToDb(const Request* request) const;
     Error ProcessDuplicateRecordSituation(Request* request) const;
 
diff --git a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
index 25f31dfb592b5ee411f8f667072de842fa574845..1106da959b420f2614429f23f2e925c235d0e28d 100644
--- a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
+++ b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
@@ -279,7 +279,7 @@ TEST_F(RdsTCPServerTests, SendResponseAndSlotData_SendResponseError) {
     ASSERT_THAT(err, Ne(nullptr));
 }
 
-TEST_F(RdsTCPServerTests, SendResponseAndSlotData_SendDataError) {
+TEST_F(RdsTCPServerTests, SendResponseAndSlotData_SendError) {
     asapo::GenericNetworkResponse tmp {};
 
     asapo::ReceiverDataServerRequest expectedRequest {{}, 30};
diff --git a/receiver/unittests/receiver_mocking.h b/receiver/unittests/receiver_mocking.h
index 7b72249a58fc9bfa4250b6f1e9f0d6b4ea6748c0..200aeed190a32fbf9b8b958b87723928e11be734 100644
--- a/receiver/unittests/receiver_mocking.h
+++ b/receiver/unittests/receiver_mocking.h
@@ -65,13 +65,13 @@ class MockRequest: public Request {
         Request(request_header, socket_fd, std::move(origin_uri), nullptr, db_check_handler) {};
 
     MOCK_CONST_METHOD0(GetFileName, std::string());
-    MOCK_CONST_METHOD0(GetSubstream, std::string());
+    MOCK_CONST_METHOD0(GetStream, std::string());
     MOCK_CONST_METHOD0(GetDataSize, uint64_t());
     MOCK_CONST_METHOD0(GetDataID, uint64_t());
     MOCK_CONST_METHOD0(GetSlotId, uint64_t());
     MOCK_CONST_METHOD0(GetData, void* ());
     MOCK_CONST_METHOD0(GetBeamtimeId, const std::string & ());
-    MOCK_CONST_METHOD0(GetStream, const std::string & ());
+    MOCK_CONST_METHOD0(GetDataSource, const std::string & ());
     MOCK_CONST_METHOD0(GetMetaData, const std::string & ());
     MOCK_CONST_METHOD0(GetBeamline, const std::string & ());
     MOCK_CONST_METHOD0(GetOpCode, asapo::Opcode ());
@@ -87,7 +87,7 @@ class MockRequest: public Request {
     MOCK_CONST_METHOD0(GetCustomData_t, const uint64_t* ());
     MOCK_CONST_METHOD0(GetMessage, const char* ());
     MOCK_METHOD1(SetBeamtimeId, void (std::string));
-    MOCK_METHOD1(SetStream, void (std::string));
+    MOCK_METHOD1(SetDataSource, void (std::string));
     MOCK_METHOD1(SetBeamline, void (std::string));
     MOCK_METHOD1(SetOnlinePath, void (std::string));
     MOCK_METHOD1(SetOfflinePath, void (std::string));
diff --git a/receiver/unittests/request_handler/test_request_factory.cpp b/receiver/unittests/request_handler/test_request_factory.cpp
index 77742d925b9be1f2c9a16c37b502e4dd43283267..5f224d3d01135bacd0906518eccd10ba51c7aa72 100644
--- a/receiver/unittests/request_handler/test_request_factory.cpp
+++ b/receiver/unittests/request_handler/test_request_factory.cpp
@@ -79,8 +79,8 @@ TEST_F(FactoryTests, ErrorOnWrongCode) {
     ASSERT_THAT(err, Ne(nullptr));
 }
 
-TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCode) {
-    for (auto code : std::vector<asapo::Opcode> {asapo::Opcode::kOpcodeTransferData, asapo::Opcode::kOpcodeTransferSubsetData}) {
+TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendCode) {
+    for (auto code : std::vector<asapo::Opcode> {asapo::Opcode::kOpcodeTransferData, asapo::Opcode::kOpcodeTransferDatasetData}) {
         generic_request_header.op_code = code;
         auto request = factory.GenerateRequest(generic_request_header, 1, origin_uri, &err);
 
@@ -95,8 +95,8 @@ TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCode) {
     }
 }
 
-TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendDataCodeLargeFile) {
-    for (auto code : std::vector<asapo::Opcode> {asapo::Opcode::kOpcodeTransferData, asapo::Opcode::kOpcodeTransferSubsetData}) {
+TEST_F(FactoryTests, ReturnsDataRequestOnkNetOpcodeSendCodeLargeFile) {
+    for (auto code : std::vector<asapo::Opcode> {asapo::Opcode::kOpcodeTransferData, asapo::Opcode::kOpcodeTransferDatasetData}) {
         generic_request_header.op_code = code;
         config.receive_to_disk_threshold_mb = 0;
         SetReceiverConfig(config, "none");
diff --git a/receiver/unittests/request_handler/test_request_handler_authorizer.cpp b/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
index 304d0af27007b777e578ccb3a95a50dd2b383c9e..c1e5a97410fb279f029ee9a3447c67ba4abce3a2 100644
--- a/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_authorizer.cpp
@@ -64,7 +64,7 @@ class AuthorizerHandlerTests : public Test {
 
     NiceMock<asapo::MockLogger> mock_logger;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_stream = "stream";
+    std::string expected_data_source = "source";
     std::string expected_beamline = "beamline";
     std::string expected_beamline_path = "/beamline/p01/current";
     std::string expected_core_path = "/gpfs/blabla";
@@ -77,7 +77,7 @@ class AuthorizerHandlerTests : public Test {
     void MockRequestData();
     void SetUp() override {
         GenericRequestHeader request_header;
-        expected_source_credentials = "processed%"+expected_beamtime_id + "%stream%token";
+        expected_source_credentials = "processed%"+expected_beamtime_id + "%source%token";
         expect_request_string = std::string("{\"SourceCredentials\":\"") + expected_source_credentials +
                                 "\",\"OriginHost\":\"" +
                                 expected_producer_uri + "\"}";
@@ -111,7 +111,7 @@ class AuthorizerHandlerTests : public Test {
                 DoAll(SetArgPointee<4>(nullptr),
                       SetArgPointee<3>(code),
                       Return("{\"beamtimeId\":\"" + expected_beamtime_id +
-                             "\",\"stream\":" + "\"" + expected_stream +
+                             "\",\"dataSource\":" + "\"" + expected_data_source +
                              "\",\"beamline-path\":" + "\"" + expected_beamline_path +
                              "\",\"core-path\":" + "\"" + expected_core_path +
                              "\",\"source-type\":" + "\"" + expected_source_type_str +
@@ -123,7 +123,7 @@ class AuthorizerHandlerTests : public Test {
                                                      HasSubstr(std::to_string(int(code))),
                                                      HasSubstr(expected_source_type_str),
                                                      HasSubstr(expected_beamtime_id),
-                                                     HasSubstr(expected_stream),
+                                                     HasSubstr(expected_data_source),
                                                      HasSubstr(expected_producer_uri),
                                                      HasSubstr(expected_authorization_server))));
             } else {
@@ -131,7 +131,7 @@ class AuthorizerHandlerTests : public Test {
                                                      HasSubstr(expected_beamtime_id),
                                                      HasSubstr(expected_beamline),
                                                      HasSubstr(expected_source_type_str),
-                                                     HasSubstr(expected_stream),
+                                                     HasSubstr(expected_data_source),
                                                      HasSubstr(expected_producer_uri))));
             }
         }
@@ -156,7 +156,7 @@ class AuthorizerHandlerTests : public Test {
 
         if (!error && code == HttpCode::OK && set_request) {
             EXPECT_CALL(*mock_request, SetBeamtimeId(expected_beamtime_id));
-            EXPECT_CALL(*mock_request, SetStream(expected_stream));
+            EXPECT_CALL(*mock_request, SetDataSource(expected_data_source));
             EXPECT_CALL(*mock_request, SetOfflinePath(expected_core_path));
             EXPECT_CALL(*mock_request, SetOnlinePath(expected_beamline_path));
             EXPECT_CALL(*mock_request, SetBeamline(expected_beamline));
@@ -265,7 +265,7 @@ TEST_F(AuthorizerHandlerTests, DataTransferRequestAuthorizeUsesCachedValue) {
     EXPECT_CALL(mock_http_client, Post_t(_, _, _, _, _)).Times(0);
     EXPECT_CALL(*mock_request, SetBeamtimeId(expected_beamtime_id));
     EXPECT_CALL(*mock_request, SetBeamline(expected_beamline));
-    EXPECT_CALL(*mock_request, SetStream(expected_stream));
+    EXPECT_CALL(*mock_request, SetDataSource(expected_data_source));
     EXPECT_CALL(*mock_request, SetOnlinePath(expected_beamline_path));
     EXPECT_CALL(*mock_request, SetOfflinePath(expected_core_path));
     EXPECT_CALL(*mock_request, SetSourceType(expected_source_type));
diff --git a/receiver/unittests/request_handler/test_request_handler_db.cpp b/receiver/unittests/request_handler/test_request_handler_db.cpp
index a84aa60b1e0364b0fb017b933030d1577d49dd17..8f3bf1549c27064bc53ed1fe7320b9ef45e5d2a7 100644
--- a/receiver/unittests/request_handler/test_request_handler_db.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db.cpp
@@ -19,7 +19,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -66,8 +66,8 @@ class DbHandlerTests : public Test {
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_stream = "stream";
-    std::string expected_default_stream = "detector";
+    std::string expected_stream = "source";
+    std::string expected_default_source = "detector";
     std::string expected_discovery_server = "discovery";
     std::string expected_database_server = "127.0.0.1:27017";
 
@@ -81,7 +81,7 @@ class DbHandlerTests : public Test {
         handler.http_client__ = std::unique_ptr<asapo::HttpClient> {&mock_http_client};
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, "", nullptr});
         ON_CALL(*mock_request, GetBeamtimeId()).WillByDefault(ReturnRef(expected_beamtime_id));
-        ON_CALL(*mock_request, GetStream()).WillByDefault(ReturnRef(expected_stream));
+        ON_CALL(*mock_request, GetDataSource()).WillByDefault(ReturnRef(expected_stream));
 
     }
     void TearDown() override {
@@ -151,7 +151,7 @@ TEST_F(DbHandlerTests, ProcessRequestDiscoversMongoDbAddress) {
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream())
+    EXPECT_CALL(*mock_request, GetDataSource())
     .WillOnce(ReturnRef(expected_stream))
     ;
 
@@ -188,7 +188,7 @@ TEST_F(DbHandlerTests, ProcessRequestCallsConnectDbWhenNotConnected) {
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream())
+    EXPECT_CALL(*mock_request, GetDataSource())
     .WillOnce(ReturnRef(expected_stream))
     ;
 
diff --git a/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp b/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
index 5f02a4d1dfb6ef1f181331d7fe6e3bc787d3f99e..85c2b5da91967daf92d26146e08bd6eef6030e4f 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
@@ -21,7 +21,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -65,16 +65,16 @@ TEST(DbCheckRequestHandler, Constructor) {
 
 class DbCheckRequestHandlerTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbCheckRequest handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_default_stream = "detector";
-    std::string expected_stream = "stream";
+    std::string expected_default_source = "detector";
+    std::string expected_data_source = "source";
     std::string expected_host_uri = "127.0.0.1:1234";
     uint64_t expected_port = 1234;
     uint64_t expected_buf_id = 18446744073709551615ull;
@@ -82,10 +82,10 @@ class DbCheckRequestHandlerTests : public Test {
     std::string expected_metadata = "meta";
     uint64_t expected_file_size = 10;
     uint64_t expected_id = 15;
-    uint64_t expected_subset_id = 16;
-    uint64_t expected_subset_size = 2;
-    uint64_t expected_custom_data[asapo::kNCustomParams] {0, expected_subset_id, expected_subset_size};
-    FileInfo expected_file_info;
+    uint64_t expected_dataset_id = 16;
+    uint64_t expected_dataset_size = 2;
+    uint64_t expected_custom_data[asapo::kNCustomParams] {0, expected_dataset_id, expected_dataset_size};
+    MessageMeta expected_message_meta;
     MockFunctions mock_functions;
     int n_run = 0;
     void SetUp() override {
@@ -99,7 +99,7 @@ class DbCheckRequestHandlerTests : public Test {
         config.dataserver.advertise_uri = expected_host_uri;
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
-        expected_file_info =  PrepareFileInfo();
+        expected_message_meta =  PrepareMessageMeta();
         mock_functions.push_back([this](asapo::ErrorInterface * error, bool expect_compare) {
             MockGetByID(error, expect_compare);
             n_run++;
@@ -111,9 +111,9 @@ class DbCheckRequestHandlerTests : public Test {
 
         ON_CALL(*mock_request, GetBeamtimeId()).WillByDefault(ReturnRef(expected_beamtime_id));
     }
-    void ExpectRequestParams(asapo::Opcode op_code, const std::string& stream, bool expect_compare = true);
+    void ExpectRequestParams(asapo::Opcode op_code, const std::string& data_source, bool expect_compare = true);
 
-    FileInfo PrepareFileInfo();
+    MessageMeta PrepareMessageMeta();
     void MockGetByID(asapo::ErrorInterface* error, bool expect_compare);
     void MockGetSetByID(asapo::ErrorInterface* error, bool expect_compare);
     void TearDown() override {
@@ -123,7 +123,7 @@ class DbCheckRequestHandlerTests : public Test {
 
 };
 
-MATCHER_P(CompareFileInfo, file, "") {
+MATCHER_P(CompareMessageMeta, file, "") {
     if (arg.size != file.size) return false;
     if (arg.source != file.source) return false;
     if (arg.buf_id != file.buf_id) return false;
@@ -135,11 +135,11 @@ MATCHER_P(CompareFileInfo, file, "") {
 }
 
 
-void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std::string& stream,
+void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std::string& data_source,
         bool expect_compare) {
 
     std::string db_name = expected_beamtime_id;
-    db_name += "_" + stream;
+    db_name += "_" + data_source;
 
     if (n_run  == 0) {
         EXPECT_CALL(mock_db, Connect_t(config.database_uri, db_name)).
@@ -148,8 +148,8 @@ void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, cons
         .WillOnce(ReturnRef(expected_beamtime_id))
         ;
 
-        EXPECT_CALL(*mock_request, GetStream())
-        .WillOnce(ReturnRef(stream))
+        EXPECT_CALL(*mock_request, GetDataSource())
+        .WillOnce(ReturnRef(data_source))
         ;
     }
 
@@ -167,8 +167,8 @@ void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, cons
         ;
     }
 
-    EXPECT_CALL(*mock_request, GetSubstream())
-    .WillOnce(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream())
+    .WillOnce(Return(expected_stream))
     ;
 
     EXPECT_CALL(*mock_request, GetDataID())
@@ -179,45 +179,45 @@ void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, cons
     .WillOnce(Return(op_code))
     ;
 
-    if (op_code == asapo::Opcode::kOpcodeTransferSubsetData) {
+    if (op_code == asapo::Opcode::kOpcodeTransferDatasetData) {
         EXPECT_CALL(*mock_request, GetCustomData_t())
         .WillOnce(Return(expected_custom_data))
         ;
     }
 }
 
-FileInfo DbCheckRequestHandlerTests::PrepareFileInfo() {
-    FileInfo file_info;
-    file_info.size = expected_file_size;
-    file_info.name = expected_file_name;
-    file_info.id = expected_id;
-    file_info.buf_id = expected_buf_id;
-    file_info.source = expected_host_uri;
-    file_info.metadata = expected_metadata;
-    return file_info;
+MessageMeta DbCheckRequestHandlerTests::PrepareMessageMeta() {
+    MessageMeta message_meta;
+    message_meta.size = expected_file_size;
+    message_meta.name = expected_file_name;
+    message_meta.id = expected_id;
+    message_meta.buf_id = expected_buf_id;
+    message_meta.source = expected_host_uri;
+    message_meta.metadata = expected_metadata;
+    return message_meta;
 }
 
 void DbCheckRequestHandlerTests::MockGetByID(asapo::ErrorInterface* error, bool expect_compare ) {
-    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_stream, expect_compare);
+    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_data_source, expect_compare);
     EXPECT_CALL(mock_db, GetById_t(expected_collection_name, expected_id, _)).
     WillOnce(DoAll(
-                 SetArgPointee<2>(expected_file_info),
+                 SetArgPointee<2>(expected_message_meta),
                  testing::Return(error)
              ));
 }
 
 void DbCheckRequestHandlerTests::MockGetSetByID(asapo::ErrorInterface* error, bool expect_compare ) {
-    ExpectRequestParams(asapo::Opcode::kOpcodeTransferSubsetData, expected_stream, expect_compare);
-    EXPECT_CALL(mock_db, GetSetById_t(expected_collection_name, expected_subset_id, expected_id, _)).
+    ExpectRequestParams(asapo::Opcode::kOpcodeTransferDatasetData, expected_data_source, expect_compare);
+    EXPECT_CALL(mock_db, GetSetById_t(expected_collection_name, expected_dataset_id, expected_id, _)).
     WillOnce(DoAll(
-                 SetArgPointee<3>(expected_file_info),
+                 SetArgPointee<3>(expected_message_meta),
                  testing::Return(error)
              ));
 }
 
 
 TEST_F(DbCheckRequestHandlerTests, ErrorIfRecordsDoNotMatch) {
-    expected_file_info.metadata = expected_metadata + "_";
+    expected_message_meta.metadata = expected_metadata + "_";
 
     for (auto mock : mock_functions) {
         mock(nullptr, true);
@@ -238,7 +238,7 @@ TEST_F(DbCheckRequestHandlerTests, DuplicateErrorIfRecordsMatch) {
 }
 
 TEST_F(DbCheckRequestHandlerTests, DuplicateErrorIfRecordsMatchWithEmptyMetadata) {
-    expected_file_info.metadata = "{}";
+    expected_message_meta.metadata = "{}";
     expected_metadata = "";
     for (auto mock : mock_functions) {
         mock(nullptr, true);
diff --git a/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp b/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
index 120dfb7b2b69a08b59d1a3fa476d8d617d632479..2e6762cb541f123ce9b529e28b23bbf648792934 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
@@ -18,7 +18,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -55,15 +55,15 @@ namespace {
 
 class DbMetaLastStreamTests : public Test {
   public:
-    std::string expectedlaststream = "substream";
+    std::string expectedlaststream = "stream";
     RequestHandlerDbLastStream handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_stream = "stream";
-    std::string info_str = R"({"lastId":10,"name":"substream","timestampCreated":1000000,"timestampLast":2000000})";
+    std::string expected_data_source = "source";
+    std::string info_str = R"({"lastId":10,"name":"stream","timestampCreated":1000000,"timestampLast":2000000})";
     asapo::StreamInfo expected_stream_info;
     void SetUp() override {
         GenericRequestHeader request_header;
@@ -89,9 +89,9 @@ TEST_F(DbMetaLastStreamTests, CallsUpdate) {
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream()).WillOnce(ReturnRef(expected_stream));
+    EXPECT_CALL(*mock_request, GetDataSource()).WillOnce(ReturnRef(expected_data_source));
 
-    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_stream)).
+    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_data_source)).
     WillOnce(testing::Return(nullptr));
 
 
diff --git a/receiver/unittests/request_handler/test_request_handler_db_meta_writer.cpp b/receiver/unittests/request_handler/test_request_handler_db_meta_writer.cpp
index d80d877bb2a943edd4f99a69a8c4c70b7ebd87af..ef677673aa55abcf99bb13dee9b078c796652c4b 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_meta_writer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_meta_writer.cpp
@@ -18,7 +18,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -62,7 +62,7 @@ class DbMetaWriterHandlerTests : public Test {
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_stream = "stream";
+    std::string expected_data_source = "source";
     std::string meta_str =
         R"("info":"stat","int_data":0,"float_data":0.1,"bool":false)";
     const uint8_t* expected_meta = reinterpret_cast<const uint8_t*>(meta_str.c_str());
@@ -91,12 +91,12 @@ TEST_F(DbMetaWriterHandlerTests, CallsUpdate) {
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream())
-    .WillOnce(ReturnRef(expected_stream))
+    EXPECT_CALL(*mock_request, GetDataSource())
+    .WillOnce(ReturnRef(expected_data_source))
     ;
 
 
-    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_stream)).
+    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_data_source)).
     WillOnce(testing::Return(nullptr));
 
 
diff --git a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
index 5bcd03d12892f15eecff516ddb31035247db166c..1d1d96d3d2cf47a84928f603fd5660af329b4804 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
@@ -18,7 +18,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -55,21 +55,21 @@ namespace {
 
 class DbMetaStreamInfoTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbStreamInfo handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_stream = "stream";
-    std::string info_str = R"({"lastId":10,"name":"substream","timestampCreated":1000000,"timestampLast":2000000})";
+    std::string expected_data_source = "source";
+    std::string info_str = R"({"lastId":10,"name":"stream","timestampCreated":1000000,"timestampLast":2000000})";
     asapo::StreamInfo expected_stream_info;
     void SetUp() override {
         GenericRequestHeader request_header;
         expected_stream_info.last_id = 10;
-        expected_stream_info.name = expected_substream;
+        expected_stream_info.name = expected_stream;
         expected_stream_info.timestamp_created = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(1));
         expected_stream_info.timestamp_lastentry = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(2));
         request_header.data_id = 0;
@@ -90,13 +90,13 @@ TEST_F(DbMetaStreamInfoTests, CallsUpdate) {
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream()).WillOnce(ReturnRef(expected_stream));
+    EXPECT_CALL(*mock_request, GetDataSource()).WillOnce(ReturnRef(expected_data_source));
 
-    EXPECT_CALL(*mock_request, GetSubstream()).Times(2)
-    .WillRepeatedly(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream()).Times(2)
+    .WillRepeatedly(Return(expected_stream))
     ;
 
-    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_stream)).
+    EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_data_source)).
     WillOnce(testing::Return(nullptr));
 
 
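
For orientation: the expected JSON above pairs StreamInfo fields with camelCase keys, with the chrono time points serialized as nanosecond counts (milliseconds(1) -> 1000000). A sketch of the assumed struct, reconstructed only from the fields these tests touch, not from the real common header:

#include <chrono>
#include <cstdint>
#include <string>

// Assumed shape -- reconstructed from the tests above.
struct StreamInfo {
    uint64_t last_id{};                                         // JSON key "lastId"
    std::string name;                                           // JSON key "name"
    std::chrono::system_clock::time_point timestamp_created;    // "timestampCreated", nanoseconds
    std::chrono::system_clock::time_point timestamp_lastentry;  // "timestampLast", nanoseconds
};
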
diff --git a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
index d291c10754d9e6188930109e1a055ad18092eef6..129e704a0cea73acb6e8d19368817a815b383c29 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
@@ -21,7 +21,7 @@
 #include "../receiver_mocking.h"
 
 using asapo::MockRequest;
-using asapo::FileInfo;
+using asapo::MessageMeta;
 using ::testing::Test;
 using ::testing::Return;
 using ::testing::ReturnRef;
@@ -66,16 +66,16 @@ TEST(DbWriterHandler, Constructor) {
 
 class DbWriterHandlerTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbWrite handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
     NiceMock<asapo::MockLogger> mock_logger;
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
-    std::string expected_default_stream = "detector";
-    std::string expected_stream = "stream";
+    std::string expected_default_source = "detector";
+    std::string expected_data_source = "source";
     std::string expected_host_ip = "127.0.0.1";
     uint64_t expected_port = 1234;
     uint64_t expected_buf_id = 18446744073709551615ull;
@@ -83,9 +83,9 @@ class DbWriterHandlerTests : public Test {
     std::string expected_metadata = "meta";
     uint64_t expected_file_size = 10;
     uint64_t expected_id = 15;
-    uint64_t expected_subset_id = 15;
-    uint64_t expected_subset_size = 2;
-    uint64_t expected_custom_data[asapo::kNCustomParams] {0, expected_subset_id, expected_subset_size};
+    uint64_t expected_substream = 20;
+    uint64_t expected_dataset_size = 2;
+    uint64_t expected_custom_data[asapo::kNCustomParams] {0, expected_substream, expected_dataset_size};
     asapo::MockHandlerDbCheckRequest mock_db_check_handler{asapo::kDBDataCollectionNamePrefix};
 
     void SetUp() override {
@@ -102,10 +102,10 @@ class DbWriterHandlerTests : public Test {
 
         ON_CALL(*mock_request, GetBeamtimeId()).WillByDefault(ReturnRef(expected_beamtime_id));
     }
-    void ExpectRequestParams(asapo::Opcode op_code, const std::string& stream);
+    void ExpectRequestParams(asapo::Opcode op_code, const std::string& data_source);
     void ExpectLogger();
     void ExpectDuplicatedID();
-    FileInfo PrepareFileInfo();
+    MessageMeta PrepareMessageMeta(bool substream = false);
     void TearDown() override {
         handler.db_client__.release();
     }
@@ -113,10 +113,11 @@ class DbWriterHandlerTests : public Test {
 
 };
 
-MATCHER_P(CompareFileInfo, file, "") {
+MATCHER_P(CompareMessageMeta, file, "") {
     if (arg.size != file.size) return false;
     if (arg.source != file.source) return false;
     if (arg.buf_id != file.buf_id) return false;
+    if (arg.dataset_substream != file.dataset_substream) return false;
     if (arg.name != file.name) return false;
     if (arg.id != file.id) return false;
     if (arg.metadata != file.metadata) return false;
@@ -129,7 +130,7 @@ MATCHER_P(CompareFileInfo, file, "") {
 }
 
 
-void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std::string& stream) {
+void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std::string& data_source) {
 
     EXPECT_CALL(*mock_request, WasAlreadyProcessed())
     .WillOnce(Return(false))
@@ -139,8 +140,8 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     .WillOnce(ReturnRef(expected_beamtime_id))
     ;
 
-    EXPECT_CALL(*mock_request, GetStream())
-    .WillOnce(ReturnRef(stream))
+    EXPECT_CALL(*mock_request, GetDataSource())
+    .WillOnce(ReturnRef(data_source))
     ;
 
 
@@ -149,7 +150,7 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     ;
 
     std::string db_name = expected_beamtime_id;
-    db_name += "_" + stream;
+    db_name += "_" + data_source;
 
     EXPECT_CALL(mock_db, Connect_t(config.database_uri, db_name)).
     WillOnce(testing::Return(nullptr));
@@ -162,8 +163,8 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     .WillOnce(Return(expected_file_name))
     ;
 
-    EXPECT_CALL(*mock_request, GetSubstream())
-    .WillOnce(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream())
+    .WillOnce(Return(expected_stream))
     ;
 
 
@@ -179,7 +180,7 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     .WillOnce(Return(op_code))
     ;
 
-    if (op_code == asapo::Opcode::kOpcodeTransferSubsetData) {
+    if (op_code == asapo::Opcode::kOpcodeTransferDatasetData) {
         EXPECT_CALL(*mock_request, GetCustomData_t()).Times(2).
         WillRepeatedly(Return(expected_custom_data))
         ;
@@ -189,21 +190,24 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
 
 
 
-FileInfo DbWriterHandlerTests::PrepareFileInfo() {
-    FileInfo file_info;
-    file_info.size = expected_file_size;
-    file_info.name = expected_file_name;
-    file_info.id = expected_id;
-    file_info.buf_id = expected_buf_id;
-    file_info.source = expected_host_ip + ":" + std::to_string(expected_port);
-    file_info.metadata = expected_metadata;
-    return file_info;
+MessageMeta DbWriterHandlerTests::PrepareMessageMeta(bool substream) {
+    MessageMeta message_meta;
+    message_meta.size = expected_file_size;
+    message_meta.name = expected_file_name;
+    message_meta.id = expected_id;
+    if (substream) {
+        message_meta.dataset_substream = expected_substream;
+    }
+    message_meta.buf_id = expected_buf_id;
+    message_meta.source = expected_host_ip + ":" + std::to_string(expected_port);
+    message_meta.metadata = expected_metadata;
+    return message_meta;
 }
 void DbWriterHandlerTests::ExpectLogger() {
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("insert record"),
                                          HasSubstr(config.database_uri),
                                          HasSubstr(expected_beamtime_id),
-                                         HasSubstr(expected_stream),
+                                         HasSubstr(expected_data_source),
                                          HasSubstr(expected_collection_name)
                                         )
                                   )
@@ -213,25 +217,25 @@ void DbWriterHandlerTests::ExpectLogger() {
 
 TEST_F(DbWriterHandlerTests, CallsInsert) {
 
-    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_stream);
-    auto file_info = PrepareFileInfo();
+    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_data_source);
+    auto message_meta = PrepareMessageMeta();
 
-    EXPECT_CALL(mock_db, Insert_t(expected_collection_name, CompareFileInfo(file_info), false)).
+    EXPECT_CALL(mock_db, Insert_t(expected_collection_name, CompareMessageMeta(message_meta), false)).
     WillOnce(testing::Return(nullptr));
     ExpectLogger();
 
     handler.ProcessRequest(mock_request.get());
 }
 
-TEST_F(DbWriterHandlerTests, CallsInsertSubset) {
+TEST_F(DbWriterHandlerTests, CallsInsertDataset) {
 
-    ExpectRequestParams(asapo::Opcode::kOpcodeTransferSubsetData, expected_default_stream);
-    auto file_info = PrepareFileInfo();
+    ExpectRequestParams(asapo::Opcode::kOpcodeTransferDatasetData, expected_data_source);
+    auto message_meta = PrepareMessageMeta(true);
 
 
-    EXPECT_CALL(mock_db, InsertAsSubset_t(expected_collection_name, CompareFileInfo(file_info), expected_subset_id,
-                                          expected_subset_size, false)).
-    WillOnce(testing::Return(nullptr));
+    EXPECT_CALL(mock_db, InsertAsDatasetMessage_t(expected_collection_name, CompareMessageMeta(message_meta),
+                                                  expected_dataset_size, false)).
+    WillOnce(testing::Return(nullptr));
     ExpectLogger();
 
     handler.ProcessRequest(mock_request.get());
@@ -239,10 +243,10 @@ TEST_F(DbWriterHandlerTests, CallsInsertSubset) {
 
 
 void DbWriterHandlerTests::ExpectDuplicatedID() {
-    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_stream);
-    auto file_info = PrepareFileInfo();
+    ExpectRequestParams(asapo::Opcode::kOpcodeTransferData, expected_data_source);
+    auto message_meta = PrepareMessageMeta();
 
-    EXPECT_CALL(mock_db, Insert_t(expected_collection_name, CompareFileInfo(file_info), false)).
+    EXPECT_CALL(mock_db, Insert_t(expected_collection_name, CompareMessageMeta(message_meta), false)).
     WillOnce(testing::Return(asapo::DBErrorTemplates::kDuplicateID.Generate().release()));
 }
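
For orientation: CompareMessageMeta checks the renamed record type field by field, including the new dataset_substream member that replaces the old subset semantics. A sketch of the assumed MessageMeta layout, listing only the members these tests actually set or compare:

#include <cstdint>
#include <string>

// Assumed layout -- reconstructed from PrepareMessageMeta() and the matcher above,
// not from the real header.
struct MessageMeta {
    std::string name;              // file name
    uint64_t id{};                 // global message id (the former file_id)
    uint64_t size{};               // payload size in bytes
    uint64_t buf_id{};             // receiver buffer id
    uint64_t dataset_substream{};  // position of this message within a dataset
    std::string source;            // producer address, host:port
    std::string metadata;          // user JSON metadata
};
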
 
diff --git a/receiver/unittests/request_handler/test_request_handler_receive_data.cpp b/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
index 5f4eca41f980cff3838720118e5517e8de5a8eb5..bc9a9116a8b62dc0d31825e3a31b299be211fbbd 100644
--- a/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
@@ -29,7 +29,7 @@ using ::asapo::ErrorInterface;
 using ::asapo::FileDescriptor;
 using ::asapo::SocketDescriptor;
 using ::asapo::GenericRequestHeader;
-using ::asapo::SendDataResponse;
+using ::asapo::SendResponse;
 using ::asapo::GenericRequestHeader;
 using ::asapo::GenericNetworkResponse;
 using ::asapo::Opcode;
diff --git a/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp b/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
index 0c52fa4252bea349b7ab178946199fcc2e0b9d7f..fbef728d127d162d4aebf03bb2255b31d322cdff 100644
--- a/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
@@ -29,7 +29,7 @@ using ::asapo::ErrorInterface;
 using ::asapo::FileDescriptor;
 using ::asapo::SocketDescriptor;
 using ::asapo::GenericRequestHeader;
-using ::asapo::SendDataResponse;
+using ::asapo::SendResponse;
 using ::asapo::GenericRequestHeader;
 using ::asapo::GenericNetworkResponse;
 using ::asapo::Opcode;
diff --git a/receiver/unittests/request_handler/test_requests_dispatcher.cpp b/receiver/unittests/request_handler/test_requests_dispatcher.cpp
index 0a5227d121911ff6cced611d9f6432cfafff9418..e5d6daaf0354bcd3a9a40f0cd5de60674bf06152 100644
--- a/receiver/unittests/request_handler/test_requests_dispatcher.cpp
+++ b/receiver/unittests/request_handler/test_requests_dispatcher.cpp
@@ -36,7 +36,7 @@ using asapo::Error;
 using asapo::ErrorInterface;
 using asapo::SocketDescriptor;
 using asapo::GenericRequestHeader;
-using asapo::SendDataResponse;
+using asapo::SendResponse;
 using asapo::GenericRequestHeader;
 using asapo::GenericNetworkResponse;
 using asapo::Opcode;
diff --git a/receiver/unittests/test_connection.cpp b/receiver/unittests/test_connection.cpp
index 1366ce7a617b0934e6b7289e8da81184acbbdd7e..fca987832b01204c8708a8bfe85840e9476d1c68 100644
--- a/receiver/unittests/test_connection.cpp
+++ b/receiver/unittests/test_connection.cpp
@@ -39,7 +39,7 @@ using asapo::ErrorInterface;
 using asapo::FileDescriptor;
 using asapo::SocketDescriptor;
 using asapo::GenericRequestHeader;
-using asapo::SendDataResponse;
+using asapo::SendResponse;
 using asapo::GenericRequestHeader;
 using asapo::GenericNetworkResponse;
 using asapo::Opcode;
diff --git a/receiver/unittests/test_request.cpp b/receiver/unittests/test_request.cpp
index 59ce937bdf33c689cbfcb35e0ecef8445fd135af..cea29e89dc78ee98dc004c9452cb33ac1ca40e96 100644
--- a/receiver/unittests/test_request.cpp
+++ b/receiver/unittests/test_request.cpp
@@ -29,7 +29,7 @@ using ::asapo::ErrorInterface;
 using ::asapo::FileDescriptor;
 using ::asapo::SocketDescriptor;
 using ::asapo::GenericRequestHeader;
-using ::asapo::SendDataResponse;
+using ::asapo::SendResponse;
 using ::asapo::GenericRequestHeader;
 using ::asapo::GenericNetworkResponse;
 using ::asapo::Opcode;
@@ -79,7 +79,7 @@ class RequestTests : public Test {
     uint64_t expected_slot_id{16};
     std::string expected_origin_uri = "origin_uri";
     std::string expected_metadata = "meta";
-    std::string expected_substream = "substream";
+    std::string expected_stream = "stream";
     uint64_t expected_metadata_size = expected_metadata.size();
     asapo::Opcode expected_op_code = asapo::kOpcodeTransferData;
     char expected_request_message[asapo::kMaxMessageSize] = "test_message";
@@ -186,16 +186,16 @@ void RequestTests::ExpectFileName(std::string sended, std::string received) {
 }
 
 
-TEST_F(RequestTests, GetSubstream) {
-    strcpy(generic_request_header.substream, expected_substream.c_str());
+TEST_F(RequestTests, GetStream) {
+    strcpy(generic_request_header.stream, expected_stream.c_str());
 
     request->io__.release();
     request.reset(new Request{generic_request_header, expected_socket_id, expected_origin_uri, nullptr, nullptr});
     request->io__ = std::unique_ptr<asapo::IO> {&mock_io};
 
-    auto substream = request->GetSubstream();
+    auto stream = request->GetStream();
 
-    ASSERT_THAT(substream, Eq(expected_substream));
+    ASSERT_THAT(stream, Eq(expected_stream));
 }
 
 
@@ -223,10 +223,10 @@ TEST_F(RequestTests, SetGetBeamtimeId) {
 }
 
 
-TEST_F(RequestTests, SetGetStream) {
-    request->SetStream("stream");
+TEST_F(RequestTests, SetGetSource) {
+    request->SetDataSource("source");
 
-    ASSERT_THAT(request->GetStream(), "stream");
+    ASSERT_THAT(request->GetDataSource(), "source");
 }
 
 
diff --git a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
index b541b5913c49618102d5015aa0084e9a2eb55a28..cf5e490000ce38ef122ca30396ba2a81cc680b1a 100644
--- a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
@@ -41,7 +41,7 @@ void ServerChildThread(FabricServer* server, std::atomic<int>* serverTotalReques
         M_AssertEq(messageId / kEachInstanceRuns, request.data_id); // is client index
         M_AssertEq(messageId % kEachInstanceRuns, request.data_size); // is client run
 
-        server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, expectedRdmaBuffer, kRdmaSize, &err);
+        server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, expectedRdmaBuffer, kRdmaSize, &err);
         M_AssertEq(nullptr, err, "server->RdmaWrite");
 
         GenericNetworkResponse response{};
@@ -96,7 +96,7 @@ void ClientChildThread(const std::string& hostname, uint16_t port, int index, ch
 
         GenericRequestHeader request{};
         strcpy(request.message, "Hello World");
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
         request.data_id = index;
         request.data_size = run;
         FabricMessageId messageId = (index * kEachInstanceRuns) + run;
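
For orientation: the fabric tests piggyback the RDMA memory-region details on the renamed stream field of GenericRequestHeader, treating it as opaque byte storage rather than a stream name. A self-contained sketch of that pattern -- the field size and the MemoryRegionDetails members other than key are assumptions:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct MemoryRegionDetails {  // only 'key' is visible in the tests; the other members are assumed
    uint64_t addr;
    uint64_t length;
    uint64_t key;
};

constexpr std::size_t kStreamFieldSize = 1024;  // hypothetical size of the header field

struct GenericRequestHeader {
    char stream[kStreamFieldSize];  // normally a stream name; reused here as raw storage
};

// Client side: copy the region details into the header before Send().
void PackDetails(GenericRequestHeader& request, const MemoryRegionDetails& details) {
    static_assert(sizeof(MemoryRegionDetails) <= kStreamFieldSize, "details must fit");
    std::memcpy(request.stream, &details, sizeof(details));
}

// Server side: reinterpret the same bytes for RdmaWrite().
const MemoryRegionDetails* UnpackDetails(const GenericRequestHeader& request) {
    return reinterpret_cast<const MemoryRegionDetails*>(&request.stream);
}
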
diff --git a/tests/automatic/asapo_fabric/simple_data_transfer.cpp b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
index e811d4644b6c9ec95daee6c957c00b7fc0070436..28a2b9ce6316fe3d429618494c742bd6c9ee403d 100644
--- a/tests/automatic/asapo_fabric/simple_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
@@ -45,7 +45,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port, char* expect
                 M_AssertEq(123 + instanceRuns, messageId);
                 M_AssertEq("Hello World", request.message);
 
-                server->RdmaWrite(clientAddress, (MemoryRegionDetails*) &request.substream, expectedRdmaBuffer, kRdmaSize,
+                server->RdmaWrite(clientAddress, (MemoryRegionDetails*) &request.stream, expectedRdmaBuffer, kRdmaSize,
                                   &err);
                 M_AssertEq(nullptr, err, "server->RdmaWrite");
 
@@ -84,7 +84,7 @@ void ClientThread(const std::string& hostname, uint16_t port, char* expectedRdma
 
             GenericRequestHeader request{};
             strcpy(request.message, "Hello World");
-            memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+            memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
             FabricMessageId messageId = 123 + instanceRuns;
             client->Send(serverAddress, messageId, &request, sizeof(request), &err);
             M_AssertEq(nullptr, err, "client->Send");
diff --git a/tests/automatic/asapo_fabric/wrong_memory_info.cpp b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
index 52259b58e17e324f777bfc8a1aa0190ba78674c4..cb6c444a4db0c04457d8c6529ef134a10773b7ae 100644
--- a/tests/automatic/asapo_fabric/wrong_memory_info.cpp
+++ b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
@@ -40,7 +40,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     M_AssertEq(nullptr, err, "server->RecvAny(1)");
     M_AssertEq(1, messageId);
     M_AssertEq("Hello World", request.message);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(FabricErrorTemplates::kInternalError, err, "server->RdmaWrite(1)");
     err = nullptr; // We have to reset the error by ourselves
     server->Send(clientAddress, messageId, dummyData.get(), kDummyDataSize, &err);
@@ -54,7 +54,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(2)");
     M_AssertEq(2, messageId);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(nullptr, err, "server->RdmaWrite(2)");
     server->Send(clientAddress, messageId, dummyData.get(), kDummyDataSize, &err);
     M_AssertEq(nullptr, err, "server->Send(2)");
@@ -68,7 +68,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(3)");
     M_AssertEq(3, messageId);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(FabricErrorTemplates::kInternalError, err, "server->RdmaWrite(3)");
 
     std::cout << "[SERVER] Waiting for client to finish" << std::endl;
@@ -98,10 +98,10 @@ void ClientThread(const std::string& hostname, uint16_t port) {
     {
         auto mr = client->ShareMemoryRegion(actualRdmaBuffer.get(), kRdmaSize, &err);
         M_AssertEq(nullptr, err, "client->ShareMemoryRegion");
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
 
         // Simulate faulty memory details
-        ((MemoryRegionDetails*)(&request.substream))->key++;
+        ((MemoryRegionDetails*)(&request.stream))->key++;
         client->Send(serverAddress, messageId, &request, sizeof(request), &err);
         M_AssertEq(nullptr, err, "client->Send(1)");
         client->Recv(serverAddress, messageId, dummyData.get(), kDummyDataSize, &err);
@@ -109,7 +109,7 @@ void ClientThread(const std::string& hostname, uint16_t port) {
         messageId++;
 
         // Simulate correct memory details
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
         client->Send(serverAddress, messageId, &request, sizeof(request), &err);
         M_AssertEq(nullptr, err, "client->Send(2)");
         client->Recv(serverAddress, messageId, dummyData.get(), kDummyDataSize, &err);
diff --git a/tests/automatic/authorizer/check_authorize/check_linux.sh b/tests/automatic/authorizer/check_authorize/check_linux.sh
index d4befd2b4d47a23fe0602cf1a01d196706c4a152..0471f37aece486d24c9823772231f960dc787fb4 100644
--- a/tests/automatic/authorizer/check_authorize/check_linux.sh
+++ b/tests/automatic/authorizer/check_authorize/check_linux.sh
@@ -19,28 +19,28 @@ mkdir -p asap3/petra3/gpfs/p00/2019/data/11000015
 mkdir -p beamline/p07/current
 cp beamtime-metadata* beamline/p07/current/
 
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%stream%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep c20180508-000-COM20181
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%stream%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p00
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%stream%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep stream
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%detector%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep c20180508-000-COM20181
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%detector%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p00
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%detector%","OriginHost":"127.0.0.1:5555"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep detector
 
 token=onm80KQF8s6d2p_laW0S5IYanUUsLcnB3QO-6QQ1M90= #token for c20180508-000-COM20181
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%stream%onm80KQF8s6d2p_laW0S5IYanUUsLcnB3QO-6QQ1M90=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep stream
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%auto%stream%onm80KQF8s6d2p_laW0S5IYanUUsLcnB3QO-6QQ1M90=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p00
-curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%stream%bla","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%detector%onm80KQF8s6d2p_laW0S5IYanUUsLcnB3QO-6QQ1M90=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep detector
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%auto%detector%onm80KQF8s6d2p_laW0S5IYanUUsLcnB3QO-6QQ1M90=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p00
+curl -v --silent --data '{"SourceCredentials":"processed%c20180508-000-COM20181%%detector%bla","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
 
 token=dccMd3NT89i32Whz7yD4VQhmEJy6Kxc35wsBbWJLXp0= #token for 11000015
 #beamtime not online
-curl -v --silent --data '{"SourceCredentials":"raw%11000015%%stream%dccMd3NT89i32Whz7yD4VQhmEJy6Kxc35wsBbWJLXp0=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
+curl -v --silent --data '{"SourceCredentials":"raw%11000015%%detector%dccMd3NT89i32Whz7yD4VQhmEJy6Kxc35wsBbWJLXp0=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
 
 token=Jaas_xTpkB0Zy5dFwjs4kCrY7yXMfbnW8Ca1aYhyKBs= #token for 11000016
-curl -v --silent --data '{"SourceCredentials":"raw%11000016%%stream%Jaas_xTpkB0Zy5dFwjs4kCrY7yXMfbnW8Ca1aYhyKBs=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
+curl -v --silent --data '{"SourceCredentials":"raw%11000016%%detector%Jaas_xTpkB0Zy5dFwjs4kCrY7yXMfbnW8Ca1aYhyKBs=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 401
 
 
 token=-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4= # for bl_p07
-curl -v --silent --data '{"SourceCredentials":"processed%auto%p07%stream%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 11111111
-curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%stream%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 11111111
-curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%stream%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p07
-curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%stream%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep /asap3/petra3/gpfs/p07/2020/data/11111111
+curl -v --silent --data '{"SourceCredentials":"processed%auto%p07%detector%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"bla"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 11111111
+curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%detector%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep 11111111
+curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%detector%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep p07
+curl -v --silent --data '{"SourceCredentials":"raw%auto%p07%detector%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=","OriginHost":"127.0.0.1:5007"}' 127.0.0.1:5007/authorize --stderr -  | tee /dev/stderr  | grep /asap3/petra3/gpfs/p07/2020/data/11111111
 
 
 rm -rf asap3 beamline
\ No newline at end of file
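
For orientation: after the rename, the fourth %-separated field of SourceCredentials is the data source (here the default "detector") rather than a stream name. A small sketch of composing such a string; the layout is inferred from the payloads above and the helper itself is hypothetical:

#include <string>

// Inferred layout: <type>%<beamtime id>%<beamline>%<data source>%<token>
// (fields may be empty or "auto", as in the payloads above).
std::string MakeSourceCredentials(const std::string& type, const std::string& beamtime,
                                  const std::string& beamline, const std::string& data_source,
                                  const std::string& token) {
    return type + "%" + beamtime + "%" + beamline + "%" + data_source + "%" + token;
}

// MakeSourceCredentials("processed", "c20180508-000-COM20181", "", "detector", "")
// == "processed%c20180508-000-COM20181%%detector%"   (the first payload above)
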
diff --git a/tests/automatic/authorizer/check_authorize/check_windows.bat b/tests/automatic/authorizer/check_authorize/check_windows.bat
index 23f3da008f3f6d8a30f3841ff70803b355447a03..c90cce0d6993493cea730a1cd5226ef6cc021018 100644
--- a/tests/automatic/authorizer/check_authorize/check_windows.bat
+++ b/tests/automatic/authorizer/check_authorize/check_windows.bat
@@ -9,13 +9,13 @@ mkdir asap3\petra3\gpfs\p00\2019\comissioning\c20180508-000-COM20181
 mkdir beamline\p07\current
 copy beamtime-metadata* beamline\p07\current\ /y
 
-C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%%%stream%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr c20180508-000-COM20181  || goto :error
-C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%auto%%stream%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr p00  || goto :error
-C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%%%stream%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr stream  || goto :error
+C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%%%detector%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr c20180508-000-COM20181  || goto :error
+C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%auto%%detector%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr p00  || goto :error
+C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"processed%%c20180508-000-COM20181%%%%detector%%\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr detector  || goto :error
 
-C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"raw%%c20180508-000-COM20181%%%%stream%%wrong\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr 401  || goto :error
+C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"raw%%c20180508-000-COM20181%%%%detector%%wrong\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr 401  || goto :error
 
-C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"raw%%auto%%p07%%stream%%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr 11111111  || goto :error
+C:\Curl\curl.exe -v  --silent --data "{\"SourceCredentials\":\"raw%%auto%%p07%%detector%%-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=\",\"OriginHost\":\"127.0.0.1:5555\"}" 127.0.0.1:5007/authorize --stderr - | findstr 11111111  || goto :error
 
 goto :clean
 
diff --git a/tests/automatic/broker/check_monitoring/check_linux.sh b/tests/automatic/broker/check_monitoring/check_linux.sh
index 30060cb0e2a1d1686aed89cf0c7a4dafebff7d89..caf55e77f9b566a57e8b945639adf54b45c861a1 100644
--- a/tests/automatic/broker/check_monitoring/check_linux.sh
+++ b/tests/automatic/broker/check_monitoring/check_linux.sh
@@ -29,7 +29,7 @@ groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
 
 for i in `seq 1 50`;
 do
-    curl --silent 127.0.0.1:5005/database/data/stream/substream/${groupid}/next?token=$token >/dev/null 2>&1 &
+    curl --silent 127.0.0.1:5005/database/data/source/stream/${groupid}/next?token=$token >/dev/null 2>&1 &
 done
 
 
diff --git a/tests/automatic/broker/get_last/check_linux.sh b/tests/automatic/broker/get_last/check_linux.sh
index f8d9b6d8c8e413c2d73f1c7d29668adf4620495e..a721a07b225b5a56241819c7dc80c22386438d20 100644
--- a/tests/automatic/broker/get_last/check_linux.sh
+++ b/tests/automatic/broker/get_last/check_linux.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
-database_name=data_stream
-substream=substream
+database_name=data_detector
+stream=stream
 
 set -e
 
@@ -13,8 +13,8 @@ Cleanup() {
 	kill -9 $brokerid
 }
 
-echo "db.data_${substream}.insert({"_id":2})" | mongo ${database_name}
-echo "db.data_${substream}.insert({"_id":1})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":2})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":1})" | mongo ${database_name}
 
 token=`$2 token -secret auth_secret.key data`
 
@@ -26,21 +26,21 @@ brokerid=`echo $!`
 
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
 
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr -
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr -
 
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":2'
 
-echo "db.data_${substream}.insert({"_id":3})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":3})" | mongo ${database_name}
 
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":3'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":3'
 
-echo "db.data_${substream}.insert({"_id":4})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":4})" | mongo ${database_name}
 
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":4'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":4'
 
 #with a new group
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":4'
\ No newline at end of file
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":4'
\ No newline at end of file
diff --git a/tests/automatic/broker/get_last/check_windows.bat b/tests/automatic/broker/get_last/check_windows.bat
index ecfb48830ebb09bf9ed42cd45385db4d8f2b63f5..bc2cfdada576cdb8c12ff416df8f59716dccc274 100644
--- a/tests/automatic/broker/get_last/check_windows.bat
+++ b/tests/automatic/broker/get_last/check_windows.bat
@@ -1,4 +1,4 @@
-SET database_name=data_stream
+SET database_name=data_detector
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
 echo db.data_default.insert({"_id":1}) | %mongo_exe% %database_name%  || goto :error
@@ -17,22 +17,22 @@ C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid
 set /P groupid=< groupid
 
 
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
 
 echo db.data_default.insert({"_id":3}) | %mongo_exe% %database_name%  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":3  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":3  || goto :error
 
 echo db.data_default.insert({"_id":4}) | %mongo_exe% %database_name%  || goto :error
 
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4  || goto :error
 
 
 C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid
 set /P groupid=< groupid
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4  || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/broker/get_meta/check_linux.sh b/tests/automatic/broker/get_meta/check_linux.sh
index f6fb2b23ea0477585ab81835a882f882e9c74ab7..57b2e3335ff8e43ee8b071d4b68b1620fa1caa6e 100644
--- a/tests/automatic/broker/get_meta/check_linux.sh
+++ b/tests/automatic/broker/get_meta/check_linux.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-database_name=test_stream
+database_name=test_detector
 
 set -e
 
@@ -21,6 +21,6 @@ $1 -config settings.json &
 sleep 0.3
 brokerid=`echo $!`
 
-curl -v  --silent 127.0.0.1:5005/database/test/stream/default/0/meta/0?token=$token --stderr - | tee /dev/stderr | grep '"data":"test"'
-curl -v  --silent 127.0.0.1:5005/database/test/stream/default/0/meta/1?token=$token --stderr - | tee /dev/stderr | grep 'no documents'
+curl -v  --silent 127.0.0.1:5005/database/test/detector/default/0/meta/0?token=$token --stderr - | tee /dev/stderr | grep '"data":"test"'
+curl -v  --silent 127.0.0.1:5005/database/test/detector/default/0/meta/1?token=$token --stderr - | tee /dev/stderr | grep 'no documents'
 
diff --git a/tests/automatic/broker/get_meta/check_windows.bat b/tests/automatic/broker/get_meta/check_windows.bat
index 63eeaec9cc70df31ca03608dbc31c5f0f18776bc..dc39360ad6fbd96ecd35bf7aa8d8e27766ea63e8 100644
--- a/tests/automatic/broker/get_meta/check_windows.bat
+++ b/tests/automatic/broker/get_meta/check_windows.bat
@@ -1,4 +1,4 @@
-SET database_name=data_stream
+SET database_name=data_detector
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
 echo db.meta.insert({"_id":0}) | %mongo_exe% %database_name%  || goto :error
@@ -13,8 +13,8 @@ start /B "" "%full_name%" -config settings.json
 
 ping 192.0.2.1 -n 1 -w 1000 > nul
 
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/meta/0?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/0/meta/1?token=%token% --stderr - | findstr /c:"no documents"  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/meta/0?token=%token% --stderr - | findstr /c:\"_id\":0  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/0/meta/1?token=%token% --stderr - | findstr /c:"no documents"  || goto :error
 
 
 goto :clean
diff --git a/tests/automatic/broker/get_next/check_linux.sh b/tests/automatic/broker/get_next/check_linux.sh
index 52ae47b39887e098bade336293857cc845c3c317..277f78895f75804530199be5f17a3856bbbe9a63 100644
--- a/tests/automatic/broker/get_next/check_linux.sh
+++ b/tests/automatic/broker/get_next/check_linux.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
-database_name=data_stream
-substream=substream
+database_name=data_source
+stream=stream
 
 set -e
 
@@ -13,8 +13,8 @@ Cleanup() {
 	kill -9 $brokerid
 }
 
-echo "db.data_${substream}.insert({"_id":2})" | mongo ${database_name}
-echo "db.data_${substream}.insert({"_id":1})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":2})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":1})" | mongo ${database_name}
 
 token=`$2 token -secret auth_secret.key data`
 
@@ -24,10 +24,10 @@ sleep 0.3
 brokerid=`echo $!`
 
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":2'
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"id_max":2'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"id_max":2'
 
 # with a new group
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr | grep '"_id":1'
\ No newline at end of file
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr | grep '"_id":1'
\ No newline at end of file
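
For orientation: the broker route now carries the data source and the stream as separate path segments, /database/<beamtime>/<data_source>/<stream>/<group id>/next, matching the <beamtime>_<data_source> database naming used above. A small sketch of assembling that path; the route layout is read off the curl calls above and the helper is hypothetical:

#include <string>

std::string NextMessagePath(const std::string& beamtime, const std::string& data_source,
                            const std::string& stream, const std::string& group_id,
                            const std::string& token) {
    return "/database/" + beamtime + "/" + data_source + "/" + stream + "/"
           + group_id + "/next?token=" + token;
}

// NextMessagePath("data", "source", "stream", groupid, token) reproduces the
// 127.0.0.1:5005/database/data/source/stream/<groupid>/next requests above.
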
diff --git a/tests/automatic/broker/get_next/check_windows.bat b/tests/automatic/broker/get_next/check_windows.bat
index e662d5aef93a6acea85cbd5cd2afdc9e2d242a76..0f3962bc4b44d9bc89267a8f2cce0728ccd79df7 100644
--- a/tests/automatic/broker/get_next/check_windows.bat
+++ b/tests/automatic/broker/get_next/check_windows.bat
@@ -1,4 +1,4 @@
-SET database_name=data_stream
+SET database_name=data_detector
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
 echo db.data_default.insert({"_id":1}) | %mongo_exe% %database_name%  || goto :error
@@ -16,13 +16,13 @@ ping 192.0.2.1 -n 1 -w 1000 > nul
 
 C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid
 set /P groupid=< groupid
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr  /c:\"id_max\":2  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":2  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr  /c:\"id_max\":2  || goto :error
 
 C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid
 set /P groupid=< groupid
-C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
+C:\Curl\curl.exe -v  --silent 127.0.0.1:5005/database/data/detector/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1  || goto :error
 
 goto :clean
 
diff --git a/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh b/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
index 0da5161d74d22e470c8f2652a8f3392191643896..21f6774fb85e118e2cf53a12447be4970b3c58f8 100644
--- a/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
+++ b/tests/automatic/bug_fixes/consumer_python_memleak/check_linux.sh
@@ -28,7 +28,7 @@ nomad run broker.nmd
 
 sleep 1
 
-echo 'db.data_default.insert({"_id":'1',"size":'$size',"name":"'$fname'","timestamp":1,"source":"none","buf_id":0,"meta":{}})' | mongo ${beamtime_id}_stream
+echo 'db.data_default.insert({"_id":'1',"size":'$size',"name":"'$fname'","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{}})' | mongo ${beamtime_id}_stream
 dd if=/dev/zero of=$fname bs=$size count=1
 
 export PYTHONPATH=$1:${PYTHONPATH}
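
The inserted record now carries the new `dataset_substream` field (0 for plain, non-dataset messages). For reference, the same document built with pymongo (a sketch; pymongo and the connection details are assumptions):

```python
# Sketch: the message record shape the test inserts, including the new
# dataset_substream field (0 for ordinary, non-dataset messages).
from pymongo import MongoClient

beamtime_id, size, fname = "asapo_test", 1000, "file1"  # placeholders
db = MongoClient("localhost", 27017)[beamtime_id + "_stream"]
db["data_default"].insert_one({
    "_id": 1,
    "size": size,
    "name": fname,
    "timestamp": 1,
    "source": "none",
    "buf_id": 0,
    "dataset_substream": 0,  # new field introduced by this patch
    "meta": {},
})
```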
diff --git a/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py b/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
index 2f414425c392b87b468978b8816dbe502ea0222d..9055a4c700bb161eb990dbc449d6afbd4b8be3b2 100644
--- a/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
+++ b/tests/automatic/bug_fixes/consumer_python_memleak/memleak.py
@@ -4,15 +4,15 @@ import time
 
 source, path, beamtime, token = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(
-    source, path,True, beamtime, "stream", token, 1000)
+consumer = asapo_consumer.create_consumer(
+    source, path, True, beamtime, "source", token, 1000)
 
-group_id  = broker.generate_group_id()
+group_id  = consumer.generate_group_id()
 print('generated group id: ', group_id)
 
 while True:
     try:
-        data, meta  = broker.get_last(group_id, meta_only=False)
+        data, meta = consumer.get_last(meta_only=False)
         print('filename: ', meta['name'])
     except Exception as err:
         print('err: ', err)
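
The consumer-side rename is mechanical: `create_server_broker` becomes `create_consumer` with the same positional arguments (endpoint, path, with-filesystem flag, beamtime, data source, token, timeout in ms), and, as the updated consumer_api.py later in this patch shows, `get_last` no longer takes a group id. A condensed sketch with placeholder values:

```python
# Sketch: old vs. new consumer construction; all values are placeholders.
import asapo_consumer

# before: broker = asapo_consumer.create_server_broker(endpoint, path, True,
#                                                      beamtime, "source", token, 1000)
consumer = asapo_consumer.create_consumer(
    "localhost:8400", "/tmp/data", True, "asapo_test", "source", "token", 1000)

group_id = consumer.generate_group_id()
data, meta = consumer.get_last(meta_only=False)  # group id no longer needed here
```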
diff --git a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/bugfix_callback.py b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/bugfix_callback.py
index f52bcde652a18ad1864bd6805c8fcd6315959355..c0858795a1bdf11a15e24bf61b52751c2b574df1 100644
--- a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/bugfix_callback.py
+++ b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/bugfix_callback.py
@@ -5,7 +5,7 @@ import sys
 import time
 import numpy as np
 
-stream = sys.argv[1]
+data_source = sys.argv[1]
 beamtime = sys.argv[2]
 endpoint = sys.argv[3]
 
@@ -18,21 +18,21 @@ class AsapoSender:
         self.ingest_mode = asapo_producer.DEFAULT_INGEST_MODE
         self._n_queued = 8
     def send(self, data, metadata):
-        self.producer.send_data(
+        self.producer.send(
                 metadata['_id'], metadata['name'], data,
                 ingest_mode=self.ingest_mode,
                 callback=self._callback)
     def _callback(self, header, err):
     	print ("hello self callback")
 
-producer  = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, nthreads, 600)
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', data_source, token, nthreads, 600000)
 producer.set_log_level("debug")
 
 sender = AsapoSender(producer)
 
 meta={}
 meta['_id'] = 1
-meta['name'] = stream+"/"+"file1"
+meta['name'] = data_source+"/"+"file1"
 data = np.array([[1, 2, 3], [4, 5, 6]], np.float32)
 sender.send(data, meta)
 
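
Two producer-side renames show up here: `send_data` becomes `send`, and the `create_producer` timeout is now given in milliseconds (hence 600 → 600000). A minimal sketch of the same calls with placeholder values; `wait_requests_finished` is assumed from the producer API to flush the queue before exit:

```python
# Sketch: producer renames - send_data() becomes send(), timeout now in ms.
import asapo_producer

producer = asapo_producer.create_producer(
    "localhost:8400", "processed", "asapo_test", "auto",
    "python", "", 1, 600000)  # 600000 ms = 600 s

def callback(header, err):
    # invoked from a producer thread once the request has been processed
    print("sent:", header, "err:", err)

producer.send(1, "python/file1", b"hello",
              ingest_mode=asapo_producer.DEFAULT_INGEST_MODE,
              callback=callback)
producer.wait_requests_finished(2000)  # assumed call; timeout in ms
```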
diff --git a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
index 5fe621a3a80c63b4770fa81a2416f4f7bd2b8ecf..09e978362f9699b8dd9c2ac69c0b9fe5f4581940 100644
--- a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
+++ b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_linux.sh
@@ -5,7 +5,7 @@ set -e
 trap Cleanup EXIT
 
 beamtime_id=asapo_test
-stream=python
+data_source=python
 beamline=test
 receiver_root_folder=/tmp/asapo/receiver/files
 facility=test_facility
@@ -21,12 +21,12 @@ Cleanup() {
     nomad stop authorizer >/dev/null
     nomad stop nginx >/dev/null
     nomad run nginx_kill.nmd  && nomad stop -yes -purge nginx_kill > /dev/null
-    echo "db.dropDatabase()" | mongo ${beamtime_id}_${stream} >/dev/null
+    echo "db.dropDatabase()" | mongo ${beamtime_id}_${data_source} >/dev/null
 }
 
 export PYTHONPATH=$2:${PYTHONPATH}
 
-echo "db.${beamtime_id}_${stream}.insert({dummy:1})" | mongo ${beamtime_id}_${stream} >/dev/null
+echo "db.${beamtime_id}_${data_source}.insert({dummy:1})" | mongo ${beamtime_id}_${data_source} >/dev/null
 
 nomad run authorizer.nmd >/dev/null
 nomad run nginx.nmd >/dev/null
@@ -39,6 +39,6 @@ echo test > file1
 
 sleep 1
 
-$1 $3 $stream $beamtime_id  "127.0.0.1:8400" > out || cat out
+$1 $3 $data_source $beamtime_id  "127.0.0.1:8400" > out || cat out
 cat out
 cat out | grep "hello self callback"
diff --git a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
index b70d7749fa392bdb17d6ea4426e0f204540339bd..99fff14fb7c41bd6e8c9affba9d90af8638e0cb5 100644
--- a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
+++ b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
@@ -1,10 +1,10 @@
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 SET beamtime_id=asapo_test
 SET beamline=test
-SET stream=python
+SET data_source=python
 SET receiver_root_folder=c:\tmp\asapo\receiver\files
 SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\data\%beamtime_id%"
-SET dbname = %beamtime_id%_%stream%
+SET dbname=%beamtime_id%_%data_source%
 
 echo db.%dbname%.insert({dummy:1}) | %mongo_exe% %dbname%
 
@@ -18,7 +18,7 @@ ping 192.0.2.1 -n 1 -w 1000 > nul
 
 set PYTHONPATH=%2
 
-"%1" "%3" %stream% %beamtime_id%  "127.0.0.1:8400" > out
+"%1" "%3" %data_source% %beamtime_id%  "127.0.0.1:8400" > out
 type out
 type out | findstr /c:"hello self callback" || goto error
 
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt b/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
index cea0d39ae010441c92bb22e191294998d5fc32d8..e974aa40b4671c5f42f49f5f36fa86d1c89e352d 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/CMakeLists.txt
@@ -13,4 +13,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/test.json.in b/tests/automatic/bug_fixes/producer_send_after_restart/test.json.in
index ed41c425ce44f356fecb72e6c17820cae9ef7b69..488577ab4558d1e1a94d02a0e8ad2909760044c1 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/test.json.in
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 
diff --git a/tests/automatic/bug_fixes/receiver_cpu_usage/CMakeLists.txt b/tests/automatic/bug_fixes/receiver_cpu_usage/CMakeLists.txt
index f6bf9f8cfb1b26ca6b7fe568a41d61e641a76c11..15daad97f8c7aaf2a3068d839b12902da6a2ded3 100644
--- a/tests/automatic/bug_fixes/receiver_cpu_usage/CMakeLists.txt
+++ b/tests/automatic/bug_fixes/receiver_cpu_usage/CMakeLists.txt
@@ -8,4 +8,4 @@ set (ROOT_PATH "/tmp/asapo/")
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/bug_fixes/receiver_cpu_usage/test.json.in b/tests/automatic/bug_fixes/receiver_cpu_usage/test.json.in
index 7cfebfb26d502bb8699b5f199d178457c9a38872..6714f39e689b089be8d3870a85fe58ef0639bd38 100644
--- a/tests/automatic/bug_fixes/receiver_cpu_usage/test.json.in
+++ b/tests/automatic/bug_fixes/receiver_cpu_usage/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 }
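
Both event-monitor configs above get the same two key renames: `Stream` → `DataSource` and `Subset` → `Dataset`. A sketch that emits such a config from Python, using only the keys visible in these hunks:

```python
# Sketch: writing the renamed event-monitor config keys.
import json

config = {
    "IgnoreExtensions": ["tmp"],
    "WhitelistExtensions": [],
    "RemoveAfterSend": True,
    "DataSource": "",             # was "Stream"
    "Dataset": {"Mode": "none"},  # was "Subset"
}

with open("test.json", "w") as f:
    json.dump(config, f, indent=1)
```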
diff --git a/tests/automatic/consumer/consumer_api/check_linux.sh b/tests/automatic/consumer/consumer_api/check_linux.sh
index 2ab95e58eca6f41b64e0b346cadd9ceb85abbb67..3fb2718ca677c12d23096a68694338b0e3911f70 100644
--- a/tests/automatic/consumer/consumer_api/check_linux.sh
+++ b/tests/automatic/consumer/consumer_api/check_linux.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
 beamtime_id=test_run
-stream=detector
-database_name=${beamtime_id}_${stream}
+data_source=detector
+database_name=${beamtime_id}_${data_source}
 token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 set -e
@@ -28,17 +28,17 @@ sleep 1
 
 for i in `seq 1 10`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 for i in `seq 1 5`;
 do
-	echo 'db.data_stream1.insert({"_id":'$i',"size":6,"name":"'1$i'","timestamp":1000,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_stream1.insert({"_id":'$i',"size":6,"name":"'1$i'","timestamp":1000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 for i in `seq 1 5`;
 do
-	echo 'db.data_stream2.insert({"_id":'$i',"size":6,"name":"'2$i'","timestamp":2000,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_stream2.insert({"_id":'$i',"size":6,"name":"'2$i'","timestamp":2000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 
@@ -54,24 +54,24 @@ sleep 1
 
 for i in `seq 1 10`;
 do
-	images=''
+	messages=''
 	for j in `seq 1 3`;
 	do
-		images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"meta":{"test":10}}"
+		messages="$messages,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"dataset_substream":0,"meta":{"test":10}}"
 	done
-	images=${images#?}
-	echo 'db.data_default.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name}
+	messages=${messages#?}
+	echo 'db.data_default.insert({"_id":'$i',"size":3,"messages":['$messages']})' | mongo ${database_name}
 done
 
 for i in `seq 1 5`;
 do
-	images=''
+	messages=''
 	for j in `seq 1 2`;
 	do
-		images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"meta":{"test":10}}"
+		messages="$messages,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"dataset_substream":0,"meta":{"test":10}}"
 	done
-	images=${images#?}
-	echo 'db.data_incomplete.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name}
+	messages=${messages#?}
+	echo 'db.data_incomplete.insert({"_id":'$i',"size":3,"messages":['$messages']})' | mongo ${database_name}
 done
 
 
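
Dataset parents now hold their parts in a `messages` array (formerly `images`), and every part carries `dataset_substream`. The shell loops above build exactly that shape; the same documents with pymongo, under the same connection assumptions as before:

```python
# Sketch: dataset records as the test builds them - parts live in a
# "messages" array, each with a dataset_substream field; the parent
# "size" is the expected number of parts.
from pymongo import MongoClient

db = MongoClient("localhost", 27017)["test_run_detector"]

for i in range(1, 11):
    parts = [{"_id": j, "size": 6, "name": f"{i}_{j}", "timestamp": 1000,
              "source": "none", "buf_id": 0, "dataset_substream": 0,
              "meta": {"test": 10}} for j in range(1, 4)]
    db["data_default"].insert_one({"_id": i, "size": 3, "messages": parts})
```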
diff --git a/tests/automatic/consumer/consumer_api/check_windows.bat b/tests/automatic/consumer/consumer_api/check_windows.bat
index 0bc559dc68bd237a8600fea734ce71359bf3619a..19e163518f92fa6ea3bfc202608cd8b4e33c7174 100644
--- a/tests/automatic/consumer/consumer_api/check_windows.bat
+++ b/tests/automatic/consumer/consumer_api/check_windows.bat
@@ -1,18 +1,18 @@
 
 SET beamtime_id=test_run
-SET stream=detector
+SET data_source=detector
 
-SET database_name=%beamtime_id%_%stream%
+SET database_name=%beamtime_id%_%data_source%
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 call start_services.bat
 
-for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
-for /l %%x in (1, 1, 5) do echo db.data_stream1.insert({"_id":%%x,"size":6,"name":"1%%x","timestamp":1000,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_stream1.insert({"_id":%%x,"size":6,"name":"1%%x","timestamp":1000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
-for /l %%x in (1, 1, 5) do echo db.data_stream2.insert({"_id":%%x,"size":6,"name":"2%%x","timestamp":2000,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_stream2.insert({"_id":%%x,"size":6,"name":"2%%x","timestamp":2000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 echo hello1 > 1
 
 
@@ -20,7 +20,7 @@ echo hello1 > 1
 
 echo db.dropDatabase() | %mongo_exe% %database_name%
 
-for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":3,"images":[{"_id":1, "size":6,"name":"%%x_1","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":3, "size":6,"name":"%%x_3","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":3,"messages":[{"_id":1, "size":6,"name":"%%x_1","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}},{"_id":3, "size":6,"name":"%%x_3","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
 
 echo hello1 > 1_1
 
diff --git a/tests/automatic/consumer/consumer_api/consumer_api.cpp b/tests/automatic/consumer/consumer_api/consumer_api.cpp
index c0893e14ba311d39c4b1dabf4a546e6d7caedc79..e451289ed76e159ef3d4e8d7df80bfb54836fd9f 100644
--- a/tests/automatic/consumer/consumer_api/consumer_api.cpp
+++ b/tests/automatic/consumer/consumer_api/consumer_api.cpp
@@ -3,7 +3,7 @@
 #include <thread>
 #include <algorithm>
 #include <asapo/asapo_consumer.h>
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "testing.h"
 
 struct Args {
@@ -27,11 +27,11 @@ Args GetArgs(int argc, char* argv[]) {
 }
 
 
-void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::string& group_id) {
-    asapo::FileInfo fi;
+void TestSingle(const std::unique_ptr<asapo::Consumer>& consumer, const std::string& group_id) {
+    asapo::MessageMeta fi;
     asapo::Error err;
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     if (err) {
         std::cout << err->Explain() << std::endl;
     }
@@ -39,89 +39,89 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     M_AssertTrue(fi.name == "1", "GetNext filename");
     M_AssertTrue(fi.metadata == "{\"test\":10}", "GetNext metadata");
 
-    asapo::FileData data;
-    err = broker->RetrieveData(&fi, &data);
+    asapo::MessageData data;
+    err = consumer->RetrieveData(&fi, &data);
     M_AssertTrue(err == nullptr, "RetrieveData no error");
     M_AssertEq("hello1", std::string(data.get(), data.get() + fi.size));
 
 
-    err = broker->GetLast(&fi, nullptr);
+    err = consumer->GetLast(&fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetLast no error");
     M_AssertTrue(fi.name == "10", "GetLast filename");
     M_AssertTrue(fi.metadata == "{\"test\":10}", "GetLast metadata");
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNext2 no error");
     M_AssertTrue(fi.name == "2", "GetNext2 filename");
 
 
-    err = broker->SetLastReadMarker(2, group_id);
+    err = consumer->SetLastReadMarker(group_id, 2, "default");
     M_AssertTrue(err == nullptr, "SetLastReadMarker no error");
 
 
-    err = broker->GetById(8, &fi, nullptr);
+    err = consumer->GetById(8, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetById error");
     M_AssertTrue(fi.name == "8", "GetById filename");
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNext After GetById  no error");
     M_AssertTrue(fi.name == "3", "GetNext After GetById filename");
 
 
-    err = broker->GetLast(&fi, nullptr);
+    err = consumer->GetLast(&fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetLast2 no error");
 
 
-    err = broker->SetLastReadMarker(8, group_id);
+    err = consumer->SetLastReadMarker(group_id, 8, "default");
     M_AssertTrue(err == nullptr, "SetLastReadMarker 2 no error");
 
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNext3 no error");
     M_AssertTrue(fi.name == "9", "GetNext3 filename");
 
-    auto size = broker->GetCurrentSize(&err);
+    auto size = consumer->GetCurrentSize("default", &err);
     M_AssertTrue(err == nullptr, "GetCurrentSize no error");
     M_AssertTrue(size == 10, "GetCurrentSize size");
 
-    err = broker->ResetLastReadMarker(group_id);
+    err = consumer->ResetLastReadMarker(group_id, "default");
     M_AssertTrue(err == nullptr, "ResetLastReadMarker no error");
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNext4 no error");
     M_AssertTrue(fi.name == "1", "GetNext4 filename");
 
-    auto group_id2 = broker->GenerateNewGroupId(&err);
-    err = broker->GetNext(&fi, group_id2, nullptr);
+    auto group_id2 = consumer->GenerateNewGroupId(&err);
+    err = consumer->GetNext(group_id2, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNext5 no error");
     M_AssertTrue(fi.name == "1", "GetNext5  filename");
 
-    auto images = broker->QueryImages("meta.test = 10", &err);
+    auto messages = consumer->QueryMessages("meta.test = 10", "default", &err);
     M_AssertTrue(err == nullptr, "query1");
-    M_AssertTrue(images.size() == 10, "size of query answer 1");
+    M_AssertTrue(messages.size() == 10, "size of query answer 1");
 
-    images = broker->QueryImages("meta.test = 10 AND name='1'", &err);
+    messages = consumer->QueryMessages("meta.test = 10 AND name='1'", "default", &err);
     M_AssertTrue(err == nullptr, "query2");
-    M_AssertTrue(images.size() == 1, "size of query answer 2");
+    M_AssertTrue(messages.size() == 1, "size of query answer 2");
     M_AssertTrue(fi.name == "1", "GetNext5  filename");
 
 
-    images = broker->QueryImages("meta.test = 11", &err);
+    messages = consumer->QueryMessages("meta.test = 11", "default", &err);
     M_AssertTrue(err == nullptr, "query3");
-    M_AssertTrue(images.size() == 0, "size of query answer 3");
+    M_AssertTrue(messages.size() == 0, "size of query answer 3");
 
-    images = broker->QueryImages("meta.test = 18", &err);
+    messages = consumer->QueryMessages("meta.test = 18", "default", &err);
     M_AssertTrue(err == nullptr, "query4");
-    M_AssertTrue(images.size() == 0, "size of query answer 4");
+    M_AssertTrue(messages.size() == 0, "size of query answer 4");
 
-    images = broker->QueryImages("bla", &err);
+    messages = consumer->QueryMessages("bla", "default", &err);
     M_AssertTrue(err != nullptr, "query5");
-    M_AssertTrue(images.size() == 0, "size of query answer 5");
+    M_AssertTrue(messages.size() == 0, "size of query answer 5");
 
 
 //streams
 
-    err = broker->GetNext(&fi, group_id, "stream1", nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "stream1");
     if (err) {
         std::cout << err->Explain() << std::endl;
     }
@@ -129,84 +129,84 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     M_AssertTrue(err == nullptr, "GetNext stream1 no error");
     M_AssertTrue(fi.name == "11", "GetNext stream1 filename");
 
-    err = broker->GetNext(&fi, group_id, "stream2", nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "stream2");
     M_AssertTrue(err == nullptr, "GetNext stream2 no error");
     M_AssertTrue(fi.name == "21", "GetNext stream2 filename");
 
-    auto substreams = broker->GetSubstreamList("",&err);
-    M_AssertTrue(err == nullptr, "GetSubstreamList no error");
-    M_AssertTrue(substreams.size() == 3, "substreams.size");
-    M_AssertTrue(substreams[0].name == "default", "substreams0.name1");
-    M_AssertTrue(substreams[1].name == "stream1", "substreams1.name2");
-    M_AssertTrue(substreams[2].name == "stream2", "substreams2.name3");
-    std::cout<<substreams[0].Json(false)<<std::endl;
-    std::cout<<substreams[1].Json(false)<<std::endl;
-    std::cout<<substreams[2].Json(false)<<std::endl;
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[0].timestamp_created) == 0, "substreams0.timestamp");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[0].timestamp_lastentry) == 0, "substreams0.timestamp lastentry not set");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[1].timestamp_created) == 1000, "substreams1.timestamp");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[2].timestamp_created) == 2000, "substreams2.timestamp");
+    auto streams = consumer->GetStreamList("", &err);
+    M_AssertTrue(err == nullptr, "GetStreamList no error");
+    M_AssertTrue(streams.size() == 3, "streams.size");
+    M_AssertTrue(streams[0].name == "default", "streams0.name1");
+    M_AssertTrue(streams[1].name == "stream1", "streams1.name2");
+    M_AssertTrue(streams[2].name == "stream2", "streams2.name3");
+    std::cout<<streams[0].Json(false)<<std::endl;
+    std::cout<<streams[1].Json(false)<<std::endl;
+    std::cout<<streams[2].Json(false)<<std::endl;
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[0].timestamp_created) == 0, "streams0.timestamp");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[0].timestamp_lastentry) == 0, "streams0.timestamp lastentry not set");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[1].timestamp_created) == 1000, "streams1.timestamp");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[2].timestamp_created) == 2000, "streams2.timestamp");
 // acknowledges
 
-    auto id = broker->GetLastAcknowledgedTulpeId(group_id, &err);
+    auto id = consumer->GetLastAcknowledgedMessage(group_id, "default", &err);
     M_AssertTrue(err == asapo::ConsumerErrorTemplates::kNoData, "last ack default stream no data");
     M_AssertTrue(id == 0, "last ack default stream no data id = 0");
 
-    auto nacks = broker->GetUnacknowledgedTupleIds(group_id, 0, 0, &err);
+    auto nacks = consumer->GetUnacknowledgedMessages(group_id, 0, 0, "default", &err);
     M_AssertTrue(err == nullptr, "nacks default stream all");
     M_AssertTrue(nacks.size() == 10, "nacks default stream size = 10");
 
-    err = broker->Acknowledge(group_id, 1);
+    err = consumer->Acknowledge(group_id, 1, "default");
     M_AssertTrue(err == nullptr, "ack default stream no error");
 
-    nacks = broker->GetUnacknowledgedTupleIds(group_id, 0, 0, &err);
+    nacks = consumer->GetUnacknowledgedMessages(group_id, 0, 0, "default", &err);
     M_AssertTrue(nacks.size() == 9, "nacks default stream size = 9 after ack");
 
-    id = broker->GetLastAcknowledgedTulpeId(group_id, &err);
+    id = consumer->GetLastAcknowledgedMessage(group_id, "default", &err);
     M_AssertTrue(err == nullptr, "last ack default stream no error");
     M_AssertTrue(id == 1, "last ack default stream id = 1");
 
-    err = broker->Acknowledge(group_id, 1, "stream1");
+    err = consumer->Acknowledge(group_id, 1, "stream1");
     M_AssertTrue(err == nullptr, "ack stream1 no error");
 
-    nacks = broker->GetUnacknowledgedTupleIds(group_id, "stream1", 0, 0, &err);
+    nacks = consumer->GetUnacknowledgedMessages(group_id, 0, 0, "stream1", &err);
     M_AssertTrue(nacks.size() == 4, "nacks stream1 size = 4 after ack");
 
 // negative acks
-    broker->ResetLastReadMarker(group_id);
-    err = broker->GetNext(&fi, group_id, nullptr);
+    consumer->ResetLastReadMarker(group_id, "default");
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNextNegAckBeforeResend no error");
     M_AssertTrue(fi.name == "1", "GetNextNegAckBeforeResend filename");
-    err = broker->NegativeAcknowledge(group_id, 1, 0);
+    err = consumer->NegativeAcknowledge(group_id, 1, 0, "default");
     M_AssertTrue(err == nullptr, "NegativeAcknowledge no error");
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNextNegAckWithResend no error");
     M_AssertTrue(fi.name == "1", "GetNextNegAckWithResend filename");
 
 // automatic resend
-    broker->ResetLastReadMarker(group_id);
-    broker->SetResendNacs(true, 0, 1);
-    err = broker->GetNext(&fi, group_id, nullptr);
+    consumer->ResetLastReadMarker(group_id, "default");
+    consumer->SetResendNacs(true, 0, 1);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNextBeforeResend no error");
     M_AssertTrue(fi.name == "1", "GetNextBeforeResend filename");
 
-    err = broker->GetNext(&fi, group_id, nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNextWithResend no error");
     M_AssertTrue(fi.name == "1", "GetNextWithResend filename");
 
-    broker->SetResendNacs(false, 0, 1);
-    err = broker->GetNext(&fi, group_id, nullptr);
+    consumer->SetResendNacs(false, 0, 1);
+    err = consumer->GetNext(group_id, &fi, nullptr, "default");
     M_AssertTrue(err == nullptr, "GetNextAfterResend no error");
     M_AssertTrue(fi.name == "2", "GetNextAfterResend filename");
 
 }
 
 
-void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::string& group_id) {
-    asapo::FileInfo fi;
+void TestDataset(const std::unique_ptr<asapo::Consumer>& consumer, const std::string& group_id) {
+    asapo::MessageMeta fi;
     asapo::Error err;
 
-    auto dataset = broker->GetNextDataset(group_id, 0, &err);
+    auto dataset = consumer->GetNextDataset(group_id, 0, "default", &err);
     if (err) {
         std::cout << err->Explain() << std::endl;
     }
@@ -216,31 +216,31 @@ void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::st
     M_AssertTrue(dataset.content[2].name == "1_3", "GetNextDataSet filename");
     M_AssertTrue(dataset.content[0].metadata == "{\"test\":10}", "GetNext metadata");
 
-    asapo::FileData data;
-    err = broker->RetrieveData(&dataset.content[0], &data);
+    asapo::MessageData data;
+    err = consumer->RetrieveData(&dataset.content[0], &data);
     M_AssertTrue(err == nullptr, "RetrieveData no error");
     M_AssertEq("hello1", std::string(data.get(), data.get() + dataset.content[0].size));
 
 
-    dataset = broker->GetLastDataset(0, &err);
+    dataset = consumer->GetLastDataset(0, "default", &err);
     M_AssertTrue(err == nullptr, "GetLast no error");
     M_AssertTrue(dataset.content[0].name == "10_1", "GetLastDataset filename");
     M_AssertTrue(dataset.content[0].metadata == "{\"test\":10}", "GetLastDataset metadata");
 
-    dataset = broker->GetNextDataset(group_id, 0, &err);
+    dataset = consumer->GetNextDataset(group_id, 0, "default", &err);
     M_AssertTrue(err == nullptr, "GetNextDataset2 no error");
     M_AssertTrue(dataset.content[0].name == "2_1", "GetNextDataSet2 filename");
 
-    dataset = broker->GetLastDataset(0, &err);
+    dataset = consumer->GetLastDataset(0, "default", &err);
     M_AssertTrue(err == nullptr, "GetLastDataset2 no error");
 
-    dataset = broker->GetDatasetById(8, 0, &err);
+    dataset = consumer->GetDatasetById(8, 0, "default", &err);
     M_AssertTrue(err == nullptr, "GetDatasetById error");
     M_AssertTrue(dataset.content[2].name == "8_3", "GetDatasetById filename");
 
 // incomplete datasets without min_size
 
-    dataset = broker->GetNextDataset(group_id,"incomplete",0,&err);
+    dataset = consumer->GetNextDataset(group_id, 0, "incomplete", &err);
     M_AssertTrue(err == asapo::ConsumerErrorTemplates::kPartialData, "GetNextDataset incomplete error");
     M_AssertTrue(dataset.content.size() == 2, "GetNextDataset incomplete size");
     M_AssertTrue(dataset.content[0].name == "1_1", "GetNextDataset incomplete filename");
@@ -250,24 +250,24 @@ void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::st
     M_AssertTrue(dataset.expected_size == 3, "GetDatasetById expected size");
     M_AssertTrue(dataset.id == 1, "GetDatasetById expected id");
 
-    dataset = broker->GetLastDataset("incomplete", 0, &err);
+    dataset = consumer->GetLastDataset(0, "incomplete", &err);
     M_AssertTrue(err == asapo::ConsumerErrorTemplates::kEndOfStream, "GetLastDataset incomplete no data");
 
-    dataset = broker->GetDatasetById(2, "incomplete", 0, &err);
+    dataset = consumer->GetDatasetById(2, 0, "incomplete", &err);
     M_AssertTrue(err == asapo::ConsumerErrorTemplates::kPartialData, "GetDatasetById incomplete error");
     M_AssertTrue(dataset.content[0].name == "2_1", "GetDatasetById incomplete filename");
 
 // incomplete datasets with min_size
 
-    dataset = broker->GetNextDataset(group_id,"incomplete",2,&err);
+    dataset = consumer->GetNextDataset(group_id, 2, "incomplete", &err);
     M_AssertTrue(err == nullptr, "GetNextDataset incomplete minsize error");
     M_AssertTrue(dataset.id == 2, "GetNextDataset minsize id");
 
-    dataset = broker->GetLastDataset("incomplete", 2, &err);
+    dataset = consumer->GetLastDataset(2, "incomplete", &err);
     M_AssertTrue(err == nullptr, "GetLastDataset incomplete minsize error");
     M_AssertTrue(dataset.id == 5, "GetLastDataset minsize id");
 
-    dataset = broker->GetDatasetById(2, "incomplete", 2, &err);
+    dataset = consumer->GetDatasetById(2, 2, "incomplete", &err);
     M_AssertTrue(err == nullptr, "GetDatasetById incomplete minsize error");
     M_AssertTrue(dataset.content[0].name == "2_1", "GetDatasetById incomplete minsize filename");
 
@@ -276,21 +276,25 @@ void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::st
 
 void TestAll(const Args& args) {
     asapo::Error err;
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, ".", true,
-                  asapo::SourceCredentials{asapo::SourceType::kProcessed,args.run_name, "", "", args.token}, &err);
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(args.server,
+                                                         ".",
+                                                         true,
+                                                         asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                  args.run_name, "", "", args.token},
+                                                         &err);
     if (err) {
-        std::cout << "Error CreateServerBroker: " << err << std::endl;
+        std::cout << "Error CreateConsumer: " << err << std::endl;
         exit(EXIT_FAILURE);
     }
 
-    broker->SetTimeout(100);
-    auto group_id = broker->GenerateNewGroupId(&err);
+    consumer->SetTimeout(100);
+    auto group_id = consumer->GenerateNewGroupId(&err);
 
     if (args.datasets == "single") {
-        TestSingle(broker, group_id);
+        TestSingle(consumer, group_id);
     }
     if (args.datasets == "dataset") {
-        TestDataset(broker, group_id);
+        TestDataset(consumer, group_id);
     }
 
 }
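
The C++ hunks above show the reordered consumer signatures: the stream name moves to the last parameter (`GetNext(group_id, &fi, nullptr, "default")`), dataset getters take `min_size` before the stream, and the old `...TulpeId`/`...TupleIds` helpers become `...Message(s)`. The Python binding mirrors this with a trailing `stream` keyword; a sketch with placeholder values, assuming a reachable broker:

```python
# Sketch: the reordered consumer calls as seen from the Python binding.
import asapo_consumer

consumer = asapo_consumer.create_consumer(
    "localhost:8400", ".", True, "test_run", "detector", "token", 1000)
group_id = consumer.generate_group_id()

_, meta = consumer.get_next(group_id, meta_only=True, stream="stream1")
consumer.acknowledge(group_id, meta["_id"], "stream1")

# incomplete datasets: without min_size they raise a partial-data error;
# with min_size they are returned once at least min_size parts have arrived
res = consumer.get_next_dataset(group_id, min_size=2, stream="incomplete")
```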
diff --git a/tests/automatic/consumer/consumer_api_python/check_linux.sh b/tests/automatic/consumer/consumer_api_python/check_linux.sh
index 1f65c06141f909261993b8af5414353f72815fb9..22d179ce11e85172925902031dbfafe5c3147e19 100644
--- a/tests/automatic/consumer/consumer_api_python/check_linux.sh
+++ b/tests/automatic/consumer/consumer_api_python/check_linux.sh
@@ -2,8 +2,8 @@
 
 beamtime_id=test_run
 source_path=`pwd`/asap3/petra3/gpfs/p01/2019/data/$beamtime_id
-stream=detector
-database_name=${beamtime_id}_${stream}
+data_source=detector
+database_name=${beamtime_id}_${data_source}
 token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 set -e
 
@@ -35,19 +35,19 @@ echo -n hello1 > $source_path/1_1
 
 for i in `seq 1 5`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":6,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
-echo 'db.data_streamfts.insert({"_id":'1',"size":0,"name":"'1'","timestamp":1000,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+echo 'db.data_streamfts.insert({"_id":'1',"size":0,"name":"'1'","timestamp":1000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 
 for i in `seq 1 5`;
 do
-	echo 'db.data_stream1.insert({"_id":'$i',"size":6,"name":"'1$i'","timestamp":2000,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_stream1.insert({"_id":'$i',"size":6,"name":"'1$i'","timestamp":2000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 for i in `seq 1 5`;
 do
-	echo 'db.data_stream2.insert({"_id":'$i',"size":6,"name":"'2$i'","timestamp":3000,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_stream2.insert({"_id":'$i',"size":6,"name":"'2$i'","timestamp":3000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 sleep 1
@@ -63,24 +63,24 @@ sleep 1
 
 for i in `seq 1 10`;
 do
-	images=''
+	messages=''
 	for j in `seq 1 3`;
 	do
-		images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":0,"source":'none',"buf_id":0,"meta":{"test":10}}"
+		messages="$messages,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":0,"source":'none',"buf_id":0,"dataset_substream":0,"meta":{"test":10}}"
 	done
-	images=${images#?}
-	echo 'db.data_default.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name} >/dev/null
+	messages=${messages#?}
+	echo 'db.data_default.insert({"_id":'$i',"size":3,"messages":['$messages']})' | mongo ${database_name} >/dev/null
 done
 
 for i in `seq 1 5`;
 do
-	images=''
+	messages=''
 	for j in `seq 1 2`;
 	do
-		images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"meta":{"test":10}}"
+		messages="$messages,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"dataset_substream":0,"meta":{"test":10}}"
 	done
-	images=${images#?}
-	echo 'db.data_incomplete.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name}
+	messages=${messages#?}
+	echo 'db.data_incomplete.insert({"_id":'$i',"size":3,"messages":['$messages']})' | mongo ${database_name}
 done
 
 
diff --git a/tests/automatic/consumer/consumer_api_python/check_windows.bat b/tests/automatic/consumer/consumer_api_python/check_windows.bat
index 5b3f47301867f1442e618a3ab0984fc07bae0336..adcf8ce57d735d5b6da75b464aabe61bc82d0cc1 100644
--- a/tests/automatic/consumer/consumer_api_python/check_windows.bat
+++ b/tests/automatic/consumer/consumer_api_python/check_windows.bat
@@ -3,22 +3,22 @@ SET beamtime_id=test_run
 SET source_path=%cd%\asap3\petra3\gpfs\p01\2019\data\%beamtime_id%
 set source_path=%source_path:\=\\%
 
-SET stream=detector
+SET data_source=detector
 
-SET database_name=%beamtime_id%_%stream%
+SET database_name=%beamtime_id%_%data_source%
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 call start_services.bat
 
-for /l %%x in (1, 1, 5) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
-echo db.data_streamfts.insert({"_id":1,"size":0,"name":"1","timestamp":1000,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+echo db.data_streamfts.insert({"_id":1,"size":0,"name":"1","timestamp":1000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
-for /l %%x in (1, 1, 5) do echo db.data_stream1.insert({"_id":%%x,"size":6,"name":"1%%x","timestamp":2000,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_stream1.insert({"_id":%%x,"size":6,"name":"1%%x","timestamp":2000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
-for /l %%x in (1, 1, 5) do echo db.data_stream2.insert({"_id":%%x,"size":6,"name":"2%%x","timestamp":3000,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_stream2.insert({"_id":%%x,"size":6,"name":"2%%x","timestamp":3000,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
 
 mkdir %source_path%
@@ -33,9 +33,9 @@ python %3/consumer_api.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_tes
 
 echo db.dropDatabase() | %mongo_exe% %database_name%
 
-for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":3,"images":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":3, "size":6,"name":"%%x_3","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":3,"messages":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}},{"_id":3, "size":6,"name":"%%x_3","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
 
-for /l %%x in (1, 1, 5) do echo db.data_incomplete.insert({"_id":%%x,"size":3,"images":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 5) do echo db.data_incomplete.insert({"_id":%%x,"size":3,"messages":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name%  || goto :error
 
 python %3/consumer_api.py 127.0.0.1:8400 %source_path% %beamtime_id%  %token_test_run% datasets || goto :error
 
diff --git a/tests/automatic/consumer/consumer_api_python/consumer_api.py b/tests/automatic/consumer/consumer_api_python/consumer_api.py
index 7533e3584b8246f2edfa9ef2cd7eeae4cbf668eb..013ce0516a8113aa1ee22b255dc37ddfe93034ef 100644
--- a/tests/automatic/consumer/consumer_api_python/consumer_api.py
+++ b/tests/automatic/consumer/consumer_api_python/consumer_api.py
@@ -38,69 +38,69 @@ def assert_eq(val, expected, name):
         sys.exit(1)
 
 
-def check_file_transfer_service(broker, group_id):
-    broker.set_timeout(1000)
-    data, meta = broker.get_by_id(1, meta_only=False)
+def check_file_transfer_service(consumer, group_id):
+    consumer.set_timeout(1000)
+    data, meta = consumer.get_by_id(1, meta_only=False)
     assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service ok")
-    data, meta = broker.get_by_id(1, "streamfts", meta_only=False)
+    data, meta = consumer.get_by_id(1, meta_only=False, stream="streamfts")
     assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service with auto size ok")
 
 
-def check_single(broker, group_id):
+def check_single(consumer, group_id):
     global thread_res
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next1")
     assert_usermetadata(meta, "get next1")
 
-    broker.set_timeout(1000)
+    consumer.set_timeout(1000)
 
-    data = broker.retrieve_data(meta)
+    data = consumer.retrieve_data(meta)
     assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data data")
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "2", "get next2")
     assert_usermetadata(meta, "get next2")
 
-    _, meta = broker.get_last(meta_only=True)
+    _, meta = consumer.get_last(meta_only=True)
     assert_metaname(meta, "5", "get last1")
     assert_usermetadata(meta, "get last1")
 
     try:
-        broker.get_by_id(30, meta_only=True)
+        consumer.get_by_id(30, meta_only=True)
     except asapo_consumer.AsapoEndOfStreamError:
         pass
     else:
         exit_on_noerr("get_by_id no data")
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "3", "get next3")
 
 
-    size = broker.get_current_size()
+    size = consumer.get_current_size()
     assert_eq(size, 5, "get_current_size")
 
-    broker.reset_lastread_marker(group_id)
+    consumer.reset_lastread_marker(group_id)
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next4")
     assert_usermetadata(meta, "get next4")
 
-    _, meta = broker.get_by_id(3, meta_only=True)
+    _, meta = consumer.get_by_id(3, meta_only=True)
     assert_metaname(meta, "3", "get get_by_id")
     assert_usermetadata(meta, "get get_by_id")
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "2", "get next5")
     assert_usermetadata(meta, "get next5")
 
-    broker.set_lastread_marker(4, group_id)
+    consumer.set_lastread_marker(group_id, 4)
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "5", "get next6")
     assert_usermetadata(meta, "get next6")
 
     try:
-        broker.get_next("_wrong_group_name", meta_only=True)
+        consumer.get_next("_wrong_group_name", meta_only=True)
     except asapo_consumer.AsapoWrongInputError as err:
         print(err)
         pass
@@ -108,106 +108,106 @@ def check_single(broker, group_id):
         exit_on_noerr("should give wrong input error")
 
     try:
-        broker.get_last(meta_only=False)
+        consumer.get_last(meta_only=False)
     except asapo_consumer.AsapoLocalIOError as err:
         print(err)
         pass
     else:
         exit_on_noerr("io error")
 
-    _, meta = broker.get_next(group_id, "stream1", meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True, stream="stream1")
     assert_metaname(meta, "11", "get next stream1")
 
-    _, meta = broker.get_next(group_id, "stream2", meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True, stream="stream2")
     assert_metaname(meta, "21", "get next stream2")
 
-    substreams = broker.get_substream_list("")
-    assert_eq(len(substreams), 4, "number of substreams")
-    print(substreams)
-    assert_eq(substreams[0]["name"], "default", "substreams_name1")
-    assert_eq(substreams[1]["name"], "streamfts", "substreams_name2")
-    assert_eq(substreams[2]["name"], "stream1", "substreams_name2")
-    assert_eq(substreams[3]["name"], "stream2", "substreams_name3")
-    assert_eq(substreams[1]["timestampCreated"], 1000, "substreams_timestamp2")
+    streams = consumer.get_stream_list("")
+    assert_eq(len(streams), 4, "number of streams")
+    print(streams)
+    assert_eq(streams[0]["name"], "default", "streams_name1")
+    assert_eq(streams[1]["name"], "streamfts", "streams_name2")
+    assert_eq(streams[2]["name"], "stream1", "streams_name2")
+    assert_eq(streams[3]["name"], "stream2", "streams_name3")
+    assert_eq(streams[1]["timestampCreated"], 1000, "streams_timestamp2")
 
     # acks
     try:
-        id = broker.get_last_acknowledged_tuple_id(group_id)
+        id = consumer.get_last_acknowledged_message(group_id)
     except asapo_consumer.AsapoNoDataError as err:
         print(err)
         pass
     else:
-        exit_on_noerr("get_last_acknowledged_tuple_id")
+        exit_on_noerr("get_last_acknowledged_message")
 
-    nacks = broker.get_unacknowledged_tuple_ids(group_id)
+    nacks = consumer.get_unacknowledged_messages(group_id)
     assert_eq(len(nacks), 5, "nacks default stream size = 5")
 
-    broker.acknowledge(group_id, 1)
+    consumer.acknowledge(group_id, 1)
 
-    nacks = broker.get_unacknowledged_tuple_ids(group_id)
+    nacks = consumer.get_unacknowledged_messages(group_id)
     assert_eq(len(nacks), 4, "nacks default stream size = 4")
 
-    id = broker.get_last_acknowledged_tuple_id(group_id)
+    id = consumer.get_last_acknowledged_message(group_id)
     assert_eq(id, 1, "last ack default stream id = 1")
 
-    broker.acknowledge(group_id, 1, "stream1")
-    nacks = broker.get_unacknowledged_tuple_ids(group_id)
+    consumer.acknowledge(group_id, 1, "stream1")
+    nacks = consumer.get_unacknowledged_messages(group_id)
     assert_eq(len(nacks), 4, "nacks stream1 size = 4 after ack")
 
     # neg acks
-    broker.reset_lastread_marker(group_id)
-    _, meta = broker.get_next(group_id, meta_only=True)
+    consumer.reset_lastread_marker(group_id)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next neg ack before resend")
-    broker.reset_lastread_marker(group_id)
-    _, meta = broker.get_next(group_id, meta_only=True)
+    consumer.reset_lastread_marker(group_id)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next neg ack with resend")
 
     # resend
-    broker.reset_lastread_marker(group_id)
-    broker.set_resend_nacs(True, 0, 1)
-    _, meta = broker.get_next(group_id, meta_only=True)
+    consumer.reset_lastread_marker(group_id)
+    consumer.set_resend_nacs(True, 0, 1)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next before resend")
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "1", "get next with resend")
 
-    _, meta = broker.get_next(group_id, meta_only=True)
+    _, meta = consumer.get_next(group_id, meta_only=True)
     assert_metaname(meta, "2", "get next after resend")
 
-    # images
+    # messages
 
-    images = broker.query_images("meta.test = 10")
-    assert_eq(len(images), 5, "size of query answer 1")
-    for image in images:
-        assert_usermetadata(image, "query_images")
+    messages = consumer.query_messages("meta.test = 10")
+    assert_eq(len(messages), 5, "size of query answer 1")
+    for message in messages:
+        assert_usermetadata(message, "query_messages")
 
-    images = broker.query_images("meta.test = 10 AND name='1'")
-    assert_eq(len(images), 1, "size of query answer 2 ")
+    messages = consumer.query_messages("meta.test = 10 AND name='1'")
+    assert_eq(len(messages), 1, "size of query answer 2 ")
 
-    for image in images:
-        assert_usermetadata(image, "query_images")
+    for message in messages:
+        assert_usermetadata(message, "query_messages")
 
-    images = broker.query_images("meta.test = 11")
-    assert_eq(len(images), 0, "size of query answer 3 ")
+    messages = consumer.query_messages("meta.test = 11")
+    assert_eq(len(messages), 0, "size of query answer 3 ")
 
     try:
-        images = broker.query_images("bla")
+        messages = consumer.query_messages("bla")
     except:
         pass
     else:
         exit_on_noerr("wrong query")
 
-    broker = asapo_consumer.create_server_broker("bla", path, True, beamtime, "", token, 1000)
+    consumer = asapo_consumer.create_consumer("bla", path, True, beamtime, "", token, 1000)
     try:
-        broker.get_last(meta_only=True)
+        consumer.get_last(meta_only=True)
     except asapo_consumer.AsapoUnavailableServiceError as err:
         print(err)
         pass
     else:
-        exit_on_noerr("AsapoBrokerServersNotFound")
+        exit_on_noerr("AsapoConsumerServersNotFound")
 
     try:
-        asapo_consumer.create_server_broker("", "", True, "", "", "", 1000)
+        asapo_consumer.create_consumer("", "", True, "", "", "", 1000)
     except asapo_consumer.AsapoWrongInputError as err:
         print(err)
         pass
@@ -216,10 +216,10 @@ def check_single(broker, group_id):
 
 # interrupt
     thread_res = 0
-    def long_call(broker):
+    def long_call(consumer):
         global thread_res
         try:
-            broker.get_last(meta_only=True)
+            consumer.get_last(meta_only=True)
             thread_res = 1
         except asapo_consumer.AsapoInterruptedTransactionError as err:
             global res
@@ -231,47 +231,47 @@ def check_single(broker, group_id):
             thread_res = 3
             pass
 
-    broker = asapo_consumer.create_server_broker("bla", path, True, beamtime, "", token, 60000)
-    t = Thread(target =  long_call, args =  (broker,) )
+    consumer = asapo_consumer.create_consumer("bla", path, True, beamtime, "", token, 60000)
+    t = Thread(target=long_call, args=(consumer,))
     t.start()
     time.sleep(1)
-    broker.interrupt_current_operation()
+    consumer.interrupt_current_operation()
     t.join()
     assert_eq(thread_res, 2, "long call res")
 
 
 
-def check_dataset(broker, group_id):
-    res = broker.get_next_dataset(group_id)
+def check_dataset(consumer, group_id):
+    res = consumer.get_next_dataset(group_id)
     assert_eq(res['id'], 1, "get_next_dataset1")
     assert_metaname(res['content'][0], "1_1", "get nextdataset1 name1")
     assert_metaname(res['content'][1], "1_2", "get nextdataset1 name2")
     assert_usermetadata(res['content'][0], "get nextdataset1 meta")
 
-    broker.set_timeout(1000)
+    consumer.set_timeout(1000)
 
-    data = broker.retrieve_data(res['content'][0])
+    data = consumer.retrieve_data(res['content'][0])
     assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data from dataset data")
 
-    res = broker.get_next_dataset(group_id)
+    res = consumer.get_next_dataset(group_id)
     assert_eq(res['id'], 2, "get_next_dataset2")
     assert_metaname(res['content'][0], "2_1", "get nextdataset2 name1")
 
-    res = broker.get_last_dataset()
+    res = consumer.get_last_dataset()
     assert_eq(res['id'], 10, "get_last_dataset1")
     assert_eq(res['expected_size'], 3, "get_last_dataset1 size ")
     assert_metaname(res['content'][2], "10_3", "get get_last_dataset1 name3")
 
-    res = broker.get_next_dataset(group_id)
+    res = consumer.get_next_dataset(group_id)
     assert_eq(res['id'], 3, "get_next_dataset3")
 
-    res = broker.get_dataset_by_id(8)
+    res = consumer.get_dataset_by_id(8)
     assert_eq(res['id'], 8, "get_dataset_by_id1 id")
     assert_metaname(res['content'][2], "8_3", "get get_dataset_by_id1 name3")
 
     # incomplete datasets without min_size given
     try:
-        broker.get_next_dataset(group_id, "incomplete")
+        consumer.get_next_dataset(group_id, stream="incomplete")
     except asapo_consumer.AsapoPartialDataError as err:
         assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size")
         assert_eq(err.partial_data['id'], 1, "get_next_dataset incomplete id")
@@ -282,7 +282,7 @@ def check_dataset(broker, group_id):
         exit_on_noerr("get_next_dataset incomplete err")
 
     try:
-        broker.get_dataset_by_id(2, "incomplete")
+        consumer.get_dataset_by_id(2, stream="incomplete")
     except asapo_consumer.AsapoPartialDataError as err:
         assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size")
         assert_eq(err.partial_data['id'], 2, "get_next_dataset incomplete id")
@@ -293,37 +293,37 @@ def check_dataset(broker, group_id):
         exit_on_noerr("get_next_dataset incomplete err")
 
     try:
-        broker.get_last_dataset("incomplete")
+        consumer.get_last_dataset(stream="incomplete")
     except asapo_consumer.AsapoEndOfStreamError as err:
         pass
     else:
         exit_on_noerr("get_last_dataset incomplete err")
     # incomplete with min_size given
-    res = broker.get_next_dataset(group_id, "incomplete", min_size=2)
+    res = consumer.get_next_dataset(group_id, min_size=2, stream="incomplete")
     assert_eq(res['id'], 2, "get_next_dataset incomplete with minsize")
 
-    res = broker.get_last_dataset("incomplete", min_size=2)
+    res = consumer.get_last_dataset(min_size=2, stream = "incomplete")
     assert_eq(res['id'], 5, "get_last_dataset incomplete with minsize")
 
-    res = broker.get_dataset_by_id(2, "incomplete", min_size=1)
+    res = consumer.get_dataset_by_id(2, min_size=1, stream = "incomplete")
     assert_eq(res['id'], 2, "get_dataset_by_id incomplete with minsize")
 
 
 source, path, beamtime, token, mode = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(source, path, True, beamtime, "", token, 60000)
-broker_fts = asapo_consumer.create_server_broker(source, path, False, beamtime, "", token, 60000)
+consumer = asapo_consumer.create_consumer(source, path, True, beamtime, "", token, 60000)
+consumer_fts = asapo_consumer.create_consumer(source, path, False, beamtime, "", token, 60000)
 
-group_id = broker.generate_group_id()
+group_id = consumer.generate_group_id()
 
-group_id_fts = broker_fts.generate_group_id()
+group_id_fts = consumer_fts.generate_group_id()
 
 if mode == "single":
-    check_single(broker, group_id)
-    check_file_transfer_service(broker_fts, group_id_fts)
+    check_single(consumer, group_id)
+    check_file_transfer_service(consumer_fts, group_id_fts)
 
 if mode == "datasets":
-    check_dataset(broker, group_id)
+    check_dataset(consumer, group_id)
 
 print("tests done")
 sys.exit(0)
diff --git a/tests/automatic/consumer/next_multithread_broker/check_linux.sh b/tests/automatic/consumer/next_multithread_broker/check_linux.sh
index 44562efde33c87728fd7946b609f5c7acde9a062..b172ad0ac649f3ec6646f1c71b3ce881fd55d61b 100644
--- a/tests/automatic/consumer/next_multithread_broker/check_linux.sh
+++ b/tests/automatic/consumer/next_multithread_broker/check_linux.sh
@@ -25,7 +25,7 @@ sleep 1
 
 for i in `seq 1 10`;
 do
-	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}})' | mongo ${database_name}
+	echo 'db.data_default.insert({"_id":'$i',"size":100,"name":"'$i'","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}})' | mongo ${database_name}
 done
 
 $@ 127.0.0.1:8400 test_run 4 10 $token_test_run
diff --git a/tests/automatic/consumer/next_multithread_broker/check_windows.bat b/tests/automatic/consumer/next_multithread_broker/check_windows.bat
index 2c111405a170ddeb799a164db2df468f84ef6710..4a13c733a4e3764b4aa452e7e7af806fc6eb5f22 100644
--- a/tests/automatic/consumer/next_multithread_broker/check_windows.bat
+++ b/tests/automatic/consumer/next_multithread_broker/check_windows.bat
@@ -4,7 +4,7 @@ set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
 call start_services.bat
 
-for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
+for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","timestamp":0,"source":"none","buf_id":0,"dataset_substream":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
 
 %1 127.0.0.1:8400 test_run 4 10 %token_test_run% || goto :error
diff --git a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
index 75a82ff03afc7a510046b5e55c0bea813cde14bb..d12de6de804c5884d55b371bccb58de80fabe000 100644
--- a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
+++ b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
@@ -2,18 +2,18 @@
 #include <vector>
 #include <thread>
 #include <algorithm>
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "testing.h"
 
-void Assert(std::vector<asapo::FileInfos> file_infos, int nthreads, int nfiles) {
+void Assert(std::vector<asapo::MessageMetas> message_metas, int nthreads, int nfiles) {
     std::vector<std::string> expect, result;
     for (int i = 1; i <= nfiles; i++) {
         expect.push_back(std::to_string(i));
     }
     int nfiles_read = 0;
     for (int i = 0; i < nthreads; i++) {
-        nfiles_read += file_infos[i].size();
-        for (const auto& fi : file_infos[i]) {
+        nfiles_read += message_metas[i].size();
+        for (const auto& fi : message_metas[i]) {
             result.push_back(fi.name);
         }
     }
@@ -53,19 +53,24 @@ Args GetArgs(int argc, char* argv[]) {
 
 void TestAll(const Args& args) {
     asapo::Error err;
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, "dummy", true, asapo::SourceCredentials{asapo::SourceType::kProcessed,args.run_name, "", "", args.token}, &err);
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(args.server,
+                                                         "dummy",
+                                                         true,
+                                                         asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                  args.run_name, "", "", args.token},
+                                                         &err);
     if (err) {
-        std::cout << "Error CreateServerBroker: " << err << std::endl;
+        std::cout << "Error CreateConsumer: " << err << std::endl;
         exit(EXIT_FAILURE);
     }
 
-    auto group_id = broker->GenerateNewGroupId(&err);
-    broker->SetTimeout(10000);
-    std::vector<asapo::FileInfos>file_infos(args.nthreads);
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    consumer->SetTimeout(10000);
+    std::vector<asapo::MessageMetas>message_metas(args.nthreads);
     auto exec_next = [&](int i) {
-        asapo::FileInfo fi;
-        while ((err = broker->GetNext(&fi, group_id, nullptr)) == nullptr) {
-            file_infos[i].emplace_back(fi);
+        asapo::MessageMeta fi;
+        while ((err = consumer->GetNext(group_id, &fi, nullptr, "default")) == nullptr) {
+            message_metas[i].emplace_back(fi);
         }
         printf("%s\n", err->Explain().c_str());
     };
@@ -81,7 +86,7 @@ void TestAll(const Args& args) {
         }
     }
 
-    Assert(file_infos, args.nthreads, args.nfiles);
+    Assert(message_metas, args.nthreads, args.nfiles);
 }
 
 int main(int argc, char* argv[]) {
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
index 730de4a472a46e52611368a095bd0883f06ab0ad..33e1b6a6665255ebc048374b96c67505155b77ff 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
+++ b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
@@ -1,8 +1,8 @@
 #include <iostream>
 #include <vector>
-#include "asapo/consumer/data_broker.h"
+#include "asapo/consumer/consumer.h"
 #include "testing.h"
-#include "../../../consumer/api/cpp/src/server_data_broker.h"
+#include "../../../consumer/api/cpp/src/consumer_impl.h"
 #include "asapo/preprocessor/definitions.h"
 #include "asapo/io/io_factory.h"
 #include "asapo/io/io.h"
@@ -32,14 +32,19 @@ int main(int argc, char* argv[]) {
     std::string authorize_request = "{\"Folder\":\"" + args.folder + "\",\"BeamtimeId\":\"aaa\",\"Token\":\"" + token +
                                     "\"}";
     asapo::Error err;
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.uri_authorizer, "", true, asapo::SourceCredentials{asapo::SourceType::kProcessed,"", "", "", ""}, &err);
-    auto server_broker = static_cast<asapo::ServerDataBroker*>(broker.get());
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(args.uri_authorizer,
+                                                         "",
+                                                         true,
+                                                         asapo::SourceCredentials{asapo::SourceType::kProcessed, "", "",
+                                                                                  "", ""},
+                                                         &err);
+    auto consumer_impl = static_cast<asapo::ConsumerImpl*>(consumer.get());
     M_AssertEq(nullptr, err);
 
     asapo::HttpCode code;
     std::string response;
     std::string input_data;
-    auto folder_token = server_broker->httpclient__->Post(args.uri_authorizer + "/folder", "", authorize_request, &code,
+    auto folder_token = consumer_impl->httpclient__->Post(args.uri_authorizer + "/folder", "", authorize_request, &code,
                         &err);
     M_AssertTrue(err == nullptr);
     M_AssertTrue(code == asapo::HttpCode::OK);
@@ -47,21 +52,21 @@ int main(int argc, char* argv[]) {
         std::cout << err->Explain();
     }
 
-    server_broker->httpclient__->Post(args.uri_authorizer + "/folder", "", "", &code, &err);
+    consumer_impl->httpclient__->Post(args.uri_authorizer + "/folder", "", "", &code, &err);
     M_AssertTrue(code == asapo::HttpCode::BadRequest);
 
-    server_broker->httpclient__->Post(args.uri_authorizer + "/bla", "", "", &code, &err);
+    consumer_impl->httpclient__->Post(args.uri_authorizer + "/bla", "", "", &code, &err);
     M_AssertTrue(code == asapo::HttpCode::NotFound);
 
 // check post with data
     std::string transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"aaa\"}";
     std::string cookie = "Authorization=Bearer " + folder_token + ";";
-    auto content = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &code, &err);
+    auto content = consumer_impl->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &code, &err);
     M_AssertEq("hello", content);
     M_AssertTrue(code == asapo::HttpCode::OK);
 // with array
-    asapo::FileData data;
-    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, 5, &code);
+    asapo::MessageData data;
+    err = consumer_impl->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, 5, &code);
     M_AssertEq( "hello", reinterpret_cast<char const*>(data.get()));
     M_AssertTrue(code == asapo::HttpCode::OK);
 
@@ -71,7 +76,7 @@ int main(int argc, char* argv[]) {
     uint64_t size = 0;
     auto expected_data = io->GetDataFromFile(fname, &size, &err);
     M_AssertEq(nullptr, err);
-    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, size, &code);
+    err = consumer_impl->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, size, &code);
     M_AssertTrue(code == asapo::HttpCode::OK);
     for (uint64_t i = 0; i < size; i++) {
         if (expected_data[i] != data[i]) {
@@ -81,11 +86,11 @@ int main(int argc, char* argv[]) {
 
 // with file
     transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"aaa\"}";
-    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "bbb", &code);
+    err = consumer_impl->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "bbb", &code);
     M_AssertTrue(code == asapo::HttpCode::OK);
 
     transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"random\"}";
-    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "random", &code);
+    err = consumer_impl->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "random", &code);
     M_AssertTrue(code == asapo::HttpCode::OK);
 
     return 0;
diff --git a/tests/automatic/full_chain/CMakeLists.txt b/tests/automatic/full_chain/CMakeLists.txt
index 0d7dd8b9fa95ef4c83f07ceb334ffce0b49c9b92..a13904442843c0b2787fde56dfcc4d01ee5f15e3 100644
--- a/tests/automatic/full_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/CMakeLists.txt
@@ -1,7 +1,7 @@
 add_subdirectory(simple_chain)
 if (BUILD_PYTHON)
     add_subdirectory(simple_chain_usermeta_python)
-    add_subdirectory(send_recv_substreams_python)
+    add_subdirectory(send_recv_streams_python)
 endif()
 add_subdirectory(simple_chain_metadata)
 add_subdirectory(two_beamlines)
@@ -13,4 +13,4 @@ add_subdirectory(simple_chain_filegen_multisource)
 add_subdirectory(simple_chain_filegen_readdata_cache)
 add_subdirectory(simple_chain_filegen_readdata_file)
 add_subdirectory(simple_chain_dataset)
-add_subdirectory(send_recv_substreams)
+add_subdirectory(send_recv_streams)
diff --git a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt b/tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
similarity index 84%
rename from tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
rename to tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
index 7ddfbbfbea7efa3dd31162a130bd557e15cf3f17..0a2ab15e89032a5278cf4376ede48c82c140b041 100644
--- a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
@@ -1,5 +1,5 @@
-set(TARGET_NAME send_recv_substreams)
-set(SOURCE_FILES send_recv_substreams.cpp)
+set(TARGET_NAME send_recv_streams)
+set(SOURCE_FILES send_recv_streams.cpp)
 
 add_executable(${TARGET_NAME} ${SOURCE_FILES})
 target_link_libraries(${TARGET_NAME} asapo-consumer asapo-producer)
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_linux.sh b/tests/automatic/full_chain/send_recv_streams/check_linux.sh
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams/check_linux.sh
rename to tests/automatic/full_chain/send_recv_streams/check_linux.sh
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_windows.bat b/tests/automatic/full_chain/send_recv_streams/check_windows.bat
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams/check_windows.bat
rename to tests/automatic/full_chain/send_recv_streams/check_windows.bat
diff --git a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp b/tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
similarity index 60%
rename from tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
rename to tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
index 8be9df7b4607c35c281592a94f7ecc0b59bce977..5f025e5f434998f801316a0be33e0426d7937334 100644
--- a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
+++ b/tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
@@ -14,7 +14,7 @@
 #include "asapo/asapo_producer.h"
 
 using asapo::Error;
-using BrokerPtr = std::unique_ptr<asapo::DataBroker>;
+using ConsumerPtr = std::unique_ptr<asapo::Consumer>;
 using ProducerPtr = std::unique_ptr<asapo::Producer>;
 std::string group_id = "";
 
@@ -34,22 +34,26 @@ void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
     files_sent++;
 }
 
-BrokerPtr CreateBrokerAndGroup(const Args& args, Error* err) {
-    auto broker = asapo::DataBrokerFactory::CreateServerBroker(args.server, ".", true,
-                  asapo::SourceCredentials{asapo::SourceType::kProcessed,args.beamtime_id, "", "", args.token}, err);
+ConsumerPtr CreateConsumerAndGroup(const Args& args, Error* err) {
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(args.server,
+                                                         ".",
+                                                         true,
+                                                         asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                  args.beamtime_id, "", "", args.token},
+                                                         err);
     if (*err) {
         return nullptr;
     }
 
-    broker->SetTimeout(10000);
+    consumer->SetTimeout(10000);
 
     if (group_id.empty()) {
-        group_id = broker->GenerateNewGroupId(err);
+        group_id = consumer->GenerateNewGroupId(err);
         if (*err) {
             return nullptr;
         }
     }
-    return broker;
+    return consumer;
 }
 
 ProducerPtr CreateProducer(const Args& args) {
@@ -57,7 +61,7 @@ ProducerPtr CreateProducer(const Args& args) {
     auto producer = asapo::Producer::Create(args.server, 1,
                                             asapo::RequestHandlerType::kTcp,
                                             asapo::SourceCredentials{asapo::SourceType::kProcessed,
-                                                                     args.beamtime_id, "", "", args.token }, 60, &err);
+                                                                     args.beamtime_id, "", "", args.token }, 60000, &err);
     if(err) {
         std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
         exit(EXIT_FAILURE);
@@ -69,7 +73,7 @@ ProducerPtr CreateProducer(const Args& args) {
 }
 
 int main(int argc, char* argv[]) {
-    asapo::ExitAfterPrintVersionIfNeeded("GetNext Broker Example", argc, argv);
+    asapo::ExitAfterPrintVersionIfNeeded("GetNext consumer Example", argc, argv);
     Args args;
     if (argc != 5) {
         std::cout << "Usage: " + std::string{argv[0]}
@@ -86,29 +90,29 @@ int main(int argc, char* argv[]) {
     uint64_t n = 1;
 
     for (uint64_t i = 0; i < n; i++) {
-        asapo::EventHeader event_header{i + 1, 0, std::to_string(i + 1)};
-        producer->SendData(event_header, "substream1", nullptr, asapo::kTransferMetaDataOnly, ProcessAfterSend);
+        asapo::MessageHeader message_header{i + 1, 0, std::to_string(i + 1)};
+        producer->Send(message_header, nullptr, asapo::kTransferMetaDataOnly, "stream1", ProcessAfterSend);
     }
-    producer->SendSubstreamFinishedFlag("substream1", n, "substream2", ProcessAfterSend);
+    producer->SendStreamFinishedFlag("stream1", n, "stream2", ProcessAfterSend);
     producer->WaitRequestsFinished(10000);
 
     Error err;
-    auto consumer = CreateBrokerAndGroup(args, &err);
+    auto consumer = CreateConsumerAndGroup(args, &err);
     if (err) {
-        std::cout << "Error CreateBrokerAndGroup: " << err << std::endl;
+        std::cout << "Error CreateConsumerAndGroup: " << err << std::endl;
         exit(EXIT_FAILURE);
     }
 
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     for (uint64_t i = 0; i < n; i++) {
-        consumer->GetNext(&fi, group_id, "substream1", nullptr);
+        consumer->GetNext(group_id, &fi, nullptr, "stream1");
     }
 
-    err = consumer->GetNext(&fi, group_id, "substream1", nullptr);
+    err = consumer->GetNext(group_id, &fi, nullptr, "stream1");
     if (err != asapo::ConsumerErrorTemplates::kStreamFinished) {
         return 1;
     }
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
-    return (err_data->next_substream == "substream2") && (files_sent == n + 1) ? 0 : 1;
+    return (err_data->next_stream == "stream2") && (files_sent == n + 1) ? 0 : 1;
 }
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt b/tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
similarity index 78%
rename from tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
rename to tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
index 7a5de77a6e828154c92308725514f0109d59b68a..f592e6f8b6b992a4f2d75125a53453d06a880719 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(TARGET_NAME send_recv_substreams_python)
+set(TARGET_NAME send_recv_streams_python)
 prepare_asapo()
 
 if (UNIX)
@@ -9,6 +9,6 @@ else()
     get_target_property(PYTHON_LIBS_PRODUCER asapo_producer BINARY_DIR)
 endif()
 
-file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/send_recv_substreams.py TEST_SCRIPT )
+file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/send_recv_streams.py TEST_SCRIPT )
 
 add_script_test("${TARGET_NAME}" "${Python_EXECUTABLE} ${PYTHON_LIBS_CONSUMER} ${PYTHON_LIBS_PRODUCER} ${TEST_SCRIPT}" nomem)
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh b/tests/automatic/full_chain/send_recv_streams_python/check_linux.sh
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh
rename to tests/automatic/full_chain/send_recv_streams_python/check_linux.sh
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat b/tests/automatic/full_chain/send_recv_streams_python/check_windows.bat
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat
rename to tests/automatic/full_chain/send_recv_streams_python/check_windows.bat
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py b/tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
similarity index 52%
rename from tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
rename to tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
index f55f12ae6734b8c5ea81afcaa18aa42b352175de..5f48b974a311c3bb9cc7278465c49ac3c0712f04 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
+++ b/tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
@@ -25,35 +25,35 @@ def callback(header,err):
 
 source, beamtime, token = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(source,".",True, beamtime,"",token,timeout)
-producer  = asapo_producer.create_producer(source,'processed',beamtime,'auto', "", token, 1, 600)
+consumer = asapo_consumer.create_consumer(source,".",True, beamtime,"",token,timeout)
+producer  = asapo_producer.create_producer(source,'processed',beamtime,'auto', "", token, 1, 600000)
 producer.set_log_level("debug")
 
-group_id  = broker.generate_group_id()
+group_id  = consumer.generate_group_id()
 
 n_send = 10
 
 for i in range(n_send):
-    producer.send_data(i+1, "name"+str(i),None,ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY,substream = "substream", callback = callback)
+    producer.send(i+1, "name"+str(i),None,ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY,stream = "stream", callback = callback)
 
-producer.send_substream_finished_flag("substream", 10, next_substream = "next_substream", callback = callback)
+producer.send_stream_finished_flag("stream", 10, next_stream = "next_stream", callback = callback)
 producer.wait_requests_finished(timeout)
 
 n_recv = 0
-substream_finished=False
+stream_finished=False
 while True:
     try:
-        data, meta = broker.get_next(group_id,substream = "substream", meta_only=True)
+        data, meta = consumer.get_next(group_id,stream = "stream", meta_only=True)
         print ("received: ",meta)
         n_recv = n_recv + 1
-    except  asapo_consumer.AsapoStreamFinishedError as finished_substream:
-        substream_finished = True
-        assert_eq(finished_substream.id_max, 11, "last id")
-        assert_eq(finished_substream.next_substream, "next_substream", "next substream")
+    except  asapo_consumer.AsapoStreamFinishedError as finished_stream:
+        stream_finished = True
+        assert_eq(finished_stream.id_max, 11, "last id")
+        assert_eq(finished_stream.next_stream, "next_stream", "next stream")
         break
 
 assert_eq(n_recv, n_send, "send=recv")
-assert_eq(substream_finished, True, "substream finished")
-print('Using connection type: ' + broker.current_connection_type())
+assert_eq(stream_finished, True, "stream finished")
+print('Using connection type: ' + consumer.current_connection_type())
 
 
diff --git a/tests/automatic/full_chain/simple_chain/CMakeLists.txt b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
index 1f7374efe1954591b0c95d5a8c9f064686481780..84ddc34f2943138033ed6e7f531435994ebc76d0 100644
--- a/tests/automatic/full_chain/simple_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_simple_chain)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
index cf7580f6ea6f586300e671d29792dd31600bed1a..c6014bf7bc53c44f5ae3fcb6b64a44878008d387 100644
--- a/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_dataset/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_simple_chain_dataset)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
index 8bd0d723e8f5e9fd26f0413495370b9391cf327c..878b5192b990fba317f109df09235fd0de488e6a 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen/CMakeLists.txt
@@ -12,4 +12,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen/test.json.in b/tests/automatic/full_chain/simple_chain_filegen/test.json.in
index eddefac2b2a700bbdd4703bd7221b641a99216a1..fef76848b8562c9058e3dfe681b2ac7170cc2a86 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/test.json.in
+++ b/tests/automatic/full_chain/simple_chain_filegen/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
-  "Subset": {
+ "DataSource": "",
+  "Dataset": {
    	"Mode":"none"
   }
 
diff --git a/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
index 6a8d10614a88cb7b2e722efe226c1a7d70d64b5c..810f673112d1af8c124ae62a8364af670f7a8353 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_batches/CMakeLists.txt
@@ -12,4 +12,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen_batches/test.json.in b/tests/automatic/full_chain/simple_chain_filegen_batches/test.json.in
index 0b760c2ea8d92034668462fb60320c45c9789b2b..a1053cd5b1efc23bf41039f8cd8167a62a7db213 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_batches/test.json.in
+++ b/tests/automatic/full_chain/simple_chain_filegen_batches/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":false,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
  	"Mode":"batch",
   	"BatchSize":3
  }
diff --git a/tests/automatic/full_chain/simple_chain_filegen_multisource/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_multisource/CMakeLists.txt
index 16d6a270f0b362ddea8b6180f08e23427ff7599d..0341d8b7ad7dc345b8b3407a7fe5ccc061ea5822 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_multisource/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_multisource/CMakeLists.txt
@@ -15,4 +15,4 @@ configure_file(test.json.in test1.json @ONLY)
 SET (ID 2)
 configure_file(test.json.in test2.json @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen_multisource/test.json.in b/tests/automatic/full_chain/simple_chain_filegen_multisource/test.json.in
index 09aa803aa41948346be1f951e85383364f6827d2..5f32f629e2ba57f733c8dd922cac14d30e9ea26e 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_multisource/test.json.in
+++ b/tests/automatic/full_chain/simple_chain_filegen_multisource/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
  	"Mode":"multisource",
   	"SourceId":@ID@,
   	"NSources":2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
index 682fdc6a2196ab447e5f29282a4b98ebdbac6116..17c1adc618b76c372691be8c4ee0e0591e28d266 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/CMakeLists.txt
@@ -12,11 +12,11 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
 
 if (ENABLE_LIBFABRIC)
     if (ENABLE_LIBFABRIC_LOCALHOST)
-        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
+        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
     else ()
         message(WARNING "Disabled automated LibFabric of '${TARGET_NAME}-fabric' test because 'ENABLE_LIBFABRIC_LOCALHOST' is not enabled.")
     endif()
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/test.json.in b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/test.json.in
index ed41c425ce44f356fecb72e6c17820cae9ef7b69..488577ab4558d1e1a94d02a0e8ad2909760044c1 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/test.json.in
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
index c54130a4ac26bcdfb6c7b5adf3a92830141642c9..4cc2403a472a4829c214879075f96f62f673d3a7 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/CMakeLists.txt
@@ -13,4 +13,4 @@ endif()
 
 configure_file(test.json.in test.json @ONLY)
 
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:event-monitor-producer-bin> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/test.json.in b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/test.json.in
index ed41c425ce44f356fecb72e6c17820cae9ef7b69..488577ab4558d1e1a94d02a0e8ad2909760044c1 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/test.json.in
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 
diff --git a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
index 2ff284ef9ad078f5f16274ce7ed69f9eb9f21185..7149bd170d51a480e1833b5d1de290f30047e7e2 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_metadata/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_simple_chain_metadata)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_raw/CMakeLists.txt b/tests/automatic/full_chain/simple_chain_raw/CMakeLists.txt
index 1cf8ce6f58bbe85a5c752c1055f742f958d137d0..3bbd3e039f3ab1e1bf14e8701313900e8c0a53ce 100644
--- a/tests/automatic/full_chain/simple_chain_raw/CMakeLists.txt
+++ b/tests/automatic/full_chain/simple_chain_raw/CMakeLists.txt
@@ -20,4 +20,4 @@ configure_file(beamtime-metadata-11111111.json beamtime-metadata-11111111.json C
 
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/settings.json.tpl.in authorizer.json.tpl @ONLY)
 
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
+add_script_test("${TARGET_NAME}" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME>" nomem)
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
index 64671af15df57480a57f41ad512f6ad66c790ded..3023c28f4c8a518836df54349c492d8c78ae6372 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
@@ -53,5 +53,5 @@ export Python_EXECUTABLE=$5
 
 echo "Start python consumer in metadata only mode"
 $Python_EXECUTABLE $3/get_user_meta.py $proxy_address $receiver_folder $beamtime_id $token new | tee out
-grep "found images: 100" out
+grep "found messages: 100" out
 grep "test100" out
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
index 2a67b0c4a582883669db4057beadd582269caa0c..e68687fa6050d59c41fd987b0ad8ed026abb6445 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
@@ -23,7 +23,7 @@ set PYTHONPATH=%4
 
 python3 %3/get_user_meta.py %proxy_address% %receiver_folder% %beamtime_id%  %token% new > out
 type out
-type out | findstr /c:"found images: 100" || goto :error
+type out | findstr /c:"found messages: 100" || goto :error
 type out | findstr /c:"test100" || goto :error
 
 goto :clean
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py b/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
index eaa115726de4fea638cf1658d26fc1111196ce8a..7a3c0521cee64271a99298ebaf4ff388b2f03574 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/get_user_meta.py
@@ -5,9 +5,9 @@ import sys
 
 source, path, beamtime, token, group_id = sys.argv[1:]
 
-broker = asapo_consumer.create_server_broker(source,path,True, beamtime,"",token,60000)
+consumer = asapo_consumer.create_consumer(source,path,True, beamtime,"",token,60000)
 
-images = broker.query_images("meta.user_meta regexp 'test*' order by _id")
+messages = consumer.query_messages("meta.user_meta regexp 'test*' order by _id")
 
-print ('found images:',len(images))
-print (images[99]['meta']['user_meta'])
+print ('found messages:',len(messages))
+print (messages[99]['meta']['user_meta'])
diff --git a/tests/automatic/full_chain/two_beamlines/CMakeLists.txt b/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
index 88c67cc9255bf4c8ec089c86730d83bda8561968..b4b621b3dca1d4ae1162df26625b5d77a7069f2a 100644
--- a/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
+++ b/tests/automatic/full_chain/two_beamlines/CMakeLists.txt
@@ -4,11 +4,11 @@ set(TARGET_NAME full_chain_two_beamlines)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
 
 if (ENABLE_LIBFABRIC)
     if (ENABLE_LIBFABRIC_LOCALHOST)
-        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
+        add_script_test("${TARGET_NAME}-fabric" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> fabric" nomem)
     else ()
         message(WARNING "Disabled automated LibFabric of '${TARGET_NAME}-fabric' test because 'ENABLE_LIBFABRIC_LOCALHOST' is not enabled.")
     endif()
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index f6c7e26f028ec6ba2c906540d4b6842acdf8988f..f518a57a459409d7bee58caf630ff87b4150601a 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -9,7 +9,7 @@ consumer_bin=$2
 asapo_tool_bin=$3
 network_type=$4
 
-stream=detector
+data_source=detector
 
 beamtime_id1=asapo_test1
 token1=`$asapo_tool_bin token -secret auth_secret.key $beamtime_id1`
@@ -40,13 +40,13 @@ Cleanup() {
     nomad stop broker
     nomad stop authorizer
 #    kill $producerid
-    echo "db.dropDatabase()" | mongo ${beamtime_id1}_${stream}
-    echo "db.dropDatabase()" | mongo ${beamtime_id2}_${stream}
+    echo "db.dropDatabase()" | mongo ${beamtime_id1}_${data_source}
+    echo "db.dropDatabase()" | mongo ${beamtime_id2}_${data_source}
     influx -execute "drop database ${monitor_database_name}"
 }
 
-echo "db.${beamtime_id1}_${stream}.insert({dummy:1})" | mongo ${beamtime_id1}_${stream}
-echo "db.${beamtime_id2}_${stream}.insert({dummy:1})" | mongo ${beamtime_id2}_${stream}
+echo "db.${beamtime_id1}_${data_source}.insert({dummy:1})" | mongo ${beamtime_id1}_${data_source}
+echo "db.${beamtime_id2}_${data_source}.insert({dummy:1})" | mongo ${beamtime_id2}_${data_source}
 
 nomad run nginx.nmd
 nomad run authorizer.nmd
diff --git a/tests/automatic/full_chain/two_beamlines/check_windows.bat b/tests/automatic/full_chain/two_beamlines/check_windows.bat
index 1a906b99e11a6421310d5f1995a9309572ccb441..1f09a912ea9b8f0e886d9ba58a1edf9c241bc162 100644
--- a/tests/automatic/full_chain/two_beamlines/check_windows.bat
+++ b/tests/automatic/full_chain/two_beamlines/check_windows.bat
@@ -3,7 +3,7 @@ SET beamtime_id1=asapo_test1
 SET beamline1=test1
 SET beamtime_id2=asapo_test2
 SET beamline2=test2
-SET stream=detector
+SET data_source=detector
 
 SET receiver_root_folder=c:\tmp\asapo\receiver\files
 SET facility=test_facility
@@ -20,8 +20,8 @@ set /P token2=< token
 
 set proxy_address="127.0.0.1:8400"
 
-echo db.%beamtime_id1%_%stream%.insert({dummy:1}) | %mongo_exe% %beamtime_id1%_%stream%
-echo db.%beamtime_id2%_%stream%.insert({dummy:1}) | %mongo_exe% %beamtime_id2%_%stream%
+echo db.%beamtime_id1%_%data_source%.insert({dummy:1}) | %mongo_exe% %beamtime_id1%_%data_source%
+echo db.%beamtime_id2%_%data_source%.insert({dummy:1}) | %mongo_exe% %beamtime_id2%_%data_source%
 
 call start_services.bat
 
@@ -54,7 +54,7 @@ call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token1
 del /f token2
-echo db.dropDatabase() | %mongo_exe% %beamtime_id1%_%stream%
-echo db.dropDatabase() | %mongo_exe% %beamtime_id2%_%stream%
+echo db.dropDatabase() | %mongo_exe% %beamtime_id1%_%data_source%
+echo db.dropDatabase() | %mongo_exe% %beamtime_id2%_%data_source%
 
 
diff --git a/tests/automatic/full_chain/two_streams/CMakeLists.txt b/tests/automatic/full_chain/two_streams/CMakeLists.txt
index 505c1ff22dde1cbc00fcb4a4a1a08f62d4e04795..bde0f7e3971762241ed5e201a65ea989ecec101e 100644
--- a/tests/automatic/full_chain/two_streams/CMakeLists.txt
+++ b/tests/automatic/full_chain/two_streams/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME full_chain_two_streams)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt b/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
index 826d9d19a9409532bcb194a3e8899501f11b8091..9c50d188705d726e4850d41d8fb352d8bc2b5010 100644
--- a/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
+++ b/tests/automatic/high_avail/broker_mongo_restart/CMakeLists.txt
@@ -4,4 +4,4 @@ set(TARGET_NAME broker_mongo_restart)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
+add_script_test("${TARGET_NAME}-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> tcp" nomem)
diff --git a/tests/automatic/high_avail/services_restart/CMakeLists.txt b/tests/automatic/high_avail/services_restart/CMakeLists.txt
index 25a2b009764f40a11a6b5745f054ec86bf240b3f..4b27ebf5b089288c4ac5e4a89a2de33322385ac4 100644
--- a/tests/automatic/high_avail/services_restart/CMakeLists.txt
+++ b/tests/automatic/high_avail/services_restart/CMakeLists.txt
@@ -4,5 +4,5 @@ set(TARGET_NAME service_restart)
 # Testing
 ################################
 prepare_asapo()
-add_script_test("${TARGET_NAME}-all-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> broker 1000 998 tcp" nomem)
-add_script_test("${TARGET_NAME}-all-but-broker-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext_broker> $<TARGET_PROPERTY:asapo,EXENAME> receiver 1000 1000 tcp" nomem)
+add_script_test("${TARGET_NAME}-all-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> broker 1000 998 tcp" nomem)
+add_script_test("${TARGET_NAME}-all-but-broker-tcp" "$<TARGET_FILE:dummy-data-producer> $<TARGET_FILE:getnext> $<TARGET_PROPERTY:asapo,EXENAME> receiver 1000 1000 tcp" nomem)
diff --git a/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp b/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
index 318dfb64e7e9289fe2be8a27a3934fe3c903369b..c360f0339ac45070731b91e92a5c139ec903af9c 100644
--- a/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
+++ b/tests/automatic/mongo_db/insert_retrieve/insert_retrieve_mongodb.cpp
@@ -35,7 +35,7 @@ int main(int argc, char* argv[]) {
     auto args = GetArgs(argc, argv);
     asapo::MongoDBClient db;
 
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     fi.size = 100;
     fi.name = "relpath/1";
     fi.id = args.file_id;
@@ -66,7 +66,7 @@ int main(int argc, char* argv[]) {
     Assert(err, args.keyword);
 
     if (args.keyword == "OK") { // check retrieve
-        asapo::FileInfo fi_db;
+        asapo::MessageMeta fi_db;
         asapo::MongoDBClient db_new;
         db_new.Connect("127.0.0.1", "data");
         err = db_new.GetById("data_test", fi.id, &fi_db);
diff --git a/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp b/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp
index 8e04f6e4790e39145b3217d6438b0db8a4104606..a19a182b13d51933a765f2df81e22db4660a87af 100644
--- a/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp
+++ b/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp
@@ -34,37 +34,35 @@ int main(int argc, char* argv[]) {
     auto args = GetArgs(argc, argv);
     asapo::MongoDBClient db;
 
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     fi.size = 100;
     fi.name = "relpath/1";
-    uint64_t subset_id = args.file_id;
     fi.timestamp = std::chrono::system_clock::now();
     fi.buf_id = 18446744073709551615ull;
     fi.source = "host:1234";
-    fi.id = 10;
+    fi.id = args.file_id;
+    fi.dataset_substream = 10;
 
-    uint64_t subset_size = 2;
+    uint64_t dataset_size = 2;
 
     if (args.keyword != "Notconnected") {
         db.Connect("127.0.0.1", "data");
     }
 
-    auto err =  db.InsertAsSubset("test", fi, subset_id, subset_size, true);
+    auto err =  db.InsertAsDatasetMessage("test", fi, dataset_size, true);
 
 
     if (args.keyword == "DuplicateID") {
         Assert(err, "OK");
-        fi.id = 2;
-        err =  db.InsertAsSubset("test", fi, subset_id, subset_size, true);
-//        Assert(err, "OK");
-        err =  db.InsertAsSubset("test", fi, subset_id, subset_size, false);
+        err =  db.InsertAsDatasetMessage("test", fi, dataset_size, true);
+        err =  db.InsertAsDatasetMessage("test", fi, dataset_size, false);
     }
 
     Assert(err, args.keyword);
 
     if (args.keyword == "OK") { // check retrieve
-        asapo::FileInfo fi_db;
-        err = db.GetDataSetById("test", fi.id,subset_id, &fi_db);
+        asapo::MessageMeta fi_db;
+        err = db.GetDataSetById("test", fi.dataset_substream,fi.id, &fi_db);
         M_AssertTrue(fi_db == fi, "get record from db");
         M_AssertEq(nullptr, err);
         err = db.GetDataSetById("test", 0, 0, &fi_db);
diff --git a/tests/automatic/producer/aai/check_linux.sh b/tests/automatic/producer/aai/check_linux.sh
index fc7df86401c04972baa03c6f8ba67905f14c8f3e..eb3b7492f80da285e49aa1124b7e768b1426edfe 100644
--- a/tests/automatic/producer/aai/check_linux.sh
+++ b/tests/automatic/producer/aai/check_linux.sh
@@ -6,7 +6,7 @@ trap Cleanup EXIT
 
 beamtime_id=11111111
 beamtime_id2=22222222
-stream=python
+data_source=python
 beamline=p07
 receiver_root_folder=/tmp/asapo/receiver/files
 facility=test_facility
@@ -24,8 +24,8 @@ Cleanup() {
     nomad stop authorizer >/dev/null
     nomad stop nginx >/dev/null
     nomad run nginx_kill.nmd  && nomad stop -yes -purge nginx_kill > /dev/null
-    echo "db.dropDatabase()" | mongo ${beamtime_id}_${stream} >/dev/null
-    echo "db.dropDatabase()" | mongo ${beamtime_id2}_${stream} >/dev/null
+    echo "db.dropDatabase()" | mongo ${beamtime_id}_${data_source} >/dev/null
+    echo "db.dropDatabase()" | mongo ${beamtime_id2}_${data_source} >/dev/null
 }
 
 export PYTHONPATH=$2:${PYTHONPATH}
@@ -42,7 +42,7 @@ sleep 1
 echo test > file1
 
 
-$1 $3 $beamline $token $stream "127.0.0.1:8400" > out || cat out
+$1 $3 $beamline $token $data_source "127.0.0.1:8400" > out || cat out
 cat out
 cat out | grep "successfuly sent" | wc -l | grep 3
 cat out | grep "reauthorization\|Broken" | wc -l | grep 1
diff --git a/tests/automatic/producer/aai/check_windows.bat b/tests/automatic/producer/aai/check_windows.bat
index 586959daeb10528de169f4c708d4c3039c096180..a115afb6a12a88cb900b76d570b491ba45cc17cb 100644
--- a/tests/automatic/producer/aai/check_windows.bat
+++ b/tests/automatic/producer/aai/check_windows.bat
@@ -2,12 +2,12 @@ SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 SET beamtime_id=11111111
 SET beamtime_id2=22222222
 SET beamline=p07
-SET stream=python
+SET data_source=python
 SET receiver_root_folder=c:\tmp\asapo\receiver\files
 SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\data\%beamtime_id%"
 SET receiver_folder2="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\data\%beamtime_id2%"
-SET dbname=%beamtime_id%_%stream%
-SET dbname2=%beamtime_id2%_%stream%
+SET dbname=%beamtime_id%_%data_source%
+SET dbname2=%beamtime_id2%_%data_source%
 SET token=-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=
 
 echo db.%dbname%.insert({dummy:1})" | %mongo_exe% %dbname%
@@ -23,7 +23,7 @@ ping 192.0.2.1 -n 1 -w 1000 > nul
 
 set PYTHONPATH=%2
 
-"%1" "%3" %beamline% %token%  %stream% "127.0.0.1:8400" > out
+"%1" "%3" %beamline% %token%  %data_source% "127.0.0.1:8400" > out
 
 type out
 set NUM=0
diff --git a/tests/automatic/producer/aai/producer_aai.py b/tests/automatic/producer/aai/producer_aai.py
index 1734f757f158fddc4692063cd9a23c4dae8c39fd..4c4cdf41f56bade8bd7ad983263b09e8b125641a 100644
--- a/tests/automatic/producer/aai/producer_aai.py
+++ b/tests/automatic/producer/aai/producer_aai.py
@@ -10,7 +10,7 @@ import json
 
 beamline = sys.argv[1]
 token = sys.argv[2]
-stream = sys.argv[3]
+data_source = sys.argv[3]
 endpoint = sys.argv[4]
 
 nthreads = 1
@@ -26,19 +26,19 @@ def callback(header,err):
     lock.release()
 
 
-producer  = asapo_producer.create_producer(endpoint,'processed','auto',beamline, stream, token, nthreads, 60)
+producer  = asapo_producer.create_producer(endpoint,'processed','auto',beamline, data_source, token, nthreads, 60000)
 
 producer.set_log_level("debug")
 
 #send single file
-producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 producer.wait_requests_finished(10000)
 
 time.sleep(2)
 
 #send single file to other beamtime - should be warning on duplicated request (same beamtime, no reauthorization)
-producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 producer.wait_requests_finished(10000)
 
 
@@ -54,7 +54,7 @@ with open(fname, 'w') as outfile:
 time.sleep(2)
 
 #send single file to other beamtime - now ok since receiver authorization timed out
-producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = "processed/"+data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 producer.wait_requests_finished(10000)
 
diff --git a/tests/automatic/producer/beamtime_metadata/beamtime_metadata.cpp b/tests/automatic/producer/beamtime_metadata/beamtime_metadata.cpp
index 4d9fff49c703f95563ade6db55f22788791c33cb..5d91a1fd83076ae400f20452e146cbd31629e058 100644
--- a/tests/automatic/producer/beamtime_metadata/beamtime_metadata.cpp
+++ b/tests/automatic/producer/beamtime_metadata/beamtime_metadata.cpp
@@ -55,7 +55,7 @@ void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
 
 bool SendMetaData(asapo::Producer* producer) {
 
-    auto err = producer->SendMetaData("hello", &ProcessAfterSend);
+    auto err = producer->SendMetadata("hello", &ProcessAfterSend);
     if (err) {
         std::cerr << "Cannot send metadata: " << err << std::endl;
         return false;
@@ -70,7 +70,7 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
                                             args.mode == 0 ? asapo::RequestHandlerType::kTcp
                                             : asapo::RequestHandlerType::kFilesystem,
                                             asapo::SourceCredentials{asapo::SourceType::kProcessed,
-                                                                     args.beamtime_id, "", "", ""}, 60, &err);
+                                                                     args.beamtime_id, "", "", ""}, 60000, &err);
     if (err) {
         std::cerr << "Cannot start producer. ProducerError: " << err << std::endl;
         exit(EXIT_FAILURE);
diff --git a/tests/automatic/producer/file_monitor_producer/test.json.in b/tests/automatic/producer/file_monitor_producer/test.json.in
index d88d98b67bd0a0e5264c1169071210c13325c746..fa4d102c24c3ec03eca49b1999cd5907ddcf8fe9 100644
--- a/tests/automatic/producer/file_monitor_producer/test.json.in
+++ b/tests/automatic/producer/file_monitor_producer/test.json.in
@@ -10,8 +10,8 @@
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 
diff --git a/tests/automatic/producer/python_api/check_linux.sh b/tests/automatic/producer/python_api/check_linux.sh
index fe5ae27138a29eecb574769e060f52b8957612db..71e20a339a9a52627eb5035981ef8f525ee41706 100644
--- a/tests/automatic/producer/python_api/check_linux.sh
+++ b/tests/automatic/producer/python_api/check_linux.sh
@@ -5,7 +5,7 @@ set -e
 trap Cleanup EXIT
 
 beamtime_id=asapo_test
-stream=python
+data_source=python
 beamline=test
 receiver_root_folder=/tmp/asapo/receiver/files
 facility=test_facility
@@ -21,12 +21,12 @@ Cleanup() {
     nomad stop authorizer >/dev/null
     nomad stop nginx >/dev/null
     nomad run nginx_kill.nmd  && nomad stop -yes -purge nginx_kill > /dev/null
-    echo "db.dropDatabase()" | mongo ${beamtime_id}_${stream} >/dev/null
+    echo "db.dropDatabase()" | mongo ${beamtime_id}_${data_source} >/dev/null
 }
 
 export PYTHONPATH=$2:${PYTHONPATH}
 
-echo "db.${beamtime_id}_${stream}.insert({dummy:1})" | mongo ${beamtime_id}_${stream}  >/dev/null
+echo "db.${beamtime_id}_${data_source}.insert({dummy:1})" | mongo ${beamtime_id}_${data_source}  >/dev/null
 
 nomad run authorizer.nmd >/dev/null
 nomad run nginx.nmd >/dev/null
@@ -39,7 +39,7 @@ echo test > file1
 
 sleep 1
 
-$1 $3 $stream $beamtime_id  "127.0.0.1:8400" &> out || cat out
+$1 $3 $data_source $beamtime_id  "127.0.0.1:8400" &> out || cat out
 cat out
 echo count successfully send, expect 13
 cat out | grep "successfuly sent" | wc -l | tee /dev/stderr | grep 13
diff --git a/tests/automatic/producer/python_api/check_windows.bat b/tests/automatic/producer/python_api/check_windows.bat
index 60cfaf26aabf9481f96252c11a797477c57dfdaa..c115da36fd80a77128f2248ac1202c360336c9ae 100644
--- a/tests/automatic/producer/python_api/check_windows.bat
+++ b/tests/automatic/producer/python_api/check_windows.bat
@@ -1,10 +1,10 @@
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 SET beamtime_id=asapo_test
 SET beamline=test
-SET stream=python
+SET data_source=python
 SET receiver_root_folder=c:\tmp\asapo\receiver\files
 SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\data\%beamtime_id%"
-SET dbname=%beamtime_id%_%stream%
+SET dbname=%beamtime_id%_%data_source%
 
 echo db.%dbname%.insert({dummy:1})" | %mongo_exe% %dbname%
 
@@ -18,7 +18,7 @@ ping 192.0.2.1 -n 1 -w 1000 > nul
 
 set PYTHONPATH=%2
 
-"%1" "%3" %stream% %beamtime_id%  "127.0.0.1:8400" > out
+"%1" "%3" %data_source% %beamtime_id%  "127.0.0.1:8400" > out
 type out
 set NUM=0
 for /F %%N in ('find /C "successfuly sent" ^< "out"') do set NUM=%%N
diff --git a/tests/automatic/producer/python_api/producer_api.py b/tests/automatic/producer/python_api/producer_api.py
index abc8cac732591fada0b7daf10ce9e82921e8d536..eb14bca76ed10a90c94b978bfb501a007332d1d9 100644
--- a/tests/automatic/producer/python_api/producer_api.py
+++ b/tests/automatic/producer/python_api/producer_api.py
@@ -10,7 +10,7 @@ from datetime import datetime
 
 lock = threading.Lock()
 
-stream = sys.argv[1]
+data_source = sys.argv[1]
 beamtime = sys.argv[2]
 endpoint = sys.argv[3]
 
@@ -37,22 +37,22 @@ def callback(payload, err):
     lock.release()
 
 
-producer = asapo_producer.create_producer(endpoint,'processed', beamtime, 'auto', stream, token, nthreads, 60)
+producer = asapo_producer.create_producer(endpoint,'processed', beamtime, 'auto', data_source, token, nthreads, 60000)
 
 producer.set_log_level("debug")
 
 # send single file
-producer.send_file(1, local_path="./file1", exposed_path="processed/" + stream + "/" + "file1",
+producer.send_file(1, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file1",
                    user_meta='{"test_key":"test_val"}', callback=callback)
 
 # send single file without callback
-producer.send_file(10, local_path="./file1", exposed_path="processed/" + stream + "/" + "file10",
+producer.send_file(10, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file10",
                    user_meta='{"test_key":"test_val"}', callback=None)
 
-# send subsets
-producer.send_file(2, local_path="./file1", exposed_path="processed/" + stream + "/" + "file2", subset=(1, 2),
+# send datasets
+producer.send_file(2, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file2", dataset=(1, 2),
                    user_meta='{"test_key":"test_val"}', callback=callback)
-producer.send_file(2, local_path="./file1", exposed_path="processed/" + stream + "/" + "file3", subset=(2, 2),
+producer.send_file(2, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file3", dataset=(2, 2),
                    user_meta='{"test_key":"test_val"}', callback=callback)
 
 # send meta only
@@ -62,27 +62,27 @@ producer.send_file(3, local_path="./not_exist", exposed_path="./whatever",
 data = np.arange(10, dtype=np.float64)
 
 # send data from array
-producer.send_data(4, "processed/" + stream + "/" + "file5", data,
+producer.send(4, "processed/" + data_source + "/" + "file5", data,
                    ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)
 
 # send data from string
-producer.send_data(5, "processed/" + stream + "/" + "file6", b"hello",
+producer.send(5, "processed/" + data_source + "/" + "file6", b"hello",
                    ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)
 
 # send metadata only
-producer.send_data(6, "processed/" + stream + "/" + "file7", None,
+producer.send(6, "processed/" + data_source + "/" + "file7", None,
                    ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback=callback)
 
 # send single file/wrong filename
-producer.send_file(1, local_path="./file2", exposed_path="processed/" + stream + "/" + "file1", callback=callback)
+producer.send_file(1, local_path="./file2", exposed_path="processed/" + data_source + "/" + "file1", callback=callback)
 
 x = np.array([[1, 2, 3], [4, 5, 6]], np.float32)
-producer.send_data(8, "processed/" + stream + "/" + "file8", x,
+producer.send(8, "processed/" + data_source + "/" + "file8", x,
                    ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)
 
 try:
     x = x.T
-    producer.send_data(8, "processed/" + stream + "/" + "file8", x,
+    producer.send(8, "processed/" + data_source + "/" + "file8", x,
                        ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)
 except asapo_producer.AsapoWrongInputError as e:
     print(e)
@@ -91,7 +91,7 @@ else:
     sys.exit(1)
 
 try:
-    producer.send_data(0, "processed/" + stream + "/" + "file6", b"hello",
+    producer.send(0, "processed/" + data_source + "/" + "file6", b"hello",
                        ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)
 except asapo_producer.AsapoWrongInputError as e:
     print(e)
@@ -99,29 +99,29 @@ else:
     print("should be error sending id 0 ")
     sys.exit(1)
 
-# send to another substream
-producer.send_data(1, "processed/" + stream + "/" + "file9", None,
-                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, substream="stream", callback=callback)
+# send to another stream
+producer.send(1, "processed/" + data_source + "/" + "file9", None,
+                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, stream="stream", callback=callback)
 
-# wait normal requests finished before sending duplicates
+# wait for normal requests to finish before sending duplicates
 
 producer.wait_requests_finished(50000)
 
 # send single file once again
-producer.send_file(1, local_path="./file1", exposed_path="processed/" + stream + "/" + "file1",
+producer.send_file(1, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file1",
                    user_meta='{"test_key":"test_val"}', callback=callback)
 # send metadata only once again
-producer.send_data(6, "processed/" + stream + "/" + "file7", None,
+producer.send(6, "processed/" + data_source + "/" + "file7", None,
                    ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback=callback)
 
 # send same id different data
-producer.send_file(1, local_path="./file1", exposed_path="processed/" + stream + "/" + "file1",
+producer.send_file(1, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file1",
                    user_meta='{"test_key1":"test_val"}', callback=callback)  # send same id different data
-producer.send_data(6, "processed/" + stream + "/" + "file8", None,
+producer.send(6, "processed/" + data_source + "/" + "file8", None,
                    ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback=callback)
 
-# send same id without writing to database, should success
+# send same id without writing to database, should succeed
-producer.send_file(1, local_path="./file1", exposed_path="processed/" + stream + "/" + "file18",
+producer.send_file(1, local_path="./file1", exposed_path="processed/" + data_source + "/" + "file18",
                    user_meta='{"test_key1":"test_val"}',
                    ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_DATA | asapo_producer.INGEST_MODE_STORE_IN_FILESYSTEM,callback=callback)
 
@@ -129,9 +129,9 @@ producer.wait_requests_finished(50000)
 n = producer.get_requests_queue_size()
 assert_eq(n, 0, "requests in queue")
 
-# send to another data to substream stream
-producer.send_data(2, "processed/" + stream + "/" + "file10", None,
-                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, substream="stream", callback=callback)
+# send another message to the stream named "stream"
+producer.send(2, "processed/" + data_source + "/" + "file10", None,
+                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, stream="stream", callback=callback)
 
 producer.wait_requests_finished(50000)
 n = producer.get_requests_queue_size()
@@ -151,7 +151,7 @@ print("created: ",datetime.utcfromtimestamp(info['timestampCreated']/1000000000)
 print("last record: ",datetime.utcfromtimestamp(info['timestampLast']/1000000000).strftime('%Y-%m-%d %H:%M:%S.%f'))
 
 info = producer.stream_info('stream')
-assert_eq(info['lastId'], 2, "last id from different substream")
+assert_eq(info['lastId'], 2, "last id from different stream")
 
 info_last = producer.last_stream()
 assert_eq(info_last['name'], "stream", "last stream")
@@ -159,7 +159,7 @@ assert_eq(info_last['timestampCreated'] <= info_last['timestampLast'], True, "la
 
 # create with error
 try:
-    producer = asapo_producer.create_producer(endpoint,'processed', beamtime, 'auto', stream, token, 0, 0)
+    producer = asapo_producer.create_producer(endpoint,'processed', beamtime, 'auto', data_source, token, 0, 0)
 except asapo_producer.AsapoWrongInputError as e:
     print(e)
 else:
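
For orientation, a minimal sketch of the renamed producer calls exercised by this test, assuming the 20.12.0 Python API as it appears in the hunks above (endpoint, beamtime and token values are placeholders):

    import asapo_producer

    endpoint, beamtime, data_source, token = "localhost:8400", "asapo_test", "python", ""

    def callback(payload, err):
        if err is not None:
            print("could not send:", err)

    # timeouts are now given in milliseconds (60000 rather than 60)
    producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto',
                                              data_source, token, 4, 60000)

    # send_data was renamed to send
    producer.send(1, "processed/" + data_source + "/file1", b"hello",
                  ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback)

    # the subset kwarg is now dataset, keeping the (id-within-set, set-size) tuple shape
    producer.send_file(2, local_path="./file1",
                       exposed_path="processed/" + data_source + "/file2",
                       dataset=(1, 2), callback=callback)

    producer.wait_requests_finished(50000)
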
diff --git a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
index b8fb9e196f7861b6d44c27341240c5ace7ca371e..0847e13fe075c3aa005dfc4ac4902671efd16d06 100644
--- a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
@@ -44,4 +44,4 @@ ls -ln ${receiver_folder}/processed/1_1 | awk '{ print $5 }'| grep 100000
 ls -ln ${receiver_folder}/processed/1_2 | awk '{ print $5 }'| grep 100000
 ls -ln ${receiver_folder}/processed/1_3 | awk '{ print $5 }'| grep 100000
 
-echo 'db.data_default.find({"images._id":{$gt:0}},{"images.name":1})' | mongo asapo_test_detector | grep 1_1 | grep 1_2 | grep 1_3
+echo 'db.data_default.find({"messages._id":{$gt:0}},{"messages.name":1})' | mongo asapo_test_detector | grep 1_1 | grep 1_2 | grep 1_3
diff --git a/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat b/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
index 5047a8cdac45abdc460dea0e00f206f6213b63de..f3dd3760212d01bf26192f8c37bfbf2d6b72fda5 100644
--- a/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
@@ -25,9 +25,9 @@ FOR /F "usebackq" %%A IN ('%receiver_folder%\processed\1_3') DO set size=%%~zA
 if %size% NEQ 100000 goto :error
 
 
-echo db.data_default.find({"images._id":{$gt:0}},{"images.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_1  || goto :error
-echo db.data_default.find({"images._id":{$gt:0}},{"images.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_2  || goto :error
-echo db.data_default.find({"images._id":{$gt:0}},{"images.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_3  || goto :error
+echo db.data_default.find({"messages._id":{$gt:0}},{"messages.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_1  || goto :error
+echo db.data_default.find({"messages._id":{$gt:0}},{"messages.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_2  || goto :error
+echo db.data_default.find({"messages._id":{$gt:0}},{"messages.name":1}) | %mongo_exe% %beamtime_id%_detector | findstr 1_3  || goto :error
 
 goto :clean
 
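
The renamed MongoDB field can also be inspected outside the shell scripts; a minimal pymongo sketch (a hypothetical helper, not part of the test suite; assumes a local mongod populated by this test):

    from pymongo import MongoClient

    db = MongoClient("mongodb://localhost:27017")["asapo_test_detector"]

    # dataset records now embed their parts under "messages" instead of "images"
    for doc in db["data_default"].find({"messages._id": {"$gt": 0}},
                                       {"messages.name": 1}):
        print(doc)
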
diff --git a/tests/automatic/system_io/read_file_content/read_file_content.cpp b/tests/automatic/system_io/read_file_content/read_file_content.cpp
index fa4aa0a34182ad0e6b4e18fcc0caf96686e8ab38..743fb25e2f302451dfdcc6387a4ab3510feb1b1f 100644
--- a/tests/automatic/system_io/read_file_content/read_file_content.cpp
+++ b/tests/automatic/system_io/read_file_content/read_file_content.cpp
@@ -14,7 +14,7 @@ int main(int argc, char* argv[]) {
     asapo::Error err;
     auto io = std::unique_ptr<asapo::IO> {asapo::GenerateDefaultIO()};
 
-    asapo::FileData data;
+    asapo::MessageData data;
     uint64_t size = 0;
     if (expect == "unknown_size") {
         data = io->GetDataFromFile(argv[1], &size, &err);
diff --git a/tests/automatic/system_io/read_folder_content/read_folder_content.cpp b/tests/automatic/system_io/read_folder_content/read_folder_content.cpp
index 4a8bddd5b7d0cdd1c4e593c807ad1d2f8f30ba80..a820b7a499d5dcef478b3d85362e1745c24d68ea 100644
--- a/tests/automatic/system_io/read_folder_content/read_folder_content.cpp
+++ b/tests/automatic/system_io/read_folder_content/read_folder_content.cpp
@@ -20,12 +20,12 @@ int main(int argc, char* argv[]) {
     std::string result{};
     if (err == nullptr) {
         int64_t id = 0;
-        for(auto file_info : files) {
-            M_AssertEq(file_info.id, ++id);
-            if (file_info.name == "1") {
-                M_AssertEq(4, file_info.size);
+        for(auto message_meta : files) {
+            M_AssertEq(message_meta.id, ++id);
+            if (message_meta.name == "1") {
+                M_AssertEq(4, message_meta.size);
             }
-            result += file_info.name;
+            result += message_meta.name;
         }
     } else {
         result = err->Explain();
diff --git a/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp b/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
index f34c385b9978b5a8e66738f97a5ea7043038c604..5cbe7939899823846e3459bea39279d74357ff82 100644
--- a/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
+++ b/tests/automatic/system_io/write_data_to_file/write_data_to_file.cpp
@@ -5,7 +5,7 @@
 
 using asapo::IO;
 using asapo::Error;
-using asapo::FileData;
+using asapo::MessageData;
 
 
 struct Args {
@@ -27,7 +27,7 @@ Args GetParams(int argc, char* argv[]) {
     return Args{fname, result, message, 3};
 }
 
-void AssertGoodResult(const std::unique_ptr<IO>& io, const Error& err, const FileData& data,
+void AssertGoodResult(const std::unique_ptr<IO>& io, const Error& err, const MessageData& data,
                       const Args& params) {
     if (err) {
         std::cerr << err << std::endl;
@@ -53,7 +53,7 @@ int main(int argc, char* argv[]) {
 
     auto io = std::unique_ptr<asapo::IO> {asapo::GenerateDefaultIO()};
     auto array = new uint8_t[params.length] {'1', '2', '3'};
-    FileData data{array};
+    MessageData data{array};
 
     auto err = io->WriteDataToFile("", params.fname, data, params.length, true, true);
 
diff --git a/tests/manual/asapo_fabric/fabric_client.cpp b/tests/manual/asapo_fabric/fabric_client.cpp
index 47edfc2a22df0111c52d145fa13e0d1e6ebc4bbd..462a84f7c030005b7f761f86d75e4fbe8863f79f 100644
--- a/tests/manual/asapo_fabric/fabric_client.cpp
+++ b/tests/manual/asapo_fabric/fabric_client.cpp
@@ -39,7 +39,7 @@ int main(int argc, char* argv[]) {
     }
 
     size_t dataBufferSize = 1024 * kByte;
-    FileData dataBuffer = FileData{new uint8_t[dataBufferSize]};
+    MessageData dataBuffer = MessageData{new uint8_t[dataBufferSize]};
     std::cout << "Expected file size: " << dataBufferSize << " byte" << std::endl;
 
     auto serverAddress = client->AddServerAddress(serverAddressString, &error);
diff --git a/tests/manual/asapo_fabric/fabric_server.cpp b/tests/manual/asapo_fabric/fabric_server.cpp
index fa6f7865d40db1c021759aea7c79f88b863d4cda..981840e6ec785bb7b7dfc2dc48fc693a55fc1bab 100644
--- a/tests/manual/asapo_fabric/fabric_server.cpp
+++ b/tests/manual/asapo_fabric/fabric_server.cpp
@@ -9,7 +9,7 @@ using namespace asapo::fabric;
 
 volatile bool running = false;
 
-void ServerThread(FabricServer* server, size_t bufferSize, FileData* buffer) {
+void ServerThread(FabricServer* server, size_t bufferSize, MessageData* buffer) {
     Error error;
     while(running && !error) {
         FabricAddress clientAddress;
@@ -69,7 +69,7 @@ int main(int argc, char* argv[]) {
     std::cout << "Server is listening on " << server->GetAddress() << std::endl;
 
     size_t dataBufferSize = 1024 * kByte;
-    FileData dataBuffer = FileData{new uint8_t[dataBufferSize]};
+    MessageData dataBuffer = MessageData{new uint8_t[dataBufferSize]};
     strcpy((char*)dataBuffer.get(), "I (the server) wrote into your buffer.");
     std::cout << "Expected file size: " << dataBufferSize << " byte" << std::endl;
 
diff --git a/tests/manual/broker_debug_local/start_getnext.sh b/tests/manual/broker_debug_local/start_getnext.sh
index a9efa78f00c7c85e8422b776b05c40f8f1d74c28..e40f3951d3cc73dc0b64406609b321cedc5519b5 100755
--- a/tests/manual/broker_debug_local/start_getnext.sh
+++ b/tests/manual/broker_debug_local/start_getnext.sh
@@ -8,6 +8,6 @@ metaonly=0
 nthreads=4
 token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
-exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/consumer/getnext_broker/getnext_broker
+exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/consumer/getnext/getnext
 
 $exec localhost:8400 /tmp/asapo/receiver/files/test/asapo_test asapo_test $nthreads $token $timeout $metaonly
diff --git a/tests/manual/consumer_multithread_python/consumer.py b/tests/manual/consumer_multithread_python/consumer.py
index 9ca4fdb3355b83c9a0718e2ea8ab20bb88e5b1dc..4bde969c0b6b193662bbcb4bdc1beb8cbd25bee9 100644
--- a/tests/manual/consumer_multithread_python/consumer.py
+++ b/tests/manual/consumer_multithread_python/consumer.py
@@ -4,39 +4,39 @@ import _thread
 import time
 
 # Define a function for the thread
-def print_time( threadName, broker):
+def print_time( threadName, consumer):
     while 1:
-        group_id = broker.generate_group_id()
+        group_id = consumer.generate_group_id()
         print (group_id)
 
 print ("consumer: ",asapo_consumer.__version__)
 endpoint = "asap3-utl01.desy.de:8400"
 beamtime = "asapo_test"
 token = "KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc="
-broker = asapo_consumer.create_server_broker(endpoint,"/gpfs/petra3/scratch/yakubov/asapo_shared/test_facility/gpfs/test/2019/data/asapo_test",False, beamtime,"",token,6000)
+consumer = asapo_consumer.create_consumer(endpoint,"/gpfs/petra3/scratch/yakubov/asapo_shared/test_facility/gpfs/test/2019/data/asapo_test",False, beamtime,"",token,6000)
 
 
 try:
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-1", broker, ) )
-    _thread.start_new_thread( print_time, ("Thread-2", broker, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-1", consumer, ) )
+    _thread.start_new_thread( print_time, ("Thread-2", consumer, ) )
 except:
     print ("Error: unable to start thread")
 
diff --git a/tests/manual/maxwell/asapo_test/consume.py b/tests/manual/maxwell/asapo_test/consume.py
index 48a62ac791e8149b7f066afd83500203764e3403..8af7a6776de18eff0d6db6b794b7cdfd718d65a1 100644
--- a/tests/manual/maxwell/asapo_test/consume.py
+++ b/tests/manual/maxwell/asapo_test/consume.py
@@ -9,5 +9,5 @@ with open('asapo_host', 'r') as file:
 token="KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc="
 
 os.system("hostname")
-os.system("./getnext_broker "+host+":8400 /gpfs/petra3/scratch/yakubov/asapo_shared/test/asapo_test asapo_test%stream"+str(rank)+" 8 "+token+" 1000 0")
+os.system("./getnext "+host+":8400 /gpfs/petra3/scratch/yakubov/asapo_shared/test/asapo_test asapo_test%data_source"+str(rank)+" 8 "+token+" 1000 0")
 
diff --git a/tests/manual/maxwell/asapo_test/get_binaries.sh b/tests/manual/maxwell/asapo_test/get_binaries.sh
index 02735d75463d37121f2c3e4962363c938b43e3fe..d08c0c29b5cfae28addec7ea51669e7e6d741fe0 100755
--- a/tests/manual/maxwell/asapo_test/get_binaries.sh
+++ b/tests/manual/maxwell/asapo_test/get_binaries.sh
@@ -1,5 +1,5 @@
 scp zitpcx27016:/home/yakubov/projects/asapo/cmake-build-release/examples/producer/dummy-data-producer/dummy-data-producer .
-scp zitpcx27016:/home/yakubov/projects/asapo/cmake-build-release/examples/consumer/getnext_broker/getnext_broker .
+scp zitpcx27016:/home/yakubov/projects/asapo/cmake-build-release/examples/consumer/getnext/getnext .
 
 
 
diff --git a/tests/manual/maxwell/asapo_test/produce.py b/tests/manual/maxwell/asapo_test/produce.py
index ee2f00c315d7403cea7c9a339ce02192071098e3..80f7292dde89407374e62f06f621e47fa55bf06f 100644
--- a/tests/manual/maxwell/asapo_test/produce.py
+++ b/tests/manual/maxwell/asapo_test/produce.py
@@ -8,6 +8,6 @@ with open('asapo_host', 'r') as file:
 
 token="KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc="
 os.system("hostname")
-os.system("./dummy-data-producer "+host+":8400 asapo_test%stream"+str(rank)+"%"+token+" 1000000 50 8 0 1000")
+os.system("./dummy-data-producer "+host+":8400 asapo_test%data_source"+str(rank)+"%"+token+" 1000000 50 8 0 1000")
 
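
These maxwell scripts pass beamtime and data source as a single %-separated CLI argument; a small sketch of how it is assembled after the rename (rank and token values are placeholders):

    rank = 0
    beamtime = "asapo_test"
    token = "<token>"  # placeholder

    # getnext takes beamtime%data_source; dummy-data-producer appends %token as well
    consumer_arg = f"{beamtime}%data_source{rank}"
    producer_arg = f"{beamtime}%data_source{rank}%{token}"
    print(consumer_arg, producer_arg)
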
 
diff --git a/tests/manual/performance_broker/test.sh b/tests/manual/performance_broker/test.sh
index 1091938f10fcb0e655306e4cbe1a5e36450a81b2..9963cb021adf33310ee1bd0e99b1a3c1db6841a8 100755
--- a/tests/manual/performance_broker/test.sh
+++ b/tests/manual/performance_broker/test.sh
@@ -2,7 +2,7 @@
 
 # starts broker, mongodb on $service_node
 # reads fileset into database
-# calls getnext_broker example from $consumer_node
+# calls getnext example from $consumer_node
 
 nthreads=1
 # a directory with many files in it
@@ -58,8 +58,8 @@ ssh ${consumer_node} ${consumer_dir}/folder2db -n ${nthreads} ${dir} ${run_name}
 
 sleep 3
 
-scp ../../../cmake-build-release/examples/consumer/getnext_broker/getnext_broker ${consumer_node}:${consumer_dir}
-ssh ${consumer_node} ${consumer_dir}/getnext_broker ${service_node}:8400 ${run_name} ${nthreads} $token
+scp ../../../cmake-build-release/examples/consumer/getnext/getnext ${consumer_node}:${consumer_dir}
+ssh ${consumer_node} ${consumer_dir}/getnext ${service_node}:8400 ${run_name} ${nthreads} $token
 
 
 
diff --git a/tests/manual/performance_broker_receiver/getlast_broker.cpp b/tests/manual/performance_broker_receiver/getlast_broker.cpp
index 2bb4519d51df197f925d60c11a4b36532011e3d3..59011a35aeee20a613a19c85b37a8d264715e4fc 100644
--- a/tests/manual/performance_broker_receiver/getlast_broker.cpp
+++ b/tests/manual/performance_broker_receiver/getlast_broker.cpp
@@ -58,17 +58,19 @@ std::vector<std::thread> StartThreads(const Args& params,
                                       std::vector<int>* nbuf,
                                       std::vector<int>* nfiles_total) {
     auto exec_next = [&params, nfiles, errors, nbuf, nfiles_total](int i) {
-        asapo::FileInfo fi;
+        asapo::MessageMeta fi;
         Error err;
-        auto broker = asapo::DataBrokerFactory::CreateServerBroker(params.server, params.file_path, true,
-                      asapo::SourceCredentials{asapo::SourceType::kProcessed,params.beamtime_id, "", "", params.token}, &err);
-        broker->SetTimeout((uint64_t) params.timeout_ms);
-        asapo::FileData data;
+        auto consumer = asapo::ConsumerFactory::CreateConsumer(params.server, params.file_path, true,
+                                                             asapo::SourceCredentials{asapo::SourceType::kProcessed,
+                                                                                      params.beamtime_id, "", "",
+                                                                                      params.token}, &err);
+        consumer->SetTimeout((uint64_t) params.timeout_ms);
+        asapo::MessageData data;
 
         lock.lock();
 
         if (group_id.empty()) {
-            group_id = broker->GenerateNewGroupId(&err);
+            group_id = consumer->GenerateNewGroupId(&err);
             if (err) {
                 (*errors)[i] += ProcessError(err);
                 return;
@@ -81,7 +83,7 @@ std::vector<std::thread> StartThreads(const Args& params,
         while (std::chrono::duration_cast<std::chrono::milliseconds>(system_clock::now() - start).count() <
                 params.timeout_ms) {
             if (params.datasets) {
-                auto dataset = broker->GetLastDataset(0, &err);
+                auto dataset = consumer->GetLastDataset(0, "default", &err);
                 if (err == nullptr) {
                     for (auto& fi : dataset.content) {
                         (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1;
@@ -89,7 +91,7 @@ std::vector<std::thread> StartThreads(const Args& params,
                     }
                 }
             } else {
-                err = broker->GetLast(&fi, params.read_data ? &data : nullptr);
+                err = consumer->GetLast(&fi, params.read_data ? &data : nullptr, "default");
                 if (err == nullptr) {
                     (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1;
                     if (params.read_data && (*nfiles)[i] < 10 && fi.size < 10) {
@@ -117,7 +119,7 @@ std::vector<std::thread> StartThreads(const Args& params,
 
 int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nbuf, int* nfiles_total,
                 asapo::NetworkConnectionType* connection_type) {
-    asapo::FileInfo fi;
+    asapo::MessageMeta fi;
     system_clock::time_point t1 = system_clock::now();
 
     std::vector<int> nfiles(params.nthreads, 0);
@@ -167,7 +169,7 @@ int ReadAllData(const Args& params, uint64_t* duration_ms, int* nerrors, int* nb
 }
 
 int main(int argc, char* argv[]) {
-    asapo::ExitAfterPrintVersionIfNeeded("GetLast Broker Example", argc, argv);
+    asapo::ExitAfterPrintVersionIfNeeded("GetLast Consumer Example", argc, argv);
     Args params;
     params.datasets = false;
     if (argc != 9 && argc != 10) {
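
For comparison, a minimal Python sketch of the renamed consumer-side calls, assuming the create_consumer argument order and the get_last keyword names used elsewhere in this patch (address, beamtime and token values are placeholders):

    import asapo_consumer

    consumer = asapo_consumer.create_consumer("localhost:8400", "/tmp/asapo", True,
                                              "asapo_test", "", "", 1000)

    # group_id is no longer passed to get_last; the stream is addressed by name
    data, meta = consumer.get_last(meta_only=False, stream="default")
    print(meta["_id"], meta["name"])
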
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index 56b1536ad37cac5a6365e4a6e74b1cf74e63983b..d77c60cd4fa5ce38d44dd800cbe9d9289e49f87c 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -112,7 +112,7 @@ scp ../../../cmake-build-release/broker/asapo-broker ${broker_node}:${broker_dir
 consumer_node=max-display002
 consumer_dir=~/fullchain_tests
 nthreads=16
-scp ../../../cmake-build-release/examples/consumer/getnext_broker/getnext_broker ${consumer_node}:${consumer_dir}
+scp ../../../cmake-build-release/examples/consumer/getnext/getnext ${consumer_node}:${consumer_dir}
 scp ../../../cmake-build-release/asapo_tools/asapo ${consumer_node}:${consumer_dir}
 scp ../../../tests/automatic/settings/auth_secret.key ${consumer_node}:${consumer_dir}/auth_secret.key
 
@@ -147,6 +147,6 @@ sleep 1
 #prepare token
 ssh ${consumer_node} "bash -c '${consumer_dir}/asapo token -secret ${consumer_dir}/auth_secret.key ${beamtime_id} >${consumer_dir}/token'"
 #consumer_start
-ssh ${consumer_node} "bash -c '${consumer_dir}/getnext_broker ${receiver_node}:8400 ${beamtime_id} ${nthreads} \`cat ${consumer_dir}/token\`'"
+ssh ${consumer_node} "bash -c '${consumer_dir}/getnext ${receiver_node}:8400 ${beamtime_id} ${nthreads} \`cat ${consumer_dir}/token\`'"
 
 
diff --git a/tests/manual/producer_cpp/producer.cpp b/tests/manual/producer_cpp/producer.cpp
index 4e72219bc8106ba08c03660224dc17b6ca8988de..c198a7506144518a54c0f81f5c15aea2935c2a49 100644
--- a/tests/manual/producer_cpp/producer.cpp
+++ b/tests/manual/producer_cpp/producer.cpp
@@ -51,7 +51,7 @@ int main(int argc, char* argv[]) {
     auto beamtime = "asapo_test";
 
     auto producer = asapo::Producer::Create(endpoint, 1,asapo::RequestHandlerType::kTcp,
-                                            asapo::SourceCredentials{asapo::SourceType::kProcessed,beamtime, "", "", ""}, 60, &err);
+                                            asapo::SourceCredentials{asapo::SourceType::kProcessed,beamtime, "", "", ""}, 60000, &err);
     exit_if_error("Cannot start producer", err);
 
     uint32_t eventid = 1;
@@ -72,19 +72,19 @@ int main(int argc, char* argv[]) {
                 + "_part" + format_string(part) // file part id (chunk id)
                 + "_m" + format_string(submodule, std::string("%02d"));
             auto send_size = to_send.size() + 1;
-            auto buffer =  asapo::FileData(new uint8_t[send_size]);
+            auto buffer =  asapo::MessageData(new uint8_t[send_size]);
             memcpy(buffer.get(), to_send.c_str(), send_size);
-            std::string substream = std::to_string(start_number);
+            std::string stream = std::to_string(start_number);
             // std::cout<<"submodule:"<<submodule
-            //          <<"- substream:"<<substream
+            //          <<"- stream:"<<stream
             //          <<"- filename:"<<to_send<<std::endl;
 
-            asapo::EventHeader event_header{submodule, send_size, to_send,"", part,modules};
-            // err = producer->SendData(event_header,substream, std::move(buffer),
+            asapo::MessageHeader message_header{submodule, send_size, to_send, "", part, modules};
+            // err = producer->Send(message_header,stream, std::move(buffer),
             //                          asapo::kTransferMetaDataOnly, &ProcessAfterSend);
 
-            err = producer->SendData(event_header,substream, std::move(buffer),
-                                     asapo::kDefaultIngestMode, &ProcessAfterSend);
+            err = producer->Send(message_header, std::move(buffer),
+                                 asapo::kDefaultIngestMode, stream, &ProcessAfterSend);
             exit_if_error("Cannot send file", err);
 
             err = producer->WaitRequestsFinished(1000);
@@ -94,7 +94,7 @@ int main(int argc, char* argv[]) {
             // if(part == number_of_splitted_files)
             // {
 
-            //     err = producer->SendSubstreamFinishedFlag(substream,
+            //     err = producer->SendStreamFinishedFlag(stream,
             //                                               part,
             //                                               std::to_string(start_number+1),
             //                                               &ProcessAfterSend);
diff --git a/tests/manual/python_tests/ASAPO-csb.ipynb b/tests/manual/python_tests/ASAPO-csb.ipynb
index 962c72374b7a247460b6b02084686d7498026729..5e2cb51dc3a8a05185b1bd08f5251b8f272639dc 100644
--- a/tests/manual/python_tests/ASAPO-csb.ipynb
+++ b/tests/manual/python_tests/ASAPO-csb.ipynb
@@ -24,7 +24,7 @@
     "import time\n",
     "from IPython import display\n",
     "\n",
-    "broker, err = asapo_consumer.create_server_broker(\"psana002:8400\", \"/tmp\", \"asapo_test2\",\"\",\"yzgAcLmijSLWIm8dBiGNCbc0i42u5HSm-zR6FRqo__Y=\", 1000000)\n"
+    "broker, err = asapo_consumer.create_consumer(\"psana002:8400\", \"/tmp\", \"asapo_test2\",\"\",\"yzgAcLmijSLWIm8dBiGNCbc0i42u5HSm-zR6FRqo__Y=\", 1000000)\n"
    ]
   },
   {
diff --git a/tests/manual/python_tests/consumer/consumer_api.py b/tests/manual/python_tests/consumer/consumer_api.py
index 60c82c03cdcf5d5a3ced09a71ecbee7a37bce78f..2c72bf03f5a3b52c094868701d68aca2490a6684 100644
--- a/tests/manual/python_tests/consumer/consumer_api.py
+++ b/tests/manual/python_tests/consumer/consumer_api.py
@@ -4,17 +4,17 @@ import asapo_consumer
 import sys
 
 source, path, beamtime, token = sys.argv[1:]
-broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
-group_id = broker.generate_group_id()
+consumer = asapo_consumer.create_consumer(source,path,False, beamtime,"",token,1000)
+group_id = consumer.generate_group_id()
 
-res = broker.query_images("_id > 0", substream="1")
+res = consumer.query_messages("_id > 0", stream="1")
 
 print(res)
 
-#data, meta = broker.get_by_id(5,group_id,"default", meta_only=False)
+#data, meta = consumer.get_by_id(5, "default", meta_only=False)
 
 #meta["buf_id"]=0
-#data = broker.retrieve_data(meta)
+#data = consumer.retrieve_data(meta)
 
 #print (meta)
 #print (len(data),data[0:100])
diff --git a/tests/manual/python_tests/consumer/consumer_api.py_ b/tests/manual/python_tests/consumer/consumer_api.py_
index 0aa14cb8dbd215b7fe7538d8815a8180af9c6d39..a4c784c0775952c687f7ad3dd388a7990f9947e9 100644
--- a/tests/manual/python_tests/consumer/consumer_api.py_
+++ b/tests/manual/python_tests/consumer/consumer_api.py_
@@ -4,7 +4,7 @@ import asapo_consumer
 import sys
 
 source, path,beamtime, token = sys.argv[1:]
-broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
+consumer = asapo_consumer.create_consumer(source,path,False, beamtime,"",token,1000)
-group_id = broker.generate_group_id()
+group_id = consumer.generate_group_id()
 
 
diff --git a/tests/manual/python_tests/plot_images_online.py b/tests/manual/python_tests/plot_images_online.py
index 31b0e88e01bf99908bfb377d785331ba77541d34..53e63a99c38a64361d84f9d7309bc95b5c108754 100644
--- a/tests/manual/python_tests/plot_images_online.py
+++ b/tests/manual/python_tests/plot_images_online.py
@@ -9,15 +9,15 @@ import matplotlib.pyplot as plt
 #dset = f.create_dataset("mydataset", data = d1)
 #f.close()
 
-broker, err = asapo_consumer.create_server_broker("psana002:8400", "/tmp", True, "asapo_test2","","yzgAcLmijSLWIm8dBiGNCbc0i42u5HSm-zR6FRqo__Y=", 1000000)
+consumer, err = asapo_consumer.create_consumer("psana002:8400", "/tmp", True, "asapo_test2","","yzgAcLmijSLWIm8dBiGNCbc0i42u5HSm-zR6FRqo__Y=", 1000000)
 
 last_id = 0
 while True:
-    data, meta, err = broker.get_last(meta_only=False)
+    data, meta, err = consumer.get_last(meta_only=False)
     id = meta['_id']
     if id == last_id:
         continue
     fid = h5py.h5f.open_file_image(data)
     f = h5py.File(fid)
     data1 = np.array(f['mydataset'])
     print(data1)
@@ -31,7 +31,7 @@ while True:
 #alternative - but tobytes creates an additional copy - not nice.
 #import tables
 #h5file1 = tables.open_file("in-memory-sample.h5", driver="H5FD_CORE",
 #                              driver_core_image=data.tobytes(),
 #                              driver_core_backing_store=0)
 #data2 = h5file1.root.mydataset.read()
 
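
Note that h5py.h5f.open_file_image is h5py's own low-level entry point, so the image-to-message rename sweep has to leave it untouched; a self-contained sketch of the in-memory HDF5 pattern this script relies on:

    import io

    import h5py
    import numpy as np

    # build an HDF5 file image in memory, standing in for bytes from get_last()
    buf = io.BytesIO()
    with h5py.File(buf, "w") as f:
        f.create_dataset("mydataset", data=np.arange(10.0))
    image = buf.getvalue()

    fid = h5py.h5f.open_file_image(image)  # real h5py API name; must not be renamed
    with h5py.File(fid) as f:
        print(np.array(f["mydataset"]))
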
diff --git a/tests/manual/python_tests/producer/cons.py b/tests/manual/python_tests/producer/cons.py
index 04048887f5bdfd3163140981115716d9ba39c0d1..2491ef1aadaf2d6626b4c39c7ce3551a49101ebd 100644
--- a/tests/manual/python_tests/producer/cons.py
+++ b/tests/manual/python_tests/producer/cons.py
@@ -8,10 +8,10 @@ source="127.0.0.1:8400"
 path="/tmp/petra3/gpfs/p01/2019/data/asapo_test"
 beamtime="asapo_test"
 
-broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"test",token,1000)
-group_id = broker.generate_group_id()
+consumer = asapo_consumer.create_consumer(source,path,False, beamtime,"test",token,1000)
+group_id = consumer.generate_group_id()
 
-data, meta = broker.get_by_id(3,group_id,"default", meta_only=False)
+data, meta = consumer.get_by_id(3, "default", meta_only=False)
 
 print (meta)
 print (data.tostring() )
diff --git a/tests/manual/python_tests/producer/short_test.py b/tests/manual/python_tests/producer/short_test.py
index 9fb04ff223109b7c7b229b3c10604caa0ffe695d..eaf47f8286862472a2313f307cd0abc9f01455e3 100644
--- a/tests/manual/python_tests/producer/short_test.py
+++ b/tests/manual/python_tests/producer/short_test.py
@@ -9,7 +9,7 @@ lock = threading.Lock()
 
 endpoint = "127.0.0.1:8400"
 beamtime = "asapo_test"
-stream = "test"
+data_source = "test"
 token = ""
 nthreads = 8
 
@@ -26,7 +26,7 @@ def assert_err(err):
         print(err)
         sys.exit(1)
 
-producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, nthreads ,0)
+producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', data_source, token, nthreads ,0)
 
 producer.set_log_level("debug")
 
diff --git a/tests/manual/python_tests/producer/test.py b/tests/manual/python_tests/producer/test.py
index da68de94514b4c4a95c14f061db61b995cf263c0..403dc1d4623bba660f17f94a428b274f5770cec5 100644
--- a/tests/manual/python_tests/producer/test.py
+++ b/tests/manual/python_tests/producer/test.py
@@ -10,7 +10,7 @@ lock = threading.Lock()
 
 endpoint = "127.0.0.1:8400"
 beamtime = "asapo_test1"
-stream = "detector"
+data_source = "detector"
 token = ""
 nthreads = 8
 
@@ -27,17 +27,17 @@ def assert_err(err):
         print(err)
         sys.exit(1)
 
-producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, nthreads ,0)
+producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', data_source, token, nthreads ,0)
 
 producer.set_log_level("info")
 
 #send single file
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 
-#send subsets
-producer.send_file(2, local_path = "./file1", exposed_path = stream+"/"+"file2",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
-producer.send_file(3, local_path = "./file1", exposed_path = stream+"/"+"file3",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+#send datasets
+producer.send_file(2, local_path = "./file1", exposed_path = data_source+"/"+"file2",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(3, local_path = "./file1", exposed_path = data_source+"/"+"file3",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send meta only
 producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
@@ -46,25 +46,25 @@ producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
 data = np.arange(10,dtype=np.float64)
 
 #send data from array
-producer.send_data(4, stream+"/"+"file5",data,
+producer.send(4, data_source+"/"+"file5",data,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send data from string
-err = producer.send_data(5, stream+"/"+"file6",b"hello",
+err = producer.send(5, data_source+"/"+"file6",b"hello",
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send metadata only
-producer.send_data(6, stream+"/"+"file7",None,
+producer.send(6, data_source+"/"+"file7",None,
                          ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback = callback)
 
 
 x = np.array([[1, 2, 3], [4, 5, 6]], np.float32)
-producer.send_data(4, stream+"/"+"file5",x,
+producer.send(4, data_source+"/"+"file5",x,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 try:
 	x = x.T
-	producer.send_data(4, stream+"/"+"file5",x,
+	producer.send(4, data_source+"/"+"file5",x,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 except:
 	pass
diff --git a/tests/manual/python_tests/producer_wait_bug_mongo/test.py b/tests/manual/python_tests/producer_wait_bug_mongo/test.py
index 06d658cbc95aa68921d16f2d42a984ee62f92191..ead637fdfcfac7c07b70a9ff243e83861ac3933b 100644
--- a/tests/manual/python_tests/producer_wait_bug_mongo/test.py
+++ b/tests/manual/python_tests/producer_wait_bug_mongo/test.py
@@ -10,7 +10,7 @@ lock = threading.Lock()
 
 endpoint = "127.0.0.1:8400"
 beamtime = "asapo_test1"
-stream = "detector"
+data_source = "detector"
 token = ""
 nthreads = 8
 
@@ -27,17 +27,17 @@ def assert_err(err):
         print(err)
         sys.exit(1)
 
-producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, nthreads, 600)
+producer = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', data_source, token, nthreads, 600000)
 
 producer.set_log_level("debug")
 
 #send single file
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 
-#send subsets
-producer.send_file(2, local_path = "./file1", exposed_path = stream+"/"+"file2",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
-producer.send_file(3, local_path = "./file1", exposed_path = stream+"/"+"file3",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+#send datasets
+producer.send_file(2, local_path = "./file1", exposed_path = data_source+"/"+"file2",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(3, local_path = "./file1", exposed_path = data_source+"/"+"file3",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send meta only
 producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
@@ -46,25 +46,25 @@ producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
 data = np.arange(10,dtype=np.float64)
 
 #send data from array
-producer.send_data(4, stream+"/"+"file5",data,
+producer.send(4, data_source+"/"+"file5",data,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send data from string
-err = producer.send_data(5, stream+"/"+"file6",b"hello",
+err = producer.send(5, data_source+"/"+"file6",b"hello",
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send metadata only
-producer.send_data(6, stream+"/"+"file7",None,
+producer.send(6, data_source+"/"+"file7",None,
                          ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback = callback)
 
 
 x = np.array([[1, 2, 3], [4, 5, 6]], np.float32)
-producer.send_data(4, stream+"/"+"file5",x,
+producer.send(4, data_source+"/"+"file5",x,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 try:
 	x = x.T
-	producer.send_data(4, stream+"/"+"file5",x,
+	producer.send(4, data_source+"/"+"file5",x,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 except:
 	pass
diff --git a/tests/manual/python_tests/producer_wait_threads/producer_api.py b/tests/manual/python_tests/producer_wait_threads/producer_api.py
index 22fc727437f2f18fffa8c31017c1031a1b59c7dc..1c19ec7f15d22b82a85a185c6b6c32e1060bdf89 100644
--- a/tests/manual/python_tests/producer_wait_threads/producer_api.py
+++ b/tests/manual/python_tests/producer_wait_threads/producer_api.py
@@ -7,7 +7,7 @@ import numpy as np
 import threading
 lock = threading.Lock()
 
-stream = "python"
+data_source = "python"
 beamtime = "asapo_test"
 endpoint = "127.0.0.1:8400"
 
@@ -22,19 +22,19 @@ def callback(header,err):
         print ("successfuly sent: ",header)
     lock.release()
 
-producer  = asapo_producer.create_producer(endpoint,'processed',beamtime, 'auto', stream, token, nthreads, 600)
+producer  = asapo_producer.create_producer(endpoint,'processed',beamtime, 'auto', data_source, token, nthreads, 600000)
 
 producer.set_log_level("info")
 
 #send single file
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send single file without callback
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}',callback=None)
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}',callback=None)
 
-#send subsets
-producer.send_file(2, local_path = "./file1", exposed_path = stream+"/"+"file2",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
-producer.send_file(3, local_path = "./file1", exposed_path = stream+"/"+"file3",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+#send datasets
+producer.send_file(2, local_path = "./file1", exposed_path = data_source+"/"+"file2",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(3, local_path = "./file1", exposed_path = data_source+"/"+"file3",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send meta only
 producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
@@ -43,15 +43,15 @@ producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
 data = np.arange(10,dtype=np.float64)
 
 #send data from array
-producer.send_data(4, stream+"/"+"file5",data,
+producer.send(4, data_source+"/"+"file5",data,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send data from string
-producer.send_data(5, stream+"/"+"file6",b"hello",
+producer.send(5, data_source+"/"+"file6",b"hello",
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send metadata only
-producer.send_data(6, stream+"/"+"file7",None,
+producer.send(6, data_source+"/"+"file7",None,
                          ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback = callback)
 
 producer.wait_requests_finished(1000)
@@ -63,7 +63,7 @@ if n!=0:
 
 # create with error
 try:
-    producer  = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, 0, 600)
+    producer  = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', data_source, token, 0, 600000)
-except Exception as Asapo:
+except Exception as e:
     print(e)
 else:
diff --git a/tests/manual/python_tests/producer_wait_threads/test.py b/tests/manual/python_tests/producer_wait_threads/test.py
index d1fbaf05b81c169b0f7295b867fe9b091fc788a8..75983e6b30571a4bec77ea1243d923606a4c1ae1 100644
--- a/tests/manual/python_tests/producer_wait_threads/test.py
+++ b/tests/manual/python_tests/producer_wait_threads/test.py
@@ -6,7 +6,7 @@ import time
 import numpy as np
 lock = threading.Lock()
 
-stream = "python"
+data_source = "python"
 beamtime = "asapo_test"
 endpoint = "127.0.0.1:8400"
 
@@ -22,20 +22,20 @@ def callback(header,err):
         print ("successfuly sent: ",header)
     lock.release()
 
-producer  = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', stream, token, nthreads, 600)
+producer  = asapo_producer.create_producer(endpoint,'processed',beamtime,'auto', data_source, token, nthreads, 600000)
 
 producer.set_log_level("info")
 
 #send single file
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send single file without callback
-producer.send_file(1, local_path = "./file1", exposed_path = stream+"/"+"file1", user_meta = '{"test_key":"test_val"}')
+producer.send_file(1, local_path = "./file1", exposed_path = data_source+"/"+"file1", user_meta = '{"test_key":"test_val"}')
 
 
-#send subsets
-producer.send_file(2, local_path = "./file1", exposed_path = stream+"/"+"file2",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
-producer.send_file(3, local_path = "./file1", exposed_path = stream+"/"+"file3",subset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+#send datasets
+producer.send_file(2, local_path = "./file1", exposed_path = data_source+"/"+"file2",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
+producer.send_file(3, local_path = "./file1", exposed_path = data_source+"/"+"file3",dataset=(2,2),user_meta = '{"test_key":"test_val"}', callback = callback)
 
 #send meta only
 producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
@@ -44,15 +44,15 @@ producer.send_file(3, local_path = "./not_exist",exposed_path = "./whatever",
 data = np.arange(10,dtype=np.float64)
 
 #send data from array
-producer.send_data(4, stream+"/"+"file5",data,
+producer.send(4, data_source+"/"+"file5",data,
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send data from string
-producer.send_data(5, stream+"/"+"file6",b"hello",
+producer.send(5, data_source+"/"+"file6",b"hello",
                          ingest_mode = asapo_producer.DEFAULT_INGEST_MODE, callback = callback)
 
 #send metadata only
-producer.send_data(6, stream+"/"+"file7",None,
+producer.send(6, data_source+"/"+"file7",None,
                          ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback = callback)
 
 producer.wait_requests_finished(1)
diff --git a/tests/manual/python_tests/test_p.py b/tests/manual/python_tests/test_p.py
index 764a2f377f2c06a94d46ba85d698935200d825c3..a41224125fa8c5701d89ba2be0afbaf5830391f6 100644
--- a/tests/manual/python_tests/test_p.py
+++ b/tests/manual/python_tests/test_p.py
@@ -10,17 +10,17 @@ path = "/asapo_shared/asapo/data"
 beamtime = "asapo_test"
 token = "KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc="
 
-broker, err = asapo_consumer.create_server_broker(
+consumer, err = asapo_consumer.create_consumer(
-    source, path, True, beamtime, token, 1000)
+    source, path, True, beamtime, "", token, 1000)
 
-group_id, err = broker.generate_group_id()
+group_id, err = consumer.generate_group_id()
 if err is not None:
     print('cannot generate group id, err: ', err)
 else:
     print('generated group id: ', group_id)
 
 while True:
-    data, meta, err = broker.get_last(group_id, meta_only=False)
+    data, meta, err = consumer.get_last(meta_only=False)
     if err is not None:
         print('err: ', err)
     else:
diff --git a/tests/manual/receiver_debug_local/start_getnext.sh b/tests/manual/receiver_debug_local/start_getnext.sh
index a9efa78f00c7c85e8422b776b05c40f8f1d74c28..e40f3951d3cc73dc0b64406609b321cedc5519b5 100755
--- a/tests/manual/receiver_debug_local/start_getnext.sh
+++ b/tests/manual/receiver_debug_local/start_getnext.sh
@@ -8,6 +8,6 @@ metaonly=0
 nthreads=4
 token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
 
-exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/consumer/getnext_broker/getnext_broker
+exec=/home/yakubov/projects/asapo/cmake-build-debug/examples/consumer/getnext/getnext
 
 $exec localhost:8400 /tmp/asapo/receiver/files/test/asapo_test asapo_test $nthreads $token $timeout $metaonly
diff --git a/tests/manual/tests_via_nomad/asapo-test_dummy_producer.nomad.in b/tests/manual/tests_via_nomad/asapo-test_dummy_producer.nomad.in
index e5d995789aa800cb501f2814468c0ed5ff046a30..cae32b82fb9e9a82f4e8997316e23ffe2f816c6c 100644
--- a/tests/manual/tests_via_nomad/asapo-test_dummy_producer.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_dummy_producer.nomad.in
@@ -119,7 +119,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -139,9 +139,9 @@ job "asapo-test" {
       }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -176,7 +176,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -195,9 +195,9 @@ job "asapo-test" {
       }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
 
       }
     }
diff --git a/tests/manual/tests_via_nomad/asapo-test_dummy_producer_only.nomad.in b/tests/manual/tests_via_nomad/asapo-test_dummy_producer_only.nomad.in
index 57c027964539cbb49a392d1f10056fb299d77b20..d4416301aecc43d40ba2ddecd6ea5e8ac88ffde6 100644
--- a/tests/manual/tests_via_nomad/asapo-test_dummy_producer_only.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_dummy_producer_only.nomad.in
@@ -121,7 +121,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -141,9 +141,9 @@ job "asapo-test" {
       }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -179,7 +179,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -198,9 +198,9 @@ job "asapo-test" {
       }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
 
       }
     }
diff --git a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer.nomad.in
index 6aab82d0c2380fd5034e9411e8dcecf294cd61e7..4379531262f22dbec2c58acad98dbd85ace6b8e8 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer.nomad.in
@@ -105,7 +105,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -125,9 +125,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -166,7 +166,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -185,9 +185,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M.nomad.in
index 82e24cdf6d2ef7d6fcd3ed36f714be10f17ee476..6763020d64a5ae5642472f22f2fddbdfa45fbb2b 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M.nomad.in
@@ -105,7 +105,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -125,9 +125,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -166,7 +166,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -185,9 +185,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_batch.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_batch.nomad.in
index 4a92a5b1c5bd48d21a7dc9973c00346d92befb42..9312daeabc7d35f94a0cf3c9f5dc1a6b2a5b6070 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_batch.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_batch.nomad.in
@@ -105,7 +105,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -126,9 +126,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -167,7 +167,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -187,9 +187,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_multisource.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_multisource.nomad.in
index 2707b362534e15f222008d107568ab711fc21304..c24dd1068694439be21ab9f40151210b106d7a60 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_multisource.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_1M_multisource.nomad.in
@@ -105,7 +105,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -126,9 +126,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_batch.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_batch.nomad.in
index 4303b56d2637b5da4a3a5e61c3a49afc4c095596..e864ebb59c39c4419c59eb4024eefbb5043552fa 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_batch.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filegen_consumer_batch.nomad.in
@@ -105,7 +105,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test1/asapo_test1",
@@ -126,9 +126,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
@@ -167,7 +167,7 @@ job "asapo-test" {
     }
 
       config {
-        command = "local/getnext_broker"
+        command = "local/getnext"
         args = [
           "psana002:8400",
           "/bldocuments/support/asapo/data/test2/asapo_test2",
@@ -187,9 +187,9 @@ job "asapo-test" {
 #      }
 
       artifact {
-        source = "http://nims.desy.de/extra/asapo/getnext_broker-@ASAPO_VERSION@"
+        source = "http://nims.desy.de/extra/asapo/getnext-@ASAPO_VERSION@"
         mode = "file"
-        destination = "local/getnext_broker"
+        destination = "local/getnext"
       }
     }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filemon.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filemon.nomad.in
index 929d44cd0270da08cabd3c780fb8e5181c61ad36..be78fb225d36390b0f109a91d0665d9b10417c04 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filemon.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filemon.nomad.in
@@ -41,8 +41,8 @@ job "asapo-filemon" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 }
@@ -102,8 +102,8 @@ job "asapo-filemon" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }
 
diff --git a/tests/manual/tests_via_nomad/asapo-test_filemon_batch.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filemon_batch.nomad.in
index eb73199f22c88df43e3534345b7464296a904021..07c84daeb5b311ec33d8adad5b468bcadd38c0d5 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filemon_batch.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filemon_batch.nomad.in
@@ -41,8 +41,8 @@ job "asapo-filemon_batch" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"batch",
   	"BatchSize": {{ keyOrDefault "monitor_batch_size" "3" }}
  }
@@ -103,8 +103,8 @@ job "asapo-filemon_batch" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"batch",
   	"BatchSize": {{ keyOrDefault "monitor_batch_size" "3" }}
  }
diff --git a/tests/manual/tests_via_nomad/asapo-test_filemon_multisource.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filemon_multisource.nomad.in
index 9d277950f8afc05862ca2a2034698d7a3359776d..7f7b07825033ee28c48161c07d2825ffdad7075e 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filemon_multisource.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filemon_multisource.nomad.in
@@ -41,8 +41,8 @@ job "asapo-filemon_multisource" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"multisource",
   	"SourceId": 1,
   	"NSources":2
@@ -104,8 +104,8 @@ job "asapo-filemon_multisource" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"multisource",
   	"SourceId": 2,
   	"NSources":2
diff --git a/tests/manual/tests_via_nomad/asapo-test_filemon_producer_tolocal.nomad.in b/tests/manual/tests_via_nomad/asapo-test_filemon_producer_tolocal.nomad.in
index b627fe8701b27a7fb9c3daa8f6ed69c9437dad9e..13c332d532289e50bacf15d355a70a2820b2ac14 100644
--- a/tests/manual/tests_via_nomad/asapo-test_filemon_producer_tolocal.nomad.in
+++ b/tests/manual/tests_via_nomad/asapo-test_filemon_producer_tolocal.nomad.in
@@ -41,8 +41,8 @@ job "asapo-produceronly" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
-  "Subset": {
+ "DataSource": "",
+  "Dataset": {
    	"Mode":"none"
   }
 }
@@ -102,8 +102,8 @@ job "asapo-produceronly" {
  "IgnoreExtensions":["tmp"],
  "WhitelistExtensions":[],
  "RemoveAfterSend":true,
- "Stream": "",
- "Subset": {
+ "DataSource": "",
+ "Dataset": {
   	"Mode":"none"
  }