diff --git a/CHANGELOG.md b/CHANGELOG.md
index e6b3d88008fd71db9416646043ae082b7c5d371a..7d137262415f8f0859af2e304b75fa63ba2cc841 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,7 +16,7 @@ BREAKING CHANGES
 * Consumer API - remove group_id argument from get_last/get_by_id/get_last_dataset/get_dataset_by_id functions
-* Producer API - changed meaning of subsets (subset_id replaced with id_in_subset and this means now id of the image within a subset (e.g. module number for multi-module detector)), file_id is now a global id of a multi-set data (i.g. multi-image id) 
+* Producer API - changed meaning of subsets: subset_id is replaced with id_in_subset, which is now the id of the image within a subset (e.g. module number for a multi-module detector); file_id is now a global id of multi-set data (e.g. multi-image id)
     ####  renaming - Producer API
-* stream -> data_source, substream -> stream
+* stream -> data_source, substream -> stream
     ####  renaming - Consumer API
 *
 
@@ -27,14 +27,14 @@ BUG FIXES
 ## 20.09.1
 
 FEATURES
-* New function GetLastSubstream/last_stream in Producer API - returns info for a substream which was created last 
+* New function GetLastStream/last_stream in Producer API - returns info for the most recently created stream
 
 IMPROVEMENTS
 * Each data tuple automatically gets a timestamp (nanoseconds from Linux epoch) at the moment it is being inserted to a database 
-* GetSubstreamList/get_substream_list returns now sorted (by timestamp of the earliest data tuple) list of substreams. Parameter `from` allows to limit the list
+* GetStreamList/get_stream_list now returns a list of streams sorted by the timestamp of their earliest data tuple. The parameter `from` allows limiting the list
 
 BREAKING CHANGES
-* GetSubstreamList/get_substream_list returns now not an array of strings, but array of StreamInfos/dictionaries
+* GetStreamList/get_stream_list now returns an array of StreamInfos/dictionaries instead of an array of strings
 
 ## 20.09.0
 
@@ -75,12 +75,12 @@ IMPROVEMENTS
 ## 20.06.0
 FEATURES
-* implemented acknowledeges - one can acknowledge a data tuple, get last acknowledged tuple id, get list of unacknowledged tuple ids
+* implemented acknowledges - one can acknowledge a data tuple, get the last acknowledged tuple id, get the list of unacknowledged tuple ids
-* implement getting substream info (contains last id) by producer client (not need to have consumer client)
+* implement getting stream info (contains last id) via the producer client (no need to have a consumer client)
 
 IMPROVEMENTS
-* change behavior when trying to get data from a substream that does not exist - return EndOfStream instead of WrongInput
-* change behavior of GetLastXX/get_lastXX functions - current pointer is not being set to the end of a substream after this command anymore
-* substream name added to producer callback output for Python
+* change behavior when trying to get data from a stream that does not exist - return EndOfStream instead of WrongInput
+* change behavior of GetLastXX/get_lastXX functions - the current pointer is no longer set to the end of a stream after this command
+* stream name added to producer callback output for Python
 * added simple C++ examples
 
 BUG FIXES
@@ -88,7 +88,7 @@ BUG FIXES
 
 ## 20.03.0
 FEATURES
-* introduced substreams for producer/consumer
+* introduced streams for producer/consumer
 * introduced timeout for producer requests
 * producer accepts "auto" for beamtime, will automatically select a current one for a given beamline
 * introduced file transfer service - possibility for consumer clients to receive data also in case filesystem is inaccessible
diff --git a/broker/src/asapo_broker/database/database.go b/broker/src/asapo_broker/database/database.go
index 2ec1142d4dde6c50ea2557dff8ea0af6401d939c..0bb12f25217b93f03e3fe4556e6c2c060864d5b1 100644
--- a/broker/src/asapo_broker/database/database.go
+++ b/broker/src/asapo_broker/database/database.go
@@ -22,7 +22,7 @@ type Agent interface {
 
 type DBSettings struct {
 	ReadFromInprocessPeriod int
-	UpdateSubstreamCachePeriodMs int
+	UpdateStreamCachePeriodMs int
 }
 
 type DBError struct {
diff --git a/broker/src/asapo_broker/database/mongodb.go b/broker/src/asapo_broker/database/mongodb.go
index 48fe7150aa3dbcf5faaedf055c5b6a785d5ee503..d0005e7dffb3066659c4cfedd4d6967ad083429b 100644
--- a/broker/src/asapo_broker/database/mongodb.go
+++ b/broker/src/asapo_broker/database/mongodb.go
@@ -69,8 +69,8 @@ const no_session_msg = "database client not created"
 const wrong_id_type = "wrong id type"
 const already_connected_msg = "already connected"
 
-const finish_substream_keyword = "asapo_finish_substream"
-const no_next_substream_keyword = "asapo_no_next"
+const finish_stream_keyword = "asapo_finish_stream"
+const no_next_stream_keyword = "asapo_no_next"
 
 var dbSessionLock sync.Mutex
 
@@ -230,13 +230,13 @@ func (db *Mongodb) incrementField(request Request, max_ind int, res interface{})
 	return nil
 }
 
-func encodeAnswer(id, id_max int, next_substream string) string {
+func encodeAnswer(id, id_max int, next_stream string) string {
 	var r = struct {
 		Op             string `json:"op"`
 		Id             int    `json:"id"`
 		Id_max         int    `json:"id_max"`
-		Next_substream string `json:"next_substream"`
-	}{"get_record_by_id", id, id_max, next_substream}
+		Next_stream    string `json:"next_stream"`
+	}{"get_record_by_id", id, id_max, next_stream}
 	answer, _ := json.Marshal(&r)
 	return string(answer)
 }
@@ -361,7 +361,7 @@ func (db *Mongodb) checkDatabaseOperationPrerequisites(request Request) error {
 	}
 
 	if len(request.DbName) == 0 || len(request.DbCollectionName) == 0 {
-		return &DBError{utils.StatusWrongInput, "beamtime_id ans substream must be set"}
+		return &DBError{utils.StatusWrongInput, "beamtime_id and stream must be set"}
 	}
 
 	return nil
@@ -508,17 +508,17 @@ func (db *Mongodb) getNextAndMaxIndexes(request Request) (int, int, error) {
 func (db *Mongodb) processLastRecord(request Request, data []byte, err error) ([]byte, error) {
 	var r ServiceRecord
 	err = json.Unmarshal(data, &r)
-	if err != nil || r.Name != finish_substream_keyword {
+	if err != nil || r.Name != finish_stream_keyword {
 		return data, err
 	}
-	var next_substream string
-	next_substream, ok := r.Meta["next_substream"].(string)
+	var next_stream string
+	next_stream, ok := r.Meta["next_stream"].(string)
 	if !ok {
-		next_substream = no_next_substream_keyword
+		next_stream = no_next_stream_keyword
 	}
 
-	answer := encodeAnswer(r.ID, r.ID, next_substream)
-	log_str := "reached end of substream " + request.DbCollectionName + " , next_substream: " + next_substream
+	answer := encodeAnswer(r.ID, r.ID, next_stream)
+	log_str := "reached end of stream " + request.DbCollectionName + " , next_stream: " + next_stream
 	logger.Debug(log_str)
 
 
@@ -781,10 +781,10 @@ func (db *Mongodb) getNacks(request Request, min_index, max_index int) ([]int, e
 	return resp[0].Numbers, nil
 }
 
-func (db *Mongodb) getSubstreams(request Request) ([]byte, error) {
-	rec, err := substreams.getSubstreams(db,request.DbName,request.ExtraParam)
+func (db *Mongodb) getStreams(request Request) ([]byte, error) {
+	rec, err := streams.getStreams(db, request.DbName, request.ExtraParam)
 	if err != nil {
-		return db.processQueryError("get substreams", request.DbName, err)
+		return db.processQueryError("get streams", request.DbName, err)
 	}
 	return json.Marshal(&rec)
 }
@@ -810,8 +810,8 @@ func (db *Mongodb) ProcessRequest(request Request) (answer []byte, err error) {
 		return db.getMeta(request)
 	case "queryimages":
 		return db.queryImages(request)
-	case "substreams":
-		return db.getSubstreams(request)
+	case "streams":
+		return db.getStreams(request)
 	case "ackimage":
 		return db.ackRecord(request)
 	case "negackimage":
diff --git a/broker/src/asapo_broker/database/mongodb_streams.go b/broker/src/asapo_broker/database/mongodb_streams.go
new file mode 100644
index 0000000000000000000000000000000000000000..fba70330b025ad6ae762c4574324073b3900a655
--- /dev/null
+++ b/broker/src/asapo_broker/database/mongodb_streams.go
@@ -0,0 +1,136 @@
+//+build !test
+
+package database
+
+import (
+	"asapo_common/utils"
+	"context"
+	"errors"
+	"go.mongodb.org/mongo-driver/bson"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+type StreamInfo struct {
+	Name      string `json:"name"`
+	Timestamp int64  `json:"timestampCreated"`
+}
+
+type StreamsRecord struct {
+	Streams []StreamInfo `json:"streams"`
+}
+
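+// Streams caches the stream list per database name, together with the time of the last cache update.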
+type Streams struct {
+	records     map[string]StreamsRecord
+	lastUpdated int64
+}
+
+var streams = Streams{lastUpdated: 0, records: make(map[string]StreamsRecord, 0)}
+var streamsLock sync.Mutex
+
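+// tryGetFromCache returns the cached stream list for db_name, or an error if the cache is older than updatePeriodMs or holds no entry for this database.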
+func (ss *Streams) tryGetFromCache(db_name string, updatePeriodMs int) (StreamsRecord, error) {
+	if ss.lastUpdated < time.Now().UnixNano()-int64(updatePeriodMs*1000000) {
+		return StreamsRecord{}, errors.New("cache expired")
+	}
+	rec, ok := ss.records[db_name]
+	if !ok {
+		return StreamsRecord{}, errors.New("no records for " + db_name)
+	}
+	return rec, nil
+}
+
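+// readStreams lists the collections of the database and keeps those with the data-collection prefix as stream names; timestamps are filled in separately.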
+func readStreams(db *Mongodb, db_name string) (StreamsRecord, error) {
+	database := db.client.Database(db_name)
+	result, err := database.ListCollectionNames(context.TODO(), bson.D{})
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+	var rec = StreamsRecord{[]StreamInfo{}}
+	for _, coll := range result {
+		if strings.HasPrefix(coll, data_collection_name_prefix) {
+			si := StreamInfo{Name: strings.TrimPrefix(coll, data_collection_name_prefix)}
+			rec.Streams = append(rec.Streams, si)
+		}
+	}
+	return rec, nil
+}
+
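+// updateTimestamps fills in creation timestamps, reusing values already known from the cache and otherwise reading the earliest record of each stream from the database.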
+func updateTimestamps(db *Mongodb, db_name string, rec *StreamsRecord) {
+	ss, dbFound := streams.records[db_name]
+	currentStreams := []StreamInfo{}
+	if dbFound {
+		// sort cached streams by name so they can be binary-searched below
+		currentStreams = ss.Streams
+		sort.Slice(currentStreams, func(i, j int) bool {
+			return currentStreams[i].Name < currentStreams[j].Name
+		})
+	}
+	for i, record := range rec.Streams {
+		ind := sort.Search(len(currentStreams), func(i int) bool {
+			return currentStreams[i].Name >= record.Name
+		})
+		if ind < len(currentStreams) && currentStreams[ind].Name == record.Name { // record found, reuse its timestamp
+			rec.Streams[i].Timestamp = currentStreams[ind].Timestamp
+			continue
+		}
+		res, err := db.getEarliestRecord(db_name, record.Name)
+		if err == nil {
+			ts, ok := utils.InterfaceToInt64(res["timestamp"])
+			if ok {
+				rec.Streams[i].Timestamp = ts
+			}
+		}
+	}
+}
+
+func sortRecords(rec *StreamsRecord) {
+	sort.Slice(rec.Streams[:], func(i, j int) bool {
+		return rec.Streams[i].Timestamp < rec.Streams[j].Timestamp
+	})
+}
+
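+// updateFromDb re-reads the stream list from MongoDB and refreshes the cache if any streams were found.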
+func (ss *Streams) updateFromDb(db *Mongodb, db_name string) (StreamsRecord, error) {
+	rec, err := readStreams(db, db_name)
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+	updateTimestamps(db, db_name, &rec)
+	sortRecords(&rec)
+	if len(rec.Streams)>0 {
+		ss.records[db_name] = rec
+		ss.lastUpdated = time.Now().UnixNano()
+	}
+	return rec, nil
+}
+
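+// getStreams serves a stream-list request, preferring the cache; a non-empty 'from' truncates the list to start at that stream.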
+func (ss *Streams) getStreams(db *Mongodb, db_name string, from string) (StreamsRecord, error) {
+	streamsLock.Lock()
+	rec, err := ss.tryGetFromCache(db_name, db.settings.UpdateStreamCachePeriodMs)
+	if err != nil {
+		rec, err = ss.updateFromDb(db, db_name)
+	}
+	streamsLock.Unlock()
+	if err != nil {
+		return StreamsRecord{}, err
+	}
+
+	if from != "" {
+		ind := len(rec.Streams)
+		for i, stream := range rec.Streams {
+			if stream.Name == from {
+				ind = i
+				break
+			}
+		}
+		rec.Streams = rec.Streams[ind:]
+	}
+	return rec, nil
+}
diff --git a/broker/src/asapo_broker/database/mongodb_substreams.go b/broker/src/asapo_broker/database/mongodb_substreams.go
deleted file mode 100644
index 999e6fa17b1c2b07b24db67e2d0166d7291d336d..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/database/mongodb_substreams.go
+++ /dev/null
@@ -1,130 +0,0 @@
-//+build !test
-
-package database
-
-import (
-	"asapo_common/utils"
-	"context"
-	"errors"
-	"go.mongodb.org/mongo-driver/bson"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-type SubstreamInfo struct {
-	Name      string `json:"name"`
-	Timestamp int64  `json:"timestampCreated"`
-}
-
-type SubstreamsRecord struct {
-	Substreams []SubstreamInfo `json:"substreams"`
-}
-
-type Substreams struct {
-	records     map[string]SubstreamsRecord
-	lastUpdated int64
-}
-
-var substreams = Substreams{lastUpdated: 0, records: make(map[string]SubstreamsRecord, 0)}
-var substreamsLock sync.Mutex
-
-func (ss *Substreams) tryGetFromCache(db_name string, updatePeriodMs int) (SubstreamsRecord, error) {
-	if ss.lastUpdated < time.Now().UnixNano()-int64(updatePeriodMs*1000000) {
-		return SubstreamsRecord{}, errors.New("cache expired")
-	}
-	rec, ok := ss.records[db_name]
-	if !ok {
-		return SubstreamsRecord{}, errors.New("no records for " + db_name)
-	}
-	return rec, nil
-}
-
-func readSubstreams(db *Mongodb, db_name string) (SubstreamsRecord, error) {
-	database := db.client.Database(db_name)
-	result, err := database.ListCollectionNames(context.TODO(), bson.D{})
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-	var rec = SubstreamsRecord{[]SubstreamInfo{}}
-	for _, coll := range result {
-		if strings.HasPrefix(coll, data_collection_name_prefix) {
-			si := SubstreamInfo{Name: strings.TrimPrefix(coll, data_collection_name_prefix)}
-			rec.Substreams = append(rec.Substreams, si)
-		}
-	}
-	return rec, nil
-}
-
-func updateTimestamps(db *Mongodb, db_name string, rec *SubstreamsRecord) {
-	ss,dbFound :=substreams.records[db_name]
-	currentSubstreams := []SubstreamInfo{}
-	if dbFound {
-		// sort substreams by name
-		currentSubstreams=ss.Substreams
-		sort.Slice(currentSubstreams,func(i, j int) bool {
-			return currentSubstreams[i].Name>=currentSubstreams[j].Name
-		})
-	}
-	for i, record := range rec.Substreams {
-		ind := sort.Search(len(currentSubstreams),func(i int) bool {
-			return currentSubstreams[i].Name>=record.Name
-		})
-		if ind < len(currentSubstreams) && currentSubstreams[ind].Name == record.Name { // record found, just skip it
-			rec.Substreams[i].Timestamp = currentSubstreams[ind].Timestamp
-			continue
-		}
-		res, err := db.getEarliestRecord(db_name, record.Name)
-		if err == nil {
-			ts,ok:=utils.InterfaceToInt64(res["timestamp"])
-			if ok {
-				rec.Substreams[i].Timestamp = ts
-			}
-		}
-	}
-}
-
-func sortRecords(rec *SubstreamsRecord) {
-	sort.Slice(rec.Substreams[:], func(i, j int) bool {
-		return rec.Substreams[i].Timestamp < rec.Substreams[j].Timestamp
-	})
-}
-
-func (ss *Substreams) updateFromDb(db *Mongodb, db_name string) (SubstreamsRecord, error) {
-	rec, err := readSubstreams(db, db_name)
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-	updateTimestamps(db, db_name, &rec)
-	sortRecords(&rec)
-	if len(rec.Substreams)>0 {
-		ss.records[db_name] = rec
-		ss.lastUpdated = time.Now().UnixNano()
-	}
-	return rec, nil
-}
-
-func (ss *Substreams) getSubstreams(db *Mongodb, db_name string, from string) (SubstreamsRecord, error) {
-	substreamsLock.Lock()
-	rec, err := ss.tryGetFromCache(db_name,db.settings.UpdateSubstreamCachePeriodMs)
-	if err != nil {
-		rec, err = ss.updateFromDb(db, db_name)
-	}
-	substreamsLock.Unlock()
-	if err != nil {
-		return SubstreamsRecord{}, err
-	}
-
-	if from != "" {
-		ind := len(rec.Substreams)
-		for i, rec := range rec.Substreams {
-			if rec.Name == from {
-				ind = i
-				break
-			}
-		}
-		rec.Substreams = rec.Substreams[ind:]
-	}
-	return rec, nil
-}
diff --git a/broker/src/asapo_broker/database/mongodb_test.go b/broker/src/asapo_broker/database/mongodb_test.go
index 905b2c3613d40273379bd98663f424c7056725ea..d9fdf51c4550b220b32f5412ecf2d0e7a7b45a61 100644
--- a/broker/src/asapo_broker/database/mongodb_test.go
+++ b/broker/src/asapo_broker/database/mongodb_test.go
@@ -28,17 +28,17 @@ type TestDataset struct {
 var db Mongodb
 
 const dbname = "12345"
-const collection = "substream"
-const collection2 = "substream2"
+const collection = "stream"
+const collection2 = "stream2"
 const dbaddress = "127.0.0.1:27017"
 const groupId = "bid2a5auidddp1vl71d0"
 const metaID = 0
 const metaID_str = "0"
 
-var empty_next = map[string]string{"next_substream": ""}
+var empty_next = map[string]string{"next_stream": ""}
 
 var rec1 = TestRecord{1, empty_next, "aaa", 0}
-var rec_finished = TestRecord{2, map[string]string{"next_substream": "next1"}, finish_substream_keyword, 0}
+var rec_finished = TestRecord{2, map[string]string{"next_stream": "next1"}, finish_stream_keyword, 0}
 var rec2 = TestRecord{2, empty_next, "bbb", 1}
 var rec3 = TestRecord{3, empty_next, "ccc", 2}
 
@@ -101,7 +101,7 @@ func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	defer cleanup()
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
@@ -109,7 +109,7 @@ func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	defer cleanup()
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "last"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) {
@@ -118,7 +118,7 @@ func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
@@ -127,7 +127,7 @@ func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec2)
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetNextOK(t *testing.T) {
@@ -149,7 +149,7 @@ func TestMongoDBGetNextErrorOnFinishedStream(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next1\"}", err.(*DBError).Message)
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_stream\":\"next1\"}", err.(*DBError).Message)
 }
 
 func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) {
@@ -160,7 +160,7 @@ func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) {
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.(*DBError).Message)
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err.(*DBError).Message)
 }
 
 func TestMongoDBGetNextCorrectOrder(t *testing.T) {
@@ -284,7 +284,7 @@ func TestMongoDBGetNextEmptyAfterErasingDatabase(t *testing.T) {
 
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBgetRecordByID(t *testing.T) {
@@ -303,7 +303,7 @@ func TestMongoDBgetRecordByIDFails(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	_, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_substream\":\"\"}", err.Error())
+	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_stream\":\"\"}", err.Error())
 }
 
 func TestMongoDBGetRecordNext(t *testing.T) {
@@ -733,38 +733,38 @@ func TestMongoDBOkOnIncompleteDatasetID(t *testing.T) {
 
 }
 
-type Substream struct {
+type Stream struct {
 	name    string
 	records []TestRecord
 }
 
-var testsSubstreams = []struct {
+var testsStreams = []struct {
 	from               string
-	substreams         []Substream
-	expectedSubstreams SubstreamsRecord
+	streams            []Stream
+	expectedStreams    StreamsRecord
 	test               string
 	ok                 bool
 }{
-	{"", []Substream{}, SubstreamsRecord{[]SubstreamInfo{}}, "no substreams", true},
-	{"", []Substream{{"ss1", []TestRecord{rec2, rec1}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}}}, "one substream", true},
-	{"", []Substream{{"ss1", []TestRecord{rec2, rec1}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}, SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "two substreams", true},
-	{"ss2", []Substream{{"ss1", []TestRecord{rec1, rec2}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "with from", true},
+	{"", []Stream{}, StreamsRecord{[]StreamInfo{}}, "no streams", true},
+	{"", []Stream{{"ss1", []TestRecord{rec2, rec1}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss1", Timestamp: 0}}}, "one stream", true},
+	{"", []Stream{{"ss1", []TestRecord{rec2, rec1}}, {"ss2", []TestRecord{rec2, rec3}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss1", Timestamp: 0}, StreamInfo{Name: "ss2", Timestamp: 1}}}, "two streams", true},
+	{"ss2", []Stream{{"ss1", []TestRecord{rec1, rec2}}, {"ss2", []TestRecord{rec2, rec3}}}, StreamsRecord{[]StreamInfo{StreamInfo{Name: "ss2", Timestamp: 1}}}, "with from", true},
 }
 
-func TestMongoDBListSubstreams(t *testing.T) {
-	for _, test := range testsSubstreams {
+func TestMongoDBListStreams(t *testing.T) {
+	for _, test := range testsStreams {
 		db.Connect(dbaddress)
-		for _, substream := range test.substreams {
-			for _, rec := range substream.records {
-				db.insertRecord(dbname, substream.name, &rec)
+		for _, stream := range test.streams {
+			for _, rec := range stream.records {
+				db.insertRecord(dbname, stream.name, &rec)
 			}
 		}
-		var rec_substreams_expect, _ = json.Marshal(test.expectedSubstreams)
+		var rec_streams_expect, _ = json.Marshal(test.expectedStreams)
 
-		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "0", Op: "substreams", ExtraParam: test.from})
+		res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "0", Op: "streams", ExtraParam: test.from})
 		if test.ok {
 			assert.Nil(t, err, test.test)
-			assert.Equal(t, string(rec_substreams_expect), string(res), test.test)
+			assert.Equal(t, string(rec_streams_expect), string(res), test.test)
 		} else {
 			assert.NotNil(t, err, test.test)
 		}
@@ -890,7 +890,7 @@ func TestMongoDBGetNextUsesInprocessedNumRetry(t *testing.T) {
 	assert.Nil(t, err1)
 	assert.NotNil(t, err2)
 	if err2 != nil {
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err2.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err2.Error())
 	}
 	assert.Equal(t, string(rec1_expect), string(res))
 	assert.Equal(t, string(rec1_expect), string(res1))
@@ -976,7 +976,7 @@ func TestMongoDBAckDeletesInprocessed(t *testing.T) {
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err.Error())
 	}
 }
 
@@ -1006,7 +1006,7 @@ func TestMongoDBNegAck(t *testing.T) {
 	assert.NotNil(t, err1)
 	if err1 != nil {
 		assert.Equal(t, utils.StatusNoData, err1.(*DBError).Code)
-		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err1.Error())
+		assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err1.Error())
 	}
 }
 
diff --git a/broker/src/asapo_broker/database/streams_test.go b/broker/src/asapo_broker/database/streams_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c172adf5483c2de061f64f55021663f74988a34b
--- /dev/null
+++ b/broker/src/asapo_broker/database/streams_test.go
@@ -0,0 +1,73 @@
+// +build integration_tests
+
+package database
+
+import (
+	"github.com/stretchr/testify/suite"
+	"testing"
+	"time"
+)
+
+type StreamsTestSuite struct {
+	suite.Suite
+}
+
+func (suite *StreamsTestSuite) SetupTest() {
+	db.Connect(dbaddress)
+}
+
+func (suite *StreamsTestSuite) TearDownTest() {
+	cleanup()
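+	// reset the shared stream cache so tests do not see records from previous tests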
+	streams.records = map[string]StreamsRecord{}
+}
+
+func TestStreamsTestSuite(t *testing.T) {
+	suite.Run(t, new(StreamsTestSuite))
+}
+
+func (suite *StreamsTestSuite) TestStreamsEmpty() {
+	rec, err := streams.getStreams(&db, "test", "")
+	suite.Nil(err)
+	suite.Empty(rec.Streams)
+}
+
+func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenEmpty() {
+	db.settings.UpdateStreamCachePeriodMs = 1000
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(1, len(rec.Streams))
+}
+
+func (suite *StreamsTestSuite) TestStreamsUsesCache() {
+	db.settings.UpdateStreamCachePeriodMs = 1000
+	db.insertRecord(dbname, collection, &rec2)
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(int64(1), rec.Streams[0].Timestamp)
+}
+
+func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenExpired() {
+	db.settings.UpdateStreamCachePeriodMs = 10
+	db.insertRecord(dbname, collection, &rec2)
+	streams.getStreams(&db, dbname, "")
+	db.insertRecord(dbname, collection, &rec1)
+	time.Sleep(time.Millisecond * 100)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Equal(int64(1), rec.Streams[0].Timestamp)
+}
+
+func (suite *StreamsTestSuite) TestStreamRemovesDatabase() {
+	db.settings.UpdateStreamCachePeriodMs = 0
+	db.insertRecord(dbname, collection, &rec1)
+	streams.getStreams(&db, dbname, "")
+	db.dropDatabase(dbname)
+	rec, err := streams.getStreams(&db, dbname, "")
+	suite.Nil(err)
+	suite.Empty(rec.Streams)
+}
diff --git a/broker/src/asapo_broker/database/substreams_test.go b/broker/src/asapo_broker/database/substreams_test.go
deleted file mode 100644
index 6c3ed9be0bc70b6058e3c336f035f329369ad8ff..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/database/substreams_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// +build integration_tests
-
-package database
-
-import (
-	"github.com/stretchr/testify/suite"
-	"testing"
-	"time"
-)
-
-type SubstreamsTestSuite struct {
-	suite.Suite
-}
-
-func (suite *SubstreamsTestSuite) SetupTest() {
-	db.Connect(dbaddress)
-}
-
-func (suite *SubstreamsTestSuite) TearDownTest() {
-	cleanup()
-	substreams.records= map[string]SubstreamsRecord{}
-}
-
-func TestSubstreamsTestSuite(t *testing.T) {
-	suite.Run(t, new(SubstreamsTestSuite))
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsEmpty() {
-	rec, err := substreams.getSubstreams(&db, "test", "")
-	suite.Nil(err)
-	suite.Empty(rec.Substreams, 0)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsNotUsesCacheWhenEmpty() {
-	db.settings.UpdateSubstreamCachePeriodMs = 1000
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(1, len(rec.Substreams))
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsUsesCache() {
-	db.settings.UpdateSubstreamCachePeriodMs = 1000
-	db.insertRecord(dbname, collection, &rec2)
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(int64(1), rec.Substreams[0].Timestamp)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamsNotUsesCacheWhenExpired() {
-	db.settings.UpdateSubstreamCachePeriodMs = 10
-	db.insertRecord(dbname, collection, &rec2)
-	substreams.getSubstreams(&db, dbname, "")
-	db.insertRecord(dbname, collection, &rec1)
-	time.Sleep(time.Millisecond * 100)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Equal(int64(1), rec.Substreams[0].Timestamp)
-}
-
-func (suite *SubstreamsTestSuite) TestSubstreamRemovesDatabase() {
-	db.settings.UpdateSubstreamCachePeriodMs = 0
-	db.insertRecord(dbname, collection, &rec1)
-	substreams.getSubstreams(&db, dbname, "")
-	db.dropDatabase(dbname)
-	rec, err := substreams.getSubstreams(&db, dbname, "")
-	suite.Nil(err)
-	suite.Empty(rec.Substreams, 0)
-}
diff --git a/broker/src/asapo_broker/server/get_commands_test.go b/broker/src/asapo_broker/server/get_commands_test.go
index 2f2f9fadba8fea7297b6d555c07a28d9c33952f5..38bff91556dca4acf8f583db67269709cf28934e 100644
--- a/broker/src/asapo_broker/server/get_commands_test.go
+++ b/broker/src/asapo_broker/server/get_commands_test.go
@@ -34,29 +34,29 @@ func TestGetCommandsTestSuite(t *testing.T) {
 
 var testsGetCommand = []struct {
 	command string
-	substream string
+	stream string
 	groupid string
 	reqString string
 	queryParams string
 	externalParam string
 }{
-	{"last", expectedSubstream, "", expectedSubstream + "/0/last","","0"},
-	{"id", expectedSubstream, "", expectedSubstream + "/0/1","","1"},
+	{"last", expectedStream, "", expectedStream + "/0/last","","0"},
+	{"id", expectedStream, "", expectedStream + "/0/1","","1"},
 	{"meta", "default", "", "default/0/meta/0","","0"},
-	{"nacks", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/nacks","","0_0"},
-	{"next", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/next","",""},
-	{"next", expectedSubstream, expectedGroupID, expectedSubstream + "/" +
+	{"nacks", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/nacks","","0_0"},
+	{"next", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/next","",""},
+	{"next", expectedStream, expectedGroupID, expectedStream + "/" +
 		expectedGroupID + "/next","&resend_nacks=true&delay_sec=10&resend_attempts=3","10_3"},
-	{"size", expectedSubstream, "", expectedSubstream  + "/size","","0"},
-	{"substreams", "0", "", "0/substreams","",""},
-	{"lastack", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/lastack","",""},
+	{"size", expectedStream, "", expectedStream  + "/size","","0"},
+	{"streams", "0", "", "0/streams","",""},
+	{"lastack", expectedStream, expectedGroupID, expectedStream + "/" + expectedGroupID + "/lastack","",""},
 
 }
 
 
 func (suite *GetCommandsTestSuite) TestGetCommandsCallsCorrectRoutine() {
 	for _, test := range testsGetCommand {
-		suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: test.substream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
+		suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: test.stream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
 		logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request "+test.command)))
-		w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + test.reqString+correctTokenSuffix+test.queryParams)
+		w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + test.reqString+correctTokenSuffix+test.queryParams)
 		suite.Equal(http.StatusOK, w.Code, test.command+ " OK")
diff --git a/broker/src/asapo_broker/server/get_meta_test.go b/broker/src/asapo_broker/server/get_meta_test.go
index 4e305ea3e7077135002a2403603c078678a4bd76..0246ba3015e0dab8216344dd029a02144d4a7fe2 100644
--- a/broker/src/asapo_broker/server/get_meta_test.go
+++ b/broker/src/asapo_broker/server/get_meta_test.go
@@ -33,9 +33,9 @@ func TestGetMetaTestSuite(t *testing.T) {
 }
 
 func (suite *GetMetaTestSuite) TestGetMetaOK() {
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, Op: "meta", ExtraParam: "1"}).Return([]byte(""), nil)
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, Op: "meta", ExtraParam: "1"}).Return([]byte(""), nil)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request meta")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/0/meta"  + "/1" + correctTokenSuffix,"GET")
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/0/meta" + "/1" + correctTokenSuffix,"GET")
 	suite.Equal(http.StatusOK, w.Code, "meta OK")
 }
 
diff --git a/broker/src/asapo_broker/server/get_streams.go b/broker/src/asapo_broker/server/get_streams.go
new file mode 100644
index 0000000000000000000000000000000000000000..335f15a6eff8b6698bdc338632d6d360b7891b5a
--- /dev/null
+++ b/broker/src/asapo_broker/server/get_streams.go
@@ -0,0 +1,12 @@
+package server
+
+import (
+	"net/http"
+)
+
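+// routeGetStreams handles the stream-list endpoint; the optional 'from' query parameter limits the returned list.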
+func routeGetStreams(w http.ResponseWriter, r *http.Request) {
+	keys := r.URL.Query()
+	from := keys.Get("from")
+	processRequest(w, r, "streams", from, false)
+}
diff --git a/broker/src/asapo_broker/server/get_substreams.go b/broker/src/asapo_broker/server/get_substreams.go
deleted file mode 100644
index ee4600cd6d0670db8b6f7fd0a8362ffb547e9e76..0000000000000000000000000000000000000000
--- a/broker/src/asapo_broker/server/get_substreams.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package server
-
-import (
-	"net/http"
-)
-
-func routeGetSubstreams(w http.ResponseWriter, r *http.Request) {
-	keys := r.URL.Query()
-	from := keys.Get("from")
-	processRequest(w, r, "substreams", from, false)
-}
diff --git a/broker/src/asapo_broker/server/listroutes.go b/broker/src/asapo_broker/server/listroutes.go
index a59d65bca869786882a1bcff2d5d5d5f652f2840..d95f6161b47971e67bac6079f6c50c2af894581d 100644
--- a/broker/src/asapo_broker/server/listroutes.go
+++ b/broker/src/asapo_broker/server/listroutes.go
@@ -8,49 +8,49 @@ var listRoutes = utils.Routes{
 	utils.Route{
 		"GetNext",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/{groupid}/next",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/next",
 		routeGetNext,
 	},
 	utils.Route{
 		"GetSize",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/size",
+		"/database/{dbname}/{datasource}/{stream}/size",
 		routeGetSize,
 	},
 	utils.Route{
-		"GetSubstreams",
+		"GetStreams",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/substreams",
-		routeGetSubstreams,
+		"/database/{dbname}/{datasource}/{stream}/streams",
+		routeGetStreams,
 	},
 	utils.Route{
 		"GetLast",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/0/last",
+		"/database/{dbname}/{datasource}/{stream}/0/last",
 		routeGetLast,
 	},
 	utils.Route{
 		"GetLastAck",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/{groupid}/lastack",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/lastack",
 		routeGetLastAck,
 	},
 	utils.Route{
 		"GetNacks",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/{groupid}/nacks",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/nacks",
 		routeGetNacks,
 	},
 	utils.Route{
 		"GetID",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/0/{id}",
+		"/database/{dbname}/{datasource}/{stream}/0/{id}",
 		routeGetByID,
 	},
 	utils.Route{
 		"GetMeta",
 		"Get",
-		"/database/{dbname}/{datasource}/{substream}/0/meta/{id}",
+		"/database/{dbname}/{datasource}/{stream}/0/meta/{id}",
 		routeGetMeta,
 	},
 	utils.Route{
@@ -62,19 +62,19 @@ var listRoutes = utils.Routes{
 	utils.Route{
 		"QueryImages",
 		"Post",
-		"/database/{dbname}/{datasource}/{substream}/0/queryimages",
+		"/database/{dbname}/{datasource}/{stream}/0/queryimages",
 		routeQueryImages,
 	},
 	utils.Route{
 		"ResetConter",
 		"Post",
-		"/database/{dbname}/{datasource}/{substream}/{groupid}/resetcounter",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/resetcounter",
 		routeResetCounter,
 	},
 	utils.Route{
 		"ImageOp",
 		"Post",
-		"/database/{dbname}/{datasource}/{substream}/{groupid}/{id}",
+		"/database/{dbname}/{datasource}/{stream}/{groupid}/{id}",
 		routeImageOp,
 	},
 	utils.Route{
diff --git a/broker/src/asapo_broker/server/post_op_image_test.go b/broker/src/asapo_broker/server/post_op_image_test.go
index 94fdf49f6057390156cd8656feb93ce5ab4ea6f8..5fcddba9b8d5c0659a277078863b964bdd3b04d3 100644
--- a/broker/src/asapo_broker/server/post_op_image_test.go
+++ b/broker/src/asapo_broker/server/post_op_image_test.go
@@ -34,21 +34,21 @@ func TestImageOpTestSuite(t *testing.T) {
 
 func (suite *ImageOpTestSuite) TestAckImageOpOK() {
 	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId: expectedGroupID, Op: "ackimage", ExtraParam: query_str}).Return([]byte(""), nil)
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId: expectedGroupID, Op: "ackimage", ExtraParam: query_str}).Return([]byte(""), nil)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request ackimage")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
 	suite.Equal(http.StatusOK, w.Code, "ackimage OK")
 }
 
 
 func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongOp() {
 	query_str := "\"Id\":1,\"Op\":\"ackimage\"}"
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
 	suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong")
 }
 
 func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongID() {
 	query_str := "{\"Id\":1,\"Op\":\"ackimage\"}"
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/bla" + correctTokenSuffix,"POST",query_str)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/bla" + correctTokenSuffix,"POST",query_str)
 	suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong")
 }
diff --git a/broker/src/asapo_broker/server/post_query_images_test.go b/broker/src/asapo_broker/server/post_query_images_test.go
index 5ac71bca05115779607015c7c1c604c996c20515..5d53dbf31f63f7ebe376e13e74c91c6eb0fa2b8c 100644
--- a/broker/src/asapo_broker/server/post_query_images_test.go
+++ b/broker/src/asapo_broker/server/post_query_images_test.go
@@ -35,10 +35,10 @@ func TestQueryTestSuite(t *testing.T) {
 func (suite *QueryTestSuite) TestQueryOK() {
 	query_str := "aaaa"
 
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream,Op: "queryimages", ExtraParam: query_str}).Return([]byte("{}"), nil)
+	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedStream,Op: "queryimages", ExtraParam: query_str}).Return([]byte("{}"), nil)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request queryimages")))
 
-	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/0/queryimages"+correctTokenSuffix, "POST", query_str)
+	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/0/queryimages"+correctTokenSuffix, "POST", query_str)
 	suite.Equal(http.StatusOK, w.Code, "Query OK")
 }
 
diff --git a/broker/src/asapo_broker/server/post_reset_counter_test.go b/broker/src/asapo_broker/server/post_reset_counter_test.go
index d35f116a15d063dc6be8264f59ac0468b54c3ee1..5ce1fa4cabf15afc21e47ef659a5402796d6fc20 100644
--- a/broker/src/asapo_broker/server/post_reset_counter_test.go
+++ b/broker/src/asapo_broker/server/post_reset_counter_test.go
@@ -33,11 +33,11 @@ func TestResetCounterTestSuite(t *testing.T) {
 }
 
 func (suite *ResetCounterTestSuite) TestResetCounterOK() {
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), nil)
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request resetcounter")))
 
-	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST")
+	w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST")
 	suite.Equal(http.StatusOK, w.Code, "ResetCounter OK")
 }
diff --git a/broker/src/asapo_broker/server/process_request.go b/broker/src/asapo_broker/server/process_request.go
index ab1c4d5f213257f5a9c92d7874aa636fbb11caae..3e937b879ac297a709b962f6f5d96b597bb6d76a 100644
--- a/broker/src/asapo_broker/server/process_request.go
+++ b/broker/src/asapo_broker/server/process_request.go
@@ -14,14 +14,14 @@ func extractRequestParameters(r *http.Request, needGroupID bool) (string, string
 	db_name, ok1 := vars["dbname"]
 
 	datasource, ok3 := vars["datasource"]
-	substream, ok4 := vars["substream"]
+	stream, ok4 := vars["stream"]
 
 	ok2 := true
 	group_id := ""
 	if needGroupID {
 		group_id, ok2 = vars["groupid"]
 	}
-	return db_name, datasource, substream, group_id, ok1 && ok2 && ok3 && ok4
+	return db_name, datasource, stream, group_id, ok1 && ok2 && ok3 && ok4
 }
 
 func IsLetterOrNumbers(s string) bool {
@@ -52,7 +52,7 @@ func checkGroupID(w http.ResponseWriter, needGroupID bool, group_id string, db_n
 func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_param string, needGroupID bool) {
 	r.Header.Set("Content-type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	db_name, datasource, substream, group_id, ok := extractRequestParameters(r, needGroupID)
+	db_name, datasource, stream, group_id, ok := extractRequestParameters(r, needGroupID)
 	if !ok {
 		w.WriteHeader(http.StatusBadRequest)
 		return
@@ -71,7 +71,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_par
 	request.DbName = db_name+"_"+datasource
 	request.Op = op
 	request.ExtraParam = extra_param
-	request.DbCollectionName = substream
+	request.DbCollectionName = stream
 	request.GroupId = group_id
 	if yes, minSize := datasetRequested(r); yes {
 		request.DatasetOp = true
diff --git a/broker/src/asapo_broker/server/process_request_test.go b/broker/src/asapo_broker/server/process_request_test.go
index 24605fdc0378378341d3fbb4e4d15ec13d4bc973..864ce74f93b44cd6ccddca70fee77685daa9358e 100644
--- a/broker/src/asapo_broker/server/process_request_test.go
+++ b/broker/src/asapo_broker/server/process_request_test.go
@@ -21,7 +21,7 @@ var correctTokenSuffix, wrongTokenSuffix, suffixWithWrongToken, expectedBeamtime
 const expectedGroupID = "bid2a5auidddp1vl71d0"
 const wrongGroupID = "_bid2a5auidddp1vl71"
-const expectedStream = "datasource"
-const expectedSubstream = "substream"
+const expectedSource = "datasource"
+const expectedStream = "stream"
 
 func prepareTestAuth() {
 	expectedBeamtimeId = "beamtime_id"
@@ -109,7 +109,7 @@ func TestProcessRequestTestSuite(t *testing.T) {
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong token")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + suffixWithWrongToken)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + suffixWithWrongToken)
 
 	suite.Equal(http.StatusUnauthorized, w.Code, "wrong token")
 }
@@ -117,28 +117,28 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithNoToken() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("cannot extract")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + wrongTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + wrongTokenSuffix)
 
 	suite.Equal(http.StatusUnauthorized, w.Code, "no token")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusNoData, ""})
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 
 	suite.Equal(http.StatusConflict, w.Code, "wrong database name")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusServiceUnavailable, ""})
@@ -147,14 +147,14 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 	ExpectReconnect(suite.mock_db)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
 
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	time.Sleep(time.Second)
 	suite.Equal(http.StatusNotFound, w.Code, "data not found")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, Op: "next"}
 
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), errors.New(""))
@@ -162,7 +162,7 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
 
 	ExpectReconnect(suite.mock_db)
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	time.Sleep(time.Second)
 
 	suite.Equal(http.StatusNotFound, w.Code, "internal error")
@@ -170,28 +170,28 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsCounter() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
 
-	doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
+	doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	suite.Equal(1, statistics.GetCounter(), "ProcessRequest increases counter")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWrongGroupID() {
 	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong groupid")))
-	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + wrongGroupID + "/next" + correctTokenSuffix)
+	w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + wrongGroupID + "/next" + correctTokenSuffix)
 	suite.Equal(http.StatusBadRequest, w.Code, "wrong group id")
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsDataset() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, DatasetOp:true, Op: "next"}
+	expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedStream, GroupId:expectedGroupID, DatasetOp:true, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
 
-	doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true")
+	doRequest("/database/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true")
 }
diff --git a/broker/src/asapo_broker/server/server.go b/broker/src/asapo_broker/server/server.go
index 957b8006ccb4eae9a0bac7d54645cb4a5342e763..01bf25de7195193e46041c12e0d00b95f722bc42 100644
--- a/broker/src/asapo_broker/server/server.go
+++ b/broker/src/asapo_broker/server/server.go
@@ -10,7 +10,7 @@ import (
 )
 
 const  kDefaultresendInterval = 10
-const  kDefaultSubstreamCacheUpdateIntervalMs = 100
+const  kDefaultStreamCacheUpdateIntervalMs = 100
 
 var db database.Agent
 
@@ -24,7 +24,7 @@ type serverSettings struct {
 	LogLevel            string
 	discoveredDbAddress string
 	CheckResendInterval *int
-	SubstreamCacheUpdateIntervalMs *int
+	StreamCacheUpdateIntervalMs *int
 }
 
 func (s *serverSettings) GetResendInterval() int {
@@ -34,11 +34,11 @@ func (s *serverSettings) GetResendInterval() int {
 	return *s.CheckResendInterval
 }
 
-func (s *serverSettings) GetSubstreamCacheUpdateInterval() int {
-	if s.SubstreamCacheUpdateIntervalMs==nil {
-		return kDefaultSubstreamCacheUpdateIntervalMs
+func (s *serverSettings) GetStreamCacheUpdateInterval() int {
+	if s.StreamCacheUpdateIntervalMs == nil {
+		return kDefaultStreamCacheUpdateIntervalMs
 	}
-	return *s.SubstreamCacheUpdateIntervalMs
+	return *s.StreamCacheUpdateIntervalMs
 }
 
 func (s *serverSettings) GetDatabaseServer() string {
@@ -91,7 +91,7 @@ func InitDB(dbAgent database.Agent) (err error) {
 		log.Debug("Got mongodb server: " + settings.discoveredDbAddress)
 	}
 
-	db.SetSettings(database.DBSettings{ReadFromInprocessPeriod: settings.GetResendInterval(),UpdateSubstreamCachePeriodMs: settings.GetSubstreamCacheUpdateInterval()})
+	db.SetSettings(database.DBSettings{ReadFromInprocessPeriod: settings.GetResendInterval(), UpdateStreamCachePeriodMs: settings.GetStreamCacheUpdateInterval()})
 
 	return db.Connect(settings.GetDatabaseServer())
 }
diff --git a/common/cpp/include/asapo/common/data_structs.h b/common/cpp/include/asapo/common/data_structs.h
index 738b3d8b1aa11e8104d4d4158ff1f431365233bb..598ac41cc494dc930294fa8e0eef03fac93063df 100644
--- a/common/cpp/include/asapo/common/data_structs.h
+++ b/common/cpp/include/asapo/common/data_structs.h
@@ -109,7 +109,7 @@ enum IngestModeFlags : uint64_t {
 
 const uint64_t kDefaultIngestMode = kTransferData | kStoreInFilesystem | kStoreInDatabase;
 
-const std::string kDefaultSubstream = "default";
+const std::string kDefaultStream = "default";
 
 
 }
diff --git a/common/cpp/include/asapo/common/networking.h b/common/cpp/include/asapo/common/networking.h
index c4b4eecedcc6c466ff7173fcd290f5260508e33d..4420df93250ea9a5302acad31a8d5daab74e7709 100644
--- a/common/cpp/include/asapo/common/networking.h
+++ b/common/cpp/include/asapo/common/networking.h
@@ -55,16 +55,16 @@ struct GenericRequestHeader {
         op_code = header.op_code, data_id = header.data_id, data_size = header.data_size, meta_size = header.meta_size,
         memcpy(custom_data, header.custom_data, kNCustomParams * sizeof(uint64_t)),
         memcpy(message, header.message, kMaxMessageSize);
-        strncpy(substream, header.substream, kMaxMessageSize);
+        strncpy(stream, header.stream, kMaxMessageSize);
     }
 
     /* Keep in mind that the message here is just strncpy'ed, you can change the message later */
     GenericRequestHeader(Opcode i_op_code = kOpcodeUnknownOp, uint64_t i_data_id = 0,
                          uint64_t i_data_size = 0, uint64_t i_meta_size = 0, const std::string& i_message = "",
-                         const std::string& i_substream = ""):
+                         const std::string& i_stream = ""):
         op_code{i_op_code}, data_id{i_data_id}, data_size{i_data_size}, meta_size{i_meta_size} {
         strncpy(message, i_message.c_str(), kMaxMessageSize);
-        strncpy(substream, i_substream.c_str(), kMaxMessageSize);
+        strncpy(stream, i_stream.c_str(), kMaxMessageSize);
     }
 
     Opcode      op_code;
@@ -73,11 +73,11 @@ struct GenericRequestHeader {
     uint64_t    meta_size;
     CustomRequestData    custom_data;
     char        message[kMaxMessageSize]; /* Can also be a binary message (e.g. MemoryRegionDetails) */
+    char        stream[kMaxMessageSize]; /* Must be a string (strncpy is used) */
+    char        stream[kMaxMessageSize]; /* Must be a string (strcpy is used) */
     std::string Json() {
         std::string s = "{\"id\":" + std::to_string(data_id) + ","
                         "\"buffer\":\"" + std::string(message) + "\"" + ","
-                        "\"substream\":\"" + std::string(substream) + "\""
+                        "\"stream\":\"" + std::string(stream) + "\""
                         + "}";
         return s;
     };
diff --git a/consumer/api/cpp/include/asapo/consumer/consumer_error.h b/consumer/api/cpp/include/asapo/consumer/consumer_error.h
index cfed6107f38c61188870e2bd83402be0f965ba1b..a7ef5867e50af8feba726ab748cf6f90e620e80d 100644
--- a/consumer/api/cpp/include/asapo/consumer/consumer_error.h
+++ b/consumer/api/cpp/include/asapo/consumer/consumer_error.h
@@ -30,7 +30,7 @@ class ConsumerErrorData : public CustomErrorData {
   public:
     uint64_t id;
     uint64_t id_max;
-    std::string next_substream;
+    std::string next_stream;
 };
 
 
diff --git a/consumer/api/cpp/include/asapo/consumer/data_broker.h b/consumer/api/cpp/include/asapo/consumer/data_broker.h
index d7392829b3d0e654db3e085411e4e52c2c3ee38c..7e21496094f541ccee2d3a377dbbecbd59bd87b3 100644
--- a/consumer/api/cpp/include/asapo/consumer/data_broker.h
+++ b/consumer/api/cpp/include/asapo/consumer/data_broker.h
@@ -20,43 +20,43 @@ class DataBroker {
      \return nullptr if the command was successful, otherwise error.
     */
     virtual Error ResetLastReadMarker(std::string group_id) = 0;
-    virtual Error ResetLastReadMarker(std::string group_id, std::string substream) = 0;
+    virtual Error ResetLastReadMarker(std::string group_id, std::string stream) = 0;
 
     virtual Error SetLastReadMarker(uint64_t value, std::string group_id) = 0;
-    virtual Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) = 0;
+    virtual Error SetLastReadMarker(uint64_t value, std::string group_id, std::string stream) = 0;
 
-    //! Acknowledge data tuple for specific group id and substream.
+    //! Acknowledge data tuple for specific group id and stream.
     /*!
         \param group_id - group id to use.
         \param id - data tuple id
-        \param substream (optional) - substream
+        \param stream (optional) - stream
        \return nullptr if the command was successful, otherwise error.
     */
-    virtual Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) = 0;
+    virtual Error Acknowledge(std::string group_id, uint64_t id, std::string stream = kDefaultStream) = 0;
 
-    //! Negative acknowledge data tuple for specific group id and substream.
+    //! Negative acknowledge data tuple for specific group id and stream.
     /*!
         \param group_id - group id to use.
         \param id - data tuple id
         \param delay_sec - data tuple will be redelivered after delay, 0 to redeliver immediately
-        \param substream (optional) - substream
+        \param stream (optional) - stream
        \return nullptr if the command was successful, otherwise error.
     */
     virtual Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
-                                      std::string substream = kDefaultSubstream) = 0;
+                                      std::string stream = kDefaultStream) = 0;
 
 
-    //! Get unacknowledged tuple for specific group id and substream.
+    //! Get unacknowledged tuple for specific group id and stream.
     /*!
         \param group_id - group id to use.
-        \param substream (optional) - substream
+        \param stream (optional) - stream
        \param from_id - return tuples with ids greater than or equal to from_id (use 0 to disable the limit)
        \param to_id - return tuples with ids less than or equal to to_id (use 0 to disable the limit)
-        \param in (optional) - substream
+        \param in (optional) - stream
        \param err - set to nullptr if the operation succeeded, error otherwise.
         \return vector of ids, might be empty
     */
-    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, std::string substream, uint64_t from_id, uint64_t to_id,
+    virtual IdList GetUnacknowledgedTupleIds(std::string group_id, std::string stream, uint64_t from_id, uint64_t to_id,
                                              Error* error) = 0;
     virtual IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) = 0;
 
@@ -74,8 +74,8 @@ class DataBroker {
      */
     virtual NetworkConnectionType CurrentConnectionType() const = 0;
 
-    //! Get list of substreams, set from to "" to get all substreams
-    virtual StreamInfos GetSubstreamList(std::string from, Error* err) = 0;
+    //! Get list of streams; set from to "" to get all streams
+    virtual StreamInfos GetStreamList(std::string from, Error* err) = 0;
 
     //! Get current number of datasets
     /*!
@@ -83,7 +83,7 @@ class DataBroker {
       \return number of datasets.
     */
     virtual uint64_t GetCurrentSize(Error* err) = 0;
-    virtual uint64_t GetCurrentSize(std::string substream, Error* err) = 0;
+    virtual uint64_t GetCurrentSize(std::string stream, Error* err) = 0;
 
     //! Generate new GroupID.
     /*!
@@ -107,7 +107,7 @@ class DataBroker {
       \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
     */
     virtual Error GetNext(FileInfo* info, std::string group_id, FileData* data) = 0;
-    virtual Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) = 0;
+    virtual Error GetNext(FileInfo* info, std::string group_id, std::string stream, FileData* data) = 0;
 
     //! Retrieves image using fileinfo.
     /*!
@@ -122,32 +122,32 @@ class DataBroker {
     /*!
      \param err - will be set to an error if data cannot be read, nullptr otherwise.
       \param group_id - group id to use.
-      \param substream - substream to use ("" for default).
+      \param stream - stream to use ("" for default).
       \param min_size - wait until dataset has min_size data tuples (0 for maximum size)
       \return DataSet - information about the dataset
 
     */
-    virtual DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) = 0;
+    virtual DataSet GetNextDataset(std::string group_id, std::string stream, uint64_t min_size, Error* err) = 0;
     virtual DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) = 0;
     //! Receive last available dataset which has min_size data tuples.
     /*!
      \param err - will be set to an error if data cannot be read, nullptr otherwise.
-      \param substream - substream to use ("" for default).
+      \param stream - stream to use ("" for default).
      \param min_size - number of data tuples in the dataset (0 for maximum size)
       \return DataSet - information about the dataset
     */
-    virtual DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) = 0;
+    virtual DataSet GetLastDataset(std::string stream, uint64_t min_size, Error* err) = 0;
     virtual DataSet GetLastDataset(uint64_t min_size, Error* err) = 0;
 
     //! Receive dataset by id.
     /*!
       \param id - dataset id
      \param err - will be set to an error if data cannot be read or the dataset size is less than min_size, nullptr otherwise.
-      \param substream - substream to use ("" for default).
+      \param stream - stream to use ("" for default).
       \param min_size - wait until dataset has min_size data tuples (0 for maximum size)
       \return DataSet - information about the dataset
     */
-    virtual DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) = 0;
+    virtual DataSet GetDatasetById(uint64_t id, std::string stream, uint64_t min_size, Error* err) = 0;
     virtual DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) = 0;
 
     //! Receive single image by id.
@@ -158,16 +158,16 @@ class DataBroker {
       \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
     */
     virtual Error GetById(uint64_t id, FileInfo* info, FileData* data) = 0;
-    virtual Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) = 0;
+    virtual Error GetById(uint64_t id, FileInfo* info, std::string stream, FileData* data) = 0;
 
     //! Receive id of last acknowledged data tuple
     /*!
       \param group_id - group id to use.
-      \param substream (optional) - substream
+      \param stream (optional) - stream
      \param err - will be set in case of error, nullptr otherwise.
       \return id of the last acknowledged image, 0 if error
     */
-    virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) = 0;
+    virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string stream, Error* error) = 0;
     virtual uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) = 0;
 
     //! Receive last available image.
@@ -177,7 +177,7 @@ class DataBroker {
       \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise.
     */
     virtual Error GetLast(FileInfo* info, FileData* data) = 0;
-    virtual Error GetLast(FileInfo* info, std::string substream, FileData* data) = 0;
+    virtual Error GetLast(FileInfo* info, std::string stream, FileData* data) = 0;
 
     //! Get all images matching the query.
     /*!
@@ -186,7 +186,7 @@ class DataBroker {
      \return vector of image metadata matching the specified query. Empty if nothing found or error
     */
     virtual FileInfos QueryImages(std::string query, Error* err) = 0;
-    virtual FileInfos QueryImages(std::string query, std::string substream, Error* err) = 0;
+    virtual FileInfos QueryImages(std::string query, std::string stream, Error* err) = 0;
 
     //! Configure resending nonacknowledged data
     /*!
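Taken together, the renamed interface lets a consumer drain one stream and hop to its successor: a finished stream raises kStreamFinished with next_stream set, while the last stream raises kEndOfStream with next_stream empty. A hedged sketch of that loop (broker setup omitted; only the two error cases relevant here are handled):

```cpp
#include "asapo/asapo_consumer.h"
#include <string>

// Sketch: read every data tuple from every chained stream, starting at "default".
void ConsumeAllStreams(asapo::DataBroker* broker, std::string group_id) {
    std::string stream = asapo::kDefaultStream;
    while (!stream.empty()) {
        asapo::FileInfo info;
        asapo::FileData data;
        auto err = broker->GetNext(&info, group_id, stream, &data);
        if (err == nullptr) {
            continue;  // process info/data, then fetch the next tuple
        }
        if (err == asapo::ConsumerErrorTemplates::kStreamFinished) {
            // next_stream names the follow-up stream (see ConsumerErrorData above)
            auto* details = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
            stream = details != nullptr ? details->next_stream : std::string{};
        } else {
            break;  // kEndOfStream, timeout, network errors, ...
        }
    }
}
```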
diff --git a/consumer/api/cpp/src/server_data_broker.cpp b/consumer/api/cpp/src/server_data_broker.cpp
index b5189be3796ba86631e50da95234707b090e4973..845ab53425f06feb1c89142d07b9ac3b4b7ac60a 100644
--- a/consumer/api/cpp/src/server_data_broker.cpp
+++ b/consumer/api/cpp/src/server_data_broker.cpp
@@ -23,7 +23,7 @@ Error GetNoDataResponseFromJson(const std::string &json_string, ConsumerErrorDat
     JsonStringParser parser(json_string);
     Error err;
     if ((err = parser.GetUInt64("id", &data->id)) || (err = parser.GetUInt64("id_max", &data->id_max))
-        || (err = parser.GetString("next_substream", &data->next_substream))) {
+        || (err = parser.GetString("next_stream", &data->next_stream))) {
         return err;
     }
     return nullptr;
@@ -63,7 +63,7 @@ Error ConsumerErrorFromNoDataResponse(const std::string &response) {
         }
         Error err;
         if (data.id >= data.id_max) {
-            err = data.next_substream.empty() ? ConsumerErrorTemplates::kEndOfStream.Generate() :
+            err = data.next_stream.empty() ? ConsumerErrorTemplates::kEndOfStream.Generate() :
                   ConsumerErrorTemplates::kStreamFinished.Generate();
         } else {
             err = ConsumerErrorTemplates::kNoData.Generate();
@@ -240,14 +240,14 @@ RequestInfo ServerDataBroker::PrepareRequestInfo(std::string api_url, bool datas
     return ri;
 }
 
-Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string group_id, std::string substream,
+Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string group_id, std::string stream,
                                             GetImageServerOperation op,
                                             bool dataset, uint64_t min_size) {
     interrupt_flag_= false;
     std::string request_suffix = OpToUriCmd(op);
     std::string request_group = OpToUriCmd(op);
     std::string request_api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source
-        + "/" + std::move(substream);
+        + "/" + std::move(stream);
     uint64_t elapsed_ms = 0;
     Error no_data_error;
     while (true) {
@@ -295,27 +295,27 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g
 }
 
 Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, FileData* data) {
-    return GetNext(info, std::move(group_id), kDefaultSubstream, data);
+    return GetNext(info, std::move(group_id), kDefaultStream, data);
 }
 
-Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) {
+Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, std::string stream, FileData* data) {
     return GetImageFromServer(GetImageServerOperation::GetNext,
                               0,
                               std::move(group_id),
-                              std::move(substream),
+                              std::move(stream),
                               info,
                               data);
 }
 
 Error ServerDataBroker::GetLast(FileInfo* info, FileData* data) {
-    return GetLast(info, kDefaultSubstream, data);
+    return GetLast(info, kDefaultStream, data);
 }
 
-Error ServerDataBroker::GetLast(FileInfo* info, std::string substream, FileData* data) {
+Error ServerDataBroker::GetLast(FileInfo* info, std::string stream, FileData* data) {
     return GetImageFromServer(GetImageServerOperation::GetLast,
                               0,
                               "0",
-                              std::move(substream),
+                              std::move(stream),
                               info,
                               data);
 }
@@ -329,7 +329,7 @@ std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) {
 }
 
 Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id,
-                                           std::string substream,
+                                           std::string stream,
                                            FileInfo* info,
                                            FileData* data) {
     if (info == nullptr) {
@@ -339,9 +339,9 @@ Error ServerDataBroker::GetImageFromServer(GetImageServerOperation op, uint64_t
     Error err;
     std::string response;
     if (op == GetImageServerOperation::GetID) {
-        err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream));
+        err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(stream));
     } else {
-        err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op);
+        err = GetRecordFromServer(&response, std::move(group_id), std::move(stream), op);
     }
     if (err != nullptr) {
         return err;
@@ -516,21 +516,21 @@ std::string ServerDataBroker::BrokerRequestWithTimeout(RequestInfo request, Erro
 }
 
 Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id) {
-    return SetLastReadMarker(value, std::move(group_id), kDefaultSubstream);
+    return SetLastReadMarker(value, std::move(group_id), kDefaultStream);
 }
 
 Error ServerDataBroker::ResetLastReadMarker(std::string group_id) {
-    return ResetLastReadMarker(std::move(group_id), kDefaultSubstream);
+    return ResetLastReadMarker(std::move(group_id), kDefaultStream);
 }
 
-Error ServerDataBroker::ResetLastReadMarker(std::string group_id, std::string substream) {
-    return SetLastReadMarker(0, group_id, substream);
+Error ServerDataBroker::ResetLastReadMarker(std::string group_id, std::string stream) {
+    return SetLastReadMarker(0, group_id, stream);
 }
 
-Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) {
+Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, std::string stream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/"
-        + std::move(substream) + "/" + std::move(group_id) + "/resetcounter";
+        + std::move(stream) + "/" + std::move(group_id) + "/resetcounter";
     ri.extra_params = "&value=" + std::to_string(value);
     ri.post = true;
 
@@ -539,10 +539,10 @@ Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id,
     return err;
 }
 
-uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) {
+uint64_t ServerDataBroker::GetCurrentSize(std::string stream, Error* err) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) + "/size";
+        +"/" + std::move(stream) + "/size";
     auto responce = BrokerRequestWithTimeout(ri, err);
     if (*err) {
         return 0;
@@ -557,26 +557,26 @@ uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) {
 }
 
 uint64_t ServerDataBroker::GetCurrentSize(Error* err) {
-    return GetCurrentSize(kDefaultSubstream, err);
+    return GetCurrentSize(kDefaultStream, err);
 }
 Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, FileData* data) {
     if (id == 0) {
         return ConsumerErrorTemplates::kWrongInput.Generate("id should be positive");
     }
 
-    return GetById(id, info, kDefaultSubstream, data);
+    return GetById(id, info, kDefaultStream, data);
 }
 
-Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) {
-    return GetImageFromServer(GetImageServerOperation::GetID, id, "0", substream, info, data);
+Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, std::string stream, FileData* data) {
+    return GetImageFromServer(GetImageServerOperation::GetID, id, "0", stream, info, data);
 }
 
 Error ServerDataBroker::GetRecordFromServerById(uint64_t id, std::string* response, std::string group_id,
-                                                std::string substream,
+                                                std::string stream,
                                                 bool dataset, uint64_t min_size) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) +
+        +"/" + std::move(stream) +
         "/" + std::move(
         group_id) + "/" + std::to_string(id);
     if (dataset) {
@@ -606,10 +606,10 @@ DataSet DecodeDatasetFromResponse(std::string response, Error* err) {
     }
 }
 
-FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream, Error* err) {
+FileInfos ServerDataBroker::QueryImages(std::string query, std::string stream, Error* err) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        "/" + std::move(substream) + "/0/queryimages";
+        "/" + std::move(stream) + "/0/queryimages";
     ri.post = true;
     ri.body = std::move(query);
 
@@ -623,36 +623,36 @@ FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream
 }
 
 FileInfos ServerDataBroker::QueryImages(std::string query, Error* err) {
-    return QueryImages(std::move(query), kDefaultSubstream, err);
+    return QueryImages(std::move(query), kDefaultStream, err);
 }
 
 DataSet ServerDataBroker::GetNextDataset(std::string group_id, uint64_t min_size, Error* err) {
-    return GetNextDataset(std::move(group_id), kDefaultSubstream, min_size, err);
+    return GetNextDataset(std::move(group_id), kDefaultStream, min_size, err);
 }
 
-DataSet ServerDataBroker::GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetNext, 0, std::move(group_id), std::move(substream),min_size, err);
+DataSet ServerDataBroker::GetNextDataset(std::string group_id, std::string stream, uint64_t min_size, Error* err) {
+    return GetDatasetFromServer(GetImageServerOperation::GetNext, 0, std::move(group_id), std::move(stream),min_size, err);
 }
 
-DataSet ServerDataBroker::GetLastDataset(std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetLast, 0, "0", std::move(substream),min_size, err);
+DataSet ServerDataBroker::GetLastDataset(std::string stream, uint64_t min_size, Error* err) {
+    return GetDatasetFromServer(GetImageServerOperation::GetLast, 0, "0", std::move(stream),min_size, err);
 }
 
 DataSet ServerDataBroker::GetLastDataset(uint64_t min_size, Error* err) {
-    return GetLastDataset(kDefaultSubstream, min_size, err);
+    return GetLastDataset(kDefaultStream, min_size, err);
 }
 
 DataSet ServerDataBroker::GetDatasetFromServer(GetImageServerOperation op,
                                                uint64_t id,
-                                               std::string group_id, std::string substream,
+                                               std::string group_id, std::string stream,
                                                uint64_t min_size,
                                                Error* err) {
     FileInfos infos;
     std::string response;
     if (op == GetImageServerOperation::GetID) {
-        *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream), true, min_size);
+        *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(stream), true, min_size);
     } else {
-        *err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op, true, min_size);
+        *err = GetRecordFromServer(&response, std::move(group_id), std::move(stream), op, true, min_size);
     }
     if (*err != nullptr && *err!=ConsumerErrorTemplates::kPartialData) {
         return {0, 0,FileInfos{}};
@@ -661,38 +661,38 @@ DataSet ServerDataBroker::GetDatasetFromServer(GetImageServerOperation op,
 }
 
 DataSet ServerDataBroker::GetDatasetById(uint64_t id, uint64_t min_size, Error* err) {
-    return GetDatasetById(id, kDefaultSubstream, min_size, err);
+    return GetDatasetById(id, kDefaultStream, min_size, err);
 }
 
-DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) {
-    return GetDatasetFromServer(GetImageServerOperation::GetID, id, "0", std::move(substream), min_size, err);
+DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string stream, uint64_t min_size, Error* err) {
+    return GetDatasetFromServer(GetImageServerOperation::GetID, id, "0", std::move(stream), min_size, err);
 }
 
-StreamInfos ParseSubstreamsFromResponse(std::string response, Error* err) {
+StreamInfos ParseStreamsFromResponse(std::string response, Error* err) {
     auto parser = JsonStringParser(std::move(response));
-    std::vector<std::string> substreams_endcoded;
-    StreamInfos substreams;
+    std::vector<std::string> streams_encoded;
+    StreamInfos streams;
     Error parse_err;
-    *err = parser.GetArrayRawStrings("substreams", &substreams_endcoded);
+    *err = parser.GetArrayRawStrings("streams", &streams_encoded);
     if (*err) {
         return StreamInfos{};
     }
-    for (auto substream_encoded : substreams_endcoded) {
+    for (auto stream_encoded : streams_encoded) {
         StreamInfo si;
-        auto ok = si.SetFromJson(substream_encoded, false);
+        auto ok = si.SetFromJson(stream_encoded, false);
         if (!ok) {
-            *err = TextError("cannot parse " + substream_encoded);
+            *err = TextError("cannot parse " + stream_encoded);
             return StreamInfos{};
         }
-        substreams.emplace_back(si);
+        streams.emplace_back(si);
     }
-    return substreams;
+    return streams;
 }
 
-StreamInfos ServerDataBroker::GetSubstreamList(std::string from, Error* err) {
+StreamInfos ServerDataBroker::GetStreamList(std::string from, Error* err) {
 
     RequestInfo ri;
-    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/0/substreams";
+    ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source + "/0/streams";
     ri.post = false;
     if (!from.empty()) {
         ri.extra_params = "&from=" + from;
@@ -703,7 +703,7 @@ StreamInfos ServerDataBroker::GetSubstreamList(std::string from, Error* err) {
         return StreamInfos{};
     }
 
-    return ParseSubstreamsFromResponse(std::move(response), err);
+    return ParseStreamsFromResponse(std::move(response), err);
 }
 
 Error ServerDataBroker::UpdateFolderTokenIfNeeded(bool ignore_existing) {
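For orientation, the consumer call now hits the `/0/streams` endpoint and decodes a document of the shape `{"streams":[...]}` (see the unit-test fixtures below). A small usage sketch; `Explain()` on the error interface and the exact StreamInfo JSON are assumptions based on the tests:

```cpp
#include "asapo/asapo_consumer.h"
#include <iostream>

// Sketch: list streams sorted by the timestamp of their earliest data tuple;
// pass "" as from to start at the beginning of the list.
void PrintStreams(asapo::DataBroker* broker) {
    asapo::Error err;
    auto streams = broker->GetStreamList("", &err);
    if (err != nullptr) {
        std::cout << "error: " << err->Explain() << std::endl;
        return;
    }
    for (const auto& si : streams) {
        std::cout << si.Json(false) << std::endl;  // e.g. {"name":"test","timestampCreated":1000000}
    }
}
```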
@@ -760,10 +760,10 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData*
     return err;
 }
 
-Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::string substream) {
+Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::string stream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
     ri.body = "{\"Op\":\"ackimage\"}";
@@ -774,13 +774,13 @@ Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::stri
 }
 
 IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
-                                                   std::string substream,
+                                                   std::string stream,
                                                    uint64_t from_id,
                                                    uint64_t to_id,
                                                    Error* error) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/nacks";
     ri.extra_params = "&from=" + std::to_string(from_id) + "&to=" + std::to_string(to_id);
 
@@ -802,13 +802,13 @@ IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id,
                                                    uint64_t from_id,
                                                    uint64_t to_id,
                                                    Error* error) {
-    return GetUnacknowledgedTupleIds(std::move(group_id), kDefaultSubstream, from_id, to_id, error);
+    return GetUnacknowledgedTupleIds(std::move(group_id), kDefaultStream, from_id, to_id, error);
 }
 
-uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) {
+uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std::string stream, Error* error) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/lastack";
 
     auto json_string = BrokerRequestWithTimeout(ri, error);
@@ -829,7 +829,7 @@ uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std:
 }
 
 uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, Error* error) {
-    return GetLastAcknowledgedTulpeId(std::move(group_id), kDefaultSubstream, error);
+    return GetLastAcknowledgedTulpeId(std::move(group_id), kDefaultStream, error);
 }
 
 void ServerDataBroker::SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) {
@@ -841,10 +841,10 @@ void ServerDataBroker::SetResendNacs(bool resend, uint64_t delay_sec, uint64_t r
 Error ServerDataBroker::NegativeAcknowledge(std::string group_id,
                                             uint64_t id,
                                             uint64_t delay_sec,
-                                            std::string substream) {
+                                            std::string stream) {
     RequestInfo ri;
     ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.data_source +
-        +"/" + std::move(substream) +
+        +"/" + std::move(stream) +
         "/" + std::move(group_id) + "/" + std::to_string(id);
     ri.post = true;
     ri.body = R"({"Op":"negackimage","Params":{"DelaySec":)" + std::to_string(delay_sec) + "}}";
diff --git a/consumer/api/cpp/src/server_data_broker.h b/consumer/api/cpp/src/server_data_broker.h
index 2aa4d30e7424260bf4c2dcf1703a0a09e3dbe150..9c595b0d99f77c91e8e95707a481db7a68d1a024 100644
--- a/consumer/api/cpp/src/server_data_broker.h
+++ b/consumer/api/cpp/src/server_data_broker.h
@@ -56,40 +56,40 @@ class ServerDataBroker final : public asapo::DataBroker {
     explicit ServerDataBroker(std::string server_uri, std::string source_path, bool has_filesystem,
                               SourceCredentials source);
 
-    Error Acknowledge(std::string group_id, uint64_t id, std::string substream = kDefaultSubstream) override;
+    Error Acknowledge(std::string group_id, uint64_t id, std::string stream = kDefaultStream) override;
     Error NegativeAcknowledge(std::string group_id, uint64_t id, uint64_t delay_sec,
-                              std::string substream = kDefaultSubstream) override;
+                              std::string stream = kDefaultStream) override;
 
     IdList GetUnacknowledgedTupleIds(std::string group_id,
-                                     std::string substream,
+                                     std::string stream,
                                      uint64_t from_id,
                                      uint64_t to_id,
                                      Error* error) override;
     IdList GetUnacknowledgedTupleIds(std::string group_id, uint64_t from_id, uint64_t to_id, Error* error) override;
 
-    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) override;
+    uint64_t GetLastAcknowledgedTulpeId(std::string group_id, std::string stream, Error* error) override;
     uint64_t GetLastAcknowledgedTulpeId(std::string group_id, Error* error) override;
 
     Error ResetLastReadMarker(std::string group_id) override;
-    Error ResetLastReadMarker(std::string group_id, std::string substream) override;
+    Error ResetLastReadMarker(std::string group_id, std::string stream) override;
 
     Error SetLastReadMarker(uint64_t value, std::string group_id) override;
-    Error SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) override;
+    Error SetLastReadMarker(uint64_t value, std::string group_id, std::string stream) override;
 
     Error GetNext(FileInfo* info, std::string group_id, FileData* data) override;
-    Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) override;
+    Error GetNext(FileInfo* info, std::string group_id, std::string stream, FileData* data) override;
 
     Error GetLast(FileInfo* info, FileData* data) override;
-    Error GetLast(FileInfo* info, std::string substream, FileData* data) override;
+    Error GetLast(FileInfo* info, std::string stream, FileData* data) override;
 
     std::string GenerateNewGroupId(Error* err) override;
     std::string GetBeamtimeMeta(Error* err) override;
 
     uint64_t GetCurrentSize(Error* err) override;
-    uint64_t GetCurrentSize(std::string substream, Error* err) override;
+    uint64_t GetCurrentSize(std::string stream, Error* err) override;
 
     Error GetById(uint64_t id, FileInfo* info, FileData* data) override;
-    Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) override;
+    Error GetById(uint64_t id, FileInfo* info, std::string stream, FileData* data) override;
 
 
     void SetTimeout(uint64_t timeout_ms) override;
@@ -98,20 +98,20 @@ class ServerDataBroker final : public asapo::DataBroker {
     NetworkConnectionType CurrentConnectionType() const override;
 
     FileInfos QueryImages(std::string query, Error* err) override;
-    FileInfos QueryImages(std::string query, std::string substream, Error* err) override;
+    FileInfos QueryImages(std::string query, std::string stream, Error* err) override;
 
     DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) override;
-    DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetNextDataset(std::string group_id, std::string stream, uint64_t min_size, Error* err) override;
 
     DataSet GetLastDataset(uint64_t min_size, Error* err) override;
-    DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetLastDataset(std::string stream, uint64_t min_size, Error* err) override;
 
     DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) override;
-    DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) override;
+    DataSet GetDatasetById(uint64_t id, std::string stream, uint64_t min_size, Error* err) override;
 
     Error RetrieveData(FileInfo* info, FileData* data) override;
 
-    StreamInfos GetSubstreamList(std::string from, Error* err) override;
+    StreamInfos GetStreamList(std::string from, Error* err) override;
     void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) override;
 
     virtual void InterruptCurrentOperation() override;
@@ -127,18 +127,18 @@ class ServerDataBroker final : public asapo::DataBroker {
     static const std::string kBrokerServiceName;
     static const std::string kFileTransferServiceName;
     std::string RequestWithToken(std::string uri);
-    Error GetRecordFromServer(std::string* info, std::string group_id, std::string substream, GetImageServerOperation op,
+    Error GetRecordFromServer(std::string* info, std::string group_id, std::string stream, GetImageServerOperation op,
                               bool dataset = false, uint64_t min_size = 0);
-    Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string substream,
+    Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string stream,
                                   bool dataset = false, uint64_t min_size = 0);
     Error GetDataIfNeeded(FileInfo* info, FileData* data);
     Error DiscoverService(const std::string& service_name, std::string* uri_to_set);
     bool SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri);
     bool SwitchToGetByIdIfPartialData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri);
     Error ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri);
-    Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
+    Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string stream,
                              FileInfo* info, FileData* data);
-    DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream,
+    DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string stream,
                                  uint64_t min_size, Error* err);
     bool DataCanBeInBuffer(const FileInfo* info);
     Error TryGetDataFromBuffer(const FileInfo* info, FileData* data);
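One related knob declared above: SetResendNacs enables automatic redelivery of data tuples that were never acknowledged. A one-line sketch with illustrative values:

```cpp
#include "asapo/asapo_consumer.h"

// Sketch: redeliver non-acknowledged tuples after a 10 s delay, at most 3 times.
void EnableRedelivery(asapo::DataBroker* broker) {
    broker->SetResendNacs(true, 10, 3);
}
```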
diff --git a/consumer/api/cpp/unittests/test_server_broker.cpp b/consumer/api/cpp/unittests/test_server_broker.cpp
index b1a3ce36d82634cc840af17ce57f146e3a89c31a..d4bbf4bb209fa94e9394974ba6c05ec295c937ae 100644
--- a/consumer/api/cpp/unittests/test_server_broker.cpp
+++ b/consumer/api/cpp/unittests/test_server_broker.cpp
@@ -73,7 +73,7 @@ class ServerDataBrokerTests : public Test {
   std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename;
   std::string expected_group_id = "groupid";
   std::string expected_data_source = "source";
-  std::string expected_substream = "substream";
+  std::string expected_stream = "stream";
   std::string expected_metadata = "{\"meta\":1}";
   std::string expected_query_string = "bla";
   std::string expected_folder_token = "folder_token";
@@ -81,7 +81,7 @@ class ServerDataBrokerTests : public Test {
   uint64_t expected_image_size = 100;
   uint64_t expected_dataset_id = 1;
   static const uint64_t expected_buf_id = 123;
-  std::string expected_next_substream = "nextsubstream";
+  std::string expected_next_stream = "nextstream";
   std::string expected_fts_query_string = "{\"Folder\":\"" + expected_path + "\",\"FileName\":\"" + expected_filename +
       "\"}";
   std::string expected_cookie = "Authorization=Bearer " + expected_folder_token;
@@ -225,17 +225,17 @@ TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUri) {
     data_broker->GetNext(&info, expected_group_id, nullptr);
 }
 
-TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUriWithSubstream) {
+TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUriWithStream) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-                                            expected_substream + "/" + expected_group_id + "/next?token="
+                                            expected_stream + "/" + expected_group_id + "/next?token="
                                             + expected_token, _,
                                         _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
-    data_broker->GetNext(&info, expected_group_id, expected_substream, nullptr);
+    data_broker->GetNext(&info, expected_group_id, expected_stream, nullptr);
 }
 
 TEST_F(ServerDataBrokerTests, GetLastUsesCorrectUri) {
@@ -257,7 +257,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsEndOfStreamFromHttpClient) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}")));
 
     auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
 
@@ -266,7 +266,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsEndOfStreamFromHttpClient) {
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream));
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(1));
-    ASSERT_THAT(err_data->next_substream, Eq(""));
+    ASSERT_THAT(err_data->next_stream, Eq(""));
 }
 
 TEST_F(ServerDataBrokerTests, GetImageReturnsStreamFinishedFromHttpClient) {
@@ -275,7 +275,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsStreamFinishedFromHttpClient) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"" + expected_next_substream
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"" + expected_next_stream
                    + "\"}")));
 
     auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
@@ -285,7 +285,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsStreamFinishedFromHttpClient) {
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kStreamFinished));
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(1));
-    ASSERT_THAT(err_data->next_substream, Eq(expected_next_substream));
+    ASSERT_THAT(err_data->next_stream, Eq(expected_next_stream));
 }
 
 TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataFromHttpClient) {
@@ -294,14 +294,14 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataFromHttpClient) {
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_stream\":\"""\"}")));
 
     auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
     ASSERT_THAT(err_data->id, Eq(1));
     ASSERT_THAT(err_data->id_max, Eq(2));
-    ASSERT_THAT(err_data->next_substream, Eq(""));
+    ASSERT_THAT(err_data->next_stream, Eq(""));
 
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
 }
@@ -393,7 +393,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsEofStreamFromHttpClientUntilTimeout
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"""\"}")));
 
     data_broker->SetTimeout(300);
     auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
@@ -409,7 +409,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataAfterTimeoutEvenIfOtherErrorO
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
         Return("{\"op\":\"get_record_by_id\",\"id\":" + std::to_string(expected_dataset_id) +
-            ",\"id_max\":2,\"next_substream\":\"""\"}")));
+            ",\"id_max\":2,\"next_stream\":\"""\"}")));
 
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/default/0/"
                                             + std::to_string(expected_dataset_id) + "?token="
@@ -461,13 +461,13 @@ TEST_F(ServerDataBrokerTests, GetNextRetriesIfConnectionHttpClientErrorUntilTime
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kUnavailableService));
 }
 
-TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnFinshedSubstream) {
+TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnFinishedStream) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_stream\":\"next\"}")));
 
     data_broker->SetTimeout(300);
     auto err = data_broker->GetNext(&info, expected_group_id, nullptr);
@@ -620,18 +620,18 @@ TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUri) {
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUriWithSubstream) {
+TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUriWithStream) {
     MockGetBrokerUri();
     data_broker->SetTimeout(100);
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/" +
+        expected_stream + "/" +
         expected_group_id +
         "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
         Return("")));
-    auto err = data_broker->SetLastReadMarker(10, expected_group_id, expected_substream);
+    auto err = data_broker->SetLastReadMarker(10, expected_group_id, expected_stream);
     ASSERT_THAT(err, Eq(nullptr));
 }
 
@@ -651,18 +651,18 @@ TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUri) {
     ASSERT_THAT(size, Eq(10));
 }
 
-TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUriWithSubstream) {
+TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUriWithStream) {
     MockGetBrokerUri();
     data_broker->SetTimeout(100);
 
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/size?token="
+        expected_stream + "/size?token="
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("{\"size\":10}")));
     asapo::Error err;
-    auto size = data_broker->GetCurrentSize(expected_substream, &err);
+    auto size = data_broker->GetCurrentSize(expected_stream, &err);
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(size, Eq(10));
 }
@@ -745,7 +745,7 @@ TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStream) {
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"""\"}")));
 
     auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
 
@@ -761,7 +761,7 @@ TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStreamWhenIdTooLarge) {
                                             + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::Conflict),
         SetArgPointee<2>(nullptr),
-        Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_substream\":\"""\"}")));
+        Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_stream\":\"""\"}")));
 
     auto err = data_broker->GetById(expected_dataset_id, &info, nullptr);
 
@@ -895,12 +895,12 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) {
     ASSERT_THAT(images[1].name, Eq(rec2.name));
 }
 
-TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithSubstream) {
+TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithStream) {
 
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/0" +
+        expected_stream + "/0" +
         "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll(
         SetArgPointee<3>(HttpCode::OK),
         SetArgPointee<4>(nullptr),
@@ -908,7 +908,7 @@ TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithSubstream) {
 
     data_broker->SetTimeout(100);
     asapo::Error err;
-    auto images = data_broker->QueryImages(expected_query_string, expected_substream, &err);
+    auto images = data_broker->QueryImages(expected_query_string, expected_stream, &err);
 
     ASSERT_THAT(err, Eq(nullptr));
 
@@ -1056,18 +1056,18 @@ TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUri) {
     data_broker->GetLastDataset(2, &err);
 }
 
-TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUriWithSubstream) {
+TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUriWithStream) {
     MockGetBrokerUri();
 
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-                                            expected_substream + "/0/last?token="
+                                            expected_stream + "/0/last?token="
                                             + expected_token + "&dataset=true&minsize=1", _,
                                         _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
         Return("")));
     asapo::Error err;
-    data_broker->GetLastDataset(expected_substream, 1, &err);
+    data_broker->GetLastDataset(expected_stream, 1, &err);
 }
 
 TEST_F(ServerDataBrokerTests, GetDatasetByIdUsesCorrectUri) {
@@ -1084,31 +1084,30 @@ TEST_F(ServerDataBrokerTests, GetDatasetByIdUsesCorrectUri) {
     data_broker->GetDatasetById(expected_dataset_id, 0, &err);
 }
 
-TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUri) {
+TEST_F(ServerDataBrokerTests, GetStreamListUsesCorrectUri) {
     MockGetBrokerUri();
-    std::string return_substreams =
-        R"({"substreams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})";
+    std::string return_streams =
+        R"({"streams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})";
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/substreams"
+                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/streams"
                           + "?token=" + expected_token + "&from=stream_from", _,
                       _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
-        Return(return_substreams)));
+        Return(return_streams)));
 
     asapo::Error err;
-    auto substreams = data_broker->GetSubstreamList("stream_from", &err);
+    auto streams = data_broker->GetStreamList("stream_from", &err);
     ASSERT_THAT(err, Eq(nullptr));
-    ASSERT_THAT(substreams.size(), Eq(2));
-    ASSERT_THAT(substreams.size(), 2);
-    ASSERT_THAT(substreams[0].Json(false), R"({"name":"test","timestampCreated":1000000})");
-    ASSERT_THAT(substreams[1].Json(false), R"({"name":"test1","timestampCreated":2000000})");
+    ASSERT_THAT(streams.size(), Eq(2));
+    ASSERT_THAT(streams[0].Json(false), R"({"name":"test","timestampCreated":1000000})");
+    ASSERT_THAT(streams[1].Json(false), R"({"name":"test1","timestampCreated":2000000})");
 }
 
-TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUriWithoutFrom) {
+TEST_F(ServerDataBrokerTests, GetStreamListUsesCorrectUriWithoutFrom) {
     MockGetBrokerUri();
     EXPECT_CALL(mock_http_client,
-                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/substreams"
+                Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/0/streams"
                           + "?token=" + expected_token, _,
                       _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
@@ -1116,7 +1116,7 @@ TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUriWithoutFrom) {
         Return("")));;
 
     asapo::Error err;
-    auto substreams = data_broker->GetSubstreamList("", &err);
+    auto streams = data_broker->GetStreamList("", &err);
 }
 
 void ServerDataBrokerTests::MockBeforeFTS(FileData* data) {
@@ -1251,7 +1251,7 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
     auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/" +
+        expected_stream + "/" +
         expected_group_id
                                              + "/" + std::to_string(expected_dataset_id) + "?token="
                                              + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll(
@@ -1259,12 +1259,12 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) {
         SetArgPointee<4>(nullptr),
         Return("")));
 
-    auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id, expected_substream);
+    auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) {
+TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultStream) {
     MockGetBrokerUri();
     auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}";
     EXPECT_CALL(mock_http_client,
@@ -1284,7 +1284,7 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) {
 void ServerDataBrokerTests::ExpectIdList(bool error) {
     MockGetBrokerUri();
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/" +
+        expected_stream + "/" +
         expected_group_id + "/nacks?token=" + expected_token + "&from=1&to=0", _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
@@ -1294,7 +1294,7 @@ void ServerDataBrokerTests::ExpectIdList(bool error) {
 TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) {
     ExpectIdList(false);
     asapo::Error err;
-    auto list = data_broker->GetUnacknowledgedTupleIds(expected_group_id, expected_substream, 1, 0, &err);
+    auto list = data_broker->GetUnacknowledgedTupleIds(expected_group_id, expected_stream, 1, 0, &err);
 
     ASSERT_THAT(list, ElementsAre(1, 2, 3));
     ASSERT_THAT(err, Eq(nullptr));
@@ -1302,7 +1302,7 @@ TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) {
 
 void ServerDataBrokerTests::ExpectLastAckId(bool empty_response) {
     EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/" +
+        expected_stream + "/" +
         expected_group_id + "/lastack?token=" + expected_token, _, _)).WillOnce(DoAll(
         SetArgPointee<1>(HttpCode::OK),
         SetArgPointee<2>(nullptr),
@@ -1314,7 +1314,7 @@ TEST_F(ServerDataBrokerTests, GetLastAcknowledgeUsesOk) {
     ExpectLastAckId(false);
 
     asapo::Error err;
-    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_substream, &err);
+    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_stream, &err);
     ASSERT_THAT(err, Eq(nullptr));
     ASSERT_THAT(ind, Eq(1));
 }
@@ -1324,7 +1324,7 @@ TEST_F(ServerDataBrokerTests, GetLastAcknowledgeReturnsNoData) {
     ExpectLastAckId(true);
 
     asapo::Error err;
-    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_substream, &err);
+    auto ind = data_broker->GetLastAcknowledgedTulpeId(expected_group_id, expected_stream, &err);
     ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData));
     ASSERT_THAT(ind, Eq(0));
 }
@@ -1355,7 +1355,7 @@ TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) {
     MockGetBrokerUri();
     auto expected_neg_acknowledge_command = R"({"Op":"negackimage","Params":{"DelaySec":10}})";
     EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_data_source + "/" +
-        expected_substream + "/" +
+        expected_stream + "/" +
         expected_group_id
                                              + "/" + std::to_string(expected_dataset_id) + "?token="
                                              + expected_token, _, expected_neg_acknowledge_command, _, _)).WillOnce(
@@ -1364,7 +1364,7 @@ TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) {
             SetArgPointee<4>(nullptr),
             Return("")));
 
-    auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10, expected_substream);
+    auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10, expected_stream);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
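For completeness, the same renamed URI pattern would apply to GetLast with an explicit stream; a hypothetical companion test in the style of the fixtures above (not part of this change, and the exact URI is an assumption modeled on GetNextUsesCorrectUriWithStream):

```cpp
// Hypothetical sketch: GetLast on a named stream uses group id "0" in the URI.
TEST_F(ServerDataBrokerTests, GetLastUsesCorrectUriWithStream) {
    MockGetBrokerUri();
    EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" +
                                            expected_data_source + "/" + expected_stream +
                                            "/0/last?token=" + expected_token, _, _)).WillOnce(DoAll(
        SetArgPointee<1>(HttpCode::OK),
        SetArgPointee<2>(nullptr),
        Return("")));
    data_broker->GetLast(&info, expected_stream, nullptr);
}
```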
diff --git a/consumer/api/python/asapo_consumer.pxd b/consumer/api/python/asapo_consumer.pxd
index 9cb5824c0e5d511feeb01c1af82a06ba5ba11e92..121a906eab2f894a490d01933cffd58b1e942176 100644
--- a/consumer/api/python/asapo_consumer.pxd
+++ b/consumer/api/python/asapo_consumer.pxd
@@ -62,24 +62,24 @@ cdef extern from "asapo/asapo_consumer.h" namespace "asapo" nogil:
         void SetTimeout(uint64_t timeout_ms)
         void ForceNoRdma()
         NetworkConnectionType CurrentConnectionType()
-        Error GetNext(FileInfo* info, string group_id,string substream, FileData* data)
-        Error GetLast(FileInfo* info, string substream, FileData* data)
-        Error GetById(uint64_t id, FileInfo* info, string substream, FileData* data)
-        uint64_t GetCurrentSize(string substream, Error* err)
-        Error SetLastReadMarker(uint64_t value, string group_id, string substream)
-        Error ResetLastReadMarker(string group_id, string substream)
-        Error Acknowledge(string group_id, uint64_t id, string substream)
-        Error NegativeAcknowledge(string group_id, uint64_t id, uint64_t delay_sec, string substream)
-        uint64_t GetLastAcknowledgedTulpeId(string group_id, string substream, Error* error)
-        IdList GetUnacknowledgedTupleIds(string group_id, string substream, uint64_t from_id, uint64_t to_id, Error* error)
+        Error GetNext(FileInfo* info, string group_id,string stream, FileData* data)
+        Error GetLast(FileInfo* info, string stream, FileData* data)
+        Error GetById(uint64_t id, FileInfo* info, string stream, FileData* data)
+        uint64_t GetCurrentSize(string stream, Error* err)
+        Error SetLastReadMarker(uint64_t value, string group_id, string stream)
+        Error ResetLastReadMarker(string group_id, string stream)
+        Error Acknowledge(string group_id, uint64_t id, string stream)
+        Error NegativeAcknowledge(string group_id, uint64_t id, uint64_t delay_sec, string stream)
+        uint64_t GetLastAcknowledgedTulpeId(string group_id, string stream, Error* error)
+        IdList GetUnacknowledgedTupleIds(string group_id, string stream, uint64_t from_id, uint64_t to_id, Error* error)
         string GenerateNewGroupId(Error* err)
         string GetBeamtimeMeta(Error* err)
-        FileInfos QueryImages(string query, string substream, Error* err)
-        DataSet GetNextDataset(string group_id, string substream, uint64_t min_size, Error* err)
-        DataSet GetLastDataset(string substream, uint64_t min_size, Error* err)
-        DataSet GetDatasetById(uint64_t id, string substream, uint64_t min_size, Error* err)
+        FileInfos QueryImages(string query, string stream, Error* err)
+        DataSet GetNextDataset(string group_id, string stream, uint64_t min_size, Error* err)
+        DataSet GetLastDataset(string stream, uint64_t min_size, Error* err)
+        DataSet GetDatasetById(uint64_t id, string stream, uint64_t min_size, Error* err)
         Error RetrieveData(FileInfo* info, FileData* data)
-        vector[StreamInfo] GetSubstreamList(string from_substream, Error* err)
+        vector[StreamInfo] GetStreamList(string from_stream, Error* err)
         void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts)
         void InterruptCurrentOperation()
 
@@ -102,4 +102,4 @@ cdef extern from "asapo/asapo_consumer.h" namespace "asapo":
   cdef cppclass ConsumerErrorData:
     uint64_t id
     uint64_t id_max
-    string next_substream
+    string next_stream
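
The `next_substream` field on `ConsumerErrorData` becomes `next_stream`, and it surfaces in Python as the attribute of the same name on `AsapoStreamFinishedError` (see the .pyx change below). A minimal consumer-side sketch of following a finished stream to its successor; the `create_server_broker` call and its argument order are assumptions for this revision, `generate_group_id` is assumed to wrap `GenerateNewGroupId`, and the broker is assumed to forward the producer's `asapo_no_next` sentinel unchanged:

    import asapo_consumer

    # Factory signature is an assumption for this revision; consult
    # asapo_consumer for the exact argument order.
    broker = asapo_consumer.create_server_broker(
        "localhost:8400", "/tmp/data", True, "beamtime_id", "source", "token", 5000)
    group_id = broker.generate_group_id()

    stream = "default"
    while True:
        try:
            data, meta = broker.get_next(group_id, stream=stream, meta_only=False)
        except asapo_consumer.AsapoStreamFinishedError as err:
            if err.next_stream == "asapo_no_next":  # producer's kNoNextStreamKeyword
                break
            stream = err.next_stream                # renamed from next_substream
        except asapo_consumer.AsapoEndOfStreamError:
            break
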
diff --git a/consumer/api/python/asapo_consumer.pyx.in b/consumer/api/python/asapo_consumer.pyx.in
index 69e32ee50b6477d87b2ce2f1f83bb25fea0369f4..9f251c07eb2d2ce8817598b05a3b64507a2c98db 100644
--- a/consumer/api/python/asapo_consumer.pyx.in
+++ b/consumer/api/python/asapo_consumer.pyx.in
@@ -47,10 +47,10 @@ class AsapoLocalIOError(AsapoConsumerError):
   pass
 
 class AsapoStreamFinishedError(AsapoConsumerError):
-  def __init__(self,message,id_max=None,next_substream=None):
+  def __init__(self,message,id_max=None,next_stream=None):
     AsapoConsumerError.__init__(self,message)
     self.id_max = id_max
-    self.next_substream = _str(next_substream)
+    self.next_stream = _str(next_stream)
 
 class AsapoEndOfStreamError(AsapoConsumerError):
   def __init__(self,message,id_max=None):
@@ -80,7 +80,7 @@ cdef throw_exception(Error& err, res = None):
     elif err == kStreamFinished:
             data=<ConsumerErrorData*>err.get().GetCustomData()
             if data != NULL:
-                raise AsapoStreamFinishedError(error_string,data.id_max,data.next_substream)
+                raise AsapoStreamFinishedError(error_string,data.id_max,data.next_stream)
             else:
                 raise AsapoStreamFinishedError(error_string)
     elif err == kNoData:
@@ -104,23 +104,23 @@ cdef throw_exception(Error& err, res = None):
 
 cdef class PyDataBroker:
     cdef unique_ptr[DataBroker] c_broker
-    def _op(self, op, group_id, substream, meta_only, uint64_t id):
+    def _op(self, op, group_id, stream, meta_only, uint64_t id):
         cdef FileInfo info
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef FileData data
         cdef FileData* p_data =  <FileData*>NULL if meta_only else &data
         cdef Error err
         cdef np.npy_intp dims[1]
         if op == "next":
             with nogil:
-                err =  self.c_broker.get().GetNext(&info, b_group_id,b_substream, p_data)
+                err =  self.c_broker.get().GetNext(&info, b_group_id,b_stream, p_data)
         elif op == "last":
             with nogil:
-                err =  self.c_broker.get().GetLast(&info, b_substream, p_data)
+                err =  self.c_broker.get().GetLast(&info, b_stream, p_data)
         elif op == "id":
             with nogil:
-                err =  self.c_broker.get().GetById(id, &info, b_substream, p_data)
+                err =  self.c_broker.get().GetById(id, &info, b_stream, p_data)
         if err:
             throw_exception(err)
         info_str = _str(info.Json())
@@ -132,12 +132,12 @@ cdef class PyDataBroker:
         arr =  np.PyArray_SimpleNewFromData(1, dims, np.NPY_BYTE, ptr)
         PyArray_ENABLEFLAGS(arr,np.NPY_OWNDATA)
         return arr,meta
-    def get_next(self, group_id, substream = "default", meta_only = True):
-        return self._op("next",group_id,substream,meta_only,0)
-    def get_last(self, substream = "default", meta_only = True):
-        return self._op("last","",substream,meta_only,0)
-    def get_by_id(self,uint64_t id,substream = "default",meta_only = True):
-        return self._op("id","",substream,meta_only,id)
+    def get_next(self, group_id, stream = "default", meta_only = True):
+        return self._op("next",group_id,stream,meta_only,0)
+    def get_last(self, stream = "default", meta_only = True):
+        return self._op("last","",stream,meta_only,0)
+    def get_by_id(self,uint64_t id,stream = "default",meta_only = True):
+        return self._op("id","",stream,meta_only,id)
     def retrieve_data(self,meta):
         json_str = json.dumps(meta)
         cdef FileInfo info
@@ -157,12 +157,12 @@ cdef class PyDataBroker:
         arr =  np.PyArray_SimpleNewFromData(1, dims, np.NPY_BYTE, ptr)
         PyArray_ENABLEFLAGS(arr,np.NPY_OWNDATA)
         return arr
-    def get_current_size(self, substream = "default"):
+    def get_current_size(self, stream = "default"):
         cdef Error err
         cdef uint64_t size
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         with nogil:
-            size =  self.c_broker.get().GetCurrentSize(b_substream,&err)
+            size =  self.c_broker.get().GetCurrentSize(b_stream,&err)
         err_str = _str(GetErrorString(&err))
         if err:
             throw_exception(err)
@@ -182,22 +182,22 @@ cdef class PyDataBroker:
         elif cased == <int>NetworkConnectionType_kFabric:
             result = "Fabric"
         return result.decode('utf-8')
-    def set_lastread_marker(self,value,group_id, substream = "default"):
+    def set_lastread_marker(self,value,group_id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         cdef uint64_t id = value
         with nogil:
-            err =  self.c_broker.get().SetLastReadMarker(id,b_group_id,b_substream)
+            err =  self.c_broker.get().SetLastReadMarker(id,b_group_id,b_stream)
         if err:
             throw_exception(err)
         return
-    def reset_lastread_marker(self,group_id, substream = "default"):
+    def reset_lastread_marker(self,group_id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err =  self.c_broker.get().ResetLastReadMarker(b_group_id,b_substream)
+            err =  self.c_broker.get().ResetLastReadMarker(b_group_id,b_stream)
         if err:
             throw_exception(err)
         return
@@ -209,56 +209,56 @@ cdef class PyDataBroker:
         if err:
             throw_exception(err)
         return _str(group_id)
-    def get_substream_list(self, from_substream = ""):
+    def get_stream_list(self, from_stream = ""):
         cdef Error err
-        cdef vector[StreamInfo] substreams
-        cdef string b_from_substream = _bytes(from_substream)
+        cdef vector[StreamInfo] streams
+        cdef string b_from_stream = _bytes(from_stream)
         with nogil:
-            substreams = self.c_broker.get().GetSubstreamList(b_from_substream,&err)
+            streams = self.c_broker.get().GetStreamList(b_from_stream,&err)
         if err:
             throw_exception(err)
         list = []
-        for substream in substreams:
-            list.append(json.loads(_str(substream.Json(False))))
+        for stream in streams:
+            list.append(json.loads(_str(stream.Json(False))))
         return list
-    def acknowledge(self, group_id, uint64_t id, substream = "default"):
+    def acknowledge(self, group_id, uint64_t id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err = self.c_broker.get().Acknowledge(b_group_id,id,b_substream)
+            err = self.c_broker.get().Acknowledge(b_group_id,id,b_stream)
         if err:
             throw_exception(err)
-    def neg_acknowledge(self, group_id, uint64_t id, uint64_t delay_sec, substream = "default"):
+    def neg_acknowledge(self, group_id, uint64_t id, uint64_t delay_sec, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         with nogil:
-            err = self.c_broker.get().NegativeAcknowledge(b_group_id,id,delay_sec,b_substream)
+            err = self.c_broker.get().NegativeAcknowledge(b_group_id,id,delay_sec,b_stream)
         if err:
             throw_exception(err)
     def set_resend_nacs(self,bool resend, uint64_t delay_sec, uint64_t resend_attempts):
         with nogil:
             self.c_broker.get().SetResendNacs(resend,delay_sec,resend_attempts)
 
-    def get_last_acknowledged_tuple_id(self, group_id, substream = "default"):
+    def get_last_acknowledged_tuple_id(self, group_id, stream = "default"):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         cdef uint64_t id
         with nogil:
-            id = self.c_broker.get().GetLastAcknowledgedTulpeId(b_group_id,b_substream,&err)
+            id = self.c_broker.get().GetLastAcknowledgedTulpeId(b_group_id,b_stream,&err)
         if err:
             throw_exception(err)
         return id
 
-    def get_unacknowledged_tuple_ids(self, group_id, substream = "default", uint64_t from_id = 0, uint64_t to_id = 0):
+    def get_unacknowledged_tuple_ids(self, group_id, stream = "default", uint64_t from_id = 0, uint64_t to_id = 0):
         cdef Error err
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef IdList ids
         with nogil:
-            ids = self.c_broker.get().GetUnacknowledgedTupleIds(b_group_id, b_substream, from_id, to_id, &err)
+            ids = self.c_broker.get().GetUnacknowledgedTupleIds(b_group_id, b_stream, from_id, to_id, &err)
         if err:
             throw_exception(err)
         list = []
@@ -266,34 +266,34 @@ cdef class PyDataBroker:
             list.append(id)
         return list
 
-    def query_images(self,query, substream = "default"):
+    def query_images(self,query, stream = "default"):
         cdef string b_query = _bytes(query)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef Error err
         cdef FileInfos file_infos
         with nogil:
-            file_infos = self.c_broker.get().QueryImages(b_query,b_substream,&err)
+            file_infos = self.c_broker.get().QueryImages(b_query,b_stream,&err)
         if err:
             throw_exception(err)
         json_list = []
         for fi in file_infos:
             json_list.append(json.loads(_str(fi.Json())))
         return json_list
-    def _op_dataset(self, op, group_id, substream, uint64_t min_size, uint64_t id):
+    def _op_dataset(self, op, group_id, stream, uint64_t min_size, uint64_t id):
         cdef string b_group_id = _bytes(group_id)
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         cdef FileInfos file_infos
         cdef DataSet dataset
         cdef Error err
         if op == "next":
             with nogil:
-                dataset = self.c_broker.get().GetNextDataset(b_group_id,b_substream, min_size, &err)
+                dataset = self.c_broker.get().GetNextDataset(b_group_id,b_stream, min_size, &err)
         elif op == "last":
             with nogil:
-                dataset = self.c_broker.get().GetLastDataset(b_substream, min_size, &err)
+                dataset = self.c_broker.get().GetLastDataset(b_stream, min_size, &err)
         elif op == "id":
             with nogil:
-                dataset = self.c_broker.get().GetDatasetById(id, b_substream, min_size, &err)
+                dataset = self.c_broker.get().GetDatasetById(id, b_stream, min_size, &err)
         json_list = []
         for fi in dataset.content:
             json_list.append(json.loads(_str(fi.Json())))
@@ -301,12 +301,12 @@ cdef class PyDataBroker:
         if err:
             throw_exception(err,res)
         return res
-    def get_next_dataset(self, group_id, substream = "default", min_size = 0):
-        return self._op_dataset("next",group_id,substream,min_size,0)
-    def get_last_dataset(self, substream = "default", min_size = 0):
-        return self._op_dataset("last","0",substream,min_size,0)
-    def get_dataset_by_id(self, uint64_t id, substream = "default", min_size = 0):
-        return self._op_dataset("id","0",substream,min_size,id)
+    def get_next_dataset(self, group_id, stream = "default", min_size = 0):
+        return self._op_dataset("next",group_id,stream,min_size,0)
+    def get_last_dataset(self, stream = "default", min_size = 0):
+        return self._op_dataset("last","0",stream,min_size,0)
+    def get_dataset_by_id(self, uint64_t id, stream = "default", min_size = 0):
+        return self._op_dataset("id","0",stream,min_size,id)
     def get_beamtime_meta(self):
         cdef Error err
         cdef string meta_str
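
All former `substream` keyword arguments on `PyDataBroker` are now `stream`, and `get_substream_list` becomes `get_stream_list`, returning a list of StreamInfo dictionaries. A short sketch of the renamed surface; the `_id` metadata key and the StreamInfo dictionary layout are assumptions:

    # 'broker' and 'group_id' as in the sketch above.
    for info in broker.get_stream_list(from_stream=""):
        print(info)                                 # one StreamInfo dict per stream

    data, meta = broker.get_next(group_id, stream="scan1", meta_only=True)
    broker.acknowledge(group_id, meta["_id"], stream="scan1")  # '_id' key assumed
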
diff --git a/producer/api/cpp/include/asapo/producer/producer.h b/producer/api/cpp/include/asapo/producer/producer.h
index 7a9f0e7f88546b7f77bbdd1deb3353afdf87548a..7c22bfd34d8808638be54efb8d7e44da46fcc806 100644
--- a/producer/api/cpp/include/asapo/producer/producer.h
+++ b/producer/api/cpp/include/asapo/producer/producer.h
@@ -24,21 +24,21 @@ class Producer {
 
     virtual ~Producer() = default;
 
-    //! Get substream information from receiver
+    //! Get stream information from receiver
     /*!
-      \param substream (optional) - substream
+      \param stream (optional) - stream name
       \param timeout_sec - operation timeout in seconds
-      \return StreamInfo - a structure with substream information
+      \return StreamInfo - a structure with stream information
     */
-    virtual StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const = 0;
+    virtual StreamInfo GetStreamInfo(std::string stream, uint64_t timeout_sec, Error* err) const = 0;
     virtual StreamInfo GetStreamInfo(uint64_t timeout_sec, Error* err) const = 0;
 
-  //! Get substream that has the newest ingested data
+  //! Get stream that has the newest ingested data
   /*!
     \param timeout_ms - operation timeout in seconds
-    \return StreamInfo - a structure with substream information
+    \return StreamInfo - a structure with stream information
   */
-    virtual StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err) const = 0;
+    virtual StreamInfo GetLastStream(uint64_t timeout_sec, Error* err) const = 0;
 
 
     //! Sends data to the receiver
@@ -62,20 +62,20 @@ class Producer {
       \param data - A pointer to the data to send
       \return Error - Will be nullptr on success
     */
-    virtual Error SendData(const EventHeader& event_header, std::string substream, FileData data, uint64_t ingest_mode,
+    virtual Error SendData(const EventHeader& event_header, std::string stream, FileData data, uint64_t ingest_mode,
                            RequestCallback callback) = 0;
 
 
     //! Sends data to the receiver - same as SendData - memory should not be freed until send is finished
     //! used e.g. for Python bindings
-    virtual Error SendData__(const EventHeader& event_header, std::string substream, void* data, uint64_t ingest_mode,
+    virtual Error SendData__(const EventHeader& event_header, std::string stream, void* data, uint64_t ingest_mode,
                              RequestCallback callback) = 0;
 
     //! Stop processing threads
     //! used e.g. for Python bindings
     virtual void StopThreads__() = 0;
 
-    //! Sends files to the default substream
+    //! Sends files to the default stream
     /*!
       \param event_header - A stucture with the meta information (file name, size is ignored).
       \param full_path - A full path of the file to send
@@ -84,24 +84,24 @@ class Producer {
     virtual Error SendFile(const EventHeader& event_header, std::string full_path, uint64_t ingest_mode,
                            RequestCallback callback) = 0;
 
-    //! Sends files to the substream
+    //! Sends files to the given stream
     /*!
       \param event_header - A stucture with the meta information (file name, size is ignored).
       \param full_path - A full path of the file to send
       \return Error - Will be nullptr on success
     */
-    virtual Error SendFile(const EventHeader& event_header, std::string substream, std::string full_path,
+    virtual Error SendFile(const EventHeader& event_header, std::string stream, std::string full_path,
                            uint64_t ingest_mode,
                            RequestCallback callback) = 0;
 
-    //! Marks substream finished
+    //! Marks a stream as finished
     /*!
-      \param substream - Name of the substream to makr finished
-      \param last_id - ID of the last image in substream
-      \param next_substream - Name of the next substream (empty if not set)
+      \param stream - Name of the stream to mark as finished
+      \param last_id - ID of the last image in stream
+      \param next_stream - Name of the next stream (empty if not set)
       \return Error - Will be nullptr on success
     */
-    virtual Error SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+    virtual Error SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                             RequestCallback callback) = 0;
 
 
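
With these header renames in place, `GetStreamInfo` keeps its name but now takes a `stream` argument, while `GetLastSubstream` becomes `GetLastStream`. The Python equivalents, as a sketch: `producer` is assumed to be created as in the producer sketch at the end of this changeset, and `last_stream` is assumed to be the Python-side wrapper around `GetLastStream`:

    info = producer.stream_info("scan1", timeout_sec=5)   # wraps GetStreamInfo
    last = producer.last_stream(timeout_sec=5)            # wraps GetLastStream
    print(info, last)
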
diff --git a/producer/api/cpp/src/producer_impl.cpp b/producer/api/cpp/src/producer_impl.cpp
index 2fa855984f37a6ef627f3f64a0005642f2331dd1..bd826db67832a6e9c9c6213ee8d23699dabdcd80 100644
--- a/producer/api/cpp/src/producer_impl.cpp
+++ b/producer/api/cpp/src/producer_impl.cpp
@@ -15,8 +15,8 @@
 namespace  asapo {
 
 const size_t ProducerImpl::kDiscoveryServiceUpdateFrequencyMs = 10000; // 10s
-const std::string ProducerImpl::kFinishSubStreamKeyword = "asapo_finish_substream";
-const std::string ProducerImpl::kNoNextSubStreamKeyword = "asapo_no_next";
+const std::string ProducerImpl::kFinishStreamKeyword = "asapo_finish_stream";
+const std::string ProducerImpl::kNoNextStreamKeyword = "asapo_no_next";
 
 
 ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_sec,
@@ -34,10 +34,10 @@ ProducerImpl::ProducerImpl(std::string endpoint, uint8_t n_processing_threads, u
     request_pool__.reset(new RequestPool{n_processing_threads, request_handler_factory_.get(), log__});
 }
 
-GenericRequestHeader ProducerImpl::GenerateNextSendRequest(const EventHeader& event_header, std::string substream,
+GenericRequestHeader ProducerImpl::GenerateNextSendRequest(const EventHeader& event_header, std::string stream,
         uint64_t ingest_mode) {
     GenericRequestHeader request{kOpcodeTransferData, event_header.file_id, event_header.file_size,
-                                 event_header.user_metadata.size(), event_header.file_name, substream};
+                                 event_header.user_metadata.size(), event_header.file_name, stream};
     if (event_header.id_in_subset != 0) {
         request.op_code = kOpcodeTransferSubsetData;
         request.custom_data[kPosDataSetId] = event_header.id_in_subset;
@@ -92,7 +92,7 @@ Error CheckProducerRequest(const EventHeader& event_header, uint64_t ingest_mode
 }
 
 Error ProducerImpl::Send(const EventHeader& event_header,
-                         std::string substream,
+                         std::string stream,
                          FileData data,
                          std::string full_path,
                          uint64_t ingest_mode,
@@ -107,7 +107,7 @@ Error ProducerImpl::Send(const EventHeader& event_header,
         return err;
     }
 
-    auto request_header = GenerateNextSendRequest(event_header, std::move(substream), ingest_mode);
+    auto request_header = GenerateNextSendRequest(event_header, std::move(stream), ingest_mode);
 
     return request_pool__->AddRequest(std::unique_ptr<ProducerRequest> {new ProducerRequest{source_cred_string_, std::move(request_header),
                 std::move(data), std::move(event_header.user_metadata), std::move(full_path), callback, manage_data_memory, timeout_sec_ * 1000}
@@ -133,37 +133,37 @@ Error CheckData(uint64_t ingest_mode, const EventHeader& event_header, const Fil
 
 Error ProducerImpl::SendData(const EventHeader& event_header, FileData data,
                              uint64_t ingest_mode, RequestCallback callback) {
-    return SendData(event_header, kDefaultSubstream, std::move(data), ingest_mode, callback);
+    return SendData(event_header, kDefaultStream, std::move(data), ingest_mode, callback);
 }
 
 Error ProducerImpl::SendData(const EventHeader& event_header,
-                             std::string substream,
+                             std::string stream,
                              FileData data,
                              uint64_t ingest_mode,
                              RequestCallback callback) {
     if (auto err = CheckData(ingest_mode, event_header, &data)) {
         return err;
     }
-    return Send(event_header, std::move(substream), std::move(data), "", ingest_mode, callback, true);
+    return Send(event_header, std::move(stream), std::move(data), "", ingest_mode, callback, true);
 
 }
 
-Error ProducerImpl::SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+Error ProducerImpl::SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                               RequestCallback callback) {
     EventHeader event_header;
-    event_header.file_name = kFinishSubStreamKeyword;
+    event_header.file_name = kFinishStreamKeyword;
     event_header.file_size = 0;
     event_header.file_id = last_id + 1;
-    if (next_substream.empty()) {
-        next_substream = kNoNextSubStreamKeyword;
+    if (next_stream.empty()) {
+        next_stream = kNoNextStreamKeyword;
     }
-    event_header.user_metadata =  std::string("{\"next_substream\":") + "\"" + next_substream + "\"}";
-    return Send(event_header, std::move(substream), nullptr, "", IngestModeFlags::kTransferMetaDataOnly, callback, true);
+    event_header.user_metadata =  std::string("{\"next_stream\":") + "\"" + next_stream + "\"}";
+    return Send(event_header, std::move(stream), nullptr, "", IngestModeFlags::kTransferMetaDataOnly, callback, true);
 }
 
 Error ProducerImpl::SendFile(const EventHeader& event_header, std::string full_path, uint64_t ingest_mode,
                              RequestCallback callback) {
-    return SendFile(event_header, kDefaultSubstream, std::move(full_path), ingest_mode, callback);
+    return SendFile(event_header, kDefaultStream, std::move(full_path), ingest_mode, callback);
 }
 
 
@@ -226,7 +226,7 @@ Error ProducerImpl::SendMetaData(const std::string& metadata, RequestCallback ca
 }
 
 Error ProducerImpl::SendData__(const EventHeader& event_header,
-                               std::string substream,
+                               std::string stream,
                                void* data,
                                uint64_t ingest_mode,
                                RequestCallback callback) {
@@ -237,14 +237,14 @@ Error ProducerImpl::SendData__(const EventHeader& event_header,
         return err;
     }
 
-    return Send(std::move(event_header), std::move(substream), std::move(data_wrapped), "", ingest_mode, callback, false);
+    return Send(std::move(event_header), std::move(stream), std::move(data_wrapped), "", ingest_mode, callback, false);
 }
 
 Error ProducerImpl::SendData__(const EventHeader& event_header,
                                void* data,
                                uint64_t ingest_mode,
                                RequestCallback callback) {
-    return SendData__(event_header, kDefaultSubstream, data, ingest_mode, callback);
+    return SendData__(event_header, kDefaultStream, data, ingest_mode, callback);
 }
 
 uint64_t  ProducerImpl::GetRequestsQueueSize() {
@@ -263,7 +263,7 @@ void ProducerImpl::StopThreads__() {
     request_pool__->StopThreads();
 }
 Error ProducerImpl::SendFile(const EventHeader& event_header,
-                             std::string substream,
+                             std::string stream,
                              std::string full_path,
                              uint64_t ingest_mode,
                              RequestCallback callback) {
@@ -271,7 +271,7 @@ Error ProducerImpl::SendFile(const EventHeader& event_header,
         return ProducerErrorTemplates::kWrongInput.Generate("empty filename");
     }
 
-    return Send(event_header, std::move(substream), nullptr, std::move(full_path), ingest_mode, callback, true);
+    return Send(event_header, std::move(stream), nullptr, std::move(full_path), ingest_mode, callback, true);
 
 }
 
@@ -321,17 +321,17 @@ StreamInfo GetInfoFromCallback(std::future<StreamInfoResult>* promiseResult, uin
 }
 
 
-GenericRequestHeader CreateRequestHeaderFromOp(StreamRequestOp op,std::string substream) {
+GenericRequestHeader CreateRequestHeaderFromOp(StreamRequestOp op,std::string stream) {
     switch (op) {
         case StreamRequestOp::kStreamInfo:
-            return GenericRequestHeader{kOpcodeStreamInfo, 0, 0, 0, "", substream};
+            return GenericRequestHeader{kOpcodeStreamInfo, 0, 0, 0, "", stream};
         case StreamRequestOp::kLastStream:
             return GenericRequestHeader{kOpcodeLastStream, 0, 0, 0, "", ""};
     }
 }
 
-StreamInfo ProducerImpl::StreamRequest(StreamRequestOp op,std::string substream, uint64_t timeout_sec, Error* err) const {
-    auto header = CreateRequestHeaderFromOp(op,substream);
+StreamInfo ProducerImpl::StreamRequest(StreamRequestOp op,std::string stream, uint64_t timeout_sec, Error* err) const {
+    auto header = CreateRequestHeaderFromOp(op,stream);
     std::unique_ptr<std::promise<StreamInfoResult>> promise {new std::promise<StreamInfoResult>};
     std::future<StreamInfoResult> promiseResult = promise->get_future();
 
@@ -347,15 +347,15 @@ StreamInfo ProducerImpl::StreamRequest(StreamRequestOp op,std::string substream,
                                err); // we give two more sec for request to exit by timeout
 }
 
-StreamInfo ProducerImpl::GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const {
-    return StreamRequest(StreamRequestOp::kStreamInfo,substream,timeout_sec,err);
+StreamInfo ProducerImpl::GetStreamInfo(std::string stream, uint64_t timeout_sec, Error* err) const {
+    return StreamRequest(StreamRequestOp::kStreamInfo,stream,timeout_sec,err);
 }
 
 StreamInfo ProducerImpl::GetStreamInfo(uint64_t timeout_sec, Error* err) const {
-    return GetStreamInfo(kDefaultSubstream, timeout_sec, err);
+    return GetStreamInfo(kDefaultStream, timeout_sec, err);
 }
 
-StreamInfo ProducerImpl::GetLastSubstream(uint64_t timeout_sec, Error* err) const {
+StreamInfo ProducerImpl::GetLastStream(uint64_t timeout_sec, Error* err) const {
     return StreamRequest(StreamRequestOp::kLastStream,"",timeout_sec,err);
 }
 
diff --git a/producer/api/cpp/src/producer_impl.h b/producer/api/cpp/src/producer_impl.h
index d5a4dd435bdf6e0e9642183b58834c01a58e6217..71a524dbf7bb523d698982e5f50b73ec7a0b2bd6 100644
--- a/producer/api/cpp/src/producer_impl.h
+++ b/producer/api/cpp/src/producer_impl.h
@@ -24,17 +24,17 @@ class ProducerImpl : public Producer {
   std::unique_ptr<RequestHandlerFactory> request_handler_factory_;
  public:
   static const size_t kDiscoveryServiceUpdateFrequencyMs;
-  static const std::string kFinishSubStreamKeyword;
-  static const std::string kNoNextSubStreamKeyword;
+  static const std::string kFinishStreamKeyword;
+  static const std::string kNoNextStreamKeyword;
 
   explicit ProducerImpl(std::string endpoint, uint8_t n_processing_threads, uint64_t timeout_sec,
                         asapo::RequestHandlerType type);
   ProducerImpl(const ProducerImpl &) = delete;
   ProducerImpl &operator=(const ProducerImpl &) = delete;
 
-  StreamInfo GetStreamInfo(std::string substream, uint64_t timeout_sec, Error* err) const override;
+  StreamInfo GetStreamInfo(std::string stream, uint64_t timeout_sec, Error* err) const override;
   StreamInfo GetStreamInfo(uint64_t timeout_sec, Error* err) const override;
-  StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err) const override;
+  StreamInfo GetLastStream(uint64_t timeout_sec, Error* err) const override;
 
   void SetLogLevel(LogLevel level) override;
   void EnableLocalLog(bool enable) override;
@@ -45,17 +45,17 @@ class ProducerImpl : public Producer {
                  RequestCallback callback) override;
   Error SendData__(const EventHeader &event_header, void* data, uint64_t ingest_mode,
                    RequestCallback callback) override;
-  Error SendData(const EventHeader &event_header, std::string substream, FileData data, uint64_t ingest_mode,
+  Error SendData(const EventHeader &event_header, std::string stream, FileData data, uint64_t ingest_mode,
                  RequestCallback callback) override;
-  Error SendData__(const EventHeader &event_header, std::string substream, void* data, uint64_t ingest_mode,
+  Error SendData__(const EventHeader &event_header, std::string stream, void* data, uint64_t ingest_mode,
                    RequestCallback callback) override;
   void StopThreads__() override;
   Error SendFile(const EventHeader &event_header, std::string full_path, uint64_t ingest_mode,
                  RequestCallback callback) override;
-  Error SendFile(const EventHeader &event_header, std::string substream, std::string full_path, uint64_t ingest_mode,
+  Error SendFile(const EventHeader &event_header, std::string stream, std::string full_path, uint64_t ingest_mode,
                  RequestCallback callback) override;
 
-  Error SendSubstreamFinishedFlag(std::string substream, uint64_t last_id, std::string next_substream,
+  Error SendStreamFinishedFlag(std::string stream, uint64_t last_id, std::string next_stream,
                                   RequestCallback callback) override;
 
   AbstractLogger* log__;
@@ -69,11 +69,11 @@ class ProducerImpl : public Producer {
   uint64_t GetRequestsQueueVolumeMb() override;
   void SetRequestsQueueLimits(uint64_t size, uint64_t volume) override;
  private:
-  StreamInfo StreamRequest(StreamRequestOp op, std::string substream, uint64_t timeout_sec, Error* err) const;
-  Error Send(const EventHeader &event_header, std::string substream, FileData data, std::string full_path,
+  StreamInfo StreamRequest(StreamRequestOp op, std::string stream, uint64_t timeout_sec, Error* err) const;
+  Error Send(const EventHeader &event_header, std::string stream, FileData data, std::string full_path,
              uint64_t ingest_mode,
              RequestCallback callback, bool manage_data_memory);
-  GenericRequestHeader GenerateNextSendRequest(const EventHeader &event_header, std::string substream,
+  GenericRequestHeader GenerateNextSendRequest(const EventHeader &event_header, std::string stream,
                                                uint64_t ingest_mode);
   std::string source_cred_string_;
   uint64_t timeout_sec_;
diff --git a/producer/api/cpp/src/request_handler_filesystem.cpp b/producer/api/cpp/src/request_handler_filesystem.cpp
index 3d2dacc23bde4653653daa1af58e789fb6702121..39a8c3d6935471e656d8a65ca8fecbb233c457e0 100644
--- a/producer/api/cpp/src/request_handler_filesystem.cpp
+++ b/producer/api/cpp/src/request_handler_filesystem.cpp
@@ -39,8 +39,8 @@ bool RequestHandlerFilesystem::ProcessRequestUnlocked(GenericRequest* request, b
 }
 
 void RequestHandlerFilesystem::ProcessRequestTimeout(GenericRequest* request) {
-    log__->Error("request timeout, id:" + std::to_string(request->header.data_id) + " to " + request->header.substream +
-                 " substream");
+    log__->Error("request timeout, id:" + std::to_string(request->header.data_id) + " to " + request->header.stream +
+                 " stream");
 }
 
 }
diff --git a/producer/api/cpp/src/request_handler_tcp.cpp b/producer/api/cpp/src/request_handler_tcp.cpp
index eb9356121cead25edaf9997ed4011a5b3fb441a8..e257c32c0f8b2d4f888b33105485dd8d40f3ed4d 100644
--- a/producer/api/cpp/src/request_handler_tcp.cpp
+++ b/producer/api/cpp/src/request_handler_tcp.cpp
@@ -297,8 +297,8 @@ void RequestHandlerTcp::TearDownProcessingRequestLocked(bool request_processed_s
 void RequestHandlerTcp::ProcessRequestTimeout(GenericRequest* request) {
     auto producer_request = static_cast<ProducerRequest*>(request);
     auto err_string = "request id:" + std::to_string(request->header.data_id) + ", opcode: " + std::to_string(
-                          request->header.op_code) + " for " + request->header.substream +
-                      " substream";
+                          request->header.op_code) + " for " + request->header.stream +
+                      " stream";
     log__->Error("timeout " + err_string);
 
     auto err = ProducerErrorTemplates::kTimeout.Generate(err_string);
diff --git a/producer/api/cpp/unittests/test_producer_impl.cpp b/producer/api/cpp/unittests/test_producer_impl.cpp
index 3947db397e8d7578f38e2f2b9628179e7f164ba6..5cd3bf8820ba02742e86468a5bbb21824b42cc84 100644
--- a/producer/api/cpp/unittests/test_producer_impl.cpp
+++ b/producer/api/cpp/unittests/test_producer_impl.cpp
@@ -30,7 +30,7 @@ using ::testing::HasSubstr;
 using asapo::RequestPool;
 using asapo::ProducerRequest;
 
-MATCHER_P10(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_id, file_size, message, substream,
+MATCHER_P10(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_id, file_size, message, stream,
             ingest_mode,
             subset_id,
             subset_size,
@@ -48,7 +48,7 @@ MATCHER_P10(M_CheckSendDataRequest, op_code, source_credentials, metadata, file_
             == uint64_t(subset_size) : true)
         && ((asapo::GenericRequestHeader) (arg->header)).custom_data[asapo::kPosIngestMode] == uint64_t(ingest_mode)
         && strcmp(((asapo::GenericRequestHeader) (arg->header)).message, message) == 0
-        && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader) (arg->header)).stream, stream) == 0;
 }
 
 TEST(ProducerImpl, Constructor) {
@@ -71,8 +71,8 @@ class ProducerImplTests : public testing::Test {
   uint64_t expected_ingest_mode = asapo::IngestModeFlags::kTransferMetaDataOnly;
 
   char expected_name[asapo::kMaxMessageSize] = "test_name";
-  char expected_substream[asapo::kMaxMessageSize] = "test_substream";
-  std::string expected_next_substream = "next_substream";
+  char expected_stream[asapo::kMaxMessageSize] = "test_stream";
+  std::string expected_next_stream = "next_stream";
 
   asapo::SourceCredentials expected_credentials{asapo::SourceType::kRaw, "beamtime_id", "beamline", "subname", "token"
   };
@@ -172,7 +172,7 @@ TEST_F(ProducerImplTests, UsesDefaultStream) {
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
+                                                               asapo::kDefaultStream.c_str(),
                                                                expected_ingest_mode,
                                                                0,
                                                                0), false)).WillOnce(Return(nullptr));
@@ -192,7 +192,7 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequest) {
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
+                                                               asapo::kDefaultStream.c_str(),
                                                                expected_ingest_mode,
                                                                0,
                                                                0
@@ -205,7 +205,7 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequest) {
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSendDataRequestWithSubstream) {
+TEST_F(ProducerImplTests, OKSendingSendDataRequestWithStream) {
     producer.SetCredentials(expected_credentials);
 
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
@@ -214,7 +214,7 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequestWithSubstream) {
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               expected_substream,
+                                                               expected_stream,
                                                                expected_ingest_mode,
                                                                0,
                                                                0
@@ -222,39 +222,39 @@ TEST_F(ProducerImplTests, OKSendingSendDataRequestWithSubstream) {
         nullptr));
 
     asapo::EventHeader event_header{expected_id, expected_size, expected_name, expected_metadata};
-    auto err = producer.SendData(event_header, expected_substream, nullptr, expected_ingest_mode, nullptr);
+    auto err = producer.SendData(event_header, expected_stream, nullptr, expected_ingest_mode, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSubstreamFinish) {
+TEST_F(ProducerImplTests, OKSendingStreamFinish) {
     producer.SetCredentials(expected_credentials);
 
-    std::string next_stream_meta = std::string("{\"next_substream\":") + "\"" + expected_next_substream + "\"}";
+    std::string next_stream_meta = std::string("{\"next_stream\":") + "\"" + expected_next_stream + "\"}";
 
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
                                                                expected_credentials_str,
                                                                next_stream_meta.c_str(),
                                                                expected_id + 1,
                                                                0,
-                                                               asapo::ProducerImpl::kFinishSubStreamKeyword.c_str(),
-                                                               expected_substream,
+                                                               asapo::ProducerImpl::kFinishStreamKeyword.c_str(),
+                                                               expected_stream,
                                                                asapo::IngestModeFlags::kTransferMetaDataOnly,
                                                                0,
                                                                0
     ), false)).WillOnce(Return(
         nullptr));
 
-    auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, expected_next_substream, nullptr);
+    auto err = producer.SendStreamFinishedFlag(expected_stream, expected_id, expected_next_stream, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSubstreamFinishWithNoNextStream) {
+TEST_F(ProducerImplTests, OKSendingStreamFinishWithNoNextStream) {
     producer.SetCredentials(expected_credentials);
 
     std::string
-        next_stream_meta = std::string("{\"next_substream\":") + "\"" + asapo::ProducerImpl::kNoNextSubStreamKeyword
+        next_stream_meta = std::string("{\"next_stream\":") + "\"" + asapo::ProducerImpl::kNoNextStreamKeyword
         + "\"}";
 
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
@@ -262,15 +262,15 @@ TEST_F(ProducerImplTests, OKSendingSubstreamFinishWithNoNextStream) {
                                                                next_stream_meta.c_str(),
                                                                expected_id + 1,
                                                                0,
-                                                               asapo::ProducerImpl::kFinishSubStreamKeyword.c_str(),
-                                                               expected_substream,
+                                                               asapo::ProducerImpl::kFinishStreamKeyword.c_str(),
+                                                               expected_stream,
                                                                asapo::IngestModeFlags::kTransferMetaDataOnly,
                                                                0,
                                                                0
     ), false)).WillOnce(Return(
         nullptr));
 
-    auto err = producer.SendSubstreamFinishedFlag(expected_substream, expected_id, "", nullptr);
+    auto err = producer.SendStreamFinishedFlag(expected_stream, expected_id, "", nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -283,7 +283,7 @@ TEST_F(ProducerImplTests, OKSendingSendSubsetDataRequest) {
                                                                expected_id,
                                                                expected_size,
                                                                expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
+                                                               asapo::kDefaultStream.c_str(),
                                                                expected_ingest_mode,
                                                                expected_subset_id,
                                                                expected_subset_size), false)).WillOnce(
@@ -354,7 +354,7 @@ TEST_F(ProducerImplTests, OKSendingSendFileRequest) {
                                                                expected_id,
                                                                0,
                                                                expected_name,
-                                                               asapo::kDefaultSubstream.c_str(),
+                                                               asapo::kDefaultStream.c_str(),
                                                                expected_ingest_mode,
                                                                0,
                                                                0), false)).WillOnce(Return(
@@ -366,7 +366,7 @@ TEST_F(ProducerImplTests, OKSendingSendFileRequest) {
     ASSERT_THAT(err, Eq(nullptr));
 }
 
-TEST_F(ProducerImplTests, OKSendingSendFileRequestWithSubstream) {
+TEST_F(ProducerImplTests, OKSendingSendFileRequestWithStream) {
     producer.SetCredentials(expected_credentials);
 
     EXPECT_CALL(mock_pull, AddRequest_t(M_CheckSendDataRequest(asapo::kOpcodeTransferData,
@@ -375,14 +375,14 @@ TEST_F(ProducerImplTests, OKSendingSendFileRequestWithSubstream) {
                                                                expected_id,
                                                                0,
                                                                expected_name,
-                                                               expected_substream,
+                                                               expected_stream,
                                                                expected_ingest_mode,
                                                                0,
                                                                0), false)).WillOnce(Return(
         nullptr));
 
     asapo::EventHeader event_header{expected_id, 0, expected_name};
-    auto err = producer.SendFile(event_header, expected_substream, expected_fullpath, expected_ingest_mode, nullptr);
+    auto err = producer.SendFile(event_header, expected_stream, expected_fullpath, expected_ingest_mode, nullptr);
 
     ASSERT_THAT(err, Eq(nullptr));
 }
@@ -460,23 +460,23 @@ TEST_F(ProducerImplTests, WaitRequestsFinished) {
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
-MATCHER_P3(M_CheckGetSubstreamInfoRequest, op_code, source_credentials, substream,
+MATCHER_P3(M_CheckGetStreamInfoRequest, op_code, source_credentials, stream,
            "Checks if a valid GenericRequestHeader was Send") {
     auto request = static_cast<ProducerRequest*>(arg);
     return ((asapo::GenericRequestHeader) (arg->header)).op_code == op_code
         && request->source_credentials == source_credentials
-        && strcmp(((asapo::GenericRequestHeader) (arg->header)).substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader) (arg->header)).stream, stream) == 0;
 }
 
 TEST_F(ProducerImplTests, GetStreamInfoMakesCorerctRequest) {
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetSubstreamInfoRequest(asapo::kOpcodeStreamInfo,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetStreamInfoRequest(asapo::kOpcodeStreamInfo,
                                                                        expected_credentials_str,
-                                                                       expected_substream), true)).WillOnce(
+                                                                       expected_stream), true)).WillOnce(
         Return(nullptr));
 
     asapo::Error err;
-    producer.GetStreamInfo(expected_substream, 1, &err);
+    producer.GetStreamInfo(expected_stream, 1, &err);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
@@ -491,13 +491,13 @@ TEST(GetStreamInfoTest, GetStreamInfoTimeout) {
 
 TEST_F(ProducerImplTests, GetLastStreamMakesCorerctRequest) {
     producer.SetCredentials(expected_credentials);
-    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetSubstreamInfoRequest(asapo::kOpcodeLastStream,
+    EXPECT_CALL(mock_pull, AddRequest_t(M_CheckGetStreamInfoRequest(asapo::kOpcodeLastStream,
                                                                        expected_credentials_str,
                                                                        ""), true)).WillOnce(
         Return(nullptr));
 
     asapo::Error err;
-    producer.GetLastSubstream(1, &err);
+    producer.GetLastStream(1, &err);
     ASSERT_THAT(err, Eq(asapo::ProducerErrorTemplates::kTimeout));
 }
 
diff --git a/producer/api/cpp/unittests/test_request_handler_tcp.cpp b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
index 4257e6049871550673ca1b423627c5b8339fd850..780bd64a8e7e0ea7184f0a5bae8e7e97561d0866 100644
--- a/producer/api/cpp/unittests/test_request_handler_tcp.cpp
+++ b/producer/api/cpp/unittests/test_request_handler_tcp.cpp
@@ -60,15 +60,15 @@ class RequestHandlerTcpTests : public testing::Test {
 
   char expected_file_name[asapo::kMaxMessageSize] = "test_name";
   char expected_beamtime_id[asapo::kMaxMessageSize] = "test_beamtime_id";
-  char expected_substream[asapo::kMaxMessageSize] = "test_substream";
+  char expected_stream[asapo::kMaxMessageSize] = "test_stream";
 
   uint64_t expected_thread_id = 2;
 
   asapo::Error callback_err;
   asapo::GenericRequestHeader header{expected_op_code, expected_file_id, expected_file_size,
-                                     expected_meta_size, expected_file_name, expected_substream};
+                                     expected_meta_size, expected_file_name, expected_stream};
   asapo::GenericRequestHeader header_fromfile{expected_op_code, expected_file_id, 0, expected_meta_size,
-                                              expected_file_name, expected_substream};
+                                              expected_file_name, expected_stream};
   bool callback_called = false;
   asapo::GenericRequestHeader callback_header;
   std::string callback_response;
@@ -161,13 +161,13 @@ ACTION_P2(A_WriteSendDataResponse, error_code, message) {
     strcpy(((asapo::SendDataResponse*) arg1)->message, message.c_str());
 }
 
-MATCHER_P5(M_CheckSendDataRequest, op_code, file_id, file_size, message, substream,
+MATCHER_P5(M_CheckSendDataRequest, op_code, file_id, file_size, message, stream,
            "Checks if a valid GenericRequestHeader was Send") {
     return ((asapo::GenericRequestHeader*) arg)->op_code == op_code
         && ((asapo::GenericRequestHeader*) arg)->data_id == uint64_t(file_id)
         && ((asapo::GenericRequestHeader*) arg)->data_size == uint64_t(file_size)
         && strcmp(((asapo::GenericRequestHeader*) arg)->message, message) == 0
-        && strcmp(((asapo::GenericRequestHeader*) arg)->substream, substream) == 0;
+        && strcmp(((asapo::GenericRequestHeader*) arg)->stream, stream) == 0;
 
 }
 
@@ -273,7 +273,7 @@ void RequestHandlerTcpTests::ExpectFailSendHeader(bool only_once) {
                                                                         expected_file_id,
                                                                         expected_file_size,
                                                                         expected_file_name,
-                                                                        expected_substream),
+                                                                        expected_stream),
                                     sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
                 DoAll(
@@ -445,7 +445,7 @@ void RequestHandlerTcpTests::ExpectOKSendHeader(bool only_once, asapo::Opcode op
                                                                         expected_file_id,
                                                                         expected_file_size,
                                                                         expected_file_name,
-                                                                        expected_substream),
+                                                                        expected_stream),
                                     sizeof(asapo::GenericRequestHeader), _))
             .WillOnce(
                 DoAll(
@@ -941,7 +941,7 @@ TEST_F(RequestHandlerTcpTests, SendMetaOnlyForFileReadOK) {
 TEST_F(RequestHandlerTcpTests, TimeoutCallsCallback) {
     EXPECT_CALL(mock_logger, Error(AllOf(
         HasSubstr("timeout"),
-        HasSubstr("substream"))
+        HasSubstr("stream"))
     ));
 
     request_handler.ProcessRequestTimeout(&request);
diff --git a/producer/api/python/asapo_producer.pxd b/producer/api/python/asapo_producer.pxd
index c267b701d20be5772ac95a7d593a3f25cd1171ee..a8758311749ce8ea27574648430897b3cddf920f 100644
--- a/producer/api/python/asapo_producer.pxd
+++ b/producer/api/python/asapo_producer.pxd
@@ -104,15 +104,15 @@ cdef extern from "asapo/asapo_producer.h" namespace "asapo" nogil:
     cppclass Producer:
         @staticmethod
         unique_ptr[Producer] Create(string endpoint,uint8_t nthreads,RequestHandlerType type, SourceCredentials source,uint64_t timeout_sec, Error* error)
-        Error SendFile(const EventHeader& event_header, string substream, string full_path, uint64_t ingest_mode,RequestCallback callback)
-        Error SendData__(const EventHeader& event_header, string substream, void* data, uint64_t ingest_mode,RequestCallback callback)
+        Error SendFile(const EventHeader& event_header, string stream, string full_path, uint64_t ingest_mode,RequestCallback callback)
+        Error SendData__(const EventHeader& event_header, string stream, void* data, uint64_t ingest_mode,RequestCallback callback)
         void StopThreads__()
         void SetLogLevel(LogLevel level)
         uint64_t  GetRequestsQueueSize()
         Error WaitRequestsFinished(uint64_t timeout_ms)
-        Error SendSubstreamFinishedFlag(string substream, uint64_t last_id, string next_substream, RequestCallback callback)
-        StreamInfo GetStreamInfo(string substream, uint64_t timeout_sec, Error* err)
-        StreamInfo GetLastSubstream(uint64_t timeout_sec, Error* err)
+        Error SendStreamFinishedFlag(string stream, uint64_t last_id, string next_stream, RequestCallback callback)
+        StreamInfo GetStreamInfo(string stream, uint64_t timeout_sec, Error* err)
+        StreamInfo GetLastStream(uint64_t timeout_sec, Error* err)
 
 
 cdef extern from "asapo/asapo_producer.h" namespace "asapo":
diff --git a/producer/api/python/asapo_producer.pyx.in b/producer/api/python/asapo_producer.pyx.in
index 9e49f3187c41ead4499d3f8dfa76fabe147cc939..e77bafcb7e14d8adf4e08f12e2678cd5f2a3802c 100644
--- a/producer/api/python/asapo_producer.pyx.in
+++ b/producer/api/python/asapo_producer.pyx.in
@@ -107,13 +107,13 @@ cdef class PyProducer:
             return
          self.c_producer.get().SetLogLevel(log_level)
 
-    def __send_np_array(self, id, exposed_path,data, user_meta=None,subset=None,substream="default",ingest_mode = DEFAULT_INGEST_MODE,callback=None):
+    def __send_np_array(self, id, exposed_path,data, user_meta=None,subset=None,stream="default",ingest_mode = DEFAULT_INGEST_MODE,callback=None):
         cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
         if data is None:
             event_header.file_size = 0
         else:
             event_header.file_size = data.nbytes
-        err = self.c_producer.get().SendData__(event_header, _bytes(substream), data_pointer_nparray(data),ingest_mode,
+        err = self.c_producer.get().SendData__(event_header, _bytes(stream), data_pointer_nparray(data),ingest_mode,
             unwrap_callback_with_memory(<RequestCallbackCythonMemory>self.c_callback_ndarr,
              <void*>self,<void*>callback, <void*>data))
         if err:
@@ -139,10 +139,10 @@ cdef class PyProducer:
             event_header.subset_size = subset[1]
         return event_header
 
-    def __send_bytes(self, id, exposed_path,data, user_meta=None,subset=None, substream="default", ingest_mode = DEFAULT_INGEST_MODE,callback=None):
+    def __send_bytes(self, id, exposed_path,data, user_meta=None,subset=None, stream="default", ingest_mode = DEFAULT_INGEST_MODE,callback=None):
         cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
         event_header.file_size = len(data)
-        err = self.c_producer.get().SendData__(event_header,_bytes(substream),  data_pointer_bytes(data), ingest_mode,
+        err = self.c_producer.get().SendData__(event_header,_bytes(stream),  data_pointer_bytes(data), ingest_mode,
             unwrap_callback_with_memory(<RequestCallbackCythonMemory>self.c_callback_bytesaddr,
              <void*>self,<void*>callback, <void*>data))
         if err:
@@ -152,7 +152,7 @@ cdef class PyProducer:
             Py_XINCREF(<PyObject*>callback)
         return
 
-    def send_data(self, uint64_t id, exposed_path, data, user_meta=None, subset=None, substream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
+    def send_data(self, uint64_t id, exposed_path, data, user_meta=None, subset=None, stream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
         """
          :param id: unique data id
          :type id: int
@@ -164,8 +164,8 @@ cdef class PyProducer:
          :type user_meta: JSON string
          :param subset: a tuple with two int values (id in subset, subset size), default None
          :type subset: tuple
-         :param substream: substream name, default "default"
-         :type substream: string
+         :param stream: stream name, default "default"
+         :type stream: string
          :param ingest_mode: ingest mode flag
          :type ingest_mode: int
          :param callback: callback function, default None
@@ -175,33 +175,33 @@ cdef class PyProducer:
             AsapoProducerError: actually should not happen
         """
         if type(data) == np.ndarray or data == None:
-            self.__send_np_array(id,exposed_path,data,user_meta,subset,substream,ingest_mode,callback)
+            self.__send_np_array(id,exposed_path,data,user_meta,subset,stream,ingest_mode,callback)
         elif type(data) == bytes:
-            self.__send_bytes(id,exposed_path,data,user_meta,subset,substream,ingest_mode,callback)
+            self.__send_bytes(id,exposed_path,data,user_meta,subset,stream,ingest_mode,callback)
         else:
             raise(AsapoProducerError("wrong data type: " + str(type(data))))
-    def send_substream_finished_flag(self, substream, uint64_t last_id, next_substream = None, callback = None):
+    def send_stream_finished_flag(self, stream, uint64_t last_id, next_stream = None, callback = None):
         """
-         :param substream: substream name
-         :type substream: string
+         :param stream: stream name
+         :type stream: string
          :param id: id of the last record
-         :param next_substream: name of the next substream or None
-         :type substream: string
+         :param next_stream: name of the next stream or None
+         :type next_stream: string
          :param callback: callback function, default None
          :type callback: callback(info,err), where info - json string with event header that was used to send data and response, err - error string or None
          :raises:
             AsapoWrongInputError: wrong input (authorization, meta, ...)
             AsapoProducerError: actually should not happen
         """
-        err = self.c_producer.get().SendSubstreamFinishedFlag(_bytes(substream), last_id,_bytes(next_substream) if next_substream != None else "",
+        err = self.c_producer.get().SendStreamFinishedFlag(_bytes(stream), last_id,_bytes(next_stream) if next_stream != None else "",
         unwrap_callback(<RequestCallbackCython>self.c_callback, <void*>self,<void*>callback if callback != None else NULL))
         if err:
             throw_exception(err)
 
-    def stream_info(self, substream = 'default', uint64_t timeout_sec = 1):
+    def stream_info(self, stream = 'default', uint64_t timeout_sec = 1):
         """
-         :param substream: substream name
-         :type substream: string
+         :param stream: stream name
+         :type stream: string
          :param timeout_sec: timeout in seconds
          :type timeout_ms: int
          :raises:
@@ -211,9 +211,9 @@ cdef class PyProducer:
         """
         cdef Error err
         cdef StreamInfo info
-        cdef string b_substream = _bytes(substream)
+        cdef string b_stream = _bytes(stream)
         with nogil:
-            info = self.c_producer.get().GetStreamInfo(b_substream,timeout_sec,&err)
+            info = self.c_producer.get().GetStreamInfo(b_stream,timeout_sec,&err)
         if err:
             throw_exception(err)
         return json.loads(_str(info.Json(True)))
@@ -230,11 +230,11 @@ cdef class PyProducer:
         cdef Error err
         cdef StreamInfo info
         with nogil:
-            info = self.c_producer.get().GetLastSubstream(timeout_sec,&err)
+            info = self.c_producer.get().GetLastStream(timeout_sec,&err)
         if err:
             throw_exception(err)
         return json.loads(_str(info.Json(True)))
-    def send_file(self, uint64_t id, local_path, exposed_path, user_meta=None, subset=None, substream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
+    def send_file(self, uint64_t id, local_path, exposed_path, user_meta=None, subset=None, stream = "default", ingest_mode = DEFAULT_INGEST_MODE, callback=None):
         """
          :param id: unique data id
          :type id: int
@@ -246,8 +246,8 @@ cdef class PyProducer:
          :type user_meta: JSON string
          :param subset: a tuple with two int values (subset id, subset size), default None
          :type subset: tuple
-         :param substream: substream name, default "default"
-         :type substream: string
+         :param stream: stream name, default "default"
+         :type stream: string
          :param ingest_mode: ingest mode flag
          :type ingest_mode: int
          :param callback: callback function, default None
@@ -260,7 +260,7 @@ cdef class PyProducer:
 
         cdef EventHeader event_header = self.create_event_header(id,exposed_path,user_meta,subset,ingest_mode)
         event_header.file_size = 0
-        err = self.c_producer.get().SendFile(event_header, _bytes(substream), _bytes(local_path), ingest_mode,
+        err = self.c_producer.get().SendFile(event_header, _bytes(stream), _bytes(local_path), ingest_mode,
             unwrap_callback(<RequestCallbackCython>self.c_callback, <void*>self,<void*>callback if callback != None else NULL))
         if err:
             throw_exception(err)
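
Editor's note: for readers tracking the rename, here is a minimal sketch of the producer-side Python API as it looks after this patch. It assumes an already-created `producer` object (creation arguments are omitted) and a hypothetical stream name `scan1`; every call used is one visible in the diff above.

```python
import asapo_producer

def send_and_finish(producer, n):
    """Send n metadata-only records to stream 'scan1', then close the stream."""
    def callback(header, err):
        if err is not None:
            print("send failed:", err)

    for i in range(n):
        # 'stream' replaces the former 'substream' keyword
        producer.send_data(i + 1, "name" + str(i), None,
                           ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY,
                           stream="scan1", callback=callback)

    # announce that 'scan1' is finished and that 'scan2' follows it
    producer.send_stream_finished_flag("scan1", n, next_stream="scan2",
                                       callback=callback)
    producer.wait_requests_finished(10000)

    # per-stream metadata, and info for the most recently created stream
    print(producer.stream_info("scan1"))
    print(producer.last_stream())
```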
diff --git a/receiver/src/request.cpp b/receiver/src/request.cpp
index 808567d3d74a109f5784ab76608faf032b5f9c55..5bf9c1ab2cbdd38f9c0dd8339e0a86912bc163b6 100644
--- a/receiver/src/request.cpp
+++ b/receiver/src/request.cpp
@@ -85,8 +85,8 @@ std::string Request::GetFileName() const {
     return orig_name;
 }
 
-std::string Request::GetSubstream() const {
-    return request_header_.substream;
+std::string Request::GetStream() const {
+    return request_header_.stream;
 }
 
 const std::string& Request::GetOriginUri() const {
diff --git a/receiver/src/request.h b/receiver/src/request.h
index 4811b01aae16b999fa8bee9c1b93f0b51287ba2a..5d462d7f1ab620a5b2c913f88f9b0d9564d72513 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -43,7 +43,7 @@ class Request {
     VIRTUAL uint64_t GetMetaDataSize() const;
     VIRTUAL uint64_t GetDataID() const;
     VIRTUAL std::string GetFileName() const;
-    VIRTUAL std::string GetSubstream() const;
+    VIRTUAL std::string GetStream() const;
     VIRTUAL void* GetData() const;
     VIRTUAL Opcode GetOpCode() const;
     VIRTUAL const char* GetMessage() const;
diff --git a/receiver/src/request_handler/request_handler_db_check_request.cpp b/receiver/src/request_handler/request_handler_db_check_request.cpp
index 94911d3b028047db55230ca59036227a4ba75316..3ce2b9dacff95971fc136f405011f173dc798eaf 100644
--- a/receiver/src/request_handler/request_handler_db_check_request.cpp
+++ b/receiver/src/request_handler/request_handler_db_check_request.cpp
@@ -19,7 +19,7 @@ RequestHandlerDbCheckRequest::RequestHandlerDbCheckRequest(std::string collectio
 Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, FileInfo* record ) const {
     auto op_code = request->GetOpCode();
     auto id = request->GetDataID();
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     Error err;
     if (op_code == Opcode::kOpcodeTransferData) {
         err =  db_client__->GetById(col_name, id, record);
diff --git a/receiver/src/request_handler/request_handler_db_stream_info.cpp b/receiver/src/request_handler/request_handler_db_stream_info.cpp
index 82689f7d65870d7dbba2099f31e1aaed2774204b..20221ba8c3babb5466046cde16baee96f145bfe0 100644
--- a/receiver/src/request_handler/request_handler_db_stream_info.cpp
+++ b/receiver/src/request_handler/request_handler_db_stream_info.cpp
@@ -14,13 +14,13 @@ Error RequestHandlerDbStreamInfo::ProcessRequest(Request* request) const {
         return err;
     }
 
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     StreamInfo info;
     auto err =  db_client__->GetStreamInfo(col_name, &info);
     if (!err) {
         log__->Debug(std::string{"get stream info from "} + col_name + " in " +
                      db_name_ + " at " + GetReceiverConfig()->database_uri);
-        info.name = request->GetSubstream();
+        info.name = request->GetStream();
         request->SetResponseMessage(info.Json(true), ResponseMessageType::kInfo);
     }
     return err;
diff --git a/receiver/src/request_handler/request_handler_db_write.cpp b/receiver/src/request_handler/request_handler_db_write.cpp
index 94241848659a0dd6dfa63f85b9456a2aa6a28055..3dece0e77768e3d4537e3b046852029bee38d1ab 100644
--- a/receiver/src/request_handler/request_handler_db_write.cpp
+++ b/receiver/src/request_handler/request_handler_db_write.cpp
@@ -55,7 +55,7 @@ Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
     auto file_info = PrepareFileInfo(request);
 
     auto op_code = request->GetOpCode();
-    auto col_name = collection_name_prefix_ + "_" + request->GetSubstream();
+    auto col_name = collection_name_prefix_ + "_" + request->GetStream();
     Error err;
     if (op_code == Opcode::kOpcodeTransferData) {
         err =  db_client__->Insert(col_name, file_info, false);
diff --git a/receiver/unittests/receiver_mocking.h b/receiver/unittests/receiver_mocking.h
index 4845de5770ef6339dd6fb6d818f26d7ff2e8be47..200aeed190a32fbf9b8b958b87723928e11be734 100644
--- a/receiver/unittests/receiver_mocking.h
+++ b/receiver/unittests/receiver_mocking.h
@@ -65,7 +65,7 @@ class MockRequest: public Request {
         Request(request_header, socket_fd, std::move(origin_uri), nullptr, db_check_handler) {};
 
     MOCK_CONST_METHOD0(GetFileName, std::string());
-    MOCK_CONST_METHOD0(GetSubstream, std::string());
+    MOCK_CONST_METHOD0(GetStream, std::string());
     MOCK_CONST_METHOD0(GetDataSize, uint64_t());
     MOCK_CONST_METHOD0(GetDataID, uint64_t());
     MOCK_CONST_METHOD0(GetSlotId, uint64_t());
diff --git a/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp b/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
index 448b339cac8a95d19c6cdd63481fd838a75b4d2b..1a5d57574f97b60bf5b3539d88413a4ef960a14e 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_check_request.cpp
@@ -65,8 +65,8 @@ TEST(DbCheckRequestHandler, Constructor) {
 
 class DbCheckRequestHandlerTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbCheckRequest handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
@@ -167,8 +167,8 @@ void DbCheckRequestHandlerTests::ExpectRequestParams(asapo::Opcode op_code, cons
         ;
     }
 
-    EXPECT_CALL(*mock_request, GetSubstream())
-    .WillOnce(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream())
+    .WillOnce(Return(expected_stream))
     ;
 
     EXPECT_CALL(*mock_request, GetDataID())
diff --git a/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp b/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
index 7baa2cf98a3dc5a7371465d5d3509458bef374a9..da6740f13b0c4ff9f47b3ec3e70d65273108670a 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_last_stream.cpp
@@ -55,7 +55,7 @@ namespace {
 
 class DbMetaLastStreamTests : public Test {
   public:
-    std::string expectedlaststream = "substream";
+    std::string expectedlaststream = "stream";
     RequestHandlerDbLastStream handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
@@ -63,7 +63,7 @@ class DbMetaLastStreamTests : public Test {
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
     std::string expected_data_source = "source";
-    std::string info_str = R"({"lastId":10,"name":"substream","timestampCreated":1000000,"timestampLast":2000000})";
+    std::string info_str = R"({"lastId":10,"name":"stream","timestampCreated":1000000,"timestampLast":2000000})";
     asapo::StreamInfo expected_stream_info;
     void SetUp() override {
         GenericRequestHeader request_header;
diff --git a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
index bd0aee05660fba242ccc2e44e27aa87c028637ba..2596660e025102afcbb0eb769ab35c37012ccf3a 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_stream_info.cpp
@@ -55,8 +55,8 @@ namespace {
 
 class DbMetaStreamInfoTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbStreamInfo handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
@@ -64,12 +64,12 @@ class DbMetaStreamInfoTests : public Test {
     ReceiverConfig config;
     std::string expected_beamtime_id = "beamtime_id";
     std::string expected_data_source = "source";
-    std::string info_str = R"({"lastId":10,"name":"substream","timestampCreated":1000000,"timestampLast":2000000})";
+    std::string info_str = R"({"lastId":10,"name":"stream","timestampCreated":1000000,"timestampLast":2000000})";
     asapo::StreamInfo expected_stream_info;
     void SetUp() override {
         GenericRequestHeader request_header;
         expected_stream_info.last_id = 10;
-        expected_stream_info.name = expected_substream;
+        expected_stream_info.name = expected_stream;
         expected_stream_info.timestamp_created = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(1));
         expected_stream_info.timestamp_lastentry = std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(2));
         request_header.data_id = 0;
@@ -92,8 +92,8 @@ TEST_F(DbMetaStreamInfoTests, CallsUpdate) {
 
     EXPECT_CALL(*mock_request, GetDataSource()).WillOnce(ReturnRef(expected_data_source));
 
-    EXPECT_CALL(*mock_request, GetSubstream()).Times(2)
-    .WillRepeatedly(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream()).Times(2)
+    .WillRepeatedly(Return(expected_stream))
     ;
 
     EXPECT_CALL(mock_db, Connect_t(config.database_uri, expected_beamtime_id + "_" + expected_data_source)).
diff --git a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
index 0d7f9e5aad4adb0003bc0b4926ee08cca659b5d1..f3b26a8c47adc8a5791def34adf505bd635f4540 100644
--- a/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_db_writer.cpp
@@ -66,8 +66,8 @@ TEST(DbWriterHandler, Constructor) {
 
 class DbWriterHandlerTests : public Test {
   public:
-    std::string expected_substream = "substream";
-    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_substream;
+    std::string expected_stream = "stream";
+    std::string expected_collection_name = std::string(asapo::kDBDataCollectionNamePrefix) + "_" + expected_stream;
     RequestHandlerDbWrite handler{asapo::kDBDataCollectionNamePrefix};
     std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<MockDatabase> mock_db;
@@ -162,8 +162,8 @@ void DbWriterHandlerTests::ExpectRequestParams(asapo::Opcode op_code, const std:
     .WillOnce(Return(expected_file_name))
     ;
 
-    EXPECT_CALL(*mock_request, GetSubstream())
-    .WillOnce(Return(expected_substream))
+    EXPECT_CALL(*mock_request, GetStream())
+    .WillOnce(Return(expected_stream))
     ;
 
 
diff --git a/receiver/unittests/test_request.cpp b/receiver/unittests/test_request.cpp
index 7f48d0046313f0383ff6ed5b022fd7229a728ffd..07722c9406556bbf62ab21dcd9ccb167c7be2184 100644
--- a/receiver/unittests/test_request.cpp
+++ b/receiver/unittests/test_request.cpp
@@ -79,7 +79,7 @@ class RequestTests : public Test {
     uint64_t expected_slot_id{16};
     std::string expected_origin_uri = "origin_uri";
     std::string expected_metadata = "meta";
-    std::string expected_substream = "substream";
+    std::string expected_stream = "stream";
     uint64_t expected_metadata_size = expected_metadata.size();
     asapo::Opcode expected_op_code = asapo::kOpcodeTransferData;
     char expected_request_message[asapo::kMaxMessageSize] = "test_message";
@@ -186,16 +186,16 @@ void RequestTests::ExpectFileName(std::string sended, std::string received) {
 }
 
 
-TEST_F(RequestTests, GetSubstream) {
-    strcpy(generic_request_header.substream, expected_substream.c_str());
+TEST_F(RequestTests, GetStream) {
+    strcpy(generic_request_header.stream, expected_stream.c_str());
 
     request->io__.release();
     request.reset(new Request{generic_request_header, expected_socket_id, expected_origin_uri, nullptr, nullptr});
     request->io__ = std::unique_ptr<asapo::IO> {&mock_io};;
 
-    auto substream = request->GetSubstream();
+    auto stream = request->GetStream();
 
-    ASSERT_THAT(substream, Eq(expected_substream));
+    ASSERT_THAT(stream, Eq(expected_stream));
 }
 
 
diff --git a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
index b541b5913c49618102d5015aa0084e9a2eb55a28..cf5e490000ce38ef122ca30396ba2a81cc680b1a 100644
--- a/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/parallel_data_transfer.cpp
@@ -41,7 +41,7 @@ void ServerChildThread(FabricServer* server, std::atomic<int>* serverTotalReques
         M_AssertEq(messageId / kEachInstanceRuns, request.data_id); // is client index
         M_AssertEq(messageId % kEachInstanceRuns, request.data_size); // is client run
 
-        server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, expectedRdmaBuffer, kRdmaSize, &err);
+        server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, expectedRdmaBuffer, kRdmaSize, &err);
         M_AssertEq(nullptr, err, "server->RdmaWrite");
 
         GenericNetworkResponse response{};
@@ -96,7 +96,7 @@ void ClientChildThread(const std::string& hostname, uint16_t port, int index, ch
 
         GenericRequestHeader request{};
         strcpy(request.message, "Hello World");
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
         request.data_id = index;
         request.data_size = run;
         FabricMessageId messageId = (index * kEachInstanceRuns) + run;
diff --git a/tests/automatic/asapo_fabric/simple_data_transfer.cpp b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
index e811d4644b6c9ec95daee6c957c00b7fc0070436..28a2b9ce6316fe3d429618494c742bd6c9ee403d 100644
--- a/tests/automatic/asapo_fabric/simple_data_transfer.cpp
+++ b/tests/automatic/asapo_fabric/simple_data_transfer.cpp
@@ -45,7 +45,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port, char* expect
                 M_AssertEq(123 + instanceRuns, messageId);
                 M_AssertEq("Hello World", request.message);
 
-                server->RdmaWrite(clientAddress, (MemoryRegionDetails*) &request.substream, expectedRdmaBuffer, kRdmaSize,
+                server->RdmaWrite(clientAddress, (MemoryRegionDetails*) &request.stream, expectedRdmaBuffer, kRdmaSize,
                                   &err);
                 M_AssertEq(nullptr, err, "server->RdmaWrite");
 
@@ -84,7 +84,7 @@ void ClientThread(const std::string& hostname, uint16_t port, char* expectedRdma
 
             GenericRequestHeader request{};
             strcpy(request.message, "Hello World");
-            memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+            memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
             FabricMessageId messageId = 123 + instanceRuns;
             client->Send(serverAddress, messageId, &request, sizeof(request), &err);
             M_AssertEq(nullptr, err, "client->Send");
diff --git a/tests/automatic/asapo_fabric/wrong_memory_info.cpp b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
index 52259b58e17e324f777bfc8a1aa0190ba78674c4..cb6c444a4db0c04457d8c6529ef134a10773b7ae 100644
--- a/tests/automatic/asapo_fabric/wrong_memory_info.cpp
+++ b/tests/automatic/asapo_fabric/wrong_memory_info.cpp
@@ -40,7 +40,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     M_AssertEq(nullptr, err, "server->RecvAny(1)");
     M_AssertEq(1, messageId);
     M_AssertEq("Hello World", request.message);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(FabricErrorTemplates::kInternalError, err, "server->RdmaWrite(1)");
     err = nullptr; // We have to reset the error by ourselves
     server->Send(clientAddress, messageId, dummyData.get(), kDummyDataSize, &err);
@@ -54,7 +54,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(2)");
     M_AssertEq(2, messageId);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(nullptr, err, "server->RdmaWrite(2)");
     server->Send(clientAddress, messageId, dummyData.get(), kDummyDataSize, &err);
     M_AssertEq(nullptr, err, "server->Send(2)");
@@ -68,7 +68,7 @@ void ServerMasterThread(const std::string& hostname, uint16_t port) {
     } while (err == IOErrorTemplates::kTimeout && tries++ < 2);
     M_AssertEq(nullptr, err, "server->RecvAny(3)");
     M_AssertEq(3, messageId);
-    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.substream, rdmaBuffer.get(), kRdmaSize, &err);
+    server->RdmaWrite(clientAddress, (MemoryRegionDetails*)&request.stream, rdmaBuffer.get(), kRdmaSize, &err);
     M_AssertEq(FabricErrorTemplates::kInternalError, err, "server->RdmaWrite(3)");
 
     std::cout << "[SERVER] Waiting for client to finish" << std::endl;
@@ -98,10 +98,10 @@ void ClientThread(const std::string& hostname, uint16_t port) {
     {
         auto mr = client->ShareMemoryRegion(actualRdmaBuffer.get(), kRdmaSize, &err);
         M_AssertEq(nullptr, err, "client->ShareMemoryRegion");
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
 
         // Simulate faulty memory details
-        ((MemoryRegionDetails*)(&request.substream))->key++;
+        ((MemoryRegionDetails*)(&request.stream))->key++;
         client->Send(serverAddress, messageId, &request, sizeof(request), &err);
         M_AssertEq(nullptr, err, "client->Send(1)");
         client->Recv(serverAddress, messageId, dummyData.get(), kDummyDataSize, &err);
@@ -109,7 +109,7 @@ void ClientThread(const std::string& hostname, uint16_t port) {
         messageId++;
 
         // Simulate correct memory details
-        memcpy(request.substream, mr->GetDetails(), sizeof(MemoryRegionDetails));
+        memcpy(request.stream, mr->GetDetails(), sizeof(MemoryRegionDetails));
         client->Send(serverAddress, messageId, &request, sizeof(request), &err);
         M_AssertEq(nullptr, err, "client->Send(2)");
         client->Recv(serverAddress, messageId, dummyData.get(), kDummyDataSize, &err);
diff --git a/tests/automatic/broker/check_monitoring/check_linux.sh b/tests/automatic/broker/check_monitoring/check_linux.sh
index c52ebd5200da86a0e83041304e5c1131203fcb6c..caf55e77f9b566a57e8b945639adf54b45c861a1 100644
--- a/tests/automatic/broker/check_monitoring/check_linux.sh
+++ b/tests/automatic/broker/check_monitoring/check_linux.sh
@@ -29,7 +29,7 @@ groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
 
 for i in `seq 1 50`;
 do
-    curl --silent 127.0.0.1:5005/database/data/source/substream/${groupid}/next?token=$token >/dev/null 2>&1 &
+    curl --silent 127.0.0.1:5005/database/data/source/stream/${groupid}/next?token=$token >/dev/null 2>&1 &
 done
 
 
diff --git a/tests/automatic/broker/get_last/check_linux.sh b/tests/automatic/broker/get_last/check_linux.sh
index 4154d9112cbf33869a03a0bb8894c0a12052e508..a721a07b225b5a56241819c7dc80c22386438d20 100644
--- a/tests/automatic/broker/get_last/check_linux.sh
+++ b/tests/automatic/broker/get_last/check_linux.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 database_name=data_detector
-substream=substream
+stream=stream
 
 set -e
 
@@ -13,8 +13,8 @@ Cleanup() {
 	kill -9 $brokerid
 }
 
-echo "db.data_${substream}.insert({"_id":2})" | mongo ${database_name}
-echo "db.data_${substream}.insert({"_id":1})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":2})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":1})" | mongo ${database_name}
 
 token=`$2 token -secret auth_secret.key data`
 
@@ -26,21 +26,21 @@ brokerid=`echo $!`
 
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
 
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr -
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr -
 
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":2'
 
-echo "db.data_${substream}.insert({"_id":3})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":3})" | mongo ${database_name}
 
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr - | grep '"_id":3'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":3'
 
-echo "db.data_${substream}.insert({"_id":4})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":4})" | mongo ${database_name}
 
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr - | grep '"_id":4'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":4'
 
 #with a new group
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/detector/${substream}/0/last?token=$token --stderr - | grep '"_id":4'
\ No newline at end of file
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/${groupid}/next?token=$token --stderr - | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/detector/${stream}/0/last?token=$token --stderr - | grep '"_id":4'
\ No newline at end of file
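
Editor's note: the curl calls in this script all follow one URL pattern. Below is a hedged Python sketch of the same broker endpoints; the host, port, and path layout are taken from these test scripts, and nothing here is an official client.

```python
import requests

BROKER = "http://127.0.0.1:5005"  # address used by the test scripts

def create_group():
    # POST /creategroup returns a new group id
    return requests.post(BROKER + "/creategroup", data="").text

def get_last(db, datasource, stream, token):
    # GET /database/<db>/<datasource>/<stream>/0/last?token=...
    url = "{}/database/{}/{}/{}/0/last".format(BROKER, db, datasource, stream)
    return requests.get(url, params={"token": token}).json()

def get_next(db, datasource, stream, group_id, token):
    # GET /database/<db>/<datasource>/<stream>/<group>/next?token=...
    url = "{}/database/{}/{}/{}/{}/next".format(BROKER, db, datasource,
                                                stream, group_id)
    return requests.get(url, params={"token": token}).json()
```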
diff --git a/tests/automatic/broker/get_next/check_linux.sh b/tests/automatic/broker/get_next/check_linux.sh
index 7356f21e0b3fee56ef6382e33096558e7ef1a15a..277f78895f75804530199be5f17a3856bbbe9a63 100644
--- a/tests/automatic/broker/get_next/check_linux.sh
+++ b/tests/automatic/broker/get_next/check_linux.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 database_name=data_source
-substream=substream
+stream=stream
 
 set -e
 
@@ -13,8 +13,8 @@ Cleanup() {
 	kill -9 $brokerid
 }
 
-echo "db.data_${substream}.insert({"_id":2})" | mongo ${database_name}
-echo "db.data_${substream}.insert({"_id":1})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":2})" | mongo ${database_name}
+echo "db.data_${stream}.insert({"_id":1})" | mongo ${database_name}
 
 token=`$2 token -secret auth_secret.key data`
 
@@ -24,10 +24,10 @@ sleep 0.3
 brokerid=`echo $!`
 
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/source/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":1'
-curl -v  --silent 127.0.0.1:5005/database/data/source/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":2'
-curl -v  --silent 127.0.0.1:5005/database/data/source/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"id_max":2'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":1'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"_id":2'
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr  | grep '"id_max":2'
 
 # with a new group
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v  --silent 127.0.0.1:5005/database/data/source/${substream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr | grep '"_id":1'
\ No newline at end of file
+curl -v  --silent 127.0.0.1:5005/database/data/source/${stream}/${groupid}/next?token=$token --stderr - | tee /dev/stderr | grep '"_id":1'
\ No newline at end of file
diff --git a/tests/automatic/consumer/consumer_api/consumer_api.cpp b/tests/automatic/consumer/consumer_api/consumer_api.cpp
index c0893e14ba311d39c4b1dabf4a546e6d7caedc79..6e2b04f86ac45b4e20c87f8c861f68802efd3b8b 100644
--- a/tests/automatic/consumer/consumer_api/consumer_api.cpp
+++ b/tests/automatic/consumer/consumer_api/consumer_api.cpp
@@ -133,19 +133,19 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str
     M_AssertTrue(err == nullptr, "GetNext stream2 no error");
     M_AssertTrue(fi.name == "21", "GetNext stream2 filename");
 
-    auto substreams = broker->GetSubstreamList("",&err);
-    M_AssertTrue(err == nullptr, "GetSubstreamList no error");
-    M_AssertTrue(substreams.size() == 3, "substreams.size");
-    M_AssertTrue(substreams[0].name == "default", "substreams0.name1");
-    M_AssertTrue(substreams[1].name == "stream1", "substreams1.name2");
-    M_AssertTrue(substreams[2].name == "stream2", "substreams2.name3");
-    std::cout<<substreams[0].Json(false)<<std::endl;
-    std::cout<<substreams[1].Json(false)<<std::endl;
-    std::cout<<substreams[2].Json(false)<<std::endl;
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[0].timestamp_created) == 0, "substreams0.timestamp");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[0].timestamp_lastentry) == 0, "substreams0.timestamp lastentry not set");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[1].timestamp_created) == 1000, "substreams1.timestamp");
-    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(substreams[2].timestamp_created) == 2000, "substreams2.timestamp");
+    auto streams = broker->GetStreamList("",&err);
+    M_AssertTrue(err == nullptr, "GetStreamList no error");
+    M_AssertTrue(streams.size() == 3, "streams.size");
+    M_AssertTrue(streams[0].name == "default", "streams0.name1");
+    M_AssertTrue(streams[1].name == "stream1", "streams1.name2");
+    M_AssertTrue(streams[2].name == "stream2", "streams2.name3");
+    std::cout<<streams[0].Json(false)<<std::endl;
+    std::cout<<streams[1].Json(false)<<std::endl;
+    std::cout<<streams[2].Json(false)<<std::endl;
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[0].timestamp_created) == 0, "streams0.timestamp");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[0].timestamp_lastentry) == 0, "streams0.timestamp lastentry not set");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[1].timestamp_created) == 1000, "streams1.timestamp");
+    M_AssertTrue(asapo::NanosecsEpochFromTimePoint(streams[2].timestamp_created) == 2000, "streams2.timestamp");
 // acknowledges
 
     auto id = broker->GetLastAcknowledgedTulpeId(group_id, &err);
diff --git a/tests/automatic/consumer/consumer_api_python/consumer_api.py b/tests/automatic/consumer/consumer_api_python/consumer_api.py
index 7533e3584b8246f2edfa9ef2cd7eeae4cbf668eb..4ccb7c0b5f52488872a7807ddd2ded197ecced81 100644
--- a/tests/automatic/consumer/consumer_api_python/consumer_api.py
+++ b/tests/automatic/consumer/consumer_api_python/consumer_api.py
@@ -121,14 +121,14 @@ def check_single(broker, group_id):
     _, meta = broker.get_next(group_id, "stream2", meta_only=True)
     assert_metaname(meta, "21", "get next stream2")
 
-    substreams = broker.get_substream_list("")
-    assert_eq(len(substreams), 4, "number of substreams")
-    print(substreams)
-    assert_eq(substreams[0]["name"], "default", "substreams_name1")
-    assert_eq(substreams[1]["name"], "streamfts", "substreams_name2")
-    assert_eq(substreams[2]["name"], "stream1", "substreams_name2")
-    assert_eq(substreams[3]["name"], "stream2", "substreams_name3")
-    assert_eq(substreams[1]["timestampCreated"], 1000, "substreams_timestamp2")
+    streams = broker.get_stream_list("")
+    assert_eq(len(streams), 4, "number of streams")
+    print(streams)
+    assert_eq(streams[0]["name"], "default", "streams_name1")
+    assert_eq(streams[1]["name"], "streamfts", "streams_name2")
+    assert_eq(streams[2]["name"], "stream1", "streams_name2")
+    assert_eq(streams[3]["name"], "stream2", "streams_name3")
+    assert_eq(streams[1]["timestampCreated"], 1000, "streams_timestamp2")
 
     # acks
     try:
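
Editor's note: as the test above shows, `get_stream_list` now returns dictionaries rather than plain names. A small sketch of iterating over them; the `broker` object is assumed to exist, created as elsewhere in these tests:

```python
def print_streams(broker):
    # from="" means: all streams, sorted by creation timestamp
    for s in broker.get_stream_list(""):
        print(s["name"], s["timestampCreated"])
```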
diff --git a/tests/automatic/full_chain/CMakeLists.txt b/tests/automatic/full_chain/CMakeLists.txt
index 0d7dd8b9fa95ef4c83f07ceb334ffce0b49c9b92..a13904442843c0b2787fde56dfcc4d01ee5f15e3 100644
--- a/tests/automatic/full_chain/CMakeLists.txt
+++ b/tests/automatic/full_chain/CMakeLists.txt
@@ -1,7 +1,7 @@
 add_subdirectory(simple_chain)
 if (BUILD_PYTHON)
     add_subdirectory(simple_chain_usermeta_python)
-    add_subdirectory(send_recv_substreams_python)
+    add_subdirectory(send_recv_streams_python)
 endif()
 add_subdirectory(simple_chain_metadata)
 add_subdirectory(two_beamlines)
@@ -13,4 +13,4 @@ add_subdirectory(simple_chain_filegen_multisource)
 add_subdirectory(simple_chain_filegen_readdata_cache)
 add_subdirectory(simple_chain_filegen_readdata_file)
 add_subdirectory(simple_chain_dataset)
-add_subdirectory(send_recv_substreams)
+add_subdirectory(send_recv_streams)
diff --git a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt b/tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
similarity index 84%
rename from tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
rename to tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
index 7ddfbbfbea7efa3dd31162a130bd557e15cf3f17..0a2ab15e89032a5278cf4376ede48c82c140b041 100644
--- a/tests/automatic/full_chain/send_recv_substreams/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_streams/CMakeLists.txt
@@ -1,5 +1,5 @@
-set(TARGET_NAME send_recv_substreams)
-set(SOURCE_FILES send_recv_substreams.cpp)
+set(TARGET_NAME send_recv_streams)
+set(SOURCE_FILES send_recv_streams.cpp)
 
 add_executable(${TARGET_NAME} ${SOURCE_FILES})
 target_link_libraries(${TARGET_NAME} asapo-consumer asapo-producer)
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_linux.sh b/tests/automatic/full_chain/send_recv_streams/check_linux.sh
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams/check_linux.sh
rename to tests/automatic/full_chain/send_recv_streams/check_linux.sh
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_windows.bat b/tests/automatic/full_chain/send_recv_streams/check_windows.bat
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams/check_windows.bat
rename to tests/automatic/full_chain/send_recv_streams/check_windows.bat
diff --git a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp b/tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
similarity index 87%
rename from tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
rename to tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
index 8be9df7b4607c35c281592a94f7ecc0b59bce977..df74637c62ace96ea7825711ee2fb178609aa089 100644
--- a/tests/automatic/full_chain/send_recv_substreams/send_recv_substreams.cpp
+++ b/tests/automatic/full_chain/send_recv_streams/send_recv_streams.cpp
@@ -87,9 +87,9 @@ int main(int argc, char* argv[]) {
 
     for (uint64_t i = 0; i < n; i++) {
         asapo::EventHeader event_header{i + 1, 0, std::to_string(i + 1)};
-        producer->SendData(event_header, "substream1", nullptr, asapo::kTransferMetaDataOnly, ProcessAfterSend);
+        producer->SendData(event_header, "stream1", nullptr, asapo::kTransferMetaDataOnly, ProcessAfterSend);
     }
-    producer->SendSubstreamFinishedFlag("substream1", n, "substream2", ProcessAfterSend);
+    producer->SendStreamFinishedFlag("stream1", n, "stream2", ProcessAfterSend);
     producer->WaitRequestsFinished(10000);
 
     Error err;
@@ -101,14 +101,14 @@ int main(int argc, char* argv[]) {
 
     asapo::FileInfo fi;
     for (uint64_t i = 0; i < n; i++) {
-        consumer->GetNext(&fi, group_id, "substream1", nullptr);
+        consumer->GetNext(&fi, group_id, "stream1", nullptr);
     }
 
-    err = consumer->GetNext(&fi, group_id, "substream1", nullptr);
+    err = consumer->GetNext(&fi, group_id, "stream1", nullptr);
     if (err != asapo::ConsumerErrorTemplates::kStreamFinished) {
         return 1;
     }
     auto err_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData());
 
-    return (err_data->next_substream == "substream2") && (files_sent == n + 1) ? 0 : 1;
+    return (err_data->next_stream == "stream2") && (files_sent == n + 1) ? 0 : 1;
 }
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt b/tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
similarity index 78%
rename from tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
rename to tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
index 7a5de77a6e828154c92308725514f0109d59b68a..f592e6f8b6b992a4f2d75125a53453d06a880719 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/CMakeLists.txt
+++ b/tests/automatic/full_chain/send_recv_streams_python/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(TARGET_NAME send_recv_substreams_python)
+set(TARGET_NAME send_recv_streams_python)
 prepare_asapo()
 
 if (UNIX)
@@ -9,6 +9,6 @@ else()
     get_target_property(PYTHON_LIBS_PRODUCER asapo_producer BINARY_DIR)
 endif()
 
-file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/send_recv_substreams.py TEST_SCRIPT )
+file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/send_recv_streams.py TEST_SCRIPT )
 
 add_script_test("${TARGET_NAME}" "${Python_EXECUTABLE} ${PYTHON_LIBS_CONSUMER} ${PYTHON_LIBS_PRODUCER} ${TEST_SCRIPT}" nomem)
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh b/tests/automatic/full_chain/send_recv_streams_python/check_linux.sh
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams_python/check_linux.sh
rename to tests/automatic/full_chain/send_recv_streams_python/check_linux.sh
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat b/tests/automatic/full_chain/send_recv_streams_python/check_windows.bat
similarity index 100%
rename from tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat
rename to tests/automatic/full_chain/send_recv_streams_python/check_windows.bat
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py b/tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
similarity index 68%
rename from tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
rename to tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
index f55f12ae6734b8c5ea81afcaa18aa42b352175de..1fa5b57df4aa0b9d9198b5e8443497f20ad7e896 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/send_recv_substreams.py
+++ b/tests/automatic/full_chain/send_recv_streams_python/send_recv_streams.py
@@ -34,26 +34,26 @@ group_id  = broker.generate_group_id()
 n_send = 10
 
 for i in range(n_send):
-    producer.send_data(i+1, "name"+str(i),None,ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY,substream = "substream", callback = callback)
+    producer.send_data(i+1, "name"+str(i),None,ingest_mode = asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY,stream = "stream", callback = callback)
 
-producer.send_substream_finished_flag("substream", 10, next_substream = "next_substream", callback = callback)
+producer.send_stream_finished_flag("stream", 10, next_stream = "next_stream", callback = callback)
 producer.wait_requests_finished(timeout)
 
 n_recv = 0
-substream_finished=False
+stream_finished=False
 while True:
     try:
-        data, meta = broker.get_next(group_id,substream = "substream", meta_only=True)
+        data, meta = broker.get_next(group_id,stream = "stream", meta_only=True)
         print ("received: ",meta)
         n_recv = n_recv + 1
-    except  asapo_consumer.AsapoStreamFinishedError as finished_substream:
-        substream_finished = True
-        assert_eq(finished_substream.id_max, 11, "last id")
-        assert_eq(finished_substream.next_substream, "next_substream", "next substream")
+    except  asapo_consumer.AsapoStreamFinishedError as finished_stream:
+        stream_finished = True
+        assert_eq(finished_stream.id_max, 11, "last id")
+        assert_eq(finished_stream.next_stream, "next_stream", "next stream")
         break
 
 assert_eq(n_recv, n_send, "send=recv")
-assert_eq(substream_finished, True, "substream finished")
+assert_eq(stream_finished, True, "stream finished")
 print('Using connection type: ' + broker.current_connection_type())
 
 
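Editor's note: the consumer loop above generalizes to following a whole chain of streams linked via `send_stream_finished_flag`. A hedged sketch: `process` stands in for user code, and the assumption that a missing successor arrives as an empty `next_stream` is mine, not stated in the diff.

```python
import asapo_consumer

def consume_chain(broker, group_id, first_stream, process=print):
    """Consume records stream by stream, following next_stream links."""
    stream = first_stream
    while stream:
        try:
            _, meta = broker.get_next(group_id, stream=stream, meta_only=True)
            process(meta)
        except asapo_consumer.AsapoStreamFinishedError as finished:
            # id_max is last_id + 1 (the finished-flag record itself);
            # follow the announced successor, stop if there is none
            stream = finished.next_stream or None
```
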
diff --git a/tests/automatic/producer/python_api/producer_api.py b/tests/automatic/producer/python_api/producer_api.py
index d53aa92795d82929fb28348295391c956dc2a118..0c484944dacca101220b122743bcd5fa6d6c41f9 100644
--- a/tests/automatic/producer/python_api/producer_api.py
+++ b/tests/automatic/producer/python_api/producer_api.py
@@ -99,9 +99,9 @@ else:
     print("should be error sending id 0 ")
     sys.exit(1)
 
-# send to another substream
+# send to another stream
 producer.send_data(1, "processed/" + data_source + "/" + "file9", None,
-                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, substream="stream", callback=callback)
+                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, stream="stream", callback=callback)
 
 # wait normal requests finished before sending duplicates
 
@@ -129,9 +129,9 @@ producer.wait_requests_finished(50000)
 n = producer.get_requests_queue_size()
 assert_eq(n, 0, "requests in queue")
 
-# send to another data to substream stream
+# send another data item to stream "stream"
 producer.send_data(2, "processed/" + data_source + "/" + "file10", None,
-                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, substream="stream", callback=callback)
+                   ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, stream="stream", callback=callback)
 
 producer.wait_requests_finished(50000)
 n = producer.get_requests_queue_size()
@@ -151,7 +151,7 @@ print("created: ",datetime.utcfromtimestamp(info['timestampCreated']/1000000000)
 print("last record: ",datetime.utcfromtimestamp(info['timestampLast']/1000000000).strftime('%Y-%m-%d %H:%M:%S.%f'))
 
 info = producer.stream_info('stream')
-assert_eq(info['lastId'], 2, "last id from different substream")
+assert_eq(info['lastId'], 2, "last id from different stream")
 
 info_last = producer.last_stream()
 assert_eq(info_last['name'], "stream", "last stream")
diff --git a/tests/manual/producer_cpp/producer.cpp b/tests/manual/producer_cpp/producer.cpp
index 4e72219bc8106ba08c03660224dc17b6ca8988de..cea38446054e1821dfdca84eaedbe0389a19b754 100644
--- a/tests/manual/producer_cpp/producer.cpp
+++ b/tests/manual/producer_cpp/producer.cpp
@@ -74,16 +74,16 @@ int main(int argc, char* argv[]) {
             auto send_size = to_send.size() + 1;
             auto buffer =  asapo::FileData(new uint8_t[send_size]);
             memcpy(buffer.get(), to_send.c_str(), send_size);
-            std::string substream = std::to_string(start_number);
+            std::string stream = std::to_string(start_number);
             // std::cout<<"submodule:"<<submodule
-            //          <<"- substream:"<<substream
+            //          <<"- stream:"<<stream
             //          <<"- filename:"<<to_send<<std::endl;
 
             asapo::EventHeader event_header{submodule, send_size, to_send,"", part,modules};
-            // err = producer->SendData(event_header,substream, std::move(buffer),
+            // err = producer->SendData(event_header,stream, std::move(buffer),
             //                          asapo::kTransferMetaDataOnly, &ProcessAfterSend);
 
-            err = producer->SendData(event_header,substream, std::move(buffer),
+            err = producer->SendData(event_header,stream, std::move(buffer),
                                      asapo::kDefaultIngestMode, &ProcessAfterSend);
             exit_if_error("Cannot send file", err);
 
@@ -94,7 +94,7 @@ int main(int argc, char* argv[]) {
             // if(part == number_of_splitted_files)
             // {
 
-            //     err = producer->SendSubstreamFinishedFlag(substream,
+            //     err = producer->SendStreamFinishedFlag(stream,
             //                                               part,
             //                                               std::to_string(start_number+1),
             //                                               &ProcessAfterSend);
diff --git a/tests/manual/python_tests/consumer/consumer_api.py b/tests/manual/python_tests/consumer/consumer_api.py
index 60c82c03cdcf5d5a3ced09a71ecbee7a37bce78f..63071c21c53bd1ef25fa7752ad3274f3bfdc5d30 100644
--- a/tests/manual/python_tests/consumer/consumer_api.py
+++ b/tests/manual/python_tests/consumer/consumer_api.py
@@ -7,7 +7,7 @@ source, path, beamtime, token = sys.argv[1:]
 broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
 group_id = broker.generate_group_id()
 
-res = broker.query_images("_id > 0", substream="1")
+res = broker.query_images("_id > 0", stream="1")
 
 print(res)