diff --git a/.gitignore b/.gitignore
index d7c8d680d0598ba1ac993843cf1d2ee7f45fad02..c04618641923b19dd84739be023d064d3c645ba3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -54,9 +54,7 @@
 .idea/**/libraries
 
 # CMake
-cmake-build-debug/
-cmake-build-release/
-cmake-build-relwithdebinfo/
+cmake-build-*/
 
 # Mongo Explorer plugin:
 .idea/**/mongoSettings.xml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95d828b264d677a10a9ad1a921f399c2963bb8a0..bfc7a75e9855e62932384d4bbfbd0462b26ff425 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,23 @@
-## 21.12.0 (in progress)
+## 21.12.1 (in progress)
+
+IMPROVEMENTS
+* Renamed C++ macros and hid them from client code
+
+BUG FIXES
+* Producer API: fixed a segfault in the Python client when sending a data object whose memory is owned by another object
+
+
+## 21.12.0
 
 FEATURES
 * Consumer API: Get last within consumer group returns message only once
 * Producer API: An option to write raw data to core filesystem directly
+* Consumer/Producer API - packages for Debian 11.1, wheel for Python 3.9
+* Consumer/Producer API - dropped Python 2 support for wheels and packages for new Debian/CentOS versions
+
+INTERNAL
+* Improved logging - tags for beamline, beamtime, ...
+* Updated orchestration tools to latest version
 
 ## 21.09.0
 
diff --git a/CMakeIncludes/prepare_version_tag.cmake b/CMakeIncludes/prepare_version_tag.cmake
index 59de5c91e18dec4151309f5bcf23e60e4199c576..32e72dbad95fe982bea5d98bd7e844ead8a18926 100644
--- a/CMakeIncludes/prepare_version_tag.cmake
+++ b/CMakeIncludes/prepare_version_tag.cmake
@@ -1,16 +1,16 @@
 function(cleanup varname)
-    string (REPLACE "-" "_" out ${${varname}})
-    SET( ${varname} ${out} PARENT_SCOPE)
+    string(REPLACE "-" "_" out ${${varname}})
+    SET(${varname} ${out} PARENT_SCOPE)
 endfunction()
 
-execute_process(COMMAND git describe --tags --abbrev=0 
-                OUTPUT_VARIABLE ASAPO_TAG
-                WORKING_DIRECTORY ..)
+execute_process(COMMAND git describe --tags --abbrev=0
+        OUTPUT_VARIABLE ASAPO_TAG
+        WORKING_DIRECTORY ..)
 string(STRIP ${ASAPO_TAG} ASAPO_TAG)
 
 execute_process(COMMAND git rev-parse --abbrev-ref HEAD
-                OUTPUT_VARIABLE BRANCH
-                WORKING_DIRECTORY ..)
+        OUTPUT_VARIABLE BRANCH
+        WORKING_DIRECTORY ..)
 string(STRIP ${BRANCH} BRANCH)
 cleanup(BRANCH)
 
@@ -20,22 +20,37 @@ execute_process(COMMAND git rev-parse --short=10 HEAD
 string(STRIP ${ASAPO_VERSION_COMMIT} ASAPO_VERSION_COMMIT)
 
 if (${BRANCH} STREQUAL "master")
-    SET (ASAPO_VERSION ${ASAPO_TAG})
-    SET (ASAPO_VERSION_COMMIT "")
-    SET (ASAPO_VERSION_DOCKER_SUFFIX "")
-    SET (PYTHON_ASAPO_VERSION ${ASAPO_VERSION})
+    SET(ASAPO_VERSION ${ASAPO_TAG})
+    SET(ASAPO_VERSION_COMMIT "")
+    SET(ASAPO_VERSION_DOCKER_SUFFIX "")
+    SET(PYTHON_ASAPO_VERSION ${ASAPO_VERSION})
     string(REGEX REPLACE "\\.0([0-9]+)\\."
             ".\\1." ASAPO_WHEEL_VERSION
             ${ASAPO_VERSION})
-else()
-    SET (ASAPO_VERSION ${BRANCH})
-    SET (ASAPO_VERSION_COMMIT ", build ${ASAPO_VERSION_COMMIT}")
-    SET (ASAPO_VERSION_DOCKER_SUFFIX "-dev")
+else ()
+    SET(ASAPO_VERSION ${BRANCH})
+    SET(ASAPO_VERSION_COMMIT ", build ${ASAPO_VERSION_COMMIT}")
+    SET(ASAPO_VERSION_DOCKER_SUFFIX "-dev")
     string(REPLACE "_" "-" ASAPO_VERSION ${ASAPO_VERSION})
-    SET (ASAPO_VERSION 100.0.${ASAPO_VERSION})
-    SET (PYTHON_ASAPO_VERSION ${ASAPO_VERSION})
-    SET (ASAPO_WHEEL_VERSION ${ASAPO_VERSION})
-endif()
+    SET(ASAPO_VERSION 100.0.${ASAPO_VERSION})
+    if (${BRANCH} STREQUAL "develop")
+        SET(PYTHON_ASAPO_VERSION 100.0.dev0)
+    else ()
+        string(FIND ${BRANCH} feature_ASAPO pos)
+        if (${pos} EQUAL 0)
+            string(SUBSTRING ${ASAPO_VERSION} 20 -1 TMP)
+            string(REGEX MATCH "^([0-9]+)|.+$" ISSUE_NUM "${TMP}")
+            if (ISSUE_NUM STREQUAL "")
+                SET(PYTHON_ASAPO_VERSION 100.0.dev1)
+            else ()
+                SET(PYTHON_ASAPO_VERSION 100.0.dev${ISSUE_NUM})
+            endif ()
+        else ()
+            SET(PYTHON_ASAPO_VERSION 100.0.dev1)
+        endif ()
+    endif ()
+    SET(ASAPO_WHEEL_VERSION ${ASAPO_VERSION})
+endif ()
 
 message("Asapo Version: " ${ASAPO_VERSION})
 message("Python Asapo Version: " ${PYTHON_ASAPO_VERSION})
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5d614a59e25475c6a575c45be24b2611d6677ba5..4e5c19ed460f2603f3da25c8e9ecce4250378aa6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,6 +22,8 @@ set(ConfigPackageLocation lib/cmake/Asapo)
 # options
 
 option(BUILD_PYTHON "Build python libs" ON)
+option(BUILD_PYTHON2_PACKAGES "Build python2 packages" OFF)
+
 option(BUILD_CLIENTS_ONLY "Build clients only" OFF)
 
 option(BUILD_CONSUMER_TOOLS "Build consumer tools" OFF)
diff --git a/PROTOCOL-VERSIONS.md b/PROTOCOL-VERSIONS.md
index 102219d4c772e56f6059915b94c458f4b8adc16e..af6342b023fe0a1d510da0de8d8485acd4c0add5 100644
--- a/PROTOCOL-VERSIONS.md
+++ b/PROTOCOL-VERSIONS.md
@@ -1,18 +1,18 @@
 ### Producer Protocol
 | Release      | used by client      | Supported by server  | Status           |
 | ------------ | ------------------- | -------------------- | ---------------- |
-| v0.5         |   |    | In development  |
-| v0.4         | 21.09.0 - 21.09.0   | 21.09.0  - 21.09.0   | Current version  |
-| v0.3         | 21.06.0 - 21.06.0   | 21.06.0  - 21.09.0   | Deprecates from 01.09.2022  |
-| v0.2         | 21.03.2 - 21.03.2   | 21.03.2  - 21.09.0   | Deprecates from 01.07.2022  |
-| v0.1         | 21.03.0 - 21.03.1   | 21.03.0  - 21.09.0   | Deprecates from 01.06.2022  |
+| v0.5         | 21.12.0 - 21.12.0   | 21.12.0  - 21.12.0   | Current version  |
+| v0.4         | 21.09.0 - 21.09.0   | 21.09.0  - 21.12.0   | Deprecates from 01.12.2022  |
+| v0.3         | 21.06.0 - 21.06.0   | 21.06.0  - 21.12.0   | Deprecates from 01.09.2022  |
+| v0.2         | 21.03.2 - 21.03.2   | 21.03.2  - 21.12.0   | Deprecates from 01.07.2022  |
+| v0.1         | 21.03.0 - 21.03.1   | 21.03.0  - 21.12.0   | Deprecates from 01.06.2022  |
 
 
 ### Consumer Protocol
 | Release      | used by client      | Supported by server  | Status           |
 | ------------ | ------------------- | -------------------- | ---------------- |
-| v0.5         |   |    | In development  |
-| v0.4         | 21.06.0 - 21.09.0   | 21.06.0  - 21.09.0   | Current version  |
-| v0.3         | 21.03.3 - 21.03.3   | 21.03.3  - 21.09.0   | Deprecates from 01.07.2022  |
-| v0.2         | 21.03.2 - 21.03.2   | 21.03.2  - 21.09.0   | Deprecates from 01.06.2022  |
-| v0.1         | 21.03.0 - 21.03.1   | 21.03.0  - 21.09.0   | Deprecates from 01.06.2022  |
+| v0.5         | 21.12.0 - 21.12.0   | 21.12.0  - 21.12.0   | Current version  |
+| v0.4         | 21.06.0 - 21.09.0   | 21.06.0  - 21.12.0   | Deprecates from 01.12.2022  |
+| v0.3         | 21.03.3 - 21.03.3   | 21.03.3  - 21.12.0   | Deprecates from 01.07.2022  |
+| v0.2         | 21.03.2 - 21.03.2   | 21.03.2  - 21.12.0   | Deprecates from 01.06.2022  |
+| v0.1         | 21.03.0 - 21.03.1   | 21.03.0  - 21.12.0   | Deprecates from 01.06.2022  |
diff --git a/VERSIONS.md b/VERSIONS.md
index e857bfb66bf5b6580a0477c4e623f6b048fd1248..7741bbdbe245155f3b74e489e3364e663dcd339f 100644
--- a/VERSIONS.md
+++ b/VERSIONS.md
@@ -2,25 +2,25 @@
 
 | Release      | API changed\*\* |  Protocol | Supported by server from/to | Status              |Comment|
 | ------------ | ----------- | -------- | ------------------------- | --------------------- | ------- |
-| 21.12.0      | No          |  v0.5     | 21.12.0/21.12.0           | in development  |      |
-| 21.09.0      | No          |  v0.4     | 21.09.0/21.09.0           | current version              |beamline token for raw |
-| 21.06.0      | Yes         |  v0.3     | 21.06.0/21.09.0           | deprecates 01.09.2022         |arbitrary characters|
-| 21.03.3      | No          |  v0.2     | 21.03.2/21.09.0           | deprecates 01.07.2022        |bugfix in server|
-| 21.03.2      | Yes         |  v0.2     | 21.03.2/21.09.0           | deprecates 01.07.2022        |bugfixes, add delete_stream|
-| 21.03.1      | No          |  v0.1     | 21.03.0/21.09.0           | deprecates 01.06.2022   |bugfix in server|
-| 21.03.0      | Yes         |  v0.1     | 21.03.0/21.09.0           |                 |          |
+| 21.12.0      | Yes          |  v0.5     | 21.12.0/21.12.0           | current version  |      |
+| 21.09.0      | Yes         |  v0.4     | 21.09.0/21.12.0           | deprecates 01.12.2022              |beamline token for raw |
+| 21.06.0      | Yes         |  v0.3     | 21.06.0/21.12.0           | deprecates 01.09.2022         |arbitrary characters|
+| 21.03.3      | No          |  v0.2     | 21.03.2/21.12.0           | deprecates 01.07.2022        |bugfix in server|
+| 21.03.2      | Yes         |  v0.2     | 21.03.2/21.12.0           | deprecates 01.07.2022        |bugfixes, add delete_stream|
+| 21.03.1      | No          |  v0.1     | 21.03.0/21.12.0           | deprecates 01.06.2022   |bugfix in server|
+| 21.03.0      | Yes         |  v0.1     | 21.03.0/21.12.0           | deprecates 01.06.2022    |          |
 
 ### Consumer API
 
 | Release      | API changed\*\* |  Protocol | Supported by server from/to | Status         |Comment|
 | ------------ | ----------- | --------- | ------------------------- | ---------------- | ------- |
-| 21.12.0      | Yes         |  v0.5      | 21.12.0/21.12.0           | in development  | |
-| 21.09.0      | No         |  v0.4      | 21.06.0/21.09.0           | current version  | |
-| 21.06.0      | Yes         |  v0.4     | 21.06.0/21.09.0           |   |arbitrary characters, bugfixes |
-| 21.03.3      | Yes         |  v0.3     | 21.03.3/21.09.0           | deprecates 01.06.2022  |bugfix in server, error type for dublicated ack|
-| 21.03.2      | Yes         |  v0.2     | 21.03.2/21.09.0           | deprecates 01.06.2022  |bugfixes, add delete_stream|
-| 21.03.1      | No          |  v0.1     | 21.03.0/21.09.0           | deprecates 01.06.2022       |bugfix in server|
-| 21.03.0      | Yes         |  v0.1     | 21.03.0/21.09.0           |                  |        |
+| 21.12.0      | Yes         |  v0.5      | 21.12.0/21.12.0           | current version  | |
+| 21.09.0      | No          |  v0.4      | 21.06.0/21.12.0           | deprecates 01.12.2022  | |
+| 21.06.0      | Yes         |  v0.4     | 21.06.0/21.12.0           | deprecates 01.09.2022  |arbitrary characters, bugfixes |
+| 21.03.3      | Yes         |  v0.3     | 21.03.3/21.12.0           | deprecates 01.06.2022  |bugfix in server, error type for duplicated ack|
+| 21.03.2      | Yes         |  v0.2     | 21.03.2/21.12.0           | deprecates 01.06.2022  |bugfixes, add delete_stream|
+| 21.03.1      | No          |  v0.1     | 21.03.0/21.12.0           | deprecates 01.06.2022       |bugfix in server|
+| 21.03.0      | Yes         |  v0.1     | 21.03.0/21.12.0           | deprecates 01.06.2022    |        |
 
 \* insignificant changes/bugfixes (e.g. in return type, etc), normally do not require client code changes, but formally might break the client
 
diff --git a/authorizer/src/asapo_authorizer/server/authorize.go b/authorizer/src/asapo_authorizer/server/authorize.go
index 1b984a9c456a3f56e4d4793b3cecdaf9ffcc27eb..d5192fb9803b0df5dafe05c44e2ed0651903c794 100644
--- a/authorizer/src/asapo_authorizer/server/authorize.go
+++ b/authorizer/src/asapo_authorizer/server/authorize.go
@@ -294,7 +294,7 @@ func authorizeMeta(meta common.BeamtimeMeta, request authorizationRequest, creds
 
 	if creds.Beamline != "auto" && meta.Beamline != creds.Beamline {
 		err_string := "given beamline (" + creds.Beamline + ") does not match the found one (" + meta.Beamline + ")"
-		log.Debug(err_string)
+		log.Error(err_string)
 		return nil, errors.New(err_string)
 	}
 
@@ -330,8 +330,14 @@ func authorize(request authorizationRequest, creds SourceCredentials) (common.Be
 	}
 
 	meta.AccessTypes = accessTypes
-	log.Debug("authorized creds bl/bt: ", creds.Beamline+"/"+creds.BeamtimeId+", beamtime "+meta.BeamtimeId+" for "+request.OriginHost+" in "+
-		meta.Beamline+", type "+meta.Type, "online path "+meta.OnlinePath+", offline path "+meta.OfflinePath)
+	log.WithFields(map[string]interface{}{
+		"beamline":    creds.Beamline,
+		"beamtime":    creds.BeamtimeId,
+		"origin":      request.OriginHost,
+		"type":        meta.Type,
+		"onlinePath":  meta.OnlinePath,
+		"offlinePath": meta.OfflinePath,
+	}).Debug("authorized credentials")
 	return meta, nil
 }
 
@@ -372,7 +378,7 @@ func routeAuthorize(w http.ResponseWriter, r *http.Request) {
 	}
 
 	w.WriteHeader(http.StatusOK)
-	w.Write([]byte(res))
+	w.Write(res)
 }
 
 func checkRole(w http.ResponseWriter, r *http.Request, role string) error {
diff --git a/authorizer/src/asapo_authorizer/server/authorize_test.go b/authorizer/src/asapo_authorizer/server/authorize_test.go
index 1c6e6400554b5ee6fe8fbc6dd324b8ad1ae7d346..e3bb6c11e23b699adc034354b57ced49ca63143e 100644
--- a/authorizer/src/asapo_authorizer/server/authorize_test.go
+++ b/authorizer/src/asapo_authorizer/server/authorize_test.go
@@ -387,3 +387,21 @@ func TestGetBeamtimeInfo(t *testing.T) {
 	}
 
 }
+
+func TestExpiredToken(t *testing.T) {
+	Auth = authorization.NewAuth(utils.NewJWTAuth("secret_user"), utils.NewJWTAuth("secret_admin"), utils.NewJWTAuth("secret"))
+	token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzU3NTMxMDksImp0aSI6ImMyOTR0NWFodHY1am9vZHVoaGNnIiwic3ViIjoiYnRfMTEwMTIxNzEiLCJFeHRyYUNsYWltcyI6eyJBY2Nlc3NUeXBlcyI6WyJyZWFkIiwid3JpdGUiXX19.kITePbv_dXY2ACxpAQ-PeQJPQtnR02bMoFrXq0Pbcm0"
+	request := authorizationRequest{"asapo_test%%" + token, "host"}
+	creds, _ := getSourceCredentials(request)
+
+	creds.Token = token
+	creds.DataSource = "test"
+	creds.BeamtimeId = "11012171"
+	creds.Beamline = "p21.2"
+	_, err := authorizeByToken(creds)
+	assert.Error(t, err, "")
+	if err != nil {
+		assert.Contains(t, err.Error(), "expired")
+	}
+
+}
diff --git a/authorizer/src/asapo_authorizer/server/folder_token.go b/authorizer/src/asapo_authorizer/server/folder_token.go
index 5d10ed86a844902a0c33101d0fd7862f5628e854..9e5c305e1b3f7edacf85110a7bbd537bbd6b7664 100644
--- a/authorizer/src/asapo_authorizer/server/folder_token.go
+++ b/authorizer/src/asapo_authorizer/server/folder_token.go
@@ -125,7 +125,10 @@ func routeFolderToken(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	log.Debug("generated folder token for beamtime " + request.BeamtimeId + ", folder " + request.Folder)
+	log.WithFields(map[string]interface{}{
+		"folder":   request.Folder,
+		"beamtime": request.BeamtimeId,
+	}).Debug("issued folder token")
 
 	answer := folderTokenResponce(token)
 	w.WriteHeader(http.StatusOK)
diff --git a/authorizer/src/asapo_authorizer/server/introspect.go b/authorizer/src/asapo_authorizer/server/introspect.go
index 5dd591a2c37f274e552cba63aab7e60829b5431b..7355b3f50d7596c98b54fefec7ad1f7e33d6920e 100644
--- a/authorizer/src/asapo_authorizer/server/introspect.go
+++ b/authorizer/src/asapo_authorizer/server/introspect.go
@@ -46,7 +46,10 @@ func routeIntrospect(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	log.Debug("verified user token for "+response.Sub)
+	log.WithFields(map[string]interface{}{
+		"subject": response.Sub,
+	}).Debug("verified user token")
+
 
 	answer,_ := json.Marshal(&response)
 	w.WriteHeader(http.StatusOK)
diff --git a/authorizer/src/asapo_authorizer/server/issue_token.go b/authorizer/src/asapo_authorizer/server/issue_token.go
index bf2524abf9317081b2dd22f063cc1cf05d1774a9..e8da6089faac9a0fdb83f6f2db1b46b14d0cebb7 100644
--- a/authorizer/src/asapo_authorizer/server/issue_token.go
+++ b/authorizer/src/asapo_authorizer/server/issue_token.go
@@ -30,8 +30,8 @@ func extractUserTokenrequest(r *http.Request) (request structs.IssueTokenRequest
 	}
 
 	for _, ar := range request.AccessTypes {
-		if ar != "read" && ar != "write" && !(ar== "writeraw" && request.Subject["beamline"]!="") {
-			return request, errors.New("wrong requested access rights: "+ar)
+		if ar != "read" && ar != "write" && !(ar == "writeraw" && request.Subject["beamline"] != "") {
+			return request, errors.New("wrong requested access rights: " + ar)
 		}
 	}
 
@@ -72,8 +72,12 @@ func issueUserToken(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-
-	log.Debug("generated user token ")
+	log.WithFields(map[string]interface{}{
+		"id":          claims.Id,
+		"subject":     claims.Subject,
+		"validDays":   request.DaysValid,
+		"accessTypes": request.AccessTypes,
+	}).Info("issued user token")
 
 	answer := authorization.UserTokenResponce(request, token)
 	w.WriteHeader(http.StatusOK)
diff --git a/authorizer/src/asapo_authorizer/server/revoke_token.go b/authorizer/src/asapo_authorizer/server/revoke_token.go
index c1c98c6f2adf4c3a3fe05fd94b5773352861b75c..eb1dea35dada2d21854f9d9b9ffd822a59bb4c00 100644
--- a/authorizer/src/asapo_authorizer/server/revoke_token.go
+++ b/authorizer/src/asapo_authorizer/server/revoke_token.go
@@ -32,7 +32,12 @@ func revokeToken(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	log.Debug("revoked token " + rec.Token)
+	log.WithFields(map[string]interface{}{
+		"id":      rec.Id,
+		"subject": rec.Subject,
+		"token":   rec.Token,
+	}).Info("revoked token")
+
 	answer, _ := json.Marshal(&rec)
 	w.WriteHeader(http.StatusOK)
 	w.Write(answer)
diff --git a/authorizer/src/asapo_authorizer/server/server_test.go b/authorizer/src/asapo_authorizer/server/server_test.go
deleted file mode 100644
index f18a0f738e4b4ed9397f8449a679f8650b7f6928..0000000000000000000000000000000000000000
--- a/authorizer/src/asapo_authorizer/server/server_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package server
-
-/*
-
-import (
-	"asapo_authorizer/database"
-	"asapo_common/discovery"
-	"asapo_common/logger"
-	"errors"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/mock"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-)
-
-func setup() *database.MockedDatabase {
-	mock_db := new(database.MockedDatabase)
-	mock_db.On("Connect", mock.AnythingOfType("string")).Return(nil)
-
-	return mock_db
-}
-
-func setup_and_init(t *testing.T) *database.MockedDatabase {
-	mock_db := new(database.MockedDatabase)
-	mock_db.On("Connect", mock.AnythingOfType("string")).Return(nil)
-
-	InitDB(mock_db)
-	assertExpectations(t, mock_db)
-	return mock_db
-}
-
-func assertExpectations(t *testing.T, mock_db *database.MockedDatabase) {
-	mock_db.AssertExpectations(t)
-	mock_db.ExpectedCalls = nil
-	logger.MockLog.AssertExpectations(t)
-	logger.MockLog.ExpectedCalls = nil
-}
-
-var initDBTests = []struct {
-	address string
-	answer  error
-	message string
-}{
-	{"bad address", errors.New(""), "error on get bad address"},
-	{"good address", nil, "no error on good address"},
-}
-
-func TestInitDBWithWrongAddress(t *testing.T) {
-	mock_db := setup()
-
-	mock_db.ExpectedCalls = nil
-
-	settings.DatabaseServer = "0.0.0.0:0000"
-
-	for _, test := range initDBTests {
-		mock_db.On("Connect", "0.0.0.0:0000").Return(test.answer)
-
-		err := InitDB(mock_db)
-
-		assert.Equal(t, test.answer, err, test.message)
-		assertExpectations(t, mock_db)
-	}
-	db = nil
-}
-
-func TestInitDBWithAutoAddress(t *testing.T) {
-	mongo_address := "0.0.0.0:0000"
-	mock_db := setup()
-
-	mock_db.ExpectedCalls = nil
-
-	settings.DatabaseServer = "auto"
-	mock_server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		assert.Equal(t, req.URL.String(), "/asapo-mongodb", "request string")
-		rw.Write([]byte(mongo_address))
-	}))
-	defer mock_server.Close()
-
-	discoveryService = discovery.CreateDiscoveryService(mock_server.Client(), mock_server.URL)
-
-	mock_db.On("Connect", "0.0.0.0:0000").Return(nil)
-
-	err := InitDB(mock_db)
-
-	assert.Equal(t, nil, err, "auto connect ok")
-	assertExpectations(t, mock_db)
-	db = nil
-}
-
-func TestReconnectDB(t *testing.T) {
-	mongo_address := "0.0.0.0:0000"
-	mock_server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		assert.Equal(t, req.URL.String(), "/asapo-mongodb", "request string")
-		rw.Write([]byte(mongo_address))
-	}))
-	discoveryService = discovery.CreateDiscoveryService(mock_server.Client(), mock_server.URL)
-
-	defer mock_server.Close()
-
-	settings.DatabaseServer = "auto"
-	mock_db := setup_and_init(t)
-	mock_db.ExpectedCalls = nil
-
-	mongo_address = "1.0.0.0:0000"
-
-	mock_db.On("Close").Return()
-
-	mock_db.On("Connect", "1.0.0.0:0000").Return(nil)
-
-	err := ReconnectDb()
-	assert.Equal(t, nil, err, "auto connect ok")
-	assertExpectations(t, mock_db)
-
-	db = nil
-}
-
-func TestErrorWhenReconnectNotConnectedDB(t *testing.T) {
-	err := ReconnectDb()
-	assert.NotNil(t, err, "error reconnect")
-	db = nil
-}
-
-
-func TestCleanupDBWithoutInit(t *testing.T) {
-	mock_db := setup()
-
-	mock_db.AssertNotCalled(t, "Close")
-
-	CleanupDB()
-}
-
-func TestCleanupDBInit(t *testing.T) {
-	settings.DatabaseServer = "0.0.0.0"
-	mock_db := setup_and_init(t)
-
-	mock_db.On("Close").Return()
-
-	CleanupDB()
-
-	assertExpectations(t, mock_db)
-}
-*/
\ No newline at end of file
diff --git a/authorizer/src/asapo_authorizer/token_store/token_store.go b/authorizer/src/asapo_authorizer/token_store/token_store.go
index 756d7cc2d970292697c0b2fd4ecd0fcebcd909a9..48e45fbc090ad46b99e28fa9d1daaab2c2394bb2 100644
--- a/authorizer/src/asapo_authorizer/token_store/token_store.go
+++ b/authorizer/src/asapo_authorizer/token_store/token_store.go
@@ -51,7 +51,7 @@ func (store *TokenStore) initDB() (dbaddress string, err error) {
 		if dbaddress == "" {
 			return "", errors.New("no token_store servers found")
 		}
-		log.Debug("Got mongodb server: " + dbaddress)
+		log.WithFields(map[string]interface{}{"address": dbaddress}).Debug("found mongodb server")
 	}
 	return dbaddress, store.db.Connect(dbaddress)
 
@@ -66,7 +66,7 @@ func (store *TokenStore) reconnectIfNeeded(db_error error) {
 	if dbaddress, err := store.reconnectDb(); err != nil {
 		log.Error("cannot reconnect to database: " + err.Error())
 	} else {
-		log.Debug("reconnected to database at" + dbaddress)
+		log.WithFields(map[string]interface{}{"address": dbaddress}).Debug("reconnected to database")
 	}
 }
 
@@ -196,7 +196,7 @@ func (store *TokenStore) loopGetRevokedTokens() {
 			next_update = 1
 			log.Error("cannot get revoked tokens: " + err.Error())
 		} else {
-			log.Debug("received revoked tokens list")
+			//log.Debug("received revoked tokens list")
 			next_update = common.Settings.UpdateRevokedTokensIntervalSec
 			tokens := make([]string, len(res))
 			for i, token := range res {
diff --git a/authorizer/src/asapo_authorizer/token_store/token_store_test.go b/authorizer/src/asapo_authorizer/token_store/token_store_test.go
index d648f0ef3afbcf9d259237f425a3ca7ccc85e449..4c5c271ab12b05d1248f6429ada35ef37ac69eeb 100644
--- a/authorizer/src/asapo_authorizer/token_store/token_store_test.go
+++ b/authorizer/src/asapo_authorizer/token_store/token_store_test.go
@@ -60,6 +60,7 @@ func (suite *TokenStoreTestSuite) TestProcessRequestWithConnectionError() {
 	ExpectReconnect(suite.mock_db)
 	suite.mock_db.On("ProcessRequest", mock.Anything, mock.Anything).Return([]byte(""),
 		&DBError{utils.StatusServiceUnavailable, ""})
+	logger.MockLog.On("WithFields", mock.Anything)
 	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
 
 	err := suite.store.AddToken(TokenRecord{})
@@ -138,8 +139,6 @@ func (suite *TokenStoreTestSuite) TestProcessRequestCheckRevokedToken() {
 		Op:         "read_records",
 	}
 	suite.mock_db.On("ProcessRequest", req, mock.Anything).Return([]byte(""), nil)
-
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("list")))
 	time.Sleep(time.Second*1)
 	res,err := suite.store.IsTokenRevoked("123")
 	suite.Equal(err, nil, "ok")
diff --git a/broker/src/asapo_broker/database/database.go b/broker/src/asapo_broker/database/database.go
index 7e48a6e526a07a1d316f240acba65384c32829a2..814d1a321e701f4e1e91dc5dc958f319a9ee4966 100644
--- a/broker/src/asapo_broker/database/database.go
+++ b/broker/src/asapo_broker/database/database.go
@@ -1,9 +1,13 @@
 package database
 
-import "asapo_common/utils"
+import (
+	"asapo_common/logger"
+	"asapo_common/utils"
+)
 
 type Request struct {
-	DbName         string
+	Beamtime       string
+	DataSource     string
 	Stream         string
 	GroupId        string
 	Op             string
@@ -12,6 +16,20 @@ type Request struct {
 	ExtraParam     string
 }
 
+func (request *Request) Logger() logger.Logger {
+	return logger.WithFields(map[string]interface{}{
+		"beamtime":   request.Beamtime,
+		"dataSource": decodeString(request.DataSource),
+		"stream":     decodeString(request.Stream),
+		"groupId":    decodeString(request.GroupId),
+		"operation":  request.Op,
+	})
+}
+
+func (request *Request) DbName() string {
+	return request.Beamtime + "_" + request.DataSource
+}
+
 type Agent interface {
 	ProcessRequest(request Request) ([]byte, error)
 	Ping() error
@@ -21,7 +39,7 @@ type Agent interface {
 }
 
 type DBSettings struct {
-	ReadFromInprocessPeriod int
+	ReadFromInprocessPeriod   int
 	UpdateStreamCachePeriodMs int
 }
 
@@ -42,4 +60,3 @@ func GetStatusCodeFromError(err error) int {
 		return utils.StatusServiceUnavailable
 	}
 }
-
diff --git a/broker/src/asapo_broker/database/encoding.go b/broker/src/asapo_broker/database/encoding.go
index 6e61d95f0d6a6fe462aca48754223d38da30616b..86b477bd23498b4dae00070fa50caa0d9c710452 100644
--- a/broker/src/asapo_broker/database/encoding.go
+++ b/broker/src/asapo_broker/database/encoding.go
@@ -80,8 +80,9 @@ func encodeStringForColName(original string) (result string) {
 }
 
 func encodeRequest(request *Request) error {
-	request.DbName = encodeStringForDbName(request.DbName)
-	if len(request.DbName)> max_encoded_source_size {
+	request.DataSource = encodeStringForDbName(request.DataSource)
+	request.Beamtime = encodeStringForDbName(request.Beamtime)
+	if len(request.DbName()) > max_encoded_source_size {
 		return &DBError{utils.StatusWrongInput, "source name is too long"}
 	}
 
diff --git a/broker/src/asapo_broker/database/encoding_test.go b/broker/src/asapo_broker/database/encoding_test.go
index 1b018289e8f1d6271b97b4a6a1e6ff9925e24ffe..82447de77f75f462db668a160999ea23dcc35b54 100644
--- a/broker/src/asapo_broker/database/encoding_test.go
+++ b/broker/src/asapo_broker/database/encoding_test.go
@@ -18,7 +18,8 @@ func TestEncoding(t *testing.T) {
 	assert.Equal(t, sourceDecoded, source)
 
 	r := Request{
-		DbName:         source,
+		Beamtime:       "bt",
+		DataSource:     source,
 		Stream:         stream,
 		GroupId:        stream,
 		Op:             "",
@@ -29,7 +30,7 @@ func TestEncoding(t *testing.T) {
 	err := encodeRequest(&r)
 	assert.Equal(t, r.Stream, streamEncoded)
 	assert.Equal(t, r.GroupId, streamEncoded)
-	assert.Equal(t, r.DbName, sourceEncoded)
+	assert.Equal(t, r.DataSource, sourceEncoded)
 
 	assert.Nil(t, err)
 }
@@ -61,9 +62,10 @@ func TestEncodingTooLong(t *testing.T) {
 	for _, test := range encodeTests {
 		stream := RandomString(test.streamSize)
 		group := RandomString(test.groupSize)
-		source := RandomString(test.sourceSize)
+		source := RandomString(test.sourceSize-3)
 		r := Request{
-			DbName:         source,
+			Beamtime:       "bt",
+			DataSource:     source,
 			Stream:         stream,
 			GroupId:        group,
 			Op:             "",
diff --git a/broker/src/asapo_broker/database/mongodb.go b/broker/src/asapo_broker/database/mongodb.go
index 0291da19f76a6235dbfcc947306038414a7b53e4..7a9d2c13ddd66cab74161cca616a3f4dc77c0946 100644
--- a/broker/src/asapo_broker/database/mongodb.go
+++ b/broker/src/asapo_broker/database/mongodb.go
@@ -1,9 +1,10 @@
-//+build !test
+//go:build !test
+// +build !test
 
 package database
 
 import (
-	"asapo_common/logger"
+	log "asapo_common/logger"
 	"asapo_common/utils"
 	"context"
 	"encoding/json"
@@ -84,10 +85,10 @@ const (
 
 type fieldChangeRequest struct {
 	collectionName string
-	fieldName string
-	op        int
-	max_ind   int
-	val       int
+	fieldName      string
+	op             int
+	max_ind        int
+	val            int
 }
 
 var dbSessionLock sync.Mutex
@@ -199,7 +200,7 @@ func maxIndexQuery(request Request, returnIncompete bool) bson.M {
 }
 
 func (db *Mongodb) getMaxIndex(request Request, returnIncompete bool) (max_id int, err error) {
-	c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.Stream)
+	c := db.client.Database(request.DbName()).Collection(data_collection_name_prefix + request.Stream)
 	q := maxIndexQuery(request, returnIncompete)
 
 	opts := options.FindOne().SetSort(bson.M{"_id": -1}).SetReturnKey(true)
@@ -227,7 +228,7 @@ func duplicateError(err error) bool {
 func (db *Mongodb) setCounter(request Request, ind int) (err error) {
 	update := bson.M{"$set": bson.M{pointer_field_name: ind}}
 	opts := options.Update().SetUpsert(true)
-	c := db.client.Database(request.DbName).Collection(pointer_collection_name)
+	c := db.client.Database(request.DbName()).Collection(pointer_collection_name)
 	q := bson.M{"_id": request.GroupId + "_" + request.Stream}
 	_, err = c.UpdateOne(context.TODO(), q, update, opts)
 	return
@@ -252,7 +253,7 @@ func (db *Mongodb) changeField(request Request, change fieldChangeRequest, res i
 
 	opts := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)
 	q := bson.M{"_id": request.GroupId + "_" + request.Stream, change.fieldName: bson.M{"$lt": change.max_ind}}
-	c := db.client.Database(request.DbName).Collection(change.collectionName)
+	c := db.client.Database(request.DbName()).Collection(change.collectionName)
 
 	err = c.FindOneAndUpdate(context.TODO(), q, update, opts).Decode(res)
 	if err != nil {
@@ -306,12 +307,11 @@ func recordContainsPartialData(request Request, rec map[string]interface{}) bool
 
 func (db *Mongodb) getRecordFromDb(request Request, id, id_max int) (res map[string]interface{}, err error) {
 	q := bson.M{"_id": id}
-	c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.Stream)
+	c := db.client.Database(request.DbName()).Collection(data_collection_name_prefix + request.Stream)
 	err = c.FindOne(context.TODO(), q, options.FindOne()).Decode(&res)
 	if err != nil {
 		answer := encodeAnswer(id, id_max, "")
-		log_str := "error getting record id " + strconv.Itoa(id) + " for " + request.DbName + " : " + err.Error()
-		logger.Debug(log_str)
+		request.Logger().WithFields(map[string]interface{}{"id": id, "cause": err.Error()}).Debug("error getting record")
 		return res, &DBError{utils.StatusNoData, answer}
 	}
 	return res, err
@@ -327,8 +327,7 @@ func (db *Mongodb) getRecordByIDRaw(request Request, id, id_max int) ([]byte, er
 		return nil, err
 	}
 
-	log_str := "got record id " + strconv.Itoa(id) + " for " + request.DbName
-	logger.Debug(log_str)
+	request.Logger().WithFields(map[string]interface{}{"id": id}).Debug("got record from db")
 
 	record, err := utils.MapToJson(&res)
 	if err != nil {
@@ -392,7 +391,7 @@ func (db *Mongodb) negAckRecord(request Request) ([]byte, error) {
 		return nil, &DBError{utils.StatusWrongInput, err.Error()}
 	}
 
-	err = db.InsertRecordToInprocess(request.DbName, inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, input.Id, input.Params.DelayMs, 1, true)
+	err = db.InsertRecordToInprocess(request.DbName(), inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, input.Id, input.Params.DelayMs, 1, true)
 	return []byte(""), err
 }
 
@@ -402,7 +401,7 @@ func (db *Mongodb) ackRecord(request Request) ([]byte, error) {
 	if err != nil {
 		return nil, &DBError{utils.StatusWrongInput, err.Error()}
 	}
-	c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
+	c := db.client.Database(request.DbName()).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
 	_, err = c.InsertOne(context.Background(), &record)
 	if err != nil {
 		if duplicateError(err) {
@@ -411,7 +410,7 @@ func (db *Mongodb) ackRecord(request Request) ([]byte, error) {
 		return nil, err
 	}
 
-	c = db.client.Database(request.DbName).Collection(inprocess_collection_name_prefix + request.Stream + "_" + request.GroupId)
+	c = db.client.Database(request.DbName()).Collection(inprocess_collection_name_prefix + request.Stream + "_" + request.GroupId)
 	_, err_del := c.DeleteOne(context.Background(), bson.M{"_id": record.ID})
 	if err_del != nil {
 		return nil, &DBError{utils.StatusWrongInput, err.Error()}
@@ -425,7 +424,7 @@ func (db *Mongodb) checkDatabaseOperationPrerequisites(request Request) error {
 		return &DBError{utils.StatusServiceUnavailable, no_session_msg}
 	}
 
-	if len(request.DbName) == 0 || len(request.Stream) == 0 {
+	if len(request.DbName()) <= 1 || len(request.Stream) == 0 {
 		return &DBError{utils.StatusWrongInput, "beamtime_id ans stream must be set"}
 	}
 
@@ -445,9 +444,9 @@ func (db *Mongodb) getCurrentPointer(request Request) (LocationPointer, int, err
 	var curPointer LocationPointer
 	err = db.changeField(request, fieldChangeRequest{
 		collectionName: pointer_collection_name,
-		fieldName: pointer_field_name,
-		op:        field_op_inc,
-		max_ind:   max_ind}, &curPointer)
+		fieldName:      pointer_field_name,
+		op:             field_op_inc,
+		max_ind:        max_ind}, &curPointer)
 	if err != nil {
 		return LocationPointer{}, 0, err
 	}
@@ -455,7 +454,7 @@ func (db *Mongodb) getCurrentPointer(request Request) (LocationPointer, int, err
 	return curPointer, max_ind, nil
 }
 
-func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delayMs int, nResendAttempts int) (int, error) {
+func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delayMs int, nResendAttempts int, rlog log.Logger) (int, error) {
 	var res InProcessingRecord
 	opts := options.FindOneAndUpdate().SetUpsert(false).SetReturnDocument(options.After)
 	tNow := time.Now().UnixNano()
@@ -476,8 +475,7 @@ func (db *Mongodb) getUnProcessedId(dbname string, collection_name string, delay
 		return 0, err
 	}
 
-	log_str := "got unprocessed id " + strconv.Itoa(res.ID) + " for " + dbname
-	logger.Debug(log_str)
+	rlog.WithFields(map[string]interface{}{"id": res.ID}).Debug("got unprocessed message")
 	return res.ID, nil
 }
 
@@ -527,10 +525,10 @@ func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(request Request, ignoreTi
 	t := db.lastReadFromInprocess[request.Stream+"_"+request.GroupId]
 	dbSessionLock.Unlock()
 	if (t <= tNow-int64(db.settings.ReadFromInprocessPeriod)) || ignoreTimeout {
-		record_ind, err = db.getUnProcessedId(request.DbName, inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, delayMs, nResendAttempts)
+		record_ind, err = db.getUnProcessedId(request.DbName(), inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, delayMs, nResendAttempts,
+			request.Logger())
 		if err != nil {
-			log_str := "error getting unprocessed id " + request.DbName + ", groupid: " + request.GroupId + ":" + err.Error()
-			logger.Debug(log_str)
+			request.Logger().WithFields(map[string]interface{}{"cause": err.Error()}).Debug("error getting unprocessed message")
 			return 0, 0, err
 		}
 	}
@@ -552,12 +550,10 @@ func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(request Request, ignoreTi
 func (db *Mongodb) getNextAndMaxIndexesFromCurPointer(request Request) (int, int, error) {
 	curPointer, max_ind, err := db.getCurrentPointer(request)
 	if err != nil {
-		log_str := "error getting next pointer for " + request.DbName + ", groupid: " + request.GroupId + ":" + err.Error()
-		logger.Debug(log_str)
+		request.Logger().WithFields(map[string]interface{}{"cause": err.Error()}).Debug("error getting next pointer")
 		return 0, 0, err
 	}
-	log_str := "got next pointer " + strconv.Itoa(curPointer.Value) + " for " + request.DbName + ", groupid: " + request.GroupId
-	logger.Debug(log_str)
+	request.Logger().WithFields(map[string]interface{}{"id": curPointer.Value}).Debug("got next pointer")
 	return curPointer.Value, max_ind, nil
 }
 
@@ -622,8 +618,7 @@ func checkStreamFinished(request Request, id, id_max int, data map[string]interf
 	if !ok || !r.FinishedStream {
 		return nil
 	}
-	log_str := "reached end of stream " + request.Stream + " , next_stream: " + r.NextStream
-	logger.Debug(log_str)
+	request.Logger().WithFields(map[string]interface{}{"nextStream": r.NextStream}).Debug("reached end of stream")
 
 	answer := encodeAnswer(r.ID-1, r.ID-1, r.NextStream)
 	return &DBError{utils.StatusNoData, answer}
@@ -641,7 +636,7 @@ func (db *Mongodb) getNextRecord(request Request) ([]byte, error) {
 	}
 
 	if err == nil {
-		err_update := db.InsertToInprocessIfNeeded(request.DbName, inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, nextInd, request.ExtraParam)
+		err_update := db.InsertToInprocessIfNeeded(request.DbName(), inprocess_collection_name_prefix+request.Stream+"_"+request.GroupId, nextInd, request.ExtraParam)
 		if err_update != nil {
 			return nil, err_update
 		}
@@ -666,10 +661,10 @@ func (db *Mongodb) getLastRecordInGroup(request Request) ([]byte, error) {
 	var res map[string]interface{}
 	err = db.changeField(request, fieldChangeRequest{
 		collectionName: last_message_collection_name,
-		fieldName: last_message_field_name,
-		op:        field_op_set,
-		max_ind:   max_ind,
-		val:       max_ind,
+		fieldName:      last_message_field_name,
+		op:             field_op_set,
+		max_ind:        max_ind,
+		val:            max_ind,
 	}, &res)
 	if err != nil {
 		return nil, err
@@ -689,7 +684,7 @@ func getSizeFilter(request Request) bson.M {
 }
 
 func (db *Mongodb) getSize(request Request) ([]byte, error) {
-	c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.Stream)
+	c := db.client.Database(request.DbName()).Collection(data_collection_name_prefix + request.Stream)
 
 	filter := getSizeFilter(request)
 	size, err := c.CountDocuments(context.TODO(), filter, options.Count())
@@ -716,7 +711,7 @@ func (db *Mongodb) resetCounter(request Request) ([]byte, error) {
 		return []byte(""), err
 	}
 
-	c := db.client.Database(request.DbName).Collection(inprocess_collection_name_prefix + request.Stream + "_" + request.GroupId)
+	c := db.client.Database(request.DbName()).Collection(inprocess_collection_name_prefix + request.Stream + "_" + request.GroupId)
 	_, err_del := c.DeleteMany(context.Background(), bson.M{"_id": bson.M{"$gte": id}})
 	if err_del != nil {
 		return nil, &DBError{utils.StatusWrongInput, err_del.Error()}
@@ -743,40 +738,35 @@ func (db *Mongodb) getMeta(request Request) ([]byte, error) {
 	}
 	q := bson.M{"_id": id}
 	var res map[string]interface{}
-	c := db.client.Database(request.DbName).Collection(meta_collection_name)
+	c := db.client.Database(request.DbName()).Collection(meta_collection_name)
 	err = c.FindOne(context.TODO(), q, options.FindOne()).Decode(&res)
 	if err != nil {
-		log_str := "error getting meta for " + id + " in " + request.DbName + " : " + err.Error()
-		logger.Debug(log_str)
+		request.Logger().WithFields(map[string]interface{}{"id": id, "cause": err.Error()}).Debug("error getting meta")
 		return nil, &DBError{utils.StatusNoData, err.Error()}
 	}
 	userMeta, ok := res["meta"]
 	if !ok {
-		log_str := "error getting meta for " + id + " in " + request.DbName + " : cannot parse database response"
-		logger.Error(log_str)
-		return nil, errors.New(log_str)
+		request.Logger().WithFields(map[string]interface{}{"id": id, "cause": "cannot parse database response"}).Debug("error getting meta")
+		return nil, errors.New("cannot get metadata")
 	}
-	log_str := "got metadata for " + id + " in " + request.DbName
-	logger.Debug(log_str)
+	request.Logger().WithFields(map[string]interface{}{"id": id}).Debug("got metadata")
 	return utils.MapToJson(&userMeta)
 }
 
-func (db *Mongodb) processQueryError(query, dbname string, err error) ([]byte, error) {
-	log_str := "error processing query: " + query + " for " + dbname + " : " + err.Error()
-	logger.Debug(log_str)
+func (db *Mongodb) processQueryError(query, dbname string, err error, rlog log.Logger) ([]byte, error) {
+	rlog.WithFields(map[string]interface{}{"query": query, "cause": err.Error()}).Debug("error processing query")
 	return nil, &DBError{utils.StatusNoData, err.Error()}
 }
 
 func (db *Mongodb) queryMessages(request Request) ([]byte, error) {
 	var res []map[string]interface{}
-	q, sort, err := db.BSONFromSQL(request.DbName, request.ExtraParam)
+	q, sort, err := db.BSONFromSQL(request.DbName(), request.ExtraParam)
 	if err != nil {
-		log_str := "error parsing query: " + request.ExtraParam + " for " + request.DbName + " : " + err.Error()
-		logger.Debug(log_str)
+		request.Logger().WithFields(map[string]interface{}{"query": request.ExtraParam, "cause": err.Error()}).Debug("error parsing query")
 		return nil, &DBError{utils.StatusWrongInput, err.Error()}
 	}
 
-	c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.Stream)
+	c := db.client.Database(request.DbName()).Collection(data_collection_name_prefix + request.Stream)
 	opts := options.Find()
 
 	if len(sort) > 0 {
@@ -786,15 +776,15 @@ func (db *Mongodb) queryMessages(request Request) ([]byte, error) {
 
 	cursor, err := c.Find(context.TODO(), q, opts)
 	if err != nil {
-		return db.processQueryError(request.ExtraParam, request.DbName, err)
+		return db.processQueryError(request.ExtraParam, request.DbName(), err, request.Logger())
 	}
 	err = cursor.All(context.TODO(), &res)
 	if err != nil {
-		return db.processQueryError(request.ExtraParam, request.DbName, err)
+		return db.processQueryError(request.ExtraParam, request.DbName(), err, request.Logger())
 	}
 
-	log_str := "processed query " + request.ExtraParam + " for " + request.DbName + " ,found" + strconv.Itoa(len(res)) + " records"
-	logger.Debug(log_str)
+	request.Logger().WithFields(map[string]interface{}{"query": request.ExtraParam, "recordsFound": len(res)}).Debug("processed query")
+
 	if res != nil {
 		return utils.MapToJson(&res)
 	} else {
@@ -880,11 +870,11 @@ func (db *Mongodb) nacks(request Request) ([]byte, error) {
 }
 
 func (db *Mongodb) deleteCollection(request Request, name string) error {
-	return db.client.Database(request.DbName).Collection(name).Drop(context.Background())
+	return db.client.Database(request.DbName()).Collection(name).Drop(context.Background())
 }
 
 func (db *Mongodb) collectionExist(request Request, name string) (bool, error) {
-	result, err := db.client.Database(request.DbName).ListCollectionNames(context.TODO(), bson.M{"name": name})
+	result, err := db.client.Database(request.DbName()).ListCollectionNames(context.TODO(), bson.M{"name": name})
 	if err != nil {
 		return false, err
 	}
@@ -910,7 +900,7 @@ func (db *Mongodb) deleteDataCollection(errorOnNotexist bool, request Request) e
 
 func (db *Mongodb) deleteDocumentsInCollection(request Request, collection string, field string, pattern string) error {
 	filter := bson.M{field: bson.D{{"$regex", primitive.Regex{Pattern: pattern, Options: "i"}}}}
-	_, err := db.client.Database(request.DbName).Collection(collection).DeleteMany(context.TODO(), filter)
+	_, err := db.client.Database(request.DbName()).Collection(collection).DeleteMany(context.TODO(), filter)
 	return err
 }
 
@@ -923,7 +913,7 @@ func escapeQuery(query string) (res string) {
 }
 
 func (db *Mongodb) deleteCollectionsWithPrefix(request Request, prefix string) error {
-	cols, err := db.client.Database(request.DbName).ListCollectionNames(context.TODO(), bson.M{"name": bson.D{
+	cols, err := db.client.Database(request.DbName()).ListCollectionNames(context.TODO(), bson.M{"name": bson.D{
 		{"$regex", primitive.Regex{Pattern: "^" + escapeQuery(prefix), Options: "i"}}}})
 	if err != nil {
 		return err
@@ -966,7 +956,7 @@ func (db *Mongodb) deleteStream(request Request) ([]byte, error) {
 		return nil, &DBError{utils.StatusWrongInput, "wrong params: " + request.ExtraParam}
 	}
 	if !*params.DeleteMeta {
-		logger.Debug("skipping delete stream meta for " + request.Stream + " in " + request.DbName)
+		request.Logger().Debug("skipping delete stream meta")
 		return nil, nil
 	}
 
@@ -980,7 +970,7 @@ func (db *Mongodb) deleteStream(request Request) ([]byte, error) {
 }
 
 func (db *Mongodb) lastAck(request Request) ([]byte, error) {
-	c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
+	c := db.client.Database(request.DbName()).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
 	opts := options.FindOne().SetSort(bson.M{"_id": -1}).SetReturnKey(true)
 	result := LastAck{0}
 	var q bson.M = nil
@@ -1047,7 +1037,7 @@ func extractNacsFromCursor(err error, cursor *mongo.Cursor) ([]int, error) {
 }
 
 func (db *Mongodb) getNacks(request Request, min_index, max_index int) ([]int, error) {
-	c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
+	c := db.client.Database(request.DbName()).Collection(acks_collection_name_prefix + request.Stream + "_" + request.GroupId)
 
 	if res, err, ok := db.canAvoidDbRequest(min_index, max_index, c); ok {
 		return res, err
@@ -1062,7 +1052,7 @@ func (db *Mongodb) getNacks(request Request, min_index, max_index int) ([]int, e
 func (db *Mongodb) getStreams(request Request) ([]byte, error) {
 	rec, err := streams.getStreams(db, request)
 	if err != nil {
-		return db.processQueryError("get streams", request.DbName, err)
+		return db.processQueryError("get streams", request.DbName(), err, request.Logger())
 	}
 	return json.Marshal(&rec)
 }
diff --git a/broker/src/asapo_broker/database/mongodb_streams.go b/broker/src/asapo_broker/database/mongodb_streams.go
index a182f5080409c00116af1958d2b65dcc49983e75..b57f9973ddfa997af4706f75ece08e6d5969fa61 100644
--- a/broker/src/asapo_broker/database/mongodb_streams.go
+++ b/broker/src/asapo_broker/database/mongodb_streams.go
@@ -36,7 +36,7 @@ var streams = Streams{lastSynced: make(map[string]time.Time, 0),lastUpdated: mak
 var streamsLock sync.Mutex
 
 func (ss *Streams) tryGetFromCache(db_name string, updatePeriodMs int) (StreamsRecord, error) {
-	if time.Now().Sub(ss.lastUpdated[db_name]).Milliseconds() > int64(updatePeriodMs) {
+	if time.Now().Sub(ss.lastUpdated[db_name]).Milliseconds() >= int64(updatePeriodMs) {
 		return StreamsRecord{}, errors.New("cache expired")
 	}
 	rec, ok := ss.records[db_name]
@@ -265,9 +265,9 @@ func (ss *Streams) getStreams(db *Mongodb, request Request) (StreamsRecord, erro
 	}
 
 	streamsLock.Lock()
-	rec, err := ss.tryGetFromCache(request.DbName, db.settings.UpdateStreamCachePeriodMs)
+	rec, err := ss.tryGetFromCache(request.DbName(), db.settings.UpdateStreamCachePeriodMs)
 	if err != nil {
-		rec, err = ss.updateFromDb(db, request.DbName)
+		rec, err = ss.updateFromDb(db, request.DbName())
 	}
 	streamsLock.Unlock()
 	if err != nil {
diff --git a/broker/src/asapo_broker/database/mongodb_test.go b/broker/src/asapo_broker/database/mongodb_test.go
index 09bf8ab1cb9f16605f5261ba17e29929b08b6e08..d7f38ad4745aac84d3a3da6734a171fb63c2b646 100644
--- a/broker/src/asapo_broker/database/mongodb_test.go
+++ b/broker/src/asapo_broker/database/mongodb_test.go
@@ -36,7 +36,9 @@ type TestDataset struct {
 
 var db Mongodb
 
-const dbname = "12345"
+const beamtime = "bt"
+const datasource = "12345"
+const dbname = "bt_12345"
 const collection = "stream"
 const collection2 = "stream2"
 const dbaddress = "127.0.0.1:27017"
@@ -100,17 +102,17 @@ func TestMongoDBConnectOK(t *testing.T) {
 }
 
 func TestMongoDBGetNextErrorWhenNotConnected(t *testing.T) {
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
 func TestMongoDBGetMetaErrorWhenNotConnected(t *testing.T) {
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "meta", ExtraParam: "0"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "meta", ExtraParam: "0"})
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
 func TestMongoDBQueryMessagesErrorWhenNotConnected(t *testing.T) {
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "querymessages", ExtraParam: "0"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "querymessages", ExtraParam: "0"})
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
@@ -124,7 +126,7 @@ func TestMongoDBGetNextErrorWhenWrongDatabasename(t *testing.T) {
 func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: "bla", GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: "bla", GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
@@ -132,7 +134,7 @@ func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) {
 func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: "bla", GroupId: groupId, Op: "last"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: "bla", GroupId: groupId, Op: "last"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
@@ -140,7 +142,7 @@ func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) {
 func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
@@ -150,7 +152,7 @@ func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec2)
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_stream\":\"\"}", err.Error())
 }
@@ -159,7 +161,7 @@ func TestMongoDBGetNextOK(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 }
@@ -170,8 +172,8 @@ func TestMongoDBGetNextErrorOnFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"next1\"}", err.(*DBError).Message)
@@ -183,9 +185,9 @@ func TestMongoDBGetNextErrorOnFinishedStreamAlways(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"next1\"}", err.(*DBError).Message)
@@ -199,7 +201,7 @@ func TestMongoDBGetByIdErrorOnFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"next1\"}", err.(*DBError).Message)
@@ -211,7 +213,7 @@ func TestMongoDBGetLastErrorOnFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last"})
 	fmt.Println(string(res))
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"next1\"}", err.(*DBError).Message)
@@ -221,8 +223,8 @@ func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_stream\":\"\"}", err.(*DBError).Message)
@@ -233,8 +235,8 @@ func TestMongoDBGetNextCorrectOrder(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec2)
 	db.insertRecord(dbname, collection, &rec1)
-	res1, _ := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	res2, _ := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res1, _ := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	res2, _ := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, string(rec1_expect), string(res1))
 	assert.Equal(t, string(rec2_expect), string(res2))
 }
@@ -271,7 +273,7 @@ func getRecords(n int, resend bool) []int {
 	for i := 0; i < n; i++ {
 		go func() {
 			defer wg.Done()
-			res_bin, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: extra_param})
+			res_bin, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: extra_param})
 			if err != nil {
 				fmt.Println("error at read ", i)
 			}
@@ -316,13 +318,13 @@ func TestMongoDBGetLastAfterErasingDatabase(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	insertRecords(10)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	db.dropDatabase(dbname)
 
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec2_expect), string(res))
 }
@@ -331,7 +333,7 @@ func TestMongoDBGetNextAfterErasingDatabase(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	insertRecords(200)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	db.dropDatabase(dbname)
 
 	n := 100
@@ -344,10 +346,10 @@ func TestMongoDBGetNextEmptyAfterErasingDatabase(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	insertRecords(10)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	db.dropDatabase(dbname)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_stream\":\"\"}", err.Error())
 }
@@ -357,7 +359,7 @@ func TestMongoDBgetRecordByID(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 }
@@ -366,7 +368,7 @@ func TestMongoDBgetRecordByIDFails(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "2"})
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 	assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_stream\":\"\"}", err.Error())
 }
@@ -375,7 +377,7 @@ func TestMongoDBGetRecordNext(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 }
@@ -386,8 +388,8 @@ func TestMongoDBGetRecordNextMultipleCollections(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection2, &rec_dataset1)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
-	res_string, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection2, GroupId: groupId, Op: "next", DatasetOp: true})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
+	res_string, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection2, GroupId: groupId, Op: "next", DatasetOp: true})
 	var res_ds TestDataset
 	json.Unmarshal(res_string, &res_ds)
 
@@ -403,7 +405,7 @@ func TestMongoDBGetRecordID(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 }
@@ -412,7 +414,7 @@ func TestMongoDBWrongOp(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "bla"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "bla"})
 	assert.NotNil(t, err)
 }
 
@@ -422,7 +424,7 @@ func TestMongoDBGetRecordLast(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec2_expect), string(res))
 }
@@ -433,13 +435,13 @@ func TestMongoDBGetNextAfterGetLastCorrect(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last", ExtraParam: "0"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec2_expect), string(res))
 
 	db.insertRecord(dbname, collection, &rec3)
 
-	res, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 
@@ -449,14 +451,14 @@ func TestMongoDBGetGetLastInGroupCorrect(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"}) // to check it does not influence groupedlast
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"}) // to check it does not influence groupedlast
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
 
 // first record - ok, then error
-	res, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	res, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
@@ -464,15 +466,15 @@ func TestMongoDBGetGetLastInGroupCorrect(t *testing.T) {
 	}
 // second record - ok, then error
 	db.insertRecord(dbname, collection, &rec2)
-	res, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	res, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec2_expect), string(res))
-	res, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	res, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.NotNil(t, err)
 
 // stream finished - immediately error
 	db.insertRecord(dbname, collection, &rec_finished3)
-	res, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	res, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
@@ -487,9 +489,9 @@ func TestMongoDBGetGetLastInGroupImmediateErrorOnFinishStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 	db.insertRecord(dbname, collection, &rec_finished3)
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.NotNil(t, err)
-	_, err = db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
+	_, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "groupedlast", ExtraParam: ""})
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
@@ -506,7 +508,7 @@ func TestMongoDBGetSize(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec2)
 	db.insertRecord(dbname, collection, &rec3)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(recs1_expect), string(res))
 }
@@ -517,7 +519,7 @@ func TestMongoDBGetSizeWithFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size"})
 	assert.Nil(t, err)
 	var rec_expect, _ = json.Marshal(&SizeRecord{1})
 	assert.Equal(t, string(rec_expect), string(res))
@@ -528,10 +530,10 @@ func TestMongoDBGetSizeForDatasets(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size", ExtraParam: "false"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size", ExtraParam: "false"})
 	assert.Equal(t, utils.StatusWrongInput, err.(*DBError).Code)
 
-	_, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size", ExtraParam: "true"})
+	_, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size", ExtraParam: "true"})
 	assert.Equal(t, utils.StatusWrongInput, err1.(*DBError).Code)
 }
 
@@ -541,7 +543,7 @@ func TestMongoDBGetSizeForDatasetsWithFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	res, _ := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size", ExtraParam: "true"})
+	res, _ := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size", ExtraParam: "true"})
 
 	var rec_expect, _ = json.Marshal(&SizeRecord{1})
 	assert.Equal(t, string(rec_expect), string(res))
@@ -556,7 +558,7 @@ func TestMongoDBGetSizeDataset(t *testing.T) {
 
 	size2_expect, _ := json.Marshal(SizeRecord{2})
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size", ExtraParam: "true"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size", ExtraParam: "true"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(size2_expect), string(res))
 }
@@ -565,7 +567,7 @@ func TestMongoDBGetSizeNoRecords(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "size"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "size"})
 	assert.Nil(t, err)
 	assert.Equal(t, string(recs2_expect), string(res))
 }
@@ -583,7 +585,7 @@ func TestMongoPingNotConected(t *testing.T) {
 }
 
 func TestMongoDBgetRecordByIDNotConnected(t *testing.T) {
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", ExtraParam: "1"})
 	assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code)
 }
 
@@ -593,15 +595,15 @@ func TestMongoDBResetCounter(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 
 	assert.Nil(t, err1)
 	assert.Equal(t, string(rec1_expect), string(res1))
 
-	_, err_reset := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "1"})
+	_, err_reset := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "1"})
 	assert.Nil(t, err_reset)
 
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	res2, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 
 	assert.Nil(t, err2)
 	assert.Equal(t, string(rec2_expect), string(res2))
@@ -613,7 +615,7 @@ func TestMongoDBGetMetaBtOK(t *testing.T) {
 	rec_expect, _ := json.Marshal(recbt.Meta)
 	db.insertMeta(dbname, &recbt)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: "whatever", Op: "meta", ExtraParam: "0"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: "whatever", Op: "meta", ExtraParam: "0"})
 
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec_expect), string(res))
@@ -625,7 +627,7 @@ func TestMongoDBGetMetaStOK(t *testing.T) {
 	rec_expect, _ := json.Marshal(recst.Meta)
 	db.insertMeta(dbname, &recst)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "meta", ExtraParam: "1"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "meta", ExtraParam: "1"})
 
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec_expect), string(res))
@@ -635,7 +637,7 @@ func TestMongoDBGetMetaErr(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "meta", ExtraParam: "1"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "meta", ExtraParam: "1"})
 	assert.NotNil(t, err)
 	assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
 }
@@ -711,7 +713,7 @@ func TestMongoDBQueryMessagesOK(t *testing.T) {
 		//			continue
 		//		}
 
-		res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "querymessages", ExtraParam: test.query})
+		res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "querymessages", ExtraParam: test.query})
 		var res []TestRecordMeta
 		json.Unmarshal(res_string, &res)
 		//		fmt.Println(string(res_string))
@@ -730,7 +732,7 @@ func TestMongoDBQueryMessagesOnEmptyDatabase(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	for _, test := range tests {
-		res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, Op: "querymessages", ExtraParam: test.query})
+		res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, Op: "querymessages", ExtraParam: test.query})
 		var res []TestRecordMeta
 		json.Unmarshal(res_string, &res)
 		assert.Equal(t, 0, len(res))
@@ -756,7 +758,7 @@ func TestMongoDBGetDataset(t *testing.T) {
 
 	db.insertRecord(dbname, collection, &rec_dataset1)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
 
 	assert.Nil(t, err)
 
@@ -772,7 +774,7 @@ func TestMongoDBNoDataOnNotCompletedFirstDataset(t *testing.T) {
 
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
 
 	assert.Equal(t, utils.StatusPartialData, err.(*DBError).Code)
 	var res TestDataset
@@ -787,8 +789,8 @@ func TestMongoDBNoDataOnNotCompletedNextDataset(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 	db.insertRecord(dbname, collection, &rec_dataset2_incomplete)
 
-	_, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
-	_, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
+	_, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
+	_, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", DatasetOp: true})
 
 	assert.Equal(t, utils.StatusPartialData, err1.(*DBError).Code)
 	assert.Equal(t, utils.StatusPartialData, err2.(*DBError).Code)
@@ -804,7 +806,7 @@ func TestMongoDBGetRecordLastDataSetSkipsIncompleteSets(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1)
 	db.insertRecord(dbname, collection, &rec_dataset2)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"})
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"})
 
 	assert.Nil(t, err)
 
@@ -821,7 +823,7 @@ func TestMongoDBGetRecordLastDataSetReturnsIncompleteSets(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1)
 	db.insertRecord(dbname, collection, &rec_dataset2)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last",
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last",
 		DatasetOp: true, MinDatasetSize: 3, ExtraParam: "0"})
 
 	assert.Nil(t, err)
@@ -839,7 +841,7 @@ func TestMongoDBGetRecordLastDataSetSkipsIncompleteSetsWithMinSize(t *testing.T)
 	db.insertRecord(dbname, collection, &rec_dataset1)
 	db.insertRecord(dbname, collection, &rec_dataset2_incomplete3)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last",
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last",
 		DatasetOp: true, MinDatasetSize: 3, ExtraParam: "0"})
 
 	assert.Nil(t, err)
@@ -856,7 +858,7 @@ func TestMongoDBGetRecordLastDataSetWithFinishedStream(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last",
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last",
 		DatasetOp: true, ExtraParam: "0"})
 
 	assert.NotNil(t, err)
@@ -873,7 +875,7 @@ func TestMongoDBGetRecordLastDataSetWithIncompleteDatasetsAndFinishedStreamRetur
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 	db.insertRecord(dbname, collection, &rec_finished)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last",
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last",
 		DatasetOp: true, MinDatasetSize: 2, ExtraParam: "0"})
 
 	assert.NotNil(t, err)
@@ -890,7 +892,7 @@ func TestMongoDBGetRecordLastDataSetOK(t *testing.T) {
 	db.insertRecord(dbname, collection, &rec_dataset1)
 	db.insertRecord(dbname, collection, &rec_dataset3)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"})
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"})
 
 	assert.Nil(t, err)
 
@@ -905,7 +907,7 @@ func TestMongoDBGetDatasetID(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec_dataset1)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, ExtraParam: "1"})
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, ExtraParam: "1"})
 
 	assert.Nil(t, err)
 
@@ -921,7 +923,7 @@ func TestMongoDBErrorOnIncompleteDatasetID(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, ExtraParam: "1"})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, ExtraParam: "1"})
 
 	assert.Equal(t, utils.StatusPartialData, err.(*DBError).Code)
 
@@ -937,7 +939,7 @@ func TestMongoDBOkOnIncompleteDatasetID(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 
-	res_string, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, MinDatasetSize: 3, ExtraParam: "1"})
+	res_string, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "id", DatasetOp: true, MinDatasetSize: 3, ExtraParam: "1"})
 
 	assert.Nil(t, err)
 
@@ -984,7 +986,7 @@ func TestMongoDBListStreams(t *testing.T) {
 		}
 		var rec_streams_expect, _ = json.Marshal(test.expectedStreams)
 
-		res, err := db.ProcessRequest(Request{DbName: dbname, Stream: "0", Op: "streams", ExtraParam: utils.EncodeTwoStrings(test.from,"")})
+		res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: "0", Op: "streams", ExtraParam: utils.EncodeTwoStrings(test.from,"")})
 		if test.ok {
 			assert.Nil(t, err, test.test)
 			assert.Equal(t, string(rec_streams_expect), string(res), test.test)
@@ -1004,7 +1006,7 @@ func TestMongoDBAckMessage(t *testing.T) {
 
 	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
 
-	request := Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str}
+	request := Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str}
 	res, err := db.ProcessRequest(request)
 	nacks, _ := db.getNacks(request, 0, 0)
 	assert.Nil(t, err)
@@ -1041,12 +1043,12 @@ func TestMongoDBNacks(t *testing.T) {
 			db.insertRecord(dbname, collection, &rec_finished11)
 		}
 		if test.ackRecords {
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
 		}
 
-		res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "nacks", ExtraParam: test.rangeString})
+		res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "nacks", ExtraParam: test.rangeString})
 		if test.ok {
 			assert.Nil(t, err, test.test)
 			assert.Equal(t, test.resString, string(res), test.test)
@@ -1076,12 +1078,12 @@ func TestMongoDBLastAcks(t *testing.T) {
 			db.insertRecord(dbname, collection, &rec_finished11)
 		}
 		if test.ackRecords {
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
-			db.ackRecord(Request{DbName: dbname, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackmessage\"}"})
+			db.ackRecord(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackmessage\"}"})
 		}
 
-		res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "lastack"})
+		res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "lastack"})
 		assert.Nil(t, err, test.test)
 		assert.Equal(t, test.resString, string(res), test.test)
 		cleanup()
@@ -1095,8 +1097,8 @@ func TestMongoDBGetNextUsesInprocessedImmedeatly(t *testing.T) {
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
@@ -1109,9 +1111,9 @@ func TestMongoDBGetNextUsesInprocessedNumRetry(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
-	_, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	_, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
 
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
@@ -1129,10 +1131,10 @@ func TestMongoDBGetNextUsesInprocessedAfterTimeout(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	time.Sleep(time.Second)
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res2, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Nil(t, err2)
@@ -1148,10 +1150,10 @@ func TestMongoDBGetNextReturnsToNormalAfterUsesInprocessed(t *testing.T) {
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
 	db.insertRecord(dbname, collection, &rec_finished3)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	time.Sleep(time.Second)
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
+	res2, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "1000_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Nil(t, err2)
@@ -1166,8 +1168,8 @@ func TestMongoDBGetNextUsesInprocessedImmedeatlyIfFinishedStream(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Equal(t, string(rec1_expect), string(res))
@@ -1180,9 +1182,9 @@ func TestMongoDBGetNextUsesInprocessedImmedeatlyIfEndofStream(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	res2, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
 	assert.Nil(t, err2)
@@ -1196,11 +1198,11 @@ func TestMongoDBAckDeletesInprocessed(t *testing.T) {
 	db.Connect(dbaddress)
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
 
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
-	_, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
+	_, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"})
 	assert.NotNil(t, err)
 	if err != nil {
 		assert.Equal(t, utils.StatusNoData, err.(*DBError).Code)
@@ -1214,8 +1216,8 @@ func TestMongoDBAckTwiceErrors(t *testing.T) {
 	defer cleanup()
 	db.insertRecord(dbname, collection, &rec1)
 	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
-	_,err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
+	_,err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str})
 	assert.Equal(t, utils.StatusWrongInput, err.(*DBError).Code)
 }
 
@@ -1234,14 +1236,14 @@ func TestMongoDBNegAck(t *testing.T) {
 	inputParams.Params.DelayMs = 0
 
 	db.insertRecord(dbname, collection, &rec1)
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})
 	bparam, _ := json.Marshal(&inputParams)
 
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "negackmessage", ExtraParam: string(bparam)})
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"}) // first time message from negack
-	_, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"})  // second time nothing
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "negackmessage", ExtraParam: string(bparam)})
-	_, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next"}) // second time nothing
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "negackmessage", ExtraParam: string(bparam)})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"}) // first time message from negack
+	_, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"})  // second time nothing
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "negackmessage", ExtraParam: string(bparam)})
+	_, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next"}) // second time nothing
 
 	assert.Nil(t, err)
 	assert.Equal(t, string(rec1_expect), string(res))
@@ -1260,12 +1262,12 @@ func TestMongoDBGetNextClearsInprocessAfterReset(t *testing.T) {
 	defer cleanup()
 	err := db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec2)
-	res, err := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
-	res1, err1 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	res, err := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	res1, err1 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
 
-	db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "0"})
-	res2, err2 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
-	res3, err3 := db.ProcessRequest(Request{DbName: dbname, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "0"})
+	res2, err2 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
+	res3, err3 := db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"})
 
 	assert.Nil(t, err)
 	assert.Nil(t, err1)
@@ -1295,16 +1297,16 @@ func TestDeleteStreams(t *testing.T) {
 	for _, test := range testsDeleteStream {
 		db.Connect(dbaddress)
 		db.insertRecord(dbname, encodeStringForColName(test.stream), &rec1)
-		db.ProcessRequest(Request{DbName: dbname, Stream: test.stream, GroupId: "123", Op: "next"})
+		db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: test.stream, GroupId: "123", Op: "next"})
 		query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
-		request := Request{DbName: dbname, Stream: test.stream, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str}
+		request := Request{Beamtime:beamtime, DataSource:datasource, Stream: test.stream, GroupId: groupId, Op: "ackmessage", ExtraParam: query_str}
 		_, err := db.ProcessRequest(request)
 		assert.Nil(t, err, test.message)
-		_, err = db.ProcessRequest(Request{DbName: dbname, Stream: test.stream, GroupId: "", Op: "delete_stream", ExtraParam: test.params})
+		_, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: test.stream, GroupId: "", Op: "delete_stream", ExtraParam: test.params})
 		if test.ok {
-			rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
-			acks_exist,_:= db.collectionExist(Request{DbName: dbname, ExtraParam: ""},acks_collection_name_prefix+test.stream)
-			inprocess_exist,_:= db.collectionExist(Request{DbName: dbname, ExtraParam: ""},inprocess_collection_name_prefix+test.stream)
+			rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
+			acks_exist,_:= db.collectionExist(Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""},acks_collection_name_prefix+test.stream)
+			inprocess_exist,_:= db.collectionExist(Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""},inprocess_collection_name_prefix+test.stream)
 			assert.Equal(t,0,len(rec.Streams),test.message)
 			assert.Equal(t,false,acks_exist,test.message)
 			assert.Equal(t,false,inprocess_exist,test.message)
@@ -1312,7 +1314,7 @@ func TestDeleteStreams(t *testing.T) {
 		} else {
 			assert.NotNil(t, err, test.message)
 		}
-		_, err = db.ProcessRequest(Request{DbName: dbname, Stream: test.stream, GroupId: "", Op: "delete_stream", ExtraParam: test.params})
+		_, err = db.ProcessRequest(Request{Beamtime:beamtime, DataSource:datasource, Stream: test.stream, GroupId: "", Op: "delete_stream", ExtraParam: test.params})
 		if test.ok2 {
 			assert.Nil(t, err, test.message+" 2")
 		} else {
@@ -1323,7 +1325,8 @@ func TestDeleteStreams(t *testing.T) {
 
 
 var testsEncodings = []struct {
-	dbname          string
+	beamtime          string
+	datasource          string
 	collection      string
 	group			string
 	dbname_indb          string
@@ -1332,10 +1335,10 @@ var testsEncodings = []struct {
 	message string
 	ok              bool
 }{
-	{"dbname", "col", "group", "dbname","col","group", "no encoding",true},
-	{"dbname"+badSymbolsDb, "col", "group", "dbname"+badSymbolsDbEncoded,"col","group", "symbols in db",true},
-	{"dbname", "col"+badSymbolsCol, "group"+badSymbolsCol, "dbname","col"+badSymbolsColEncoded,"group"+badSymbolsColEncoded, "symbols in col",true},
-	{"dbname"+badSymbolsDb, "col"+badSymbolsCol, "group"+badSymbolsCol, "dbname"+badSymbolsDbEncoded,"col"+badSymbolsColEncoded,"group"+badSymbolsColEncoded, "symbols in col and db",true},
+	{"bt","dbname", "col", "group", "bt_dbname","col","group", "no encoding",true},
+	{"bt","dbname"+badSymbolsDb, "col", "group", "bt_dbname"+badSymbolsDbEncoded,"col","group", "symbols in db",true},
+	{"bt","dbname", "col"+badSymbolsCol, "group"+badSymbolsCol, "bt_dbname","col"+badSymbolsColEncoded,"group"+badSymbolsColEncoded, "symbols in col",true},
+	{"bt","dbname"+badSymbolsDb, "col"+badSymbolsCol, "group"+badSymbolsCol, "bt_dbname"+badSymbolsDbEncoded,"col"+badSymbolsColEncoded,"group"+badSymbolsColEncoded, "symbols in col and db",true},
 
 }
 
@@ -1343,7 +1346,7 @@ func TestMongoDBEncodingOK(t *testing.T) {
 	for _, test := range testsEncodings {
 		db.Connect(dbaddress)
 		db.insertRecord(test.dbname_indb, test.collection_indb, &rec1)
-		res, err := db.ProcessRequest(Request{DbName: test.dbname, Stream: test.collection, GroupId: test.group, Op: "next"})
+		res, err := db.ProcessRequest(Request{Beamtime:test.beamtime,DataSource: test.datasource, Stream: test.collection, GroupId: test.group, Op: "next"})
 		if test.ok {
 			assert.Nil(t, err, test.message)
 			assert.Equal(t, string(rec1_expect), string(res), test.message)
diff --git a/broker/src/asapo_broker/database/streams_test.go b/broker/src/asapo_broker/database/streams_test.go
index 4ba11e0b3986ff93ea26289054a11f573d670e5c..2bb15c0b575fc8d156d5e186ed6d3f1d4e663daf 100644
--- a/broker/src/asapo_broker/database/streams_test.go
+++ b/broker/src/asapo_broker/database/streams_test.go
@@ -28,16 +28,16 @@ func TestStreamsTestSuite(t *testing.T) {
 }
 
 func (suite *StreamsTestSuite) TestStreamsEmpty() {
-	rec, err := streams.getStreams(&db, Request{DbName: "test", ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:"test",DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Empty(rec.Streams, 0)
 }
 
 func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenEmpty() {
 	db.settings.UpdateStreamCachePeriodMs = 1000
-	streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	db.insertRecord(dbname, collection, &rec1)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(1, len(rec.Streams))
 }
@@ -45,9 +45,9 @@ func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenEmpty() {
 func (suite *StreamsTestSuite) TestStreamsUsesCache() {
 	db.settings.UpdateStreamCachePeriodMs = 1000
 	db.insertRecord(dbname, collection, &rec2)
-	streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	db.insertRecord(dbname, collection, &rec1)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(int64(1), rec.Streams[0].Timestamp)
 	suite.Equal(false, rec.Streams[0].Finished)
@@ -60,15 +60,15 @@ func (suite *StreamsTestSuite) TestStreamsCacheexpires() {
 	var res1 StreamsRecord
 	go func() {
 		db.insertRecord(dbname, collection, &rec1)
-		streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+		streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 		db.insertRecord(dbname, collection, &rec_finished)
-		res1,_ = streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+		res1,_ = streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	}()
 	db.insertRecord(dbname, collection+"1", &rec1_later)
-	res2,_ := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	res2,_ := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	db.insertRecord(dbname, collection+"1", &rec_finished)
 	time.Sleep(time.Second)
-	res3, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	res3, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(true, res3.Streams[0].Finished)
 	fmt.Println(res1,res2)
@@ -80,7 +80,7 @@ func (suite *StreamsTestSuite) TestStreamsGetFinishedInfo() {
 	db.settings.UpdateStreamCachePeriodMs = 1000
 	db.insertRecord(dbname, collection, &rec1)
 	db.insertRecord(dbname, collection, &rec_finished)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(int64(0), rec.Streams[0].Timestamp)
 	suite.Equal(true, rec.Streams[0].Finished)
@@ -92,7 +92,7 @@ func (suite *StreamsTestSuite) TestStreamsDataSetsGetFinishedInfo() {
 	db.settings.UpdateStreamCachePeriodMs = 1000
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 	db.insertRecord(dbname, collection, &rec_finished)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(int64(1), rec.Streams[0].Timestamp)
 	suite.Equal(int64(2), rec.Streams[0].TimestampLast)
@@ -106,8 +106,8 @@ func (suite *StreamsTestSuite) TestStreamsMultipleRequests() {
 	db.insertRecord(dbname, collection, &rec_dataset1_incomplete)
 	db.insertRecord(dbname, collection, &rec_finished)
 	db.insertRecord(dbname, collection2, &rec_dataset1_incomplete)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: "0/unfinished"})
-	rec2, err2 := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: "0/finished"})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: "0/unfinished"})
+	rec2, err2 := streams.getStreams(&db, Request{Beamtime:beamtime, DataSource:datasource, ExtraParam: "0/finished"})
 	suite.Nil(err)
 	suite.Equal(collection2, rec.Streams[0].Name)
 	suite.Equal(1, len(rec.Streams))
@@ -119,10 +119,10 @@ func (suite *StreamsTestSuite) TestStreamsMultipleRequests() {
 func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenExpired() {
 	db.settings.UpdateStreamCachePeriodMs = 10
 	db.insertRecord(dbname, collection, &rec2)
-	streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	streams.getStreams(&db, Request{Beamtime:beamtime,DataSource:datasource, ExtraParam: ""})
 	db.insertRecord(dbname, collection, &rec1)
 	time.Sleep(time.Millisecond * 100)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime,DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Equal(int64(1), rec.Streams[0].Timestamp)
 }
@@ -130,9 +130,9 @@ func (suite *StreamsTestSuite) TestStreamsNotUsesCacheWhenExpired() {
 func (suite *StreamsTestSuite) TestStreamRemovesDatabase() {
 	db.settings.UpdateStreamCachePeriodMs = 0
 	db.insertRecord(dbname, collection, &rec1)
-	streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	streams.getStreams(&db, Request{Beamtime:beamtime,DataSource:datasource, ExtraParam: ""})
 	db.dropDatabase(dbname)
-	rec, err := streams.getStreams(&db, Request{DbName: dbname, ExtraParam: ""})
+	rec, err := streams.getStreams(&db, Request{Beamtime:beamtime,DataSource:datasource, ExtraParam: ""})
 	suite.Nil(err)
 	suite.Empty(rec.Streams, 0)
 }
@@ -143,18 +143,18 @@ var streamFilterTests=[]struct{
 	streams []string
 	message string
 }{
-	{request: Request{DbName:dbname, ExtraParam:""},error: false,streams: []string{collection,collection2},message: "default all streams"},
-	{request: Request{DbName:dbname, ExtraParam:"0/"},error: false,streams: []string{collection,collection2},message: "default 0/ all streams"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection,"")},error: false,streams: []string{collection,collection2},message: "first parameter only -  all streams"},
-	{request: Request{DbName:dbname, ExtraParam:"0/all"},error: false,streams: []string{collection,collection2},message: "second parameter only -  all streams"},
-	{request: Request{DbName:dbname, ExtraParam:"0/finished"},error: false,streams: []string{collection2},message: "second parameter only -  finished streams"},
-	{request: Request{DbName:dbname, ExtraParam:"0/unfinished"},error: false,streams: []string{collection},message: "second parameter only -  unfinished streams"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection2,"all")},error: false,streams: []string{collection2},message: "from stream2"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection2,"unfinished")},error: false,streams: []string{},message: "from stream2 and filter"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection2,"bla")},error: true,streams: []string{},message: "wrong filter"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection2,"all_aaa")},error: true,streams: []string{},message: "wrong filter2"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings("blabla","")},error: false,streams: []string{},message: "from unknown stream returns nothing"},
-	{request: Request{DbName:dbname, ExtraParam:utils.EncodeTwoStrings(collection2,"")},error: false,streams: []string{collection2},message: "from stream2, first parameter only"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource,ExtraParam:""},error: false,streams: []string{collection,collection2},message: "default all streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:"0/"},error: false,streams: []string{collection,collection2},message: "default 0/ all streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection,"")},error: false,streams: []string{collection,collection2},message: "first parameter only -  all streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:"0/all"},error: false,streams: []string{collection,collection2},message: "second parameter only -  all streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:"0/finished"},error: false,streams: []string{collection2},message: "second parameter only -  finished streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:"0/unfinished"},error: false,streams: []string{collection},message: "second parameter only -  unfinished streams"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection2,"all")},error: false,streams: []string{collection2},message: "from stream2"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection2,"unfinished")},error: false,streams: []string{},message: "from stream2 and filter"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection2,"bla")},error: true,streams: []string{},message: "wrong filter"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection2,"all_aaa")},error: true,streams: []string{},message: "wrong filter2"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings("blabla","")},error: false,streams: []string{},message: "from unknown stream returns nothing"},
+	{request: Request{Beamtime:beamtime,DataSource:datasource, ExtraParam:utils.EncodeTwoStrings(collection2,"")},error: false,streams: []string{collection2},message: "from stream2, first parameter only"},
 }
 
 func (suite *StreamsTestSuite) TestStreamFilters() {
diff --git a/broker/src/asapo_broker/server/authorizer_test.go b/broker/src/asapo_broker/server/authorizer_test.go
index a58681460c80b6a412091f6982afca7c6eecdc14..f854b224667b1fc004a9e36ef229ad20fa15b5bd 100644
--- a/broker/src/asapo_broker/server/authorizer_test.go
+++ b/broker/src/asapo_broker/server/authorizer_test.go
@@ -47,7 +47,7 @@ func responseOk() (*http.Response, error) {
 }
 
 func responseUnauth() (*http.Response, error) {
-	r := ioutil.NopCloser(bytes.NewReader([]byte("wrong JWT token")))
+	r := ioutil.NopCloser(bytes.NewReader([]byte("wrong or expired JWT token")))
 	return &http.Response{
 		StatusCode: http.StatusUnauthorized,
 		Body:       r,
diff --git a/broker/src/asapo_broker/server/get_commands_test.go b/broker/src/asapo_broker/server/get_commands_test.go
index 17f3aea5a32a1308f3ec4e7e49f5d894cd6c2aea..0c5b4a91868570757dc9181e1b93aa25771fa8a2 100644
--- a/broker/src/asapo_broker/server/get_commands_test.go
+++ b/broker/src/asapo_broker/server/get_commands_test.go
@@ -60,8 +60,10 @@ var testsGetCommand = []struct {
 
 func (suite *GetCommandsTestSuite) TestGetCommandsCallsCorrectRoutine() {
 	for _, test := range testsGetCommand {
-		suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, Stream: test.stream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
-		logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request "+test.command)))
+		suite.mock_db.On("ProcessRequest", database.Request{Beamtime: expectedBeamtimeId, DataSource: test.source, Stream: test.stream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
+		logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap(test.command)))
+		logger.MockLog.On("Debug", mock.Anything)
+
 		w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + test.source + "/" + test.reqString+correctTokenSuffix+test.queryParams)
 		suite.Equal(http.StatusOK, w.Code, test.command+ " OK")
 		suite.Equal("Hello", string(w.Body.Bytes()), test.command+" sends data")
@@ -83,9 +86,9 @@ func (suite *GetCommandsTestSuite) TestGetCommandsCorrectlyProcessedEncoding() {
 		test.reqString = strings.Replace(test.reqString,test.groupid,encodedGroup,1)
 		test.reqString = strings.Replace(test.reqString,test.source,encodedSource,1)
 		test.reqString = strings.Replace(test.reqString,test.stream,encodedStream,1)
-		dbname := expectedBeamtimeId + "_" + newsource
-		suite.mock_db.On("ProcessRequest", database.Request{DbName: dbname, Stream: newstream, GroupId: newgroup, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
-		logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request "+test.command)))
+		suite.mock_db.On("ProcessRequest", database.Request{Beamtime: expectedBeamtimeId,DataSource: newsource, Stream: newstream, GroupId: newgroup, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil)
+		logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap(test.command)))
+		logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
 		w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + encodedSource + "/" + test.reqString+correctTokenSuffix+test.queryParams)
 		suite.Equal(http.StatusOK, w.Code, test.command+ " OK")
 		suite.Equal("Hello", string(w.Body.Bytes()), test.command+" sends data")
diff --git a/broker/src/asapo_broker/server/get_meta_test.go b/broker/src/asapo_broker/server/get_meta_test.go
index b54a72865f02d4358b4cfc8abf4f2a0bb6678acf..75367d998ca893f0533fe8f57732606b7ef3750b 100644
--- a/broker/src/asapo_broker/server/get_meta_test.go
+++ b/broker/src/asapo_broker/server/get_meta_test.go
@@ -33,8 +33,10 @@ func TestGetMetaTestSuite(t *testing.T) {
 }
 
 func (suite *GetMetaTestSuite) TestGetMetaOK() {
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, Stream: expectedStream, Op: "meta", ExtraParam: "0"}).Return([]byte(""), nil)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request meta")))
+	suite.mock_db.On("ProcessRequest", database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, Op: "meta", ExtraParam: "0"}).Return([]byte(""), nil)
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("meta")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/0/meta"  + "/0" + correctTokenSuffix,"GET")
 	suite.Equal(http.StatusOK, w.Code, "meta OK")
 }
diff --git a/broker/src/asapo_broker/server/post_create_group.go b/broker/src/asapo_broker/server/post_create_group.go
index 008e72f14d4bf36022a094c923ab301d7ed2bf36..ba1ae49c478b885c8bdd8339b0707010f5fecd46 100644
--- a/broker/src/asapo_broker/server/post_create_group.go
+++ b/broker/src/asapo_broker/server/post_create_group.go
@@ -14,6 +14,6 @@ func routeCreateGroupID(w http.ResponseWriter, r *http.Request) {
 
 	guid := xid.New()
 	w.Write([]byte(guid.String()))
-	logger.Debug("generated new group: " + guid.String())
+	logger.WithFields(map[string]interface{}{"guid":guid.String()}).Debug("generated new group")
 	statistics.IncreaseCounter()
 }
diff --git a/broker/src/asapo_broker/server/post_create_group_test.go b/broker/src/asapo_broker/server/post_create_group_test.go
index dcef0d009e109426d8cb95e9fc5dabd31a0b7692..3189bb46f0cb815f4644bcc28cb31252f02d1a73 100644
--- a/broker/src/asapo_broker/server/post_create_group_test.go
+++ b/broker/src/asapo_broker/server/post_create_group_test.go
@@ -18,7 +18,9 @@ func GetObjectID(t *testing.T) (xid.ID, error) {
 func TestGetNewGroup(t *testing.T) {
 	statistics.Reset()
 	logger.SetMockLog()
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("generated new group")))
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("guid")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("generated new group")))
+
 
 	id1, err := GetObjectID(t)
 	assert.Nil(t, err, "first is ObjectID")
diff --git a/broker/src/asapo_broker/server/post_op_image_test.go b/broker/src/asapo_broker/server/post_op_image_test.go
index 2cc3159ee6a3469490ed3ec082947270e8e49db4..facf1922c6dec06c2231de978e96d98da5ed162e 100644
--- a/broker/src/asapo_broker/server/post_op_image_test.go
+++ b/broker/src/asapo_broker/server/post_op_image_test.go
@@ -34,8 +34,9 @@ func TestMessageOpTestSuite(t *testing.T) {
 
 func (suite *MessageOpTestSuite) TestAckMessageOpOK() {
 	query_str := "{\"Id\":1,\"Op\":\"ackmessage\"}"
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, Op: "ackmessage", ExtraParam: query_str}).Return([]byte(""), nil)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request ackmessage")))
+	suite.mock_db.On("ProcessRequest", database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, Op: "ackmessage", ExtraParam: query_str}).Return([]byte(""), nil)
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("ackmessage")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str)
 	suite.Equal(http.StatusOK, w.Code, "ackmessage OK")
 }
diff --git a/broker/src/asapo_broker/server/post_query_images_test.go b/broker/src/asapo_broker/server/post_query_images_test.go
index d51d2490ab3b0063ff078b92430b08cfeb28db1f..28809d4aa9eee1837890ba49b4117a4cc322f505 100644
--- a/broker/src/asapo_broker/server/post_query_images_test.go
+++ b/broker/src/asapo_broker/server/post_query_images_test.go
@@ -35,8 +35,10 @@ func TestQueryTestSuite(t *testing.T) {
 func (suite *QueryTestSuite) TestQueryOK() {
 	query_str := "aaaa"
 
-	suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, Stream: expectedStream,Op: "querymessages", ExtraParam: query_str}).Return([]byte("{}"), nil)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request querymessages")))
+	suite.mock_db.On("ProcessRequest", database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream,Op: "querymessages", ExtraParam: query_str}).Return([]byte("{}"), nil)
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("querymessages")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+
 
 	w := doRequest("/beamtime/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/0/querymessages"+correctTokenSuffix, "POST", query_str)
 	suite.Equal(http.StatusOK, w.Code, "Query OK")
diff --git a/broker/src/asapo_broker/server/post_reset_counter_test.go b/broker/src/asapo_broker/server/post_reset_counter_test.go
index 64291bee21024ef2b5dbee377c2ff2b3aec3aeaf..84ace072d9cac0872b8f262dbccb72c918af2280 100644
--- a/broker/src/asapo_broker/server/post_reset_counter_test.go
+++ b/broker/src/asapo_broker/server/post_reset_counter_test.go
@@ -33,10 +33,11 @@ func TestResetCounterTestSuite(t *testing.T) {
 }
 
 func (suite *ResetCounterTestSuite) TestResetCounterOK() {
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), nil)
 
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request resetcounter")))
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("resetcounter")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
 
 	w := doRequest("/beamtime/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST")
 	suite.Equal(http.StatusOK, w.Code, "ResetCounter OK")
diff --git a/broker/src/asapo_broker/server/process_request.go b/broker/src/asapo_broker/server/process_request.go
index 41b6564b7a91f5d2902febfbfbd3f58f8d207ac9..8a0065fc2806ada24caa414e825e3e35b4c97778 100644
--- a/broker/src/asapo_broker/server/process_request.go
+++ b/broker/src/asapo_broker/server/process_request.go
@@ -63,19 +63,21 @@ func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_par
 
 
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	db_name, datasource, stream, group_id, ok := extractRequestParameters(r, needGroupID)
+	beamtime, datasource, stream, group_id, ok := extractRequestParameters(r, needGroupID)
 	if !ok {
+		log.WithFields(map[string]interface{}{"request":r.RequestURI}).Error("cannot extract request parameters")
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
 
-	if err := authorize(r, db_name, needWriteAccess(op)); err != nil {
-		writeAuthAnswer(w, "get "+op, db_name, err)
+	if err := authorize(r, beamtime, needWriteAccess(op)); err != nil {
+		writeAuthAnswer(w, "get "+op, beamtime, err)
 		return
 	}
 
 	request := database.Request{}
-	request.DbName = db_name+"_"+datasource
+	request.Beamtime = beamtime
+	request.DataSource = datasource
 	request.Op = op
 	request.ExtraParam = extra_param
 	request.Stream = stream
@@ -85,17 +87,19 @@ func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_par
 		request.MinDatasetSize = minSize
 	}
 
-	answer, code := processRequestInDb(request)
+	rlog := request.Logger()
+	rlog.Debug("got request")
+	answer, code := processRequestInDb(request, rlog)
 	w.WriteHeader(code)
 	w.Write(answer)
 }
 
-func returnError(err error, log_str string) (answer []byte, code int) {
+func returnError(err error, rlog logger.Logger) (answer []byte, code int) {
 	code = database.GetStatusCodeFromError(err)
 	if code != utils.StatusNoData && code != utils.StatusPartialData{
-		logger.Error(log_str + " - " + err.Error())
+		rlog.WithFields(map[string]interface{}{"cause":err.Error()}).Error("cannot process request")
 	} else {
-		logger.Debug(log_str + " - " + err.Error())
+		rlog.WithFields(map[string]interface{}{"cause":err.Error()}).Debug("no data or partial data")
 	}
 	return []byte(err.Error()), code
 }
@@ -107,20 +111,20 @@ func reconnectIfNeeded(db_error error) {
 	}
 
 	if err := ReconnectDb(); err != nil {
-		log.Error("cannot reconnect to database at : " + settings.GetDatabaseServer() + " " + err.Error())
+		log.WithFields(map[string]interface{}{"address":settings.GetDatabaseServer(),"cause": err.Error()}).Error("cannot reconnect to database")
 	} else {
-		log.Debug("reconnected to database" + settings.GetDatabaseServer())
+		log.WithFields(map[string]interface{}{"address":settings.GetDatabaseServer()}).Debug("reconnected to database")
 	}
 }
 
-func processRequestInDb(request database.Request) (answer []byte, code int) {
+
+
+func processRequestInDb(request database.Request, rlog logger.Logger) (answer []byte, code int) {
 	statistics.IncreaseCounter()
 	answer, err := db.ProcessRequest(request)
-	log_str := "processing request " + request.Op + " in " + request.DbName + " at " + settings.GetDatabaseServer()
 	if err != nil {
 		go reconnectIfNeeded(err)
-		return returnError(err, log_str)
+		return returnError(err, rlog)
 	}
-	logger.Debug(log_str)
 	return answer, utils.StatusOK
 }
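Note: processRequest now derives a per-request logger via request.Logger() and hands it down to processRequestInDb, so every log line carries the request context as structured fields. The Logger() method itself is not part of this diff; the following is a minimal sketch of what it plausibly looks like, assuming it simply pre-populates logger.WithFields with the request fields (the exact field names are an assumption, not project code).

    // Hypothetical sketch of database.Request.Logger() - not taken from this patch.
    package database

    import "asapo_common/logger"

    type Request struct {
        Beamtime       string
        DataSource     string
        Stream         string
        GroupId        string
        Op             string
        ExtraParam     string
        DatasetOp      bool
        MinDatasetSize int
    }

    // Logger returns a logger pre-tagged with the request fields, so later
    // Debug/Error calls automatically carry beamtime, data source, stream, etc.
    func (request *Request) Logger() logger.Logger {
        return logger.WithFields(map[string]interface{}{
            "beamtime":   request.Beamtime,
            "dataSource": request.DataSource,
            "stream":     request.Stream,
            "groupId":    request.GroupId,
            "operation":  request.Op,
        })
    }
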
diff --git a/broker/src/asapo_broker/server/process_request_test.go b/broker/src/asapo_broker/server/process_request_test.go
index cf0d41626723cd026791b9c9c9ac9471d78c0ce2..a9c5f53f36bd0e760ee7bcb1d9bf624b0d50d9a0 100644
--- a/broker/src/asapo_broker/server/process_request_test.go
+++ b/broker/src/asapo_broker/server/process_request_test.go
@@ -45,7 +45,7 @@ func (a *MockAuthServer) AuthorizeToken(tokenJWT string) (token Token, err error
 		}, nil
 	}
 
-	return Token{}, AuthorizationError{errors.New("wrong JWT token"),http.StatusUnauthorized}
+	return Token{}, &AuthorizationError{errors.New("wrong or expired JWT token"),http.StatusUnauthorized}
 }
 
 func prepareTestAuth() {
@@ -66,7 +66,19 @@ type request struct {
 	message string
 }
 
-func containsMatcher(substrings ...string) func(str string) bool {
+func containsMatcherMap(substrings ...string) func(map[string]interface{}) bool {
+	return func(vals map[string]interface{}) bool {
+		res, _ := utils.MapToJson(vals)
+		for _, substr := range substrings {
+			if !strings.Contains(string(res), substr) {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+func containsMatcherStr(substrings ...string) func(str string) bool {
 	return func(str string) bool {
 		for _, substr := range substrings {
 			if !strings.Contains(str, substr) {
@@ -77,6 +89,7 @@ func containsMatcher(substrings ...string) func(str string) bool {
 	}
 }
 
+
 func doRequest(path string, extra_params ...string) *httptest.ResponseRecorder {
 	m := "GET"
 	if len(extra_params) > 0 {
@@ -134,7 +147,9 @@ func TestProcessRequestTestSuite(t *testing.T) {
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
-	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong JWT token")))
+
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("wrong or expired JWT token")))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcherStr("cannot authorize request")))
 
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + suffixWithWrongToken)
 
@@ -142,7 +157,8 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongToken() {
 }
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithNoToken() {
-	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("cannot extract")))
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("cannot extract")))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcherStr("cannot authorize request")))
 
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + wrongTokenSuffix)
 
@@ -151,12 +167,15 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithNoToken() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusNoData, ""})
 
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next")))
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("no data or partial data")))
+
 
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 
@@ -165,14 +184,16 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName()
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""),
 		&database.DBError{utils.StatusServiceUnavailable, ""})
 
-	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("processing request next")))
+    logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcherStr("cannot process request")))
 	ExpectReconnect(suite.mock_db)
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("reconnected")))
 
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	time.Sleep(time.Second)
@@ -181,11 +202,14 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
 
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), errors.New(""))
-	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("processing request next")))
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected")))
+
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcherStr("cannot process request")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("reconnected")))
 
 	ExpectReconnect(suite.mock_db)
 	w := doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
@@ -196,10 +220,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsCounter() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
 
 	doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix)
 	suite.Equal(1, statistics.GetCounter(), "ProcessRequest increases counter")
@@ -207,10 +232,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestAddsCounter() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestAddsDataset() {
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: expectedGroupID, DatasetOp: true, Op: "next"}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: expectedGroupID, DatasetOp: true, Op: "next"}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName)))
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
 
 	doRequest("/beamtime/" + expectedBeamtimeId + "/" + expectedSource + "/" + expectedStream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true")
 }
@@ -222,7 +248,9 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestErrorOnWrongProtocol() {
 
 func (suite *ProcessRequestTestSuite) TestProcessRequestDeleteStreamReadToken() {
 	query_str := "query_string"
-	logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("wrong token access")))
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("wrong token access")))
+	logger.MockLog.On("Error", mock.MatchedBy(containsMatcherStr("cannot authorize request")))
+
 	w := doRequest("/beamtime/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/delete"+correctTokenSuffix, "POST", query_str)
 	suite.Equal(http.StatusUnauthorized, w.Code, "wrong token type")
 
@@ -231,9 +259,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestDeleteStreamReadToken()
 func (suite *ProcessRequestTestSuite) TestProcessRequestDeleteStreamWriteToken() {
 	query_str := "query_string"
 
-	expectedRequest := database.Request{DbName: expectedDBName, Stream: expectedStream, GroupId: "", Op: "delete_stream", ExtraParam: query_str}
+	expectedRequest := database.Request{Beamtime: expectedBeamtimeId,DataSource: expectedSource, Stream: expectedStream, GroupId: "", Op: "delete_stream", ExtraParam: query_str}
 	suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil)
 
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request delete_stream in "+expectedDBName)))
+	logger.MockLog.On("WithFields", mock.MatchedBy(containsMatcherMap("delete_stream")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcherStr("got request")))
+
 	doRequest("/beamtime/"+expectedBeamtimeId+"/"+expectedSource+"/"+expectedStream+"/delete"+correctTokenSuffixWrite, "POST", query_str)
 }
diff --git a/broker/src/asapo_broker/server/request_common.go b/broker/src/asapo_broker/server/request_common.go
index 1a0d5e875034ed369f85128929ef3baae1c17c1f..cda3a09951c417368a5db5320f3823cc2ae2ff4b 100644
--- a/broker/src/asapo_broker/server/request_common.go
+++ b/broker/src/asapo_broker/server/request_common.go
@@ -8,12 +8,10 @@ import (
 	"strconv"
 )
 
-func writeAuthAnswer(w http.ResponseWriter, requestName string, db_name string, err error) {
-	log_str := "processing " + requestName + " request in " + db_name + " at " + settings.GetDatabaseServer()
-	logger.Error(log_str + " - " + err.Error())
-
+func writeAuthAnswer(w http.ResponseWriter, requestOp string, db_name string, err error) {
+	logger.WithFields(map[string]interface{}{"operation": requestOp, "cause": err.Error()}).Error("cannot authorize request")
 	switch er := err.(type) {
-	case AuthorizationError:
+	case *AuthorizationError:
 		w.WriteHeader(er.statusCode)
 	default:
 		w.WriteHeader(http.StatusServiceUnavailable)
@@ -54,7 +52,7 @@ func authorize(r *http.Request, beamtime_id string, needWriteAccess bool) error
 	tokenJWT := r.URL.Query().Get("token")
 
 	if len(tokenJWT) == 0 {
-		return AuthorizationError{errors.New("cannot extract token from request"),http.StatusBadRequest}
+		return &AuthorizationError{errors.New("cannot extract token from request"), http.StatusBadRequest}
 	}
 
 	token, err := auth.AuthorizeToken(tokenJWT)
@@ -67,23 +65,23 @@ func authorize(r *http.Request, beamtime_id string, needWriteAccess bool) error
 		return err
 	}
 
-	return checkAccessType(token.AccessTypes,needWriteAccess)
+	return checkAccessType(token.AccessTypes, needWriteAccess)
 }
 
 func checkSubject(subject string, beamtime_id string) error {
 	if subject != utils.SubjectFromBeamtime(beamtime_id) {
-		return AuthorizationError{errors.New("wrong token subject"),http.StatusUnauthorized}
+		return &AuthorizationError{errors.New("wrong token subject"), http.StatusUnauthorized}
 	}
 	return nil
 }
 
 func checkAccessType(accessTypes []string, needWriteAccess bool) error {
-	if needWriteAccess && !utils.StringInSlice("write",accessTypes) {
-		return AuthorizationError{errors.New("wrong token access type"),http.StatusUnauthorized}
+	if needWriteAccess && !utils.StringInSlice("write", accessTypes) {
+		return &AuthorizationError{errors.New("wrong token access type"), http.StatusUnauthorized}
 	}
 
-	if !utils.StringInSlice("read",accessTypes) {
-		return AuthorizationError{errors.New("wrong token access type"),http.StatusUnauthorized}
+	if !utils.StringInSlice("read", accessTypes) {
+		return &AuthorizationError{errors.New("wrong token access type"), http.StatusUnauthorized}
 	}
 	return nil
 }
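Note: AuthorizationError is now created and returned as a pointer (&AuthorizationError{...}), so the type switch in writeAuthAnswer has to match *AuthorizationError; a case on the value type would silently fall through to StatusServiceUnavailable. Below is a stand-alone illustration of this Go pitfall, not project code; the field names are assumptions inferred from the positional literals above, except statusCode, which appears in the switch.

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    type AuthorizationError struct {
        err        error
        statusCode int
    }

    func (e *AuthorizationError) Error() string { return e.err.Error() }

    func statusFor(err error) int {
        switch er := err.(type) {
        case *AuthorizationError: // a case on the value type would not match a stored pointer
            return er.statusCode
        default:
            return http.StatusServiceUnavailable
        }
    }

    func main() {
        err := &AuthorizationError{errors.New("wrong token subject"), http.StatusUnauthorized}
        fmt.Println(statusFor(err)) // prints 401
    }
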
diff --git a/common/cpp/include/asapo/common/error.h b/common/cpp/include/asapo/common/error.h
index 510ea0866dc7c3e1659e3b3eb9680d487fe8fee1..4c38ae3ca1445289b4bd7435141f5b63b08ea412 100644
--- a/common/cpp/include/asapo/common/error.h
+++ b/common/cpp/include/asapo/common/error.h
@@ -21,7 +21,8 @@ class ErrorInterface {
     virtual std::string Explain() const noexcept = 0;
     virtual std::string ExplainPretty(uint8_t shift = 0) const noexcept = 0;
     virtual std::string ExplainInJSON() const noexcept = 0;
-    virtual ErrorInterface* AddContext(std::string key, std::string value) noexcept = 0;
+    virtual ErrorInterface* AddDetails(std::string key, std::string value) noexcept = 0;
+    virtual ErrorInterface* AddDetails(std::string key, uint64_t value) noexcept = 0;
     virtual ErrorInterface* SetCause(Error cause_err) noexcept = 0;
     virtual const Error& GetCause() const noexcept = 0;
     virtual CustomErrorData* GetCustomData() noexcept = 0;
@@ -50,7 +51,7 @@ class ServiceError : public ErrorInterface {
     ServiceErrorType error_type_;
     std::string error_name_;
     std::string error_message_;
-    std::map<std::string, std::string> context_;
+    std::map<std::string, std::string> details_;
     Error cause_err_;
     std::unique_ptr<CustomErrorData> custom_data_;
   public:
@@ -58,7 +59,8 @@ class ServiceError : public ErrorInterface {
     ServiceErrorType GetServiceErrorType() const noexcept;
     CustomErrorData* GetCustomData() noexcept override;
     void SetCustomData(std::unique_ptr<CustomErrorData> data) noexcept override;
-    ErrorInterface* AddContext(std::string key, std::string value) noexcept override;
+    ErrorInterface* AddDetails(std::string key, std::string value) noexcept override;
+    ErrorInterface* AddDetails(std::string key, uint64_t value) noexcept override;
     ErrorInterface* SetCause(Error cause_err) noexcept override;
     const Error& GetCause() const noexcept override;
     std::string Explain() const noexcept override;
diff --git a/common/cpp/include/asapo/common/error.tpp b/common/cpp/include/asapo/common/error.tpp
index 95f6eed6606794d1596731640ce58b58614841c2..931abd205bd37b67dc2c2f017cc5ee9e37721224 100644
--- a/common/cpp/include/asapo/common/error.tpp
+++ b/common/cpp/include/asapo/common/error.tpp
@@ -1,6 +1,7 @@
-
 #include "error.h"
 
+#include "asapo/common/utils.h"
+
 namespace asapo {
 
 template<typename ServiceErrorType>
@@ -37,10 +38,10 @@ std::string ServiceError<ServiceErrorType>::ExplainPretty(uint8_t shift) const n
     if (!error_message_.empty()) {
         err += "\n" + base_shift + shift_s + "message: " + error_message_;
     }
-    if (!context_.empty()) {
-        err += "\n" + base_shift + shift_s + "context: ";
+    if (!details_.empty()) {
+        err += "\n" + base_shift + shift_s + "details: ";
         auto i = 0;
-        for (const auto &kv : context_) {
+        for (const auto &kv : details_) {
             err += (i > 0 ? ", " : "") + kv.first + ":" + kv.second;
             i++;
         }
@@ -58,10 +59,10 @@ std::string ServiceError<ServiceErrorType>::Explain() const noexcept {
     if (!error_message_.empty()) {
         err += ", message: " + error_message_;
     }
-    if (!context_.empty()) {
-        err += ", context: ";
+    if (!details_.empty()) {
+        err += ", details: ";
         auto i = 0;
-        for (const auto &kv : context_) {
+        for (const auto &kv : details_) {
             err += (i > 0 ? ", " : "") + kv.first + ":" + kv.second;
             i++;
         }
@@ -73,8 +74,8 @@ std::string ServiceError<ServiceErrorType>::Explain() const noexcept {
 }
 
 template<typename ServiceErrorType>
-ErrorInterface *ServiceError<ServiceErrorType>::AddContext(std::string key, std::string value) noexcept {
-    context_[std::move(key)] = std::move(value);
+ErrorInterface *ServiceError<ServiceErrorType>::AddDetails(std::string key, std::string value) noexcept {
+    details_[std::move(key)] = std::move(value);
     return this;
 }
 template<typename ServiceErrorType>
@@ -91,13 +92,13 @@ template<typename ServiceErrorType>
 std::string ServiceError<ServiceErrorType>::ExplainInJSON() const noexcept {
     std::string err = WrapInQuotes("error") + ":" + WrapInQuotes(error_name_);
     if (!error_message_.empty()) {
-        err += "," + WrapInQuotes("message") + ":" + WrapInQuotes(error_message_);
+        err += "," + WrapInQuotes("message") + ":" + WrapInQuotes(EscapeJson(error_message_));
     }
-    if (!context_.empty()) {
-        err += "," + WrapInQuotes("context") + ":{";
+    if (!details_.empty()) {
+        err += "," + WrapInQuotes("details") + ":{";
         auto i = 0;
-        for (const auto &kv : context_) {
-            err += (i > 0 ? ", " : "") + WrapInQuotes(kv.first) + ":" + WrapInQuotes(kv.second);
+        for (const auto &kv : details_) {
+            err += (i > 0 ? ", " : "") + WrapInQuotes(kv.first) + ":" + WrapInQuotes(EscapeJson(kv.second));
             i++;
         }
         err += "}";
@@ -112,6 +113,11 @@ const Error &ServiceError<ServiceErrorType>::GetCause() const noexcept {
     return cause_err_;
 }
 
+template<typename ServiceErrorType>
+ErrorInterface *ServiceError<ServiceErrorType>::AddDetails(std::string key, uint64_t value) noexcept {
+    return AddDetails(key, std::to_string(value));
+}
+
 template<typename ServiceErrorType>
 Error ServiceErrorTemplate<ServiceErrorType>::Generate() const noexcept {
     return Generate("");
diff --git a/common/cpp/include/asapo/common/io_error.h b/common/cpp/include/asapo/common/io_error.h
index ef15e630836fe854ea5f51f069f800413e52c5e6..52245430bc6d610dd2de2009b2dabd38d74d973e 100644
--- a/common/cpp/include/asapo/common/io_error.h
+++ b/common/cpp/include/asapo/common/io_error.h
@@ -36,39 +36,39 @@ using IOErrorTemplate = ServiceErrorTemplate<IOErrorType>;
 
 namespace IOErrorTemplates {
 auto const kUnknownIOError = IOErrorTemplate {
-    "Unknown Error", IOErrorType::kUnknownIOError
+    "unknown error", IOErrorType::kUnknownIOError
 };
 
 auto const kFileNotFound = IOErrorTemplate {
-    "No such file or directory", IOErrorType::kFileNotFound
+    "no such file or directory", IOErrorType::kFileNotFound
 };
 auto const kReadError = IOErrorTemplate {
-    "Read error", IOErrorType::kReadError
+    "read error", IOErrorType::kReadError
 };
 auto const kBadFileNumber = IOErrorTemplate {
-    "Bad file number", IOErrorType::kBadFileNumber
+    "bad file number", IOErrorType::kBadFileNumber
 };
 auto const kResourceTemporarilyUnavailable = IOErrorTemplate {
-    "Resource temporarily unavailable", IOErrorType::kResourceTemporarilyUnavailable
+    "resource temporarily unavailable", IOErrorType::kResourceTemporarilyUnavailable
 };
 
 auto const kPermissionDenied = IOErrorTemplate {
-    "Permission denied", IOErrorType::kPermissionDenied
+    "permission denied", IOErrorType::kPermissionDenied
 };
 auto const kUnsupportedAddressFamily = IOErrorTemplate {
-    "Unsupported address family", IOErrorType::kUnsupportedAddressFamily
+    "unsupported address family", IOErrorType::kUnsupportedAddressFamily
 };
 auto const kInvalidAddressFormat = IOErrorTemplate {
-    "Invalid address format", IOErrorType::kInvalidAddressFormat
+    "invalid address format", IOErrorType::kInvalidAddressFormat
 };
 auto const kAddressAlreadyInUse = IOErrorTemplate {
-    "Address already in use", IOErrorType::kAddressAlreadyInUse
+    "address already in use", IOErrorType::kAddressAlreadyInUse
 };
 auto const kConnectionRefused = IOErrorTemplate {
-    "Connection refused", IOErrorType::kConnectionRefused
+    "connection refused", IOErrorType::kConnectionRefused
 };
 auto const kNotConnected = IOErrorTemplate {
-    "Not connected", IOErrorType::kNotConnected
+    "not connected", IOErrorType::kNotConnected
 };
 
 auto const kConnectionResetByPeer = IOErrorTemplate {
@@ -101,11 +101,11 @@ auto const kSocketOperationValueOutOfBound =  IOErrorTemplate {
 };
 
 auto const kAddressNotValid =  IOErrorTemplate {
-    "Address not valid", IOErrorType::kAddressNotValid
+    "address not valid", IOErrorType::kAddressNotValid
 };
 
 auto const kBrokenPipe =  IOErrorTemplate {
-    "Broken pipe/connection", IOErrorType::kBrokenPipe
+    "broken pipe/connection", IOErrorType::kBrokenPipe
 };
 
 
diff --git a/common/cpp/include/asapo/common/utils.h b/common/cpp/include/asapo/common/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..103fb8f0a658cbc8d8802e92a39d419b437a650a
--- /dev/null
+++ b/common/cpp/include/asapo/common/utils.h
@@ -0,0 +1,49 @@
+#ifndef ASAPO_COMMON_CPP_INCLUDE_ASAPO_COMMON_UTILS_H_
+#define ASAPO_COMMON_CPP_INCLUDE_ASAPO_COMMON_UTILS_H_
+
+#include <iomanip>
+#include <sstream>
+
+
+namespace asapo {
+
+inline std::string EscapeJson(const std::string& s) {
+    std::ostringstream o;
+    for (auto c = s.cbegin(); c != s.cend(); c++) {
+        switch (*c) {
+            case '"':
+                o << "\\\"";
+                break;
+            case '\\':
+                o << "\\\\";
+                break;
+            case '\b':
+                o << "\\b";
+                break;
+            case '\f':
+                o << "\\f";
+                break;
+            case '\n':
+                o << "\\n";
+                break;
+            case '\r':
+                o << "\\r";
+                break;
+            case '\t':
+                o << "\\t";
+                break;
+            default:
+                if ('\x00' <= *c && *c <= '\x1f') {
+                    o << "\\u"
+                      << std::hex << std::setw(4) << std::setfill('0') << (int)*c;
+                } else {
+                    o << *c;
+                }
+        }
+    }
+    return o.str();
+}
+
+}
+
+#endif //ASAPO_COMMON_CPP_INCLUDE_ASAPO_COMMON_UTILS_H_
diff --git a/common/cpp/include/asapo/json_parser/json_parser.h b/common/cpp/include/asapo/json_parser/json_parser.h
index d62cc8c89d8259d85cb78ecfc7ba2adda58a0fd6..cf123e712547fb42ef0c693c6f60404e13656405 100644
--- a/common/cpp/include/asapo/json_parser/json_parser.h
+++ b/common/cpp/include/asapo/json_parser/json_parser.h
@@ -52,7 +52,6 @@ class JsonFileParser : public JsonParser {
     JsonFileParser(const std::string& json, const std::unique_ptr<IO>* io = nullptr): JsonParser(json, io) {};
 };
 
-
 }
 
 
diff --git a/common/cpp/include/asapo/logger/logger.h b/common/cpp/include/asapo/logger/logger.h
index e4dc868959f8b5347b95a7a51b19623f166a6e73..4bd212dc240a3f6d8cdaf1f1012cc156d51c100f 100644
--- a/common/cpp/include/asapo/logger/logger.h
+++ b/common/cpp/include/asapo/logger/logger.h
@@ -27,10 +27,11 @@ class LogMessageWithFields {
     LogMessageWithFields& Append(std::string key, uint64_t val);
     LogMessageWithFields& Append(std::string key, double val, int precision);
     LogMessageWithFields& Append(const LogMessageWithFields& log_msg);
+    LogMessageWithFields& Append(std::string key, const LogMessageWithFields& log_msg);
     LogMessageWithFields& Append(std::string key, std::string val);
     std::string LogString() const;
   private:
-    inline std::string QuoteIFNeeded();
+    inline std::string CommaIfNeeded();
     std::string log_string_;
 };
 
diff --git a/common/cpp/include/asapo/preprocessor/definitions.h b/common/cpp/include/asapo/preprocessor/definitions.h
index 1f6b9fd8b7fa081969c3c79a0c033233eb296c2c..bcd62933727db56b25af9e750c0706c2607e6d1c 100644
--- a/common/cpp/include/asapo/preprocessor/definitions.h
+++ b/common/cpp/include/asapo/preprocessor/definitions.h
@@ -2,24 +2,13 @@
 #define ASAPO_DEFINITIONS_H
 
 #ifdef UNIT_TESTS
-#define VIRTUAL virtual
-#define FINAL
+#define ASAPO_VIRTUAL virtual
+#define ASAPO_FINAL
 #else
-#define VIRTUAL
-#define FINAL final
+#define ASAPO_VIRTUAL
+#define ASAPO_FINAL final
 #endif
 
-#if defined(__GNUC__) || defined(__clang__)
-#define DEPRECATED(msg) __attribute__((deprecated(msg)))
-#elif defined(_MSC_VER)
-#define DEPRECATED(msg) __declspec(deprecated(msg))
-#else
-#pragma message("WARNING: You need to implement DEPRECATED for this compiler")
-#define DEPRECATED(msg)
-#endif
-
-
-
 namespace  asapo {
 const char kPathSeparator =
 #ifdef WIN32
diff --git a/common/cpp/include/asapo/preprocessor/deprecated.h b/common/cpp/include/asapo/preprocessor/deprecated.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cf579285d2b3fb73375400f235666461778314d
--- /dev/null
+++ b/common/cpp/include/asapo/preprocessor/deprecated.h
@@ -0,0 +1,13 @@
+#ifndef ASAPO_DEPRECATED_H
+#define ASAPO_DEPRECATED_H
+
+#if defined(__GNUC__) || defined(__clang__)
+#define ASAPO_DEPRECATED(msg) __attribute__((deprecated(msg)))
+#elif defined(_MSC_VER)
+#define ASAPO_DEPRECATED(msg) __declspec(deprecated(msg))
+#else
+#pragma message("WARNING: You need to implement DEPRECATED for this compiler")
+#define ASAPO_DEPRECATED(msg)
+#endif
+
+#endif //ASAPO_DEPRECATED_H
diff --git a/common/cpp/include/asapo/request/request_pool.h b/common/cpp/include/asapo/request/request_pool.h
index 32be614b0f7aaa0a3168778255f916d928ea6ad2..638f98acaa84dadfd6b40e4ffce9a4886e8e0038 100644
--- a/common/cpp/include/asapo/request/request_pool.h
+++ b/common/cpp/include/asapo/request/request_pool.h
@@ -27,13 +27,13 @@ class RequestPool {
     };
   public:
     explicit RequestPool(uint8_t n_threads, RequestHandlerFactory* request_handler_factory, const AbstractLogger* log);
-    VIRTUAL Error AddRequest(GenericRequestPtr request, bool top_priority = false);
-    VIRTUAL void SetLimits(RequestPoolLimits limits);
-    VIRTUAL Error AddRequests(GenericRequests requests);
-    VIRTUAL ~RequestPool();
-    VIRTUAL uint64_t NRequestsInPool();
-    VIRTUAL uint64_t UsedMemoryInPool();
-    VIRTUAL Error WaitRequestsFinished(uint64_t timeout_ms);
+    ASAPO_VIRTUAL Error AddRequest(GenericRequestPtr request, bool top_priority = false);
+    ASAPO_VIRTUAL void SetLimits(RequestPoolLimits limits);
+    ASAPO_VIRTUAL Error AddRequests(GenericRequests requests);
+    ASAPO_VIRTUAL ~RequestPool();
+    ASAPO_VIRTUAL uint64_t NRequestsInPool();
+    ASAPO_VIRTUAL uint64_t UsedMemoryInPool();
+    ASAPO_VIRTUAL Error WaitRequestsFinished(uint64_t timeout_ms);
     void StopThreads();
   private:
     const AbstractLogger* log__;
diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp
index 50db4ae16cccf1f3dacd8701e708cb7e2c3ba968..bfb0d8ffbb6cf083740edbe149962206840251b9 100644
--- a/common/cpp/src/database/mongodb_client.cpp
+++ b/common/cpp/src/database/mongodb_client.cpp
@@ -46,7 +46,7 @@ Error MongoDBClient::Ping() {
     bson_destroy(&reply);
     bson_destroy(command);
 
-    return !retval ? DBErrorTemplates::kConnectionError.Generate() : nullptr;
+    return !retval ? DBErrorTemplates::kConnectionError.Generate("cannot ping database") : nullptr;
 
 }
 MongoDBClient::MongoDBClient() {
@@ -58,7 +58,7 @@ Error MongoDBClient::InitializeClient(const std::string& address) {
     client_ = mongoc_client_new(uri_str.c_str());
 
     if (client_ == nullptr) {
-        return DBErrorTemplates::kBadAddress.Generate();
+        return DBErrorTemplates::kBadAddress.Generate("cannot initialize database");
     }
 
     write_concern_ = mongoc_write_concern_new();
@@ -163,7 +163,7 @@ bson_p PrepareUpdateDocument(const uint8_t* json, Error* err) {
     std::string json_flat;
     auto parser_err = parser.GetFlattenedString("meta", ".", &json_flat);
     if (parser_err) {
-        *err = DBErrorTemplates::kJsonParseError.Generate("cannof flatten meta " + parser_err->Explain());
+        *err = DBErrorTemplates::kJsonParseError.Generate("cannof flatten meta ",std::move(parser_err));
         return nullptr;
     }
     bson_error_t mongo_err;
@@ -832,7 +832,7 @@ Error MongoDBClient::GetMetaFromDb(const std::string& collection, const std::str
     err = parser.Embedded("meta").GetRawString(res);
     if (err) {
         return DBErrorTemplates::kJsonParseError.Generate(
-                   "GetMetaFromDb: cannot parse database response: " + err->Explain());
+                   "GetMetaFromDb: cannot parse database response",std::move(err));
     }
     return nullptr;
 }
diff --git a/common/cpp/src/logger/logger.cpp b/common/cpp/src/logger/logger.cpp
index 546112130bee461f1dc39e9160e0ecddbe865454..e277c2da577b8f2d429b77bdb29627b4c4e3a58d 100644
--- a/common/cpp/src/logger/logger.cpp
+++ b/common/cpp/src/logger/logger.cpp
@@ -6,7 +6,7 @@
 
 namespace asapo {
 
-Logger CreateLogger(std::string name, bool console, bool centralized_log, const std::string& endpoint_uri) {
+Logger CreateLogger(std::string name, bool console, bool centralized_log, const std::string &endpoint_uri) {
     auto logger = new SpdLogger{name, endpoint_uri};
     logger->SetLogLevel(LogLevel::Info);
     if (console) {
@@ -19,15 +19,15 @@ Logger CreateLogger(std::string name, bool console, bool centralized_log, const
     return Logger{logger};
 }
 
-Logger CreateDefaultLoggerBin(const std::string& name) {
+Logger CreateDefaultLoggerBin(const std::string &name) {
     return CreateLogger(name, true, false, "");
 }
 
-Logger CreateDefaultLoggerApi(const std::string& name, const std::string& endpoint_uri) {
+Logger CreateDefaultLoggerApi(const std::string &name, const std::string &endpoint_uri) {
     return CreateLogger(name, false, true, endpoint_uri);
 }
 
-LogLevel StringToLogLevel(const std::string& name, Error* err) {
+LogLevel StringToLogLevel(const std::string &name, Error *err) {
     *err = nullptr;
     if (name == "debug") return LogLevel::Debug;
     if (name == "info") return LogLevel::Info;
@@ -40,7 +40,7 @@ LogLevel StringToLogLevel(const std::string& name, Error* err) {
 }
 
 template<typename ... Args>
-std::string string_format(const std::string& format, Args ... args) {
+std::string string_format(const std::string &format, Args ... args) {
     size_t size = static_cast<size_t>(snprintf(nullptr, 0, format.c_str(), args ...) + 1);
     std::unique_ptr<char[]> buf(new char[size]);
     snprintf(buf.get(), size, format.c_str(), args ...);
@@ -52,39 +52,40 @@ std::string EncloseQuotes(std::string str) {
 }
 
 LogMessageWithFields::LogMessageWithFields(std::string key, uint64_t val) {
-    log_string_ = EncloseQuotes(key) + ":" + std::to_string(val);
+    log_string_ = EncloseQuotes(std::move(key)) + ":" + std::to_string(val);
 }
 
 LogMessageWithFields::LogMessageWithFields(std::string key, double val, int precision) {
-    log_string_ = EncloseQuotes(key) + ":" + string_format("%." + std::to_string(precision) + "f", val);
+    log_string_ = EncloseQuotes(std::move(key)) + ":" + string_format("%." + std::to_string(precision) + "f", val);
 }
 
 LogMessageWithFields::LogMessageWithFields(std::string val) {
     if (!val.empty()) {
-        log_string_ = EncloseQuotes("message") + ":" + EncloseQuotes(escape_json(val));
+        log_string_ = EncloseQuotes("message") + ":" + EncloseQuotes(EscapeJson(val));
     }
 }
 
 LogMessageWithFields::LogMessageWithFields(std::string key, std::string val) {
-    log_string_ = EncloseQuotes(key) + ":" + EncloseQuotes(escape_json(val));
+    log_string_ = EncloseQuotes(std::move(key)) + ":" + EncloseQuotes(EscapeJson(val));
 }
 
-inline std::string LogMessageWithFields::QuoteIFNeeded() {
+inline std::string LogMessageWithFields::CommaIfNeeded() {
     return log_string_.empty() ? "" : ",";
 }
 
-LogMessageWithFields& LogMessageWithFields::Append(std::string key, uint64_t val) {
-    log_string_ += QuoteIFNeeded() + EncloseQuotes(key) + ":" + std::to_string(val);
+LogMessageWithFields &LogMessageWithFields::Append(std::string key, uint64_t val) {
+    log_string_ += CommaIfNeeded() + EncloseQuotes(std::move(key)) + ":" + std::to_string(val);
     return *this;
 }
 
-LogMessageWithFields& LogMessageWithFields::Append(std::string key, double val, int precision) {
-    log_string_ += QuoteIFNeeded() + EncloseQuotes(key) + ":" + string_format("%." + std::to_string(precision) + "f", val);
+LogMessageWithFields &LogMessageWithFields::Append(std::string key, double val, int precision) {
+    log_string_ += CommaIfNeeded() + EncloseQuotes(std::move(key)) + ":"
+        + string_format("%." + std::to_string(precision) + "f", val);
     return *this;
 }
 
-LogMessageWithFields& LogMessageWithFields::Append(std::string key, std::string val) {
-    log_string_ += QuoteIFNeeded() + EncloseQuotes(key) + ":" + EncloseQuotes(escape_json(val));
+LogMessageWithFields &LogMessageWithFields::Append(std::string key, std::string val) {
+    log_string_ += CommaIfNeeded() + EncloseQuotes(std::move(key)) + ":" + EncloseQuotes(EscapeJson(val));
     return *this;
 }
 
@@ -92,11 +93,17 @@ std::string LogMessageWithFields::LogString() const {
     return log_string_;
 }
 
-LogMessageWithFields::LogMessageWithFields(const Error& error) {
+LogMessageWithFields::LogMessageWithFields(const Error &error) {
     log_string_ = error->ExplainInJSON();
 }
-LogMessageWithFields& LogMessageWithFields::Append(const LogMessageWithFields& log_msg) {
-    log_string_ += QuoteIFNeeded() + log_msg.LogString();
+
+LogMessageWithFields &LogMessageWithFields::Append(const LogMessageWithFields &log_msg) {
+    log_string_ += CommaIfNeeded() + log_msg.LogString();
+    return *this;
+}
+
+LogMessageWithFields &LogMessageWithFields::Append(std::string key, const LogMessageWithFields &log_msg) {
+    log_string_ += CommaIfNeeded() + EncloseQuotes(std::move(key)) + ":{" + log_msg.LogString() + "}";
     return *this;
 }
 
diff --git a/common/cpp/src/logger/spd_logger.cpp b/common/cpp/src/logger/spd_logger.cpp
index 21f9f6ec381949f793b39b5cac63d05ab3ceb1e5..cf66d44ff91ffb9b0323d53d701c020a40aa0f59 100644
--- a/common/cpp/src/logger/spd_logger.cpp
+++ b/common/cpp/src/logger/spd_logger.cpp
@@ -3,8 +3,8 @@
 #include "fluentd_sink.h"
 
 #include <sstream>
-#include <iomanip>
 
+#include "asapo/common/utils.h"
 
 namespace asapo {
 
@@ -30,46 +30,9 @@ void SpdLogger::SetLogLevel(LogLevel level) {
     }
 }
 
-std::string escape_json(const std::string& s) {
-    std::ostringstream o;
-    for (auto c = s.cbegin(); c != s.cend(); c++) {
-        switch (*c) {
-        case '"':
-            o << "\\\"";
-            break;
-        case '\\':
-            o << "\\\\";
-            break;
-        case '\b':
-            o << "\\b";
-            break;
-        case '\f':
-            o << "\\f";
-            break;
-        case '\n':
-            o << "\\n";
-            break;
-        case '\r':
-            o << "\\r";
-            break;
-        case '\t':
-            o << "\\t";
-            break;
-        default:
-            if ('\x00' <= *c && *c <= '\x1f') {
-                o << "\\u"
-                  << std::hex << std::setw(4) << std::setfill('0') << (int)*c;
-            } else {
-                o << *c;
-            }
-        }
-    }
-    return o.str();
-}
-
 std::string EncloseMsg(std::string msg) {
     if (msg.find("\"") != 0) {
-        return std::string(R"("message":")") + escape_json(msg) + "\"";
+        return std::string(R"("message":")") + EscapeJson(msg) + "\"";
     } else {
         return msg;
     }
diff --git a/common/cpp/src/logger/spd_logger.h b/common/cpp/src/logger/spd_logger.h
index 1c2485f95f8520012223e06b4609a28aeffd89fd..4b4ea94a2942b18b560575adf0eadae903a372cb 100644
--- a/common/cpp/src/logger/spd_logger.h
+++ b/common/cpp/src/logger/spd_logger.h
@@ -38,7 +38,7 @@ class SpdLogger : public AbstractLogger {
 };
 
 std::string EncloseMsg(std::string msg);
-std::string escape_json(const std::string& s);
+std::string EscapeJson(const std::string& s);
 
 }
 
diff --git a/common/cpp/src/system_io/system_io.cpp b/common/cpp/src/system_io/system_io.cpp
index 3165a6a3d26c5c4654dbcb09f5f6dfd57fd015ed..d19f0f46fada88b333e15763c3ea027937e79e6c 100644
--- a/common/cpp/src/system_io/system_io.cpp
+++ b/common/cpp/src/system_io/system_io.cpp
@@ -116,7 +116,7 @@ MessageData SystemIO::GetDataFromFile(const std::string& fname, uint64_t* fsize,
 
     Read(fd, data_array, (size_t)*fsize, err);
     if (*err != nullptr) {
-        (*err)->AddContext("name", fname)->AddContext("expected size", std::to_string(*fsize));
+        (*err)->AddDetails("name", fname)->AddDetails("expected size", std::to_string(*fsize));
         Close(fd, nullptr);
         return nullptr;
     }
@@ -167,7 +167,8 @@ FileDescriptor SystemIO::OpenWithCreateFolders(const std::string& root_folder, c
     if (*err == IOErrorTemplates::kFileNotFound && create_directories)  {
         size_t pos = fname.rfind(kPathSeparator);
         if (pos == std::string::npos) {
-            *err = IOErrorTemplates::kFileNotFound.Generate(full_name);
+            *err = IOErrorTemplates::kFileNotFound.Generate();
+            (*err)->AddDetails("name",fname);
             return -1;
         }
         *err = CreateDirectoryWithParents(root_folder, fname.substr(0, pos));
@@ -191,7 +192,7 @@ Error SystemIO::WriteDataToFile(const std::string& root_folder, const std::strin
 
     Write(fd, data, length, &err);
     if (err) {
-        err->AddContext("name", fname);
+        err->AddDetails("name", fname);
         return err;
     }
 
@@ -402,7 +403,7 @@ asapo::FileDescriptor asapo::SystemIO::Open(const std::string& filename,
     FileDescriptor fd = _open(filename.c_str(), flags);
     if (fd == -1) {
         *err = GetLastError();
-        (*err)->AddContext("name", filename);
+        (*err)->AddDetails("name", filename);
     } else {
         *err = nullptr;
     }
@@ -616,7 +617,7 @@ Error SystemIO::CreateDirectoryWithParents(const std::string& root_path, const s
         Error err;
         CreateNewDirectory(new_path, &err);
         if (err && err != IOErrorTemplates::kFileAlreadyExists) {
-            err->AddContext("name", new_path);
+            err->AddDetails("name", new_path);
             return err;
         }
         if (iter != path.end()) {
diff --git a/common/cpp/src/system_io/system_io_linux.cpp b/common/cpp/src/system_io/system_io_linux.cpp
index f4256bddba5fdecb3e9bb9869943d03c9c642d81..18c4f1afe81635c58728981800b5a27c5cf9a3d6 100644
--- a/common/cpp/src/system_io/system_io_linux.cpp
+++ b/common/cpp/src/system_io/system_io_linux.cpp
@@ -28,7 +28,7 @@ Error SystemIO::AddToEpool(SocketDescriptor sd) const {
     event.data.fd = sd;
     if((epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, sd, &event) == -1) && (errno != EEXIST)) {
         auto err =  GetLastError();
-        err->AddContext("where", "add to epoll");
+        err->AddDetails("where", "add to epoll");
         close(epoll_fd_);
         return err;
     }
@@ -43,7 +43,7 @@ Error SystemIO::CreateEpoolIfNeeded(SocketDescriptor master_socket) const {
     epoll_fd_ = epoll_create1(0);
     if(epoll_fd_ == kDisconnectedSocketDescriptor) {
         auto err = GetLastError();
-        err->AddContext("where", "create epoll");
+        err->AddDetails("where", "create epoll");
         return err;
     }
     return AddToEpool(master_socket);
@@ -84,7 +84,7 @@ ListSocketDescriptors SystemIO::WaitSocketsActivity(SocketDescriptor master_sock
         }
         if (event_count < 0) {
             *err = GetLastError();
-            (*err)->AddContext("where", "epoll wait");
+            (*err)->AddDetails("where", "epoll wait");
             return {};
         }
 
diff --git a/common/cpp/src/system_io/system_io_linux_mac.cpp b/common/cpp/src/system_io/system_io_linux_mac.cpp
index 371c12e4d3464ac2478bab25c8733bf068f5989a..8aba5f76925436a59239ea434a42f1384dacb780 100644
--- a/common/cpp/src/system_io/system_io_linux_mac.cpp
+++ b/common/cpp/src/system_io/system_io_linux_mac.cpp
@@ -68,7 +68,7 @@ Error GetLastErrorFromErrno() {
         return IOErrorTemplates::kBrokenPipe.Generate();
     default:
         Error err = IOErrorTemplates::kUnknownIOError.Generate();
-        (*err).AddContext("Unknown error code: ", std::to_string(errno));
+            (*err).AddDetails("Unknown error code: ", std::to_string(errno));
         return err;
     }
 }
@@ -122,7 +122,7 @@ MessageMeta GetMessageMeta(const string& name, Error* err) {
 
     auto t_stat = FileStat(name, err);
     if (*err != nullptr) {
-        (*err)->AddContext("name", name);
+        (*err)->AddDetails("name", name);
         return MessageMeta{};
     }
 
@@ -157,7 +157,7 @@ void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList*
     auto dir = opendir((path).c_str());
     if (dir == nullptr) {
         *err = GetLastError();
-        (*err)->AddContext("name", path);
+        (*err)->AddDetails("name", path);
         return;
     }
 
@@ -183,7 +183,7 @@ void SystemIO::CollectMessageMetarmationRecursively(const std::string& path,
     auto dir = opendir((path).c_str());
     if (dir == nullptr) {
         *err = GetLastError();
-        (*err)->AddContext("name", path);
+        (*err)->AddDetails("name", path);
         return;
     }
 
diff --git a/common/cpp/src/system_io/system_io_windows.cpp b/common/cpp/src/system_io/system_io_windows.cpp
index b847d747b70f0a9c4204ce375518453fbc505475..d8df3328f521effb03467346b5f8b8cef581d189 100644
--- a/common/cpp/src/system_io/system_io_windows.cpp
+++ b/common/cpp/src/system_io/system_io_windows.cpp
@@ -66,7 +66,7 @@ Error IOErrorFromGetLastError() {
     default:
         std::cout << "[IOErrorFromGetLastError] Unknown error code: " << last_error << std::endl;
         Error err = IOErrorTemplates::kUnknownIOError.Generate();
-        (*err).AddContext("Unknown error code", std::to_string(last_error));
+            (*err).AddDetails("Unknown error code", std::to_string(last_error));
         return err;
     }
 }
@@ -151,7 +151,7 @@ MessageMeta SystemIO::GetMessageMeta(const std::string& name, Error* err) const
     auto hFind = FindFirstFile(name.c_str(), &f);
     if (hFind == INVALID_HANDLE_VALUE) {
         *err = IOErrorFromGetLastError();
-        (*err)->AddContext("name", name);
+        (*err)->AddDetails("name", name);
         return {};
     }
     FindClose(hFind);
@@ -179,7 +179,7 @@ void SystemIO::GetSubDirectoriesRecursively(const std::string& path, SubDirList*
     HANDLE handle = FindFirstFile((path + "\\*.*").c_str(), &find_data);
     if (handle == INVALID_HANDLE_VALUE) {
         *err = IOErrorFromGetLastError();
-        (*err)->AddContext("name", path);
+        (*err)->AddDetails("name", path);
         return;
     }
 
@@ -208,7 +208,7 @@ void SystemIO::CollectMessageMetarmationRecursively(const std::string& path,
     HANDLE handle = FindFirstFile((path + "\\*.*").c_str(), &find_data);
     if (handle == INVALID_HANDLE_VALUE) {
         *err = IOErrorFromGetLastError();
-        (*err)->AddContext("name", path);
+        (*err)->AddDetails("name", path);
         return;
     }
 
diff --git a/common/cpp/unittests/common/test_error.cpp b/common/cpp/unittests/common/test_error.cpp
index c588ea0526be16f5977367ee9e5fdb36cdaf3cee..4d459ba1a8b9fcfda124f432f13bc2d1702e32d0 100644
--- a/common/cpp/unittests/common/test_error.cpp
+++ b/common/cpp/unittests/common/test_error.cpp
@@ -27,13 +27,13 @@ TEST(ErrorTemplate, Explain) {
     ASSERT_THAT(error->Explain(), HasSubstr("test"));
 }
 
-TEST(ErrorTemplate, Context) {
+TEST(ErrorTemplate, Details) {
     Error error = asapo::GeneralErrorTemplates::kEndOfFile.Generate("test");
-    error->AddContext("key", "value");
-    error->AddContext("key2", "value2");
+    error->AddDetails("key", "value");
+    error->AddDetails("key2", "value2");
 
     ASSERT_THAT(error->Explain(), AllOf(HasSubstr("test"),
-                                        HasSubstr("context"),
+                                        HasSubstr("details"),
                                         HasSubstr("key:value"),
                                         HasSubstr("key2:value2")
                                        ));
@@ -43,8 +43,8 @@ TEST(ErrorTemplate, Cause) {
     Error error = asapo::GeneralErrorTemplates::kEndOfFile.Generate("test");
     Error error_c = asapo::GeneralErrorTemplates::kMemoryAllocationError.Generate("cause_test");
     Error error_c1 = asapo::GeneralErrorTemplates::kSimpleError.Generate("simple error");
-    error->AddContext("key", "value");
-    error_c->AddContext("key2", "value2");
+    error->AddDetails("key", "value");
+    error_c->AddDetails("key2", "value2");
     error_c->SetCause(std::move(error_c1));
     error->SetCause(std::move(error_c));
     ASSERT_THAT(error->Explain(), AllOf(HasSubstr("test"),
@@ -64,10 +64,10 @@ TEST(ErrorTemplate, Cause) {
 TEST(ErrorTemplate, Json) {
     Error error = asapo::GeneralErrorTemplates::kEndOfFile.Generate("test");
     Error error_c = asapo::GeneralErrorTemplates::kMemoryAllocationError.Generate("cause_test");
-    error->AddContext("key", "value");
+    error->AddDetails("key", "value");
     error->SetCause(std::move(error_c));
     auto expected_string =
-        R"("error":"end of file","message":"test","context":{"key":"value"},"cause":{"error":"memory allocation","message":"cause_test"})";
+        R"("error":"end of file","message":"test","details":{"key":"value"},"cause":{"error":"memory allocation","message":"cause_test"})";
     ASSERT_THAT(error->ExplainInJSON(),  Eq(expected_string));
 }
 
diff --git a/common/cpp/unittests/json_parser/test_json_parser.cpp b/common/cpp/unittests/json_parser/test_json_parser.cpp
index 9c2bf4f4fd879d3722c9a36da48e8b5ba689469d..ceed36b1b353aa3597e61217a30a115905de39cd 100644
--- a/common/cpp/unittests/json_parser/test_json_parser.cpp
+++ b/common/cpp/unittests/json_parser/test_json_parser.cpp
@@ -330,4 +330,17 @@ TEST_F(ParseFileTests, Flatten) {
 }
 
 
+TEST(ParseString, RawString) {
+    std::string json = R"({"top":"top","embedded":{"ar":[2,2,3],"str":"text"}})";
+    std::string json_row = R"({"ar":[2,2,3],"str":"text"})";
+    JsonStringParser parser{json};
+
+    std::string res;
+    auto err = parser.Embedded("embedded").GetRawString(&res);
+    ASSERT_THAT(err, Eq(nullptr));
+    ASSERT_THAT(res, Eq(json_row));
+
+}
+
+
 }
diff --git a/common/go/src/asapo_common/logger/logger.go b/common/go/src/asapo_common/logger/logger.go
index 0f026aa18b9e777a1b72089ca32d3685b97cf6f0..6a87f810ce77a1b9549f127d426dee7070413a04 100644
--- a/common/go/src/asapo_common/logger/logger.go
+++ b/common/go/src/asapo_common/logger/logger.go
@@ -17,6 +17,7 @@ const (
 )
 
 type Logger interface {
+	WithFields(args map[string]interface{}) Logger
 	Info(args ...interface{})
 	Debug(args ...interface{})
 	Fatal(args ...interface{})
@@ -28,6 +29,10 @@ type Logger interface {
 
 var my_logger Logger = &logRusLogger{}
 
+// WithFields returns a Logger, derived from the package logger, that attaches the given fields to every entry it logs.
+func WithFields(args map[string]interface{}) Logger {
+	return my_logger.WithFields(args)
+}
+
 func Info(args ...interface{}) {
 	my_logger.Info(args...)
 }
diff --git a/common/go/src/asapo_common/logger/logrus_logger.go b/common/go/src/asapo_common/logger/logrus_logger.go
index 4625f27492f47efe5d14ed1a1032b4810c418572..88b41114303b39f073050cb86753c101f6122af9 100644
--- a/common/go/src/asapo_common/logger/logrus_logger.go
+++ b/common/go/src/asapo_common/logger/logrus_logger.go
@@ -13,11 +13,22 @@ func (l *logRusLogger) SetSource(source string) {
 	l.source = source
 }
 
+
+// WithFields returns a new logger whose underlying entry carries the given structured fields.
+func (l *logRusLogger) WithFields(args map[string]interface{}) Logger {
+	new_log := &logRusLogger{
+		logger_entry: l.entry().WithFields(args),
+		source:       l.source,
+	}
+	return new_log
+}
+
+
 func (l *logRusLogger) entry() *log.Entry {
 	if l.logger_entry != nil {
 		return l.logger_entry
 	}
 
+
 	formatter := &log.JSONFormatter{
 		FieldMap: log.FieldMap{
 			log.FieldKeyMsg: "message",
diff --git a/common/go/src/asapo_common/logger/logrus_test.go b/common/go/src/asapo_common/logger/logrus_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6997f88845eb14da5ebb17cd1111f2d50647e45
--- /dev/null
+++ b/common/go/src/asapo_common/logger/logrus_test.go
@@ -0,0 +1,28 @@
+package logger
+
+import (
+	"github.com/sirupsen/logrus/hooks/test"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func logStr(hook *test.Hook) string {
+	s := ""
+	for _, entry := range hook.AllEntries() {
+		ss, _ := entry.String()
+		s += ss
+	}
+	return s
+}
+
+func TestLog(t *testing.T) {
+	l := &logRusLogger{}
+	hook := test.NewLocal(l.entry().Logger)
+	l.WithFields(map[string]interface{}{"testmap1":1}).Info("aaa")
+	assert.Contains(t, logStr(hook),"testmap1")
+
+	hook.Reset()
+	l.WithFields(map[string]interface{}{"testmap2":1}).Info("bbb")
+	assert.NotContains(t, logStr(hook),"testmap1")
+
+}
diff --git a/common/go/src/asapo_common/logger/mock_logger.go b/common/go/src/asapo_common/logger/mock_logger.go
index 484b86cb0175db0e801cc3e11cf42592ecef123a..58c1df744f4b17e4c402290bb22f932347fd7512 100644
--- a/common/go/src/asapo_common/logger/mock_logger.go
+++ b/common/go/src/asapo_common/logger/mock_logger.go
@@ -16,6 +16,11 @@ func SetMockLog() {
 	my_logger = &MockLog
 }
 
+func (l *MockLogger) WithFields(args map[string]interface{}) Logger {
+	l.Called(args)
+	return l
+}
+
 func UnsetMockLog() {
 	my_logger = &logRusLogger{}
 }
diff --git a/common/go/src/asapo_common/utils/authorization.go b/common/go/src/asapo_common/utils/authorization.go
index d707819b9d11758a87f5f3538b204e8d76ed5ee3..8a1b11bb253e16da88b47834816a6235cd4e72f5 100644
--- a/common/go/src/asapo_common/utils/authorization.go
+++ b/common/go/src/asapo_common/utils/authorization.go
@@ -151,7 +151,7 @@ func (a *JWTAuth) CheckAndGetContent(token string, extraClaims interface{}, payl
 	// payload ignored
 	c, ok := CheckJWTToken(token,a.Key)
 	if !ok {
-		return nil,errors.New("wrong JWT token")
+		return nil,errors.New("wrong or expired JWT token")
 	}
 	claim,ok  := c.(*CustomClaims)
 	if !ok {
diff --git a/common/go/src/asapo_common/utils/authorization_test.go b/common/go/src/asapo_common/utils/authorization_test.go
index cda7f43b091bfba5dc5f8228199e6ae125f01933..b0add6be7436fdb17708243a900dddd5846a013d 100644
--- a/common/go/src/asapo_common/utils/authorization_test.go
+++ b/common/go/src/asapo_common/utils/authorization_test.go
@@ -1,11 +1,11 @@
 package utils
 
 import (
+	"github.com/stretchr/testify/assert"
 	"net/http"
-	"testing"
 	"net/http/httptest"
+	"testing"
 	"time"
-	"github.com/stretchr/testify/assert"
 )
 
 type authorizationResponse struct {
diff --git a/consumer/api/cpp/src/consumer.cpp b/consumer/api/cpp/src/consumer.cpp
index ea1fa64a91c5ec917fd05bf6b41c55baaa57523d..df44db24b879b5a268dd738e35f1c1556c69e52c 100644
--- a/consumer/api/cpp/src/consumer.cpp
+++ b/consumer/api/cpp/src/consumer.cpp
@@ -10,7 +10,7 @@ std::unique_ptr<Consumer> Create(const std::string& source_name,
                                  Error* error,
                                  Args&& ... args) noexcept {
     if (source_name.empty()) {
-        *error = ConsumerErrorTemplates::kWrongInput.Generate("Empty Data Source");
+        *error = ConsumerErrorTemplates::kWrongInput.Generate("empty data source");
         return nullptr;
     }
 
diff --git a/consumer/api/cpp/src/consumer_impl.cpp b/consumer/api/cpp/src/consumer_impl.cpp
index 0fda0f43709ff1e1d72915856043e11244415f49..7f623e67b2f9cf6dd09470f127770a6e78eeaa44 100644
--- a/consumer/api/cpp/src/consumer_impl.cpp
+++ b/consumer/api/cpp/src/consumer_impl.cpp
@@ -48,7 +48,9 @@ Error ConsumerErrorFromPartialDataResponse(const std::string& response) {
     PartialErrorData data;
     auto parse_error = GetPartialDataResponseFromJson(response, &data);
     if (parse_error) {
-        return ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response - " + response);
+        auto err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response");
+        err->AddDetails("response", response);
+        return err;
     }
     auto err = ConsumerErrorTemplates::kPartialData.Generate();
     PartialErrorData* error_data = new PartialErrorData{data};
@@ -124,7 +126,7 @@ Error ProcessRequestResponce(const RequestInfo& request,
     }
 
     if (err != nullptr) {
-        err->AddContext("host", request.host)->AddContext("api", "request.api");
+        err->AddDetails("host", request.host)->AddDetails("api", request.api);
     }
     return err;
 
@@ -219,9 +221,9 @@ Error ConsumerImpl::ProcessDiscoverServiceResult(Error err, std::string* uri_to_
         if (err == ConsumerErrorTemplates::kUnsupportedClient) {
             return err;
         }
-        return ConsumerErrorTemplates::kUnavailableService.Generate(" on " + endpoint_
-                + (err != nullptr ? ": " + err->Explain()
-                   : ""));
+        auto ret_err = ConsumerErrorTemplates::kUnavailableService.Generate(std::move(err));
+        ret_err->AddDetails("destination", endpoint_);
+        return ret_err;
     }
     return nullptr;
 }
@@ -244,7 +246,8 @@ bool ConsumerImpl::SwitchToGetByIdIfPartialData(Error* err,
     if (*err == ConsumerErrorTemplates::kPartialData) {
         auto error_data = static_cast<const PartialErrorData*>((*err)->GetCustomData());
         if (error_data == nullptr) {
-            *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response - " + response);
+            *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response");
+            (*err)->AddDetails("response", response);
             return false;
         }
         *redirect_uri = std::to_string(error_data->id);
@@ -428,7 +431,7 @@ Error ConsumerImpl::GetDataFromFile(MessageMeta* info, MessageData* data) {
                                             (system_clock::now() - start).count());
     }
     if (err != nullptr) {
-        return ConsumerErrorTemplates::kLocalIOError.Generate(err->Explain());
+        return ConsumerErrorTemplates::kLocalIOError.Generate(std::move(err));
     }
     return nullptr;
 }
@@ -654,7 +657,8 @@ std::string ConsumerImpl::GetStreamMeta(const std::string& stream, Error* err) {
 DataSet DecodeDatasetFromResponse(std::string response, Error* err) {
     DataSet res;
     if (!res.SetFromJson(std::move(response))) {
-        *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response:" + response);
+        *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response");
+        (*err)->AddDetails("response", response);
         return {0, 0, MessageMetas{}};
     } else {
         return res;
diff --git a/consumer/api/cpp/src/tcp_connection_pool.h b/consumer/api/cpp/src/tcp_connection_pool.h
index 7d775133a768099c2b48324f2dc756772290e938..963d92036db4f5fcbab92657a70c06fa9ab7df88 100644
--- a/consumer/api/cpp/src/tcp_connection_pool.h
+++ b/consumer/api/cpp/src/tcp_connection_pool.h
@@ -16,10 +16,10 @@ struct TcpConnectionInfo {
 
 class TcpConnectionPool {
   public:
-    VIRTUAL SocketDescriptor GetFreeConnection(const std::string& source, bool* reused, Error* err);
-    VIRTUAL SocketDescriptor Reconnect(SocketDescriptor sd, Error* err);
-    VIRTUAL  void ReleaseConnection(SocketDescriptor sd);
-    VIRTUAL ~TcpConnectionPool() = default;
+    ASAPO_VIRTUAL SocketDescriptor GetFreeConnection(const std::string& source, bool* reused, Error* err);
+    ASAPO_VIRTUAL SocketDescriptor Reconnect(SocketDescriptor sd, Error* err);
+    ASAPO_VIRTUAL void ReleaseConnection(SocketDescriptor sd);
+    ASAPO_VIRTUAL ~TcpConnectionPool() = default;
     TcpConnectionPool();
     std::unique_ptr<IO> io__;
   private:
diff --git a/consumer/api/python/dist_linux/CMakeLists.txt b/consumer/api/python/dist_linux/CMakeLists.txt
index 0715a4d2ac58f641276470703350ca2c79d7a1f8..fd2b61e5dd54ecfaa3d84c6cbbd642e2730be78f 100644
--- a/consumer/api/python/dist_linux/CMakeLists.txt
+++ b/consumer/api/python/dist_linux/CMakeLists.txt
@@ -9,30 +9,33 @@ if ("source" IN_LIST BUILD_PYTHON_PACKAGES )
 endif()
 
 if ("rpm" IN_LIST BUILD_PYTHON_PACKAGES)
-    ADD_CUSTOM_TARGET(python-rpm-consumer ALL
+    if (BUILD_PYTHON2_PACKAGES)
+        ADD_CUSTOM_TARGET(python-rpm-consumer ALL
             COMMAND PACKAGE_PREFIX=python- python setup.py bdist_rpm --release=1.${PACKAGE_RELEASE_SUFFIX}
             --requires=numpy --binary-only
             COMMAND rm -f dist/python*.gz dist/*debuginfo* dist/*debugsource*
             )
-
+        ADD_DEPENDENCIES(python-rpm-consumer python3-rpm-consumer)
+    endif()
     ADD_CUSTOM_TARGET(python3-rpm-consumer ALL
             COMMAND PACKAGE_PREFIX=python3- python3 setup.py bdist_rpm --release=1.${PACKAGE_RELEASE_SUFFIX}
             --requires=python3-numpy --binary-only
             COMMAND rm -f dist/python3*.gz dist/*debuginfo* dist/*debugsource*
             )
-    ADD_DEPENDENCIES(python3-rpm-consumer python-rpm-consumer)
-    ADD_DEPENDENCIES(python-rpm-consumer copy_python_dist-consumer)
+    ADD_DEPENDENCIES(python3-rpm-consumer copy_python_dist-consumer)
 endif()
 
 if ("deb" IN_LIST BUILD_PYTHON_PACKAGES)
-    ADD_CUSTOM_TARGET(python-deb-consumer ALL
+    if (BUILD_PYTHON2_PACKAGES)
+        ADD_CUSTOM_TARGET(python-deb-consumer ALL
             COMMAND rm -rf deb_dist/*/
             COMMAND PACKAGE_PREFIX= python setup.py --command-packages=stdeb.command
             sdist_dsc --debian-version=${PACKAGE_RELEASE_SUFFIX} --depends=python-numpy bdist_deb
             COMMAND rm -f deb_dist/*dbgsym*
             COMMAND cp deb_dist/*.deb dist/
             )
-
+        ADD_DEPENDENCIES(python-deb-consumer python3-deb-consumer)
+    endif()
     ADD_CUSTOM_TARGET(python3-deb-consumer ALL
             COMMAND rm -rf deb_dist/*/
             COMMAND PACKAGE_PREFIX= python3 setup.py --command-packages=stdeb.command
@@ -40,8 +43,7 @@ if ("deb" IN_LIST BUILD_PYTHON_PACKAGES)
             COMMAND rm -f deb_dist/*dbgsym*
             COMMAND cp deb_dist/*.deb dist/
             )
-    ADD_DEPENDENCIES(python3-deb-consumer python-deb-consumer)
-    ADD_DEPENDENCIES(python-deb-consumer copy_python_dist-consumer)
+    ADD_DEPENDENCIES(python3-deb-consumer copy_python_dist-consumer)
 endif()
 
 ADD_CUSTOM_TARGET(copy_python_dist-consumer ALL
diff --git a/deploy/asapo_services/run_maxwell.sh b/deploy/asapo_services/run_maxwell.sh
index d2a74ed1f22c7cd92adf9e18ce672383dc5f7bd4..13bd97f0a38617cd4092b1c84528df2d44f8459f 100755
--- a/deploy/asapo_services/run_maxwell.sh
+++ b/deploy/asapo_services/run_maxwell.sh
@@ -45,7 +45,7 @@ ASAPO_LIGHTWEIGHT_SERVICE_NODES=`scontrol show hostnames $SLURM_JOB_NODELIST | h
 mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $MONGO_DIR
 chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $MONGO_DIR
 cd $SERVICE_DATA_CLUSTER_SHARED
-mkdir esdatadir fluentd grafana influxdb mongodb
+mkdir esdatadir fluentd grafana influxdb mongodb prometheus alertmanager
 chmod 777 *
 
 #todo: elastic search check
diff --git a/deploy/asapo_services/scripts/asapo-monitoring.nmd.tpl b/deploy/asapo_services/scripts/asapo-monitoring.nmd.tpl
index 677e5413fa0000c45754de72151909eb135b79d4..0f1a154934e3e227e6e9cb3245c494c2bdcb5344 100644
--- a/deploy/asapo_services/scripts/asapo-monitoring.nmd.tpl
+++ b/deploy/asapo_services/scripts/asapo-monitoring.nmd.tpl
@@ -27,6 +27,8 @@ job "asapo-monitoring" {
       driver = "docker"
       user = "${asapo_user}"
       config {
+        security_opt = ["no-new-privileges"]
+        userns_mode = "host"
         image = "prom/alertmanager:${alertmanager_version}"
         args = [
           "--web.route-prefix=/alertmanager/",
@@ -87,6 +89,8 @@ job "asapo-monitoring" {
       driver = "docker"
       user = "${asapo_user}"
       config {
+        security_opt = ["no-new-privileges"]
+        userns_mode = "host"
         image = "prom/prometheus:${prometheus_version}"
         args = [
           "--web.external-url=/prometheus/",
diff --git a/deploy/build_env/centos/build.sh b/deploy/build_env/centos/build.sh
index 103ae9e77c95a0761b89a43d3e2114d1beb604fd..b48e489cf41f2d0e68855a52e25fe36a25d5bb6d 100755
--- a/deploy/build_env/centos/build.sh
+++ b/deploy/build_env/centos/build.sh
@@ -1,6 +1,7 @@
 #!/usr/bin/env bash
 
 cd /asapo/build
+
 cmake \
     -DCMAKE_BUILD_TYPE="Release" \
     -DENABLE_LIBFABRIC=ON \
@@ -15,6 +16,12 @@ cmake \
 make -j 4
 make package
 
+# Python 2 packages are only built for CentOS 7 (el7); newer distributions get Python 3 only
+if [ $OS == "el7" ]; then
+  BUILD_PYTHON2_PACKAGES=ON
+else
+  BUILD_PYTHON2_PACKAGES=OFF
+fi
+
 #switch to static curl for Python packages
 rm CMakeCache.txt
 cmake \
@@ -26,6 +33,7 @@ cmake \
     -DBUILD_PYTHON=ON   \
     -DPACKAGE_RELEASE_SUFFIX=1.$OS \
     -DBUILD_PYTHON_PACKAGES="source;rpm"   \
+    -DBUILD_PYTHON2_PACKAGES=$BUILD_PYTHON2_PACKAGES \
     -DBUILD_PYTHON_DOCS=$BUILD_PYTHON_DOCS \
     ..
 make -j 1
diff --git a/deploy/build_env/centos/install_curl.sh b/deploy/build_env/centos/install_curl.sh
index d8017177b0108c8d2b6bf75e084c62bf0cc508a5..ec291cfdac1baabc25b46cfa98c6aa87404a6145 100755
--- a/deploy/build_env/centos/install_curl.sh
+++ b/deploy/build_env/centos/install_curl.sh
@@ -2,7 +2,7 @@
 
 mkdir -p $1
 cd $1
-wget https://curl.haxx.se/download/curl-7.58.0.tar.gz
+wget --no-check-certificate https://curl.haxx.se/download/curl-7.58.0.tar.gz
 tar xzf curl-7.58.0.tar.gz
 cd curl-7.58.0
 ./configure --without-ssl --disable-shared --disable-manual --disable-ares \
diff --git a/deploy/build_env/debians/Dockerfile_debian11.1 b/deploy/build_env/debians/Dockerfile_debian11.1
new file mode 100644
index 0000000000000000000000000000000000000000..8a3d09d44055907264aaeaa2528ec5b6673a3215
--- /dev/null
+++ b/deploy/build_env/debians/Dockerfile_debian11.1
@@ -0,0 +1,23 @@
+FROM debian:11.1
+
+ENV GOPATH /tmp
+
+ADD install_curl.sh install_curl.sh
+
+RUN apt update && apt install -y g++ git wget python3 python3-numpy python3-pip cmake \
+zlib1g-dev python3-all-dev python3-stdeb
+
+RUN pip3 --no-cache-dir install cython
+
+RUN ./install_curl.sh /curl
+
+ADD install_libfabric.sh install_libfabric.sh
+RUN ./install_libfabric.sh
+
+RUN apt install -y libcurl4-openssl-dev
+
+RUN apt install -y dh-python
+
+ARG OS
+ENV OS=${OS}
+ADD build.sh /bin/build.sh
\ No newline at end of file
diff --git a/deploy/build_env/debians/build.sh b/deploy/build_env/debians/build.sh
index 9e27fc1ee4cdad925988a7b758854c4ab6804f04..844a4f2c3b80c60769a266de62568fba0fcbc7eb 100755
--- a/deploy/build_env/debians/build.sh
+++ b/deploy/build_env/debians/build.sh
@@ -25,6 +25,12 @@ else
   BUILD_PYTHON_DOCS=OFF
 fi
 
+# Python 2 packages are built only for the older Debian/Ubuntu releases; newer ones (e.g. Debian 11.1) are Python 3 only
+if [ $OS == "debian9.13" -o $OS == "debian10.7" -o $OS == "ubuntu16.04" -o $OS == "ubuntu18.04" ]; then
+  BUILD_PYTHON2_PACKAGES=ON
+else
+  BUILD_PYTHON2_PACKAGES=OFF
+fi
+
 #switch to static curl for Python packages
 rm CMakeCache.txt
 cmake \
@@ -35,6 +41,7 @@ cmake \
     -DNUMPY_VERSION=0   \
     -DBUILD_PYTHON=ON   \
     -DPACKAGE_RELEASE_SUFFIX=$OS \
+    -DBUILD_PYTHON2_PACKAGES=$BUILD_PYTHON2_PACKAGES \
     -DBUILD_PYTHON_PACKAGES="source;deb"   \
     -DBUILD_PYTHON_DOCS=$BUILD_PYTHON_DOCS \
     ..
diff --git a/deploy/build_env/debians/build_images.sh b/deploy/build_env/debians/build_images.sh
index 365e106d12e27dc980464740e855a864cb275946..56e14df38c08c6187ff6bf4edd0387d3bdd421e0 100755
--- a/deploy/build_env/debians/build_images.sh
+++ b/deploy/build_env/debians/build_images.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -e
 
-vers="ubuntu18.04 ubuntu16.04 debian9.13 debian10.7"
+vers="ubuntu18.04 ubuntu16.04 debian9.13 debian10.7 debian11.1"
 
 for ver in $vers
 do
diff --git a/deploy/build_env/debians/install_curl.sh b/deploy/build_env/debians/install_curl.sh
index d400f52352e8714034523c48ef0444524fd600b8..12c6fb561a28a763d4861b31f1a7faaf2c694c27 100755
--- a/deploy/build_env/debians/install_curl.sh
+++ b/deploy/build_env/debians/install_curl.sh
@@ -2,7 +2,7 @@
 
 mkdir -p $1
 cd $1
-wget https://curl.haxx.se/download/curl-7.58.0.tar.gz
+wget --no-check-certificate https://curl.haxx.se/download/curl-7.58.0.tar.gz
 tar xzf curl-7.58.0.tar.gz
 cd curl-7.58.0
 ./configure --without-ssl --disable-shared --disable-manual --disable-ares  \
diff --git a/deploy/build_env/manylinux2010/Dockerfile b/deploy/build_env/manylinux2010/Dockerfile
index cc73faaa30de881eb7cb90b92918d068a2eb50ea..31dde5c597372fbf2cc7054c12e699870a081e41 100644
--- a/deploy/build_env/manylinux2010/Dockerfile
+++ b/deploy/build_env/manylinux2010/Dockerfile
@@ -1,4 +1,4 @@
-FROM quay.io/pypa/manylinux2010_x86_64
+FROM quay.io/pypa/manylinux2010_x86_64:2021-04-05-a6ea1ab
 
 ENV GOPATH /tmp
 
diff --git a/deploy/build_env/manylinux2010/build.sh b/deploy/build_env/manylinux2010/build.sh
index 0fee6f1faebfef1a0776629a1db85c316d46a31b..c39149562a56431309584375c79cdf488c99d33e 100755
--- a/deploy/build_env/manylinux2010/build.sh
+++ b/deploy/build_env/manylinux2010/build.sh
@@ -2,14 +2,13 @@
 set -e
 
 declare -A numpy_versions
-numpy_versions[cp27mu]=1.12.1
-numpy_versions[cp27m]=1.12.1
 numpy_versions[cp35m]=1.12.1
 numpy_versions[cp36m]=1.12.1
 numpy_versions[cp37m]=1.14.5
 numpy_versions[cp38]=1.17.3
+numpy_versions[cp39]=1.19.3
 
-for python_path in /opt/python/cp{27,35,36,37,38}*; do
+for python_path in /opt/python/cp{35,36,37,38,39}*; do
     python_version=$(basename $python_path)
     python_version=${python_version#*-}
     python=$python_path/bin/python
@@ -23,6 +22,7 @@ for python_path in /opt/python/cp{27,35,36,37,38}*; do
           -DBUILD_CLIENTS_ONLY=ON \
           -DLIBCURL_DIR=/curl -DPython_EXECUTABLE=$python \
           -DBUILD_PYTHON_PACKAGES=source \
+          -DBUILD_PYTHON2_PACKAGES=OFF \
           -DNUMPY_VERSION=$numpy_version ..
     cd /asapo/build/consumer/api/python/dist_linux \
         && $pip install -r ../dev-requirements.txt \
diff --git a/deploy/build_env/manylinux2010/build_image.sh b/deploy/build_env/manylinux2010/build_image.sh
index 9a2bfe88613fdfd7b4a467ca527a78289a83ab52..cddfddad4d060ceb2faecd3dfbe925ffedcf1536 100755
--- a/deploy/build_env/manylinux2010/build_image.sh
+++ b/deploy/build_env/manylinux2010/build_image.sh
@@ -3,8 +3,8 @@
 #docker build -t yakser/asapo-env:manylinux2010_ .
 #./docker-squash yakser/asapo-env:manylinux2010_ -t yakser/asapo-env:manylinux2010
 
-docker build -t yakser/asapo-env:manylinux2010 .
-docker push yakser/asapo-env:manylinux2010
+docker build -t yakser/asapo-env:manylinux2010-2021-04-05-a6ea1ab .
+docker push yakser/asapo-env:manylinux2010-2021-04-05-a6ea1ab
 
 
 
diff --git a/deploy/build_env/manylinux2010/install_curl.sh b/deploy/build_env/manylinux2010/install_curl.sh
index d8017177b0108c8d2b6bf75e084c62bf0cc508a5..ec291cfdac1baabc25b46cfa98c6aa87404a6145 100755
--- a/deploy/build_env/manylinux2010/install_curl.sh
+++ b/deploy/build_env/manylinux2010/install_curl.sh
@@ -2,7 +2,7 @@
 
 mkdir -p $1
 cd $1
-wget https://curl.haxx.se/download/curl-7.58.0.tar.gz
+wget --no-check-certificate https://curl.haxx.se/download/curl-7.58.0.tar.gz
 tar xzf curl-7.58.0.tar.gz
 cd curl-7.58.0
 ./configure --without-ssl --disable-shared --disable-manual --disable-ares \
diff --git a/discovery/src/asapo_discovery/server/get_version.go b/discovery/src/asapo_discovery/server/get_version.go
index 7c1127df3bd049016b2d1be5e4ecb132963a6786..655b7052b17b1a934d018a4ca4b5dbbc8c2ff36c 100644
--- a/discovery/src/asapo_discovery/server/get_version.go
+++ b/discovery/src/asapo_discovery/server/get_version.go
@@ -27,10 +27,11 @@ func extractProtocol(r *http.Request) (string, error) {
 }
 
 func routeGetVersion(w http.ResponseWriter, r *http.Request) {
-	log_str := "processing get version"
+	log_str := "processing get version request"
 	logger.Debug(log_str)
 
 	if ok := checkDiscoveryApiVersion(w, r); !ok {
+		logger.Debug("checkDiscoveryApiVersion failed")
 		return
 	}
 	keys := r.URL.Query()
diff --git a/discovery/src/asapo_discovery/server/routes.go b/discovery/src/asapo_discovery/server/routes.go
index 2e0b31972864702c63af499ab9dc7335b13bae25..9cbc5b920a84dd4e2c51d02257d9263ea40b98a2 100644
--- a/discovery/src/asapo_discovery/server/routes.go
+++ b/discovery/src/asapo_discovery/server/routes.go
@@ -15,12 +15,15 @@ func getService(service string) (answer []byte, code int) {
 		answer, err = requestHandler.GetSingleService(service)
 
 	}
-	log_str := "processing get " + service
+	log_str := "processing get " + service + " request"
 	if err != nil {
 		logger.Error(log_str + " - " + err.Error())
 		return []byte(err.Error()), http.StatusInternalServerError
 	}
-	logger.Debug(log_str + " -  got " + string(answer))
+	logger.WithFields(map[string]interface{}{
+		"service": service,
+		"answer":  string(answer),
+	}).Debug("processing get service request")
 	return answer, http.StatusOK
 }
 
@@ -39,7 +42,6 @@ func validateProtocol(w http.ResponseWriter, r *http.Request, client string) boo
 		logger.Error(log_str + " - " + hint)
 		return false
 	}
-	logger.Debug(log_str + " - ok")
 	return true
 }
 
diff --git a/discovery/src/asapo_discovery/server/routes_test.go b/discovery/src/asapo_discovery/server/routes_test.go
index 394a2625047932bd968d8f4b5020c0249369a305..98db36704f24217350427b12e3584f83da491e26 100644
--- a/discovery/src/asapo_discovery/server/routes_test.go
+++ b/discovery/src/asapo_discovery/server/routes_test.go
@@ -1,17 +1,17 @@
 package server
 
 import (
+	"asapo_common/logger"
+	"asapo_common/utils"
 	"asapo_common/version"
+	"asapo_discovery/common"
+	"asapo_discovery/request_handler"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/suite"
-	"asapo_common/logger"
-	"asapo_common/utils"
 	"net/http"
 	"net/http/httptest"
 	"strings"
 	"testing"
-	"asapo_discovery/request_handler"
-	"asapo_discovery/common"
 )
 
 func containsMatcher(substr string) func(str string) bool {
@@ -74,8 +74,8 @@ var receiverTests = []requestTest {
 func (suite *GetServicesTestSuite) TestGetReceivers() {
 	for _,test:= range receiverTests {
 		if test.code == http.StatusOK {
-			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("validating producer")))
-			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameReceiverService)))
+			logger.MockLog.On("WithFields", mock.Anything)
+			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("request")))
 		} else {
 			logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("validating producer")))
 		}
@@ -99,8 +99,8 @@ var brokerTests = []requestTest {
 func (suite *GetServicesTestSuite) TestGetBroker() {
 	for _,test:= range brokerTests {
 		if test.code == http.StatusOK {
-			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("validating consumer")))
-			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameBrokerService)))
+			logger.MockLog.On("WithFields", mock.Anything)
+			logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("request")))
 		} else {
 			logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("validating consumer")))
 		}
@@ -117,7 +117,8 @@ func (suite *GetServicesTestSuite) TestGetBroker() {
 
 
 func (suite *GetServicesTestSuite) TestGetMongo() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameMongoService)))
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("request")))
 
 	w := doRequest("/asapo-mongodb")
 
@@ -127,8 +128,8 @@ func (suite *GetServicesTestSuite) TestGetMongo() {
 }
 
 func (suite *GetServicesTestSuite) TestGetFts() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameFtsService)))
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("validating")))
+	logger.MockLog.On("WithFields", mock.Anything)
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("request")))
 
 	w := doRequest("/" + version.GetDiscoveryApiVersion()+"/asapo-file-transfer?protocol=v0.1")
 
@@ -138,7 +139,7 @@ func (suite *GetServicesTestSuite) TestGetFts() {
 }
 
 func (suite *GetServicesTestSuite) TestGetVersions() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get version")))
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("request")))
 
 	w := doRequest("/" + version.GetDiscoveryApiVersion() + "/version")
 
diff --git a/docs/site/changelog/2021-12-11-21.12.0.md b/docs/site/changelog/2021-12-11-21.12.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5cf81074eb4bd85a0140f73ee0fb84e72ecf058
--- /dev/null
+++ b/docs/site/changelog/2021-12-11-21.12.0.md
@@ -0,0 +1,18 @@
+---
+title: Version 21.12.0
+author: Sergey Yakubov
+author_title: DESY IT
+tags: [release]
+---
+
+# Changelog for version 21.12.0
+
+FEATURES
+* Consumer API: Get last within consumer group returns message only once
+* Producer API: An option to write raw data to core filesystem directly
+* Consumer/Producer API - packages for Debian 11.1, wheel for Python 3.9
+* Consumer/Producer API - dropped Python 2 support for wheels and packages for new Debian/CentOS versions
+
+INTERNAL
+* Improved logging - tags for beamline, beamtime, ...
+* Updated orchestration tools to latest version
diff --git a/docs/site/docs/getting-started.mdx b/docs/site/docs/getting-started.mdx
index 726f6f98226b7b65161bf2327a0bf8d4e2af1445..6a45728cd173f631acb0c3f0355a85408d2b2454 100644
--- a/docs/site/docs/getting-started.mdx
+++ b/docs/site/docs/getting-started.mdx
@@ -32,7 +32,7 @@ unix socket or a tcp port for communications)
 }>
 <TabItem value="unix">
 
-```shell content=./examples/start_asapo_socket.sh"
+```shell content="./examples/start_asapo_socket.sh"
 ```
 
 </TabItem>
diff --git a/docs/site/examples/install_python_clients_pip.sh b/docs/site/examples/install_python_clients_pip.sh
index 3a9d1faf519ab8d49dc273f1c4cbd4ec63f09eb8..2acfcaa0c6c76504c733bba2c39bb15b14dbbf4c 100644
--- a/docs/site/examples/install_python_clients_pip.sh
+++ b/docs/site/examples/install_python_clients_pip.sh
@@ -1,13 +1,13 @@
 #!/usr/bin/env bash
 
-pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_producer==100.0.develop
-pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_consumer==100.0.develop
+pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_producer==100.0.dev0
+pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_consumer==100.0.dev0
 # you might need to update pip if the above commands error: pip3 install --upgrade pip
 
 # if that does not work (abi incompatibility, etc) you may try to install source packages
 # take a look at http://nims.desy.de/extra/asapo/linux_packages/ or http://nims.desy.de/extra/asapo/windows10 for your OS. E.g. for Debian 10.7
-# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_producer-100.0.develop.tar.gz
-# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_consumer-100.0.develop.tar.gz
+# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_producer-100.0.dev0.tar.gz
+# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_consumer-100.0.dev0.tar.gz
 
-# pip3 install asapo_producer-100.0.develop.tar.gz
-# pip3 install asapo_consumer-100.0.develop.tar.gz
+# pip3 install asapo_producer-100.0.dev0.tar.gz
+# pip3 install asapo_consumer-100.0.dev0.tar.gz
diff --git a/docs/site/examples/install_python_clients_pkg.sh b/docs/site/examples/install_python_clients_pkg.sh
index 4542fcc48ad4807ff07ae72422d429aa1f38a3c7..2dabe2c13bcb9aaa1143453130347b7425604650 100644
--- a/docs/site/examples/install_python_clients_pkg.sh
+++ b/docs/site/examples/install_python_clients_pkg.sh
@@ -2,8 +2,8 @@
 
 # you can also install Linux/Windows packages if you have root access (or install locally).
 # take a look at http://nims.desy.de/extra/asapo/linux_packages/ or http://nims.desy.de/extra/asapo/windows10 for your OS. E.g. for Debian 10.7
-wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-producer_100.0~develop-debian10.7_amd64.deb
-wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-consumer_100.0~develop-debian10.7_amd64.deb
+wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-producer_100.0~dev0-debian10.7_amd64.deb
+wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-consumer_100.0~dev0-debian10.7_amd64.deb
 
-sudo apt install ./python3-asapo-producer_100.0~develop-debian10.7_amd64.deb
-sudo apt install ./python3-asapo_consumer_100.0~develop-debian10.7_amd64.deb
+sudo apt install ./python3-asapo-producer_100.0~dev0-debian10.7_amd64.deb
+sudo apt install ./python3-asapo_consumer_100.0~dev0-debian10.7_amd64.deb
diff --git a/docs/site/examples/start_asapo_socket.sh b/docs/site/examples/start_asapo_socket.sh
index a50a87ff98087e3a338bb7c9d7e7726d897cd51e..0150a44a8898ad3cc282decdb4f99f52ab82641e 100644
--- a/docs/site/examples/start_asapo_socket.sh
+++ b/docs/site/examples/start_asapo_socket.sh
@@ -16,7 +16,7 @@ mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHAR
 chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
 
 cd $SERVICE_DATA_CLUSTER_SHARED
-mkdir -p fluentd grafana influxdb influxdb2 mongodb
+mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
 chmod 777 *
 
 docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
diff --git a/docs/site/examples/start_asapo_tcp.sh b/docs/site/examples/start_asapo_tcp.sh
index 007595fe59d1df7a3bcc8c0c0772faaa1b536ff9..4debb8e82089a4dc0d4bdb9fa744c41afa2b67f4 100644
--- a/docs/site/examples/start_asapo_tcp.sh
+++ b/docs/site/examples/start_asapo_tcp.sh
@@ -22,7 +22,7 @@ mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHAR
 chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
 
 cd $SERVICE_DATA_CLUSTER_SHAREDdetector
-mkdir -p fluentd grafana influxdb2 mongodb
+mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
 chmod 777 *
 
 docker run --privileged --userns=host --security-opt no-new-privileges --rm \
diff --git a/docs/site/freeze_version.sh b/docs/site/freeze_version.sh
index 1e88279cb496804ccf9e7c1dbddcadf8c0ae14dc..dcaf17ac587000085516eb8d842574c32df3c475 100755
--- a/docs/site/freeze_version.sh
+++ b/docs/site/freeze_version.sh
@@ -2,7 +2,6 @@
 
 if [[ -z "${DOCS_VERSION}" ]]; then
     echo No version specified
-
     exit 1
 fi
 
@@ -27,21 +26,28 @@ CONTENT='content=\"\.\/'
 #replace the links to the code examples to the frozen copies
 for file in $(find ./versioned_docs/version-$DOCS_VERSION -type f)
 do
-ed -s $file <<ED_COMMANDS > /dev/null 2>&1
-,s/content=\"\?\.\/examples/content=\".\/${VERSIONED_EXAMPLES_ESCAPED}/g
-w
-ED_COMMANDS
+if [[ `uname -s` == "Darwin" ]]; then
+  sed -i '' -e "s/content=\"\.\/examples/content=\".\/${VERSIONED_EXAMPLES_ESCAPED}/g" $file
+else
+  sed -i -e "s/content=\"\.\/examples/content=\".\/${VERSIONED_EXAMPLES_ESCAPED}/g" $file
+fi
 done
 
 #replace the links to the dev-packages to the versioned ones
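+# build one list of sed expressions here and reuse it for every file below
+# (macOS/BSD sed needs the extra '' argument after -i, hence the uname check)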
+read -r -d '' template << EOM
+-e s/asapo-cluster-dev:100\.0\.develop/asapo-cluster:${DOCS_VERSION}/g
+-e s/==100\.0\.dev0/==${VERSION_FOR_PIP}/g
+-e s/100\.0[~.]develop/${DOCS_VERSION}/g
+-e s/100\.0[~.]dev0/${DOCS_VERSION}/g
+EOM
+
 for file in $(find ./${VERSIONED_EXAMPLES} -type f)
 do
-ed -s $file <<ED_COMMANDS > /dev/null 2>&1
-,s/asapo-cluster-dev:100\.0\.develop/asapo-cluster:${DOCS_VERSION}/g
-,s/==100\.0\.develop/==${VERSION_FOR_PIP}/g
-,s/100\.0[~.]develop/${DOCS_VERSION}/g
-w
-ED_COMMANDS
+if [[ `uname -s` == "Darwin" ]]; then
+  sed -i '' $template $file
+else
+  sed -i $template $file
+fi
 done
 
 exit 0
diff --git a/docs/site/versioned_docs/version-21.12.0/compare-to-others.md b/docs/site/versioned_docs/version-21.12.0/compare-to-others.md
new file mode 100644
index 0000000000000000000000000000000000000000..97e0bd7fc30ae8166e3493742164a3bb6044fc31
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/compare-to-others.md
@@ -0,0 +1,49 @@
+---
+title: Comparison to Other Solutions
+---
+
+Here we consider how ASAPO is different from other workflows practiced at DESY. The possible candidates are:
+
+### Filesystem
+Probably the most commonly used approach at the moment. Files are written to the beamline filesystem directly via an NFS/SMB mount or by HiDRA and are copied to the core filesystem by a copy daemon. User software then reads the files from the filesystem.
+
+### Filesystem + Kafka
+The previous workflow, plus a Kafka instance that produces a message whenever a file appears in the core filesystem. These messages can then be consumed by user software.
+
+### HiDRA
+HiDRA can work in two modes: either data is transferred via HiDRA itself, or data is written over NFS/SMB mounts and HiDRA monitors a folder in the beamline filesystem. In both cases one can subscribe to HiDRA's data queue to be informed about new files.
+
+### ASAPO
+
+ASAPO does not work with files but with data streams. Behind the scenes it does use files, but what a user sees is a stream of messages, where a message typically consists of a metadata part and a data blob. The data blob can be a single image, a file with arbitrary content, or anything else, even null. What matters is that whatever goes in at one end appears at the other, and that each message has a consecutive index. Messages must be ingested into an ASAPO data stream in some way (e.g. using the ASAPO Producer API or HiDRA), and the data, if not already in the data center, is transferred there and stored. A user can then read the messages from the stream and process them in any way they like.
+
+### Compare by categories
+
+In the table below we compare the approaches from different points of view and in the [next table](#compare-by-features) we compare the approaches by available features.
+
+|     Category     |                                                                            Filesystem                                                                           |                                                                                                                                                                                                                            Filesystem+Kafka                                                                                                                                                                                                                           |                                                                                                                                              HiDRA                                                                                                                                             |                                                                                                                                                                                                                                                                                                                                                            ASAPO                                                                                                                                                                                                                                                                                                                                                           |
+|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Data ingest      | traditional way - write to disk | same as Filesystem, additionally a message is sent to a Kafka topic with basic file metadata (name, size, creation date, ...) | outperforms data ingest via NFS/SMB, uses ZeroMQ (TCP), saturates 10GE bandwidth. To be tested with 100GE. Same metadata as with Kafka | uses parallel TCP connections & in the future RDMA. Can add arbitrary metadata (JSON format). Can send data to various streams to create an arbitrary data hierarchy (e.g. stream per scan). Saves data to a memory buffer and on disk. |
+| Offline analysis | traditional way - read from disk. Need to know the filename to read, usually not a problem. | same as Filesystem, there is no benefit in reading the Kafka queue after all data has arrived | not possible, need to fall back to Filesystem | same as online analysis (see below) |
+| Online analysis  | no efficient way to recognise that new data is available - periodically read the folder content and compare with the previous state? periodically check whether a file appeared? | using a subscription to a "new files" topic, user software can be made aware of new data quite soon and react correspondingly. | one can subscribe to HiDRA's ZeroMQ stream and consume data. If data arrives faster than it can be processed or e.g. the user software crashes, data might be skipped. | one can get data from various streams in different ways - get the next unprocessed message ordered by index, get the last one, get by id. Since everything is stored in persistent storage, processing is possible with arbitrarily slow (but also fast) consumers. Resilient to connection loss or consumer crashes. |
+| Performance      | as good as read/write to disk. Can be an issue, especially with new detectors and 100GE+ networks. | as good as read/write to disk + some latency for the file to be written to the beamline filesystem, copied to the core filesystem and a message to go through Kafka. | data is available as soon as it is received by HiDRA. If multiple consumer groups want to read the same data (one consumer is always there - the process that writes the file), data will be transferred multiple times, which influences the network performance. | data is available to be consumed as soon as it has arrived and been saved to the beamline filesystem (later this can be optimised by using persistent memory instead). No need to read from disk since the data also remains in a memory buffer. |
+| Parallelisation  | Parallelisation is easily possible e.g. with an MPI library. | Parallelisation is possible if Kafka's topics are partitioned (which is not the case at the moment) | Not out of the box, possible with some changes on the user's side | Parallelisation is easily possible, one can consume data concurrently with different consumers from the same stream. Normally, synchronisation between consumers is not needed, but this might depend on the use case. When configured, data can be resent if not acknowledged within a specified time period. |
+| Search/filter    | hardly possible, manually parsing some metadata file, using POSIX commands? | same as Filesystem. There is the Kafka SQL query language, which could be used if there were metadata in the messages, which is not the case (yet?). | not possible | one can use a set of SQL queries on the ingested metadata. |
+| General comments | Might be ok for slow detectors and/or use cases without online analysis requirements. Might be the only way to work with legacy applications | Fits well for cases where the software just needs a trigger that some new data has arrived, and where processing order, extra metadata or parallelisation is not that important or is implemented by other means. There is some delay between the moment an image is generated and the moment the event is emitted by Kafka, but it is probably not that significant (a couple of seconds at most). Might not be appropriate for very fast detectors since it still uses the filesystem to write/read data. | Works quite well to transfer files from the detector to the data center. Also a good candidate for live viewers, where the last available "image" should be displayed. Does not work for offline analysis or for near real-time analysis where image processing can take longer than image taking. | Tries to be a general solution which improves in areas where other approaches do not suffice: single code for offline/near real-time/online analysis, parallelisation, extended metadata, efficient memory/storage management, getting data without access to the filesystem (e.g. from a detector PC without the core filesystem mounted), computational pipelines, ... Everything has its price: user software must be modified to use ASAPO, a wrapper might be needed for legacy software that cannot be modified, the user/beamtime scientist should better structure the data - e.g. consecutive indexes must be available for each image, one has to define to which stream data is written/read, what the format of the data is, ... |
+
+
+### Compare by features
+
+| Feature                                                                                                            | Filesystem                         | Filesystem+Kafka                   | HiDRA                              | ASAPO                              |
+|--------------------------------------------------------------------------------------------------------------------|------------------------------------|------------------------------------|------------------------------------|------------------------------------|
+| send metadata with image                                                                                           | No                                 | No                                 | No                                 | Yes                                |
+| get last image                                                                                                     | No                                 | No                                 | Yes                                | Yes                                |
+| get image by id                                                                                                    | No                                 | No                                 | No                                 | Yes                                |
+| get image in order                                                                                                 | No                                 | No                                 | No                                 | Yes                                |
+| Immediately get informed that a new image has arrived                                                              | No                                 | Yes                                | Yes                                | Yes                                |
+| access image remotely, without reading filesystem                                                                  | No                                 | No                                 | Yes, if it is still in buffer      | Yes                                |
+| access past images                                                                                                 | Yes                                | Yes                                | No                                 | Yes                                |
+| need to change user code                                                                                           | No                                 | Yes                                | Yes                                | Yes                                |
+| parallelisation                                                                                                    | Yes (if user software allows that) | Not out of the box                 | Not out of the box                 | Yes                                |
+| legacy applications                                                                                                | Yes                                | No (wrapper could be a workaround) | No (wrapper could be a workaround) | No (wrapper could be a workaround) |
+| transparent restart/continuation of simulations in case e.g. worker process crashes, also for parallel simulations | Not out of the box                 | Yes                                | No                                 | Yes                                |
diff --git a/docs/site/versioned_docs/version-21.12.0/consumer-clients.md b/docs/site/versioned_docs/version-21.12.0/consumer-clients.md
new file mode 100644
index 0000000000000000000000000000000000000000..d04c14034585b2ef69f335b0104b2bfafddf69d1
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/consumer-clients.md
@@ -0,0 +1,33 @@
+---
+title: Consumer Clients
+---
+
+A consumer client (or consumer) is the part of a distributed streaming system that is responsible for processing the streams of data created by a producer. It is usually the user's (beamline scientist, detector developer, physicist, ...) responsibility to develop a client for a specific beamline, detector or experiment using the ASAPO Consumer API, and ASAPO's responsibility to make sure data is delivered to the consumers in an efficient and reliable way.
+
+![Consumer clients](/img/consumer-clients.png)
+
+Consumer API is available for C++ and Python and has the following main functionality:
+
+- Create a consumer instance and bind it to a specific beamtime and data source
+    - multiple instances can be created (also within a single application) to receive data from different sources
+    - a beamtime token is used for access control
+- If needed (mainly for the get_next_XX operations), create a consumer group that allows messages to be processed independently from other groups
+- Receive messages from a specific stream (you can read more [here](data-in-asapo) about data in ASAPO)
+    - GetNext to receive and process messages one after another without needing to know message indexes
+        - The Consumer API returns a message with index 1, then 2, ... as they were set by the producer.
+        - This also works in parallel, so that the payload is distributed across multiple consumers within the same consumer group or between threads of a single consumer instance. In the parallel case the order of the message indexes is not determined.
+    - GetLast to receive the last available message - e.g. for live visualisation
+    - GetById - get a message by index - provides random access
+- Make queries based on metadata contained in a message - returns all messages in a stream with specific metadata. A subset of the SQL language is used
+
+
+All of the above functions can return only the metadata part of the message, so that an application can e.g. extract the filename and pass it to a 3rd party tool for processing. Alternatively, a function may return the complete message with metadata and data so that the consumer can process it directly. Access to the filesystem where the data is actually stored is not required in this case.
+
+:::note
+In case of the dataset family of functions, only the list of dataset messages is returned; the data can be retrieved in a separate call.
+:::
+    
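+Below is a minimal Python sketch of these calls, just for orientation. The endpoint, beamtime, data source, token and path are placeholders, and the exact signatures are assumptions based on the cookbook examples and may differ between ASAPO versions - please check the linked API documentation and the [code examples](cookbook/overview) for the authoritative calls.
+
+```python
+import asapo_consumer
+
+# placeholder values - use your own endpoint, beamtime, data source and token
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+token = "<beamtime token>"
+path_to_files = "/path/to/beamtime/data"  # only used if the filesystem is accessible locally
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True,
+                                           beamtime, "test_source", token, 5000)
+
+group_id = consumer.generate_group_id()  # consumer group for GetNext-style operations
+
+# GetNext: messages one after another (indexes 1, 2, 3, ... as set by the producer)
+data, meta = consumer.get_next(group_id, meta_only=False)
+
+# GetLast: the last available message, e.g. for live visualisation
+data, meta = consumer.get_last(meta_only=False)
+
+# GetById: random access by message index
+data, meta = consumer.get_by_id(1, meta_only=False)
+
+print(meta["name"], len(data))
+```
+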
+Please refer to [C++](http://asapo.desy.de/cpp/) and [Python](http://asapo.desy.de/python/) documentation for specific details (available from DESY intranet only).
+
+
+
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/acknowledgements.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/acknowledgements.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..9319f856a955165856e62d7ed4756b03444cdd4d
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/acknowledgements.mdx
@@ -0,0 +1,63 @@
+---
+title: Acknowledgements
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+While consuming the messages we can issue acknowledgements to denote that the messages were (or were not) processed successfully.
+
+Here is a snippet that expects 10 sample messages in the default stream. When consuming the messages, message #3 receives a negative acknowledgement, which puts it back in the stream for repeated processing, and messages 5 and 7 remain unacknowledged. On the second attempt message #3 gets acknowledged.
+
+You can find the full example in the git repository.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/acknowledgements.py" snippetTag="consume"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/acknowledgements.cpp" snippetTag="consume"
+```
+
+</TabItem>
+</Tabs>
+
+The list of unacknowledged messages can be accessed at any time. The following snippet prints this list.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/acknowledgements.py" snippetTag="print"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/acknowledgements.cpp" snippetTag="print"
+```
+
+</TabItem>
+</Tabs>
+
+The output will show the order in which the messages receive their acknowledgements. You may notice that the second acknowledgement of message #3 happens with a delay, which was deliberately chosen. The unacknowledged messages are retrieved separately at the end, after the consumer timeout.
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/datasets.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/datasets.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..5f8299fa0e030f214ee94525f3ed669dbb869ddc
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/datasets.mdx
@@ -0,0 +1,67 @@
+---
+title: Datasets
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+The messages in a stream can be multi-parted. If you have several producers (e.g. sub-detectors) that produce several parts of a single message, you can use datasets to assemble a single message from several parts.
+
+## Dataset Producer
+
+Here is a code snippet that can be used to produce a three-part dataset. The full usable example can be found in the git repository.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/produce_dataset.py" snippetTag="dataset"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/produce_dataset.cpp" snippetTag="dataset"
+```
+
+</TabItem>
+</Tabs>
+
+You should see the "successfuly sent" message in the logs, and the file should appear in the corresponding folder (by default in ```/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test```).
+
+## Dataset Consumer
+
+Here is a snippet that can be used to consume a dataset. The full example is also in the git repository.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/consume_dataset.py" snippetTag="dataset"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/consume_dataset.cpp" snippetTag="dataset"
+```
+
+</TabItem>
+</Tabs>
+
+The details about the received dataset should appear in the logs, together with the message "stream finished" (if the "finished" flag was sent for the stream). The "stream ended" message will appear for non-finished streams, but may also mean that the stream does not exist (or was deleted).
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/metadata.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/metadata.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..48ccfcfe8befab7a69c1c7fdc2566c77ec81297d
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/metadata.mdx
@@ -0,0 +1,202 @@
+---
+title: Metadata
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+You can also store any custom metadata with your beamtime, stream, and each message. This tutorial shows how to store, update and access this metadata. The metadata is stored as JSON, and any JSON structure is supported.
+
+:::info
+Since C++ doesn't have built-in JSON support, you'd have to use 3rd party libraries if you want JSON parsing. In this tutorial we won't use any JSON parsing for C++ and will treat JSONs as regular strings. Please note that ASAP::O only supports valid JSON, and providing invalid input will result in an error.
+:::
+
+
+## Send Metadata
+
+The following snippet shows how to send the beamtime metadata.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="beamtime_set"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="beamtime_set"
+```
+
+</TabItem>
+</Tabs>
+
+The metadata can be updated at any moment. Here is an example of how to do it with the beamtime metadata.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="beamtime_update"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="beamtime_update"
+```
+
+</TabItem>
+</Tabs>
+
+In the same way, the metadata can be set for each stream.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="stream_set"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="stream_set"
+```
+
+</TabItem>
+</Tabs>
+
+And for each message.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="message_set"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="message_set"
+```
+
+</TabItem>
+</Tabs>
+
+## Read Metadata
+
+Here we will read the beamtime metadata. In this example it will already incorporate the changes we made during the update.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="beamtime_get"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="beamtime_get"
+```
+
+</TabItem>
+</Tabs>
+
+Same for the stream.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="stream_get"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="stream_get"
+```
+
+</TabItem>
+</Tabs>
+
+And for the message.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/metadata.py" snippetTag="message_get"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/metadata.cpp" snippetTag="message_get"
+```
+
+</TabItem>
+</Tabs>
+
+The output will show the metadata retrieved from the beamtime, stream and message.
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/next_stream.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/next_stream.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..530b48d9ee23e498bbce4285937b2de976033a71
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/next_stream.mdx
@@ -0,0 +1,61 @@
+---
+title: Stream Finishing
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+When all the data in the stream is sent, the stream can be finished, and it is possible to set the "next stream" to follow up the first one. This tutorial shows how several streams can be chained together in a single consumer by using stream finishing.
+
+The next stream is set by providing an additional parameter while finishing the stream.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/next_stream.py" snippetTag="next_stream_set"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/next_stream.cpp" snippetTag="next_stream_set"
+```
+
+</TabItem>
+</Tabs>
+
+The reading of the streams can then be chained together. When one stream finishes and the next stream is provided, the reading of the next stream can start immediately. This example will read the whole chain of streams until it encounters a non-finished stream, or a stream that was finished without the ```next```.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/next_stream.py" snippetTag="read_stream"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/next_stream.cpp" snippetTag="read_stream"
+```
+
+</TabItem>
+</Tabs>
+
+The output will show the messages being consumed from the streams in order. For this example (the full file can be found in the git repository) it will be first the ```default``` stream, then ```next```.
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/overview.md b/docs/site/versioned_docs/version-21.12.0/cookbook/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..97db96a408a253119c2091af786a8a3bd3820726
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/overview.md
@@ -0,0 +1,13 @@
+---
+title: Code Examples Overview
+---
+
+Here you can find code examples for various common ASAPO use cases. Make sure that the ASAP::O instance and client libraries are properly installed; see the [Getting Started page](../) for details.
+
+For the most basic use case, see the [Simple Producer](simple-producer) and [Simple Consumer](simple-consumer). There are also basic examples of CMake and Makefile configurations for compiling the clients.
+
+The API documentation can be found [here](http://asapo.desy.de/cpp) (for C++) or [here](http://asapo.desy.de/python) (for Python).
+
+:::tip
+You can see more examples in ASAPO [source code](https://stash.desy.de/projects/ASAPO/repos/asapo/browse/examples)
+:::
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/query.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/query.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..5353f4775c0e657a4e8684a4cf17c912e54cec29
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/query.mdx
@@ -0,0 +1,142 @@
+---
+title: Message query
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+Messages in streams can be retrieved based on their metadata. Both the technical information (e.g. ID or timestamp) and the user metadata (see [this tutorial](metadata) for details) can be used to make a query. In this tutorial several examples of such queries are shown. A subset of standard SQL syntax is used.
+
+For this example we expect several messages in the default stream with the metadata consisting of two fields: a string named ```condition``` and an integer named ```somevalue```. Go to the git repository for the full example.
+
+:::info
+Keep in mind that query requests return only the list of metadata records for the matching messages, not the messages themselves. You need to explicitly retrieve the actual data for each message.
+:::
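+
+For instance, a minimal Python sketch of combining a query with an explicit retrieval could look like this. The query string is illustrative, and the exact signatures of ```query_messages```/```get_by_id``` and the ```_id``` field name are assumptions - see the snippets below and the referenced example files for the authoritative calls.
+
+```python
+import asapo_consumer
+
+# placeholder values - create the consumer as in the Simple Consumer example
+consumer = asapo_consumer.create_consumer("localhost:8400", "/path/to/data", True,
+                                          "asapo_test", "test_source", "<beamtime token>", 5000)
+
+# the query returns only the metadata records of the matching messages ...
+metadatas = consumer.query_messages('meta.condition = "ok" and meta.somevalue > 10')
+
+# ... so the actual data has to be fetched explicitly, e.g. by message index
+for m in metadatas:
+    data, meta = consumer.get_by_id(m["_id"], meta_only=False)  # "_id" field name is an assumption
+    print(meta["name"], len(data))
+```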
+
+Here we can pick a message with a specific ID.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/query.py" snippetTag="by_id"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/query.cpp" snippetTag="by_id"
+```
+
+</TabItem>
+</Tabs>
+
+We can also use a simple rule for picking a range of IDs.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/query.py" snippetTag="by_ids"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/query.cpp" snippetTag="by_ids"
+```
+
+</TabItem>
+</Tabs>
+
+We can query the messages based on their metadata, for example requesting a specific value of the string field.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/query.py" snippetTag="string_equal"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/query.cpp" snippetTag="string_equal"
+```
+
+</TabItem>
+</Tabs>
+
+We can also impose more complex constraints on the metadata, e.g. a range for an integer field.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/query.py" snippetTag="int_compare"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/query.cpp" snippetTag="int_compare"
+```
+
+</TabItem>
+</Tabs>
+
+Since every message comes with a timestamp, we can make constraints on it as well. For example, request all the messages from the last 15 minutes.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/query.py" snippetTag="timestamp"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/query.cpp" snippetTag="timestamp"
+```
+
+</TabItem>
+</Tabs>
+
+The output of the full example will show the message selection together with the conditions used for selection.
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/simple-consumer.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-consumer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..22d01f03464b415c152802c8658ec86ae0dd57f0
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-consumer.mdx
@@ -0,0 +1,195 @@
+---
+title: Simple Consumer
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This example shows how to consume a message. This page provides snippets for a simple consumer. You can go to BitBucket to see the whole example at once. The files there are a working example ready to launch.
+
+A special access token is needed to create a consumer. For the purpose of this tutorial a special "test" token is used. It will only work for the beamtime called "asapo_test".
+
+The first step is to create an instance of the consumer.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+    { label: 'C', value: 'c', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/consume.py" snippetTag="create"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/consume.cpp" snippetTag="create"
+```
+
+</TabItem>
+
+<TabItem value="c">
+
+```c content="./versioned_examples/version-21.12.0/c/consume.c" snippetTag="create"
+```
+
+</TabItem>
+
+</Tabs>
+
+You can list all the streams within the beamtime.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+    { label: 'C', value: 'c', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/consume.py" snippetTag="list"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/consume.cpp" snippetTag="list"
+```
+
+</TabItem>
+
+</Tabs>
+
+The actual consuming of the messages will probably be done in a loop. Here is an example of how such a loop could be organized. It will run until the stream is finished or no new messages are received within the timeout.
+
+You need a group ID, which can be shared by several consumers working in parallel. You can either generate one or use a random string.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+    { label: 'C', value: 'c', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/consume.py" snippetTag="consume"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/consume.cpp" snippetTag="consume"
+```
+
+</TabItem>
+
+<TabItem value="c">
+
+```c content="./versioned_examples/version-21.12.0/c/consume.c" snippetTag="consume"
+```
+
+</TabItem>
+
+</Tabs>
+
+After consuming the stream you can delete it.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+    { label: 'C', value: 'c', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/consume.py" snippetTag="delete"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/consume.cpp" snippetTag="delete"
+```
+
+</TabItem>
+
+<TabItem value="c">
+
+```c content="./versioned_examples/version-21.12.0/c/consume.c" snippetTag="delete"
+```
+
+</TabItem>
+
+</Tabs>
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+    { label: 'C', value: 'c', },
+  ]
+}>
+<TabItem value="python">
+For the Python example, just launch it with the Python interpreter (make sure the ASAP::O client Python modules are installed).
+
+```bash
+$ python3 consume.py
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+For the C++ example you need to compile it first. The easiest way to do it is to install the ASAP::O client dev packages and use the CMake find_package function. CMake will generate the makefile that you can then use to compile the example.
+
+The example CMake file can look like this
+
+```cmake content="./versioned_examples/version-21.12.0/cpp/CMakeLists.txt" snippetTag="#consumer"
+```
+
+You can use it like this
+
+```bash
+$ cmake . && make
+$ ./asapo-consume
+```
+
+</TabItem>
+
+<TabItem value="c">
+Compile it, e.g. using a Makefile and pkg-config (although we recommend CMake - see the C++ section), and execute it. This example assumes ASAP::O is installed to /opt/asapo. Adjust accordingly.
+
+```makefile content="./versioned_examples/version-21.12.0/c/Makefile" snippetTag="#consumer"
+```
+
+```bash
+$ make
+$ ./asapo-consume
+```
+
+
+</TabItem>
+
+</Tabs>
+
+The details about the received message should appear in the logs, together with the message "stream finished" (if the "finished" flag was sent for the stream). The "stream ended" message will appear for non-finished streams, but may also mean that the stream does not exist (or was deleted).
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/simple-pipeline.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-pipeline.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b9c05bcd4564dd3d15fef5c4987f457157968a9c
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-pipeline.mdx
@@ -0,0 +1,61 @@
+---
+title: Simple Pipeline
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+A consumer and a producer can be combined to create pipelines. Look at the corresponding examples to learn about producers and consumers in detail.
+
+Here is a snippet that shows how to organize a pipelined loop. The full runnable example can be found in the git repository.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/pipeline.py" snippetTag="pipeline"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/pipeline.cpp" snippetTag="pipeline"
+```
+
+</TabItem>
+</Tabs>
+
+Just like any produced stream, the pipelined stream can be marked as "finished". Here's a snippet that shows how to access the ID of the last message in the stream.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/pipeline.py" snippetTag="finish"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/pipeline.cpp" snippetTag="finish"
+```
+
+</TabItem>
+</Tabs>
+
+The details about the received message should appear in the logs, together with the message "stream finished" (if the "finished" flag was sent for the stream). The "stream ended" message will appear for non-finished streams, but may also mean that the stream does not exist (or was deleted). The processed file should appear in the corresponding folder (by default in ```/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test```).
diff --git a/docs/site/versioned_docs/version-21.12.0/cookbook/simple-producer.mdx b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-producer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..8de752d1493f3e2bad32c9ca4dd705ceb9e50ea3
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/cookbook/simple-producer.mdx
@@ -0,0 +1,148 @@
+---
+title: Simple Producer
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+This example produces a simple message. This page provides snippets for a simple producer in both Python and C++. You can go to BitBucket to see the whole example at once. The files there are a working example ready to launch.
+
+The first step is to create an instance of the producer.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/produce.py" snippetTag="create"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/produce.cpp" snippetTag="create"
+```
+
+</TabItem>
+</Tabs>
+
+Then we need to define a callback that will be used for sending. The callback is called when the message is actually sent, which may happen with a delay.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/produce.py" snippetTag="callback"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/produce.cpp" snippetTag="callback"
+```
+
+</TabItem>
+</Tabs>
+
+Next we schedule the actual sending. This function call does not perform the actual sending, it only schedules it. The sending happens in the background, and when it is done the callback is called (if provided).
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/produce.py" snippetTag="send"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/produce.cpp" snippetTag="send"
+```
+
+</TabItem>
+</Tabs>
+
+The sending of the messages will probably be done in a loop. After all the data is sent, some additional actions might be needed. You may want to wait for all the background requests to finish before doing something else or exiting the application.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+
+```python content="./versioned_examples/version-21.12.0/python/produce.py" snippetTag="finish"
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+
+```cpp content="./versioned_examples/version-21.12.0/cpp/produce.cpp" snippetTag="finish"
+```
+
+</TabItem>
+</Tabs>
+
+You can get the full example from BitBucket and test it locally.
+
+<Tabs
+  groupId="language"
+  defaultValue="python"
+  values={[
+    { label: 'Python', value: 'python', },
+    { label: 'C++', value: 'cpp', },
+  ]
+}>
+<TabItem value="python">
+For the Python example, just launch it with the Python interpreter (make sure the ASAP::O client Python modules are installed).
+
+```bash
+$ python3 produce.py
+```
+
+</TabItem>
+
+<TabItem value="cpp">
+For the C++ example you need to compile it first. The easiest way to do it is to install the ASAP::O client dev packages and use the CMake find_package function. CMake will generate the makefile that you can then use to compile the example.
+
+The example CMake file can look like this.
+
+```cmake content="./versioned_examples/version-21.12.0/cpp/CMakeLists.txt" snippetTag="#producer"
+```
+
+You can use it like this.
+
+```bash
+$ cmake . && make
+$ ./asapo-produce
+```
+
+</TabItem>
+</Tabs>
+
+You should see the "successfuly sent" message in the logs, and the file should appear in the corresponding folder (by default in ```/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test```).
diff --git a/docs/site/versioned_docs/version-21.12.0/core-architecture.md b/docs/site/versioned_docs/version-21.12.0/core-architecture.md
new file mode 100644
index 0000000000000000000000000000000000000000..a02048e96d33c687f38622661b735974541a36e4
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/core-architecture.md
@@ -0,0 +1,29 @@
+---
+title: Core Architecture
+---
+
+For those who are curious about the ASAPO architecture, the diagram below shows some details. The arrows with numbers give an example of the data workflow explained below.
+
+![Docusaurus](/img/core-architecture.png)
+
+## Data Workflow (example)
+The workflow can be split into two more or less independent tasks: data ingestion and data retrieval.
+
+### Data ingestion (numbers with i on the diagram)
+1i) As we [know](producer-clients.md), the producer client is responsible for ingesting data into the system. Therefore the first step is to detect that a new message is available. This can be done using another tool developed at DESY named [HiDRA](https://confluence.desy.de/display/FSEC/HiDRA). This tool monitors the source of data (e.g. by monitoring a filesystem or using HTTP requests or ZeroMQ streams, depending on the detector type)
+
+2i) HiDRA (or another user application) then uses the ASAPO Producer API to send messages (M1 and M2 in our case) in parallel to the ASAPO Receiver. TCP/IP or RDMA protocols are used to send data most efficiently. The ASAPO Receiver receives the data into a memory cache
+
+3i) - 4i) ASAPO saves the data to a filesystem and adds a metadata record to a database
+
+5i) Feedback is sent to the producer client with a success or error message (in case of an error, some of the steps above may not happen)
+
+### Data retrieval (numbers with r on the diagram)
+
+A [consumer client](consumer-clients.md) is usually a user application that retrieves data from the system to analyse/process it.
+
+The first step to retrieve a message via the Consumer API is to pass the request to the Data Broker (1r). The Data Broker retrieves the metadata information about the message from the database (2r) and returns it to the consumer client. The consumer client analyses the metadata information and decides how to get the data. If the data is still in the Receiver memory cache, the client requests the data from there via a Data Server (which is a part of the ASAPO Receiver). Otherwise, the client gets the data from the filesystem - directly, if the filesystem is accessible on the machine where the client is running, or via the File Transfer Service if not.
+
+
+
+
diff --git a/docs/site/versioned_docs/version-21.12.0/data-in-asapo.md b/docs/site/versioned_docs/version-21.12.0/data-in-asapo.md
new file mode 100644
index 0000000000000000000000000000000000000000..96abf43c86c13873641ea767b7766e3145baa5d3
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/data-in-asapo.md
@@ -0,0 +1,29 @@
+---
+title: Data in ASAPO
+---
+All data that is produced, stored and consumed via ASAPO is structured on several levels. 
+
+#### Beamtime 
+This is the top level. It contains all data collected/produced during a single beamtime (beamtime is the term used at DESY; it can also be a run, experiment, proposal, ...). Each beamtime has its own unique ID.
+
+#### Data Source
+During a beamtime, data can be produced by different sources. For example, a detector is a data source. If multiple detectors are used during an experiment, they can be different data sources or the same data source (more details below in the datasets section). A user application that simulates or analyses data can also act as an ASAPO data source. Each data source has its own unique name within a beamtime.
+
+#### Data Stream
+Each data source can emit multiple data streams. Each stream has a name that is unique within a specific data source.
+
+#### Message
+Data streams consist of smaller entities - messages. The content of a message is quite flexible, to be able to cover a broad range of use cases. Usually it is metadata and some binary data (e.g. a detector image, or an HDF5 file with multiple images). At the moment ASAPO itself is agnostic to the data and sees it as a binary array. Later some specific cases might be handled as well (the most prominent use case - an HDF5 file with multiple images).
+
+An important aspect is that each message within a data stream must be assigned a consecutive integer index. Therefore, a stream always contains messages with index = 1, 2, 3, ... . This is different from traditional messaging systems, where messages have timestamps or arbitrary unique hash IDs. The reason is that with timestamps the order of messages saved in the system might differ from the order in which they were generated by the data source (e.g. a detector), and keeping the correct order is required in many cases. A second reason is that it makes random access to a specific message quite straightforward.
+
+#### Datasets/Dataset substreams
+In some cases multiple detectors are used during an experiment, e.g. a 3D image is composed from multiple 2D images created by different detectors. In this case these 2D images can be combined into a dataset so that it can be processed later as a whole. One would then use a single data source (which would mean a set of detectors, or "multi-detector", in this case) and a single data stream; to compose a dataset, for each of its components (each 2D image in our example) the corresponding detector would send a message with the same index but to a different dataset substream, as sketched below.
+
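+A minimal Python sketch of such a multi-detector producer could look as follows. The ```dataset=(substream, size)``` argument and the other parameter values are assumptions made for illustration - see the Datasets code example in the cookbook for the exact calls.
+
+```python
+import asapo_producer
+
+# hypothetical two-detector setup: both sub-detectors contribute a part of message 1
+producer = asapo_producer.create_producer("localhost:8400", 'processed', "asapo_test", 'auto',
+                                           "multi-detector", "<beamtime token>", 1, 60000)
+
+image_det1 = b"2D image from detector 1"
+image_det2 = b"2D image from detector 2"
+
+# same stream and same message index, but different dataset substreams (1 and 2 out of 2)
+producer.send(1, "processed/det1_00001", image_det1, dataset=(1, 2), stream="scan_001")
+producer.send(1, "processed/det2_00001", image_det2, dataset=(2, 2), stream="scan_001")
+
+producer.wait_requests_finished(2000)  # wait for the background sending to finish
+```
+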
+So, for the case without datasets (single detector) the data hierarchy is Beamtime → Data Source → Data Stream → Message:
+
+![Docusaurus](/img/data-in-asapo-workflow.png)
+
+And with datasets (multi-detector) the data hierarchy is Beamtime → Data Source → Data Stream → Dataset → Message in Dataset Substream:
+
+![Docusaurus](/img/data-in-asapo-workflow2.png)
diff --git a/docs/site/versioned_docs/version-21.12.0/getting-started.mdx b/docs/site/versioned_docs/version-21.12.0/getting-started.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..352a663f8cbdaee6a561bdbb37f4e66938e31ac3
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/getting-started.mdx
@@ -0,0 +1,117 @@
+---
+title: Getting Started
+slug: /
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Start ASAPO services {#step-1}
+
+If you already have running ASAPO services and know the endpoint, you don't need this, and can go to [Client Libraries](#step-2).
+
+Otherwise, for testing purposes, one can start the ASAPO services in standalone mode (this is not recommended for a production deployment).
+
+
+The easiest way is to use a Docker container.
+So, make sure Docker is installed and that you have the necessary permissions to use it.
+Please note that this will only work on a Linux machine. Also please note that ASAPO needs some ports to be available. You can check the list
+[here](https://stash.desy.de/projects/ASAPO/repos/asapo/browse/deploy/asapo_services/scripts/asapo.auto.tfvars.in#37).
+
+Now, depending on how your Docker daemon is configured (whether it uses a
+unix socket or a tcp port for communication),
+you can pick the corresponding script below, adjust it and execute it to start the ASAPO services.
+
+<Tabs
+  defaultValue="unix"
+  values={[
+    { label: 'Docker with unix socket (default)', value: 'unix', },
+    { label: 'Docker with tcp (used on FS machines)', value: 'tcp', },
+  ]
+}>
+<TabItem value="unix">
+
+```shell content="./versioned_examples/version-21.12.0/start_asapo_socket.sh"
+```
+
+</TabItem>
+
+<TabItem value="tcp">
+
+```shell content="./versioned_examples/version-21.12.0/start_asapo_tcp.sh"
+```
+
+</TabItem>
+</Tabs>
+
+At the end you should see
+
+<p className="green-text"><strong>Apply complete! Resources: 19 added, 0 changed, 0 destroyed.</strong></p>
+
+which means the ASAPO services started successfully. Your ASAPO endpoint for API calls will be **localhost:8400**.
+
+### Create data directories
+
+Next, you need to create directories where ASAPO will store the data
+(the structure matches the one used at DESY experiments).
+Since we are going to use beamline `test` and beamtime `asapo_test` in the following examples,
+we must create two folders, one for the beamline filesystem and one for the core file system:
+
+```shell
+ASAPO_HOST_DIR=/var/tmp/asapo # the folder used in step 1
+mkdir -p $ASAPO_HOST_DIR/global_shared/online_data/test/current/raw
+mkdir -p $ASAPO_HOST_DIR/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test
+```
+
+:::note ASAP::O in production mode
+
+We have a running instance for processing data collected during experiments. Please get in touch with the FS-SC group for more information.
+
+:::
+
+### Services shutdown
+
+After you are done with your instance of ASAPO, you might want to gracefully shut down the running services. If you don't, your machine will accumulate unused Docker images.
+
+```shell content="./versioned_examples/version-21.12.0/cleanup.sh"
+```
+
+<br/><br/>
+
+## Install client libraries {#step-2}
+
+Now you can install the Python packages or C++ libraries for the ASAPO Producer and Consumer APIs (you need to be in the DESY intranet to access the files).
+
+<Tabs
+  defaultValue="python-pip"
+  values={[
+    { label: 'Python - pip', value: 'python-pip', },
+    { label: 'Python - packages', value: 'python-packages', },
+    { label: 'C++ packages', value: 'cpp', },
+  ]
+}>
+<TabItem value="python-pip">
+
+```shell content="./versioned_examples/version-21.12.0/install_python_clients_pip.sh" snippetTag="#snippet1"
+```
+
+</TabItem>
+<TabItem value="python-packages">
+
+```shell content="./versioned_examples/version-21.12.0/install_python_clients_pkg.sh"
+```
+
+</TabItem>
+<TabItem value="cpp">
+
+```shell content="./versioned_examples/version-21.12.0/install_cpp_clients.sh"
+```
+
+</TabItem>
+</Tabs>
+
+## Code examples
+
+Please refer to the [Code Examples](cookbook/overview) section to see code snippets for various usage scenarios.
+
diff --git a/docs/site/versioned_docs/version-21.12.0/overview.md b/docs/site/versioned_docs/version-21.12.0/overview.md
new file mode 100644
index 0000000000000000000000000000000000000000..7af1f0471cfe9deb243d8d5de447c76ebcf9b30a
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/overview.md
@@ -0,0 +1,40 @@
+---
+title: Overview
+---
+
+
+
+ASAP::O (or ASAPO) is a high-performance distributed streaming platform. It is being developed at DESY and is mainly aimed at supporting online/offline analysis of experimental data produced at its facilities. The ideas behind it are quite similar to those of Apache Kafka and similar messaging solutions, but ASAPO is developed and tuned for scientific use cases with their specific workflows and much larger message sizes (MBs to GBs, compared to KBs in traditional systems).
+
+
+
+ASAPO has the following key capabilities:
+
+- Deliver data produced by an experimental facility (e.g. a detector) to a data center in a high-performance, fault-tolerant way
+- Consume this data in various modes (as soon as new data occurs, random access, latest available data, in parallel, ...)
+- Ingest your own data / create computational pipelines
+
+
+ASAPO consists of the following three components:
+
+- Core services (run in background on a single node or a cluster and provide ASAPO functionality)
+- Producer API to ingest data into the system
+- Consumer API to retrieve data from the system
+
+### Bird's eye view
+
+A workflow when using ASAPO can be represented as follows:
+
+![Docusaurus](/img/asapo_bird_eye.png)
+        
+
+Usually, an end user can see ASAPO core services as a black box. But some more details are given [here](core-architecture).
+
+Next, one can learn more about the following concepts:
+
+- [Data in ASAPO](data-in-asapo)
+- [Producer clients](producer-clients)
+- [Consumer clients](consumer-clients)
+
+You can also compare ASAPO with other solutions, jump directly to [Getting Started](getting-started.mdx), or have a look at the use cases section.
+
diff --git a/docs/site/versioned_docs/version-21.12.0/p02.1.md b/docs/site/versioned_docs/version-21.12.0/p02.1.md
new file mode 100644
index 0000000000000000000000000000000000000000..271b2bf10a91d2ecf398e2a571b49b0df874bbe7
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/p02.1.md
@@ -0,0 +1,43 @@
+---
+title: ASAP::O at P02.1
+---
+
+Online analysis at P02.1 has two main goals:
+
+- Doing as much beamline-specific data analysis as possible for the user, so that they can concentrate on analyzing the experiment-specific details. This will lead to comprehensive support for the user from the beamline side and therefore to higher user satisfaction. Automation of the analysis is essential to achieve the necessary high throughput, which is mandatory for current and future diffraction applications.
+- Enabling timely decisions through a "live" view of raw images and analyzed data. Problems with the measurement can often be more easily detected in the analyzed data, which should be made available to the user as early as possible to avoid wasting valuable measurement time on suboptimal experimental conditions.
+
+## Description of a typical beamtime at P02.1
+
+- A beamtime consists of a number of scans
+- Each scan consists of one or more steps
+- At each step, an image is taken by the detectors, and several other scalar sensor values are gathered, e.g., temperature, electric current, position, etc.
+- The parameters for the analysis are fixed during one scan but might need to change from one scan to the next
+
+## Analysis Pipeline
+
+- Images are taken by one or two detectors
+- Optionally, a number of consecutive images of a single detector are merged into one averaged image to reduce the noise
+- The (averaged) images are stored into one NeXus file per detector per scan
+- Each (averaged) image is analyzed independently
+- The analyzed data is written to one NeXus file per detector per scan
+- All scalar sensor data and additional metadata is written to one NeXus file per scan that links to the other NeXus files with the (averaged) images and analyzed data
+- A viewer displays the live and history output of all relevant processing steps
+
+![Docusaurus](/img/Asapo_Analysis_Pipeline_P02-1.png)
+
+## Use of ASAPO
+
+In the following, ASAPO-specific details for the pipeline of a single detector are given. For multiple detectors, all stream names are suffixed by the detector ID.
+
+1. The data acquisition software stores the parameters for the analysis in a "scan-metadata" stream with one substream per scan and one metadata entry per substream
+2. Images are ingested into ASAPO
+3. The images taken by the detectors are written to the beamline filesystem by HiDRA (one file per image)
+4. HiDRA inserts the files into ASAPO. It assigns the files to the correct "detector" stream based on the file name. Each stream uses one substream per scan; its name is also extracted from the filename by HiDRA, as is the index within a substream.
+5. If enabled, one "averager" worker per detector stream reads the files from the "detector" stream and emits the averaged images into the "averaged" stream. The name of the substream of the input is used for the name of the output substream. The indices within a substream are chosen consecutively.
+6. One "nexus-writer" worker per detector reads the images either from the "detector" or the "averaged" stream. All images of a single substream are stored into one file. The filename is constructed from the name of the stream and substream the image belongs to. The index within a substream corresponds to the index within the HDF5 dataset.
+7. Multiple "asapo-dawn" workers read their parameters from the "scan-metadata" stream at the start of each substream. The images are read from the "detector" or "averaged" stream. The workers emit the resulting data into an "analyzed" stream with the same substream name as the input and the same index.
+8. One "nexus-write" worker per detector reads the analyzed data from the "analyzed" stream and writes it into one NeXus file per substream. The filename is constructed from the name of the stream and substream the data belongs to. The index within a substream corresponds to the index within the HDF5 dataset.
+9. The data acquisition software stores all scalar data and all additional scan-metadata in a master NeXus file that links to the NeXus files produced by the ASAPO workers.
+10. The viewer listens to all streams and parses the metadata to create a continuously updated tree view of all available data. Clicking on an item uses get_by_id to retrieve the actual data. A "live" mode automatically retrieves the latest data.
+ 
diff --git a/docs/site/versioned_docs/version-21.12.0/producer-clients.md b/docs/site/versioned_docs/version-21.12.0/producer-clients.md
new file mode 100644
index 0000000000000000000000000000000000000000..d74adb0b60e04ae6cf0ab5b9c2b264a25c46419a
--- /dev/null
+++ b/docs/site/versioned_docs/version-21.12.0/producer-clients.md
@@ -0,0 +1,23 @@
+---
+title: Producer Clients
+---
+
+A producer client (or producer) is the part of a distributed streaming system that is responsible for creating data streams (i.e. ingesting data into the system). It is usually the user's (beamline scientist, detector developer, physicist, ...) responsibility to develop a client for a specific beamline, detector or experiment using the ASAPO Producer API, and ASAPO's responsibility to make sure data is transferred and saved in an efficient and reliable way.
+
+![Docusaurus](/img/producer-clients.png)
+
+Producer API is available for C++ and Python and has the following main functionality:
+
+- Create a producer instance and bind it to a specific beamtime and data source
+    - multiple instances can be created (also within a single application) to send data from different sources
+- Send messages to a specific stream (you can read more [here](data-in-asapo) about data in ASAPO)
+    - each message must have a consecutive integer index, ASAPO does not create indexes automatically
+    - to compose datasets, a dataset substream (and the dataset size) should be sent along with each message
+    - messages are sent asynchronously, in parallel using multiple threads 
+    - retransfer will be attempted in case of system failure
+    - a callback function can be provided to react after the data was sent or to process errors
+    
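+Below is a minimal Python sketch of these calls, just for orientation. The endpoint, beamtime, data source and token are placeholders, and the exact signatures are assumptions based on the cookbook examples and may differ between ASAPO versions - please check the linked API documentation and the [code examples](cookbook/overview) for the authoritative calls.
+
+```python
+import asapo_producer
+
+def callback(payload, err):
+    # called asynchronously once the message has been sent (or failed)
+    if err is not None:
+        print("could not send:", err)
+    else:
+        print("sent:", payload)
+
+# placeholder values - use your own endpoint, beamtime, data source and token
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+token = "<beamtime token>"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto',
+                                           "test_source", token, 1, 60000)
+
+# message indexes must be consecutive integers starting from 1
+for i in range(1, 4):
+    producer.send(i, "processed/file_" + str(i), ("content " + str(i)).encode(),
+                  stream="default", callback=callback)
+
+producer.wait_requests_finished(2000)  # wait for the background sending to finish
+```
+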
+Please refer to [C++](http://asapo.desy.de/cpp/) and [Python](http://asapo.desy.de/python/) documentation for specific details (available from DESY intranet only).
+
+
+
diff --git a/docs/site/versioned_examples/version-21.12.0/c/Makefile b/docs/site/versioned_examples/version-21.12.0/c/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..ba3d4a872835ae3d20e6dfe43452a6ede2771332
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/c/Makefile
@@ -0,0 +1,29 @@
+PROGRAM=asapo-consume
+
+LDFLAGS = "-Wl,-rpath,/opt/asapo/lib"
+CFLAGS += `PKG_CONFIG_PATH=/opt/asapo/lib/pkgconfig pkg-config --cflags libasapo-consumer`
+LIBS = `PKG_CONFIG_PATH=/opt/asapo/lib/pkgconfig pkg-config --libs libasapo-consumer`
+
+# for default installation
+#LDFLAGS =
+#CFLAGS += `pkg-config --cflags libasapo-consumer`
+#LIBS = `pkg-config --libs libasapo-consumer`
+
+RM=rm -f
+
+SRCS=consume.c
+OBJS=$(subst .c,.o,$(SRCS))
+
+all: $(PROGRAM)
+
+$(PROGRAM): $(OBJS)
+	$(CC) $(LDFLAGS) -o $@ $^ $(LIBS)
+
+%.o: %.c
+	$(CC) $(CFLAGS) $(INCLUDE) -c -o $@ $<
+
+clean:
+	$(RM) $(OBJS)
+
+distclean: clean
+	$(RM) $(PROGRAM)
diff --git a/docs/site/versioned_examples/version-21.12.0/c/consume.c b/docs/site/versioned_examples/version-21.12.0/c/consume.c
new file mode 100644
index 0000000000000000000000000000000000000000..a29537c61fa0c950f4a4c29e55f6c10e634d3da4
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/c/consume.c
@@ -0,0 +1,66 @@
+#include "asapo/consumer_c.h"
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void exit_if_error(const char *error_string, const AsapoErrorHandle err) {
+    if (asapo_is_error(err)) {
+        char buf[1024];
+        asapo_error_explain(err, buf, sizeof(buf));
+        printf("%s %s\n", error_string, buf);
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    AsapoErrorHandle err = asapo_new_handle();
+    AsapoMessageMetaHandle mm = asapo_new_handle();
+    AsapoMessageDataHandle data = asapo_new_handle();
+
+    /* create snippet_start */
+    const char *endpoint = "localhost:8400";
+    const char *beamtime = "asapo_test";
+    const char *token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    const char * path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"; //set it according to your configuration.
+
+    AsapoSourceCredentialsHandle cred = asapo_create_source_credentials(kProcessed,
+                                                                        beamtime,
+                                                                        "", "test_source", token);
+    AsapoConsumerHandle consumer = asapo_create_consumer(endpoint,
+                                                         path_to_files, 1,
+                                                         cred,
+                                                         &err);
+    asapo_free_handle(&cred);
+    /* create snippet_end */
+
+    exit_if_error("Cannot create consumer", err);
+    asapo_consumer_set_timeout(consumer, 5000ull);
+
+    /* consume snippet_start */
+    AsapoStringHandle group_id = asapo_consumer_generate_new_group_id(consumer, &err);
+    exit_if_error("Cannot create group id", err);
+
+    asapo_consumer_get_next(consumer, group_id, &mm, &data, "default",&err);
+    exit_if_error("Cannot get next record", err);
+
+    printf("id: %llu\n", (unsigned long long)asapo_message_meta_get_id(mm));
+    printf("file name: %s\n", asapo_message_meta_get_name(mm));
+    printf("file content: %s\n", asapo_message_data_get_as_chars(data));
+    /* consume snippet_end */
+
+    /* delete snippet_start */
+    asapo_consumer_delete_stream(consumer,"default", 1,1,&err);
+    exit_if_error("Cannot delete stream", err);
+    printf("stream deleted\n");
+    /* delete snippet_end */
+
+    asapo_free_handle(&err);
+    asapo_free_handle(&mm);
+    asapo_free_handle(&data);
+    asapo_free_handle(&consumer);
+    asapo_free_handle(&group_id);
+    return EXIT_SUCCESS;
+}
+
diff --git a/docs/site/versioned_examples/version-21.12.0/cleanup.sh b/docs/site/versioned_examples/version-21.12.0/cleanup.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7344a690f3905218aa423a7f6feec4b7b0e0e394
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cleanup.sh
@@ -0,0 +1,5 @@
+ASAPO_HOST_DIR=/var/tmp/asapo # you can change this if needed
+
+docker exec asapo jobs-stop
+docker stop asapo
+rm -rf $ASAPO_HOST_DIR
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/CMakeLists.txt b/docs/site/versioned_examples/version-21.12.0/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1d7e53c78e14292c2b9dc2e5dadf91100021a969
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/CMakeLists.txt
@@ -0,0 +1,31 @@
+cmake_minimum_required(VERSION 3.3)
+
+project(asapo-client)
+
+set(CMAKE_CXX_STANDARD 11)
+
+# optionally use some other curl lib (e.g. static)
+# set (CURL_LIBRARY /usr/local/lib/libasapo-curl.a)
+# optionally link gcc and stdc++ statically
+# set (ASAPO_STATIC_CXX_LIBS ON)
+# optionally link asapo as shared libs (ASAPO_STATIC_CXX_LIBS not used then)
+# set (ASAPO_SHARED_LIBS ON)
+
+#consumer snippet_start_remove
+find_package (Asapo REQUIRED COMPONENTS Producer)
+
+set(TARGET_NAME asapo-produce)
+set(SOURCE_FILES produce.cpp)
+
+add_executable(${TARGET_NAME} ${SOURCE_FILES})
+target_link_libraries(${TARGET_NAME} imported::asapo-producer)
+#consumer snippet_end_remove
+#producer snippet_start_remove
+find_package (Asapo REQUIRED COMPONENTS Consumer)
+
+set(TARGET_NAME asapo-consume)
+set(SOURCE_FILES consume.cpp)
+
+add_executable(${TARGET_NAME} ${SOURCE_FILES})
+target_link_libraries(${TARGET_NAME} imported::asapo-consumer)
+#producer snippet_end_remove
\ No newline at end of file
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/acknowledgements.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/acknowledgements.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d8992ea60452fff6c5f15422cda94e658099ac1d
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/acknowledgements.cpp
@@ -0,0 +1,132 @@
+#include "asapo/asapo_producer.h"
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+#include <set>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+
+    producer->SetLogLevel(asapo::LogLevel::Error);
+
+    err = producer->DeleteStream("default", 1000, asapo::DeleteStreamOptions{true, true});
+    exit_if_error("Cannot delete stream", err);
+
+    // let's start with producing a sample of 10 simple messages
+    for (uint64_t i = 1; i <= 10; i++) {
+        std::string to_send = "message#" + std::to_string(i);
+        auto send_size = to_send.size() + 1;
+        auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+        memcpy(buffer.get(), to_send.c_str(), send_size);
+
+        asapo::MessageHeader message_header{i, send_size, "processed/test_file_" + std::to_string(i)};
+        err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+        exit_if_error("Cannot send message", err);
+    }
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot start consumer", err);
+    consumer->SetTimeout(5000);
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err);
+
+    // consume snippet_start
+    asapo::MessageMeta mm;
+    asapo::MessageData data;
+
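+    // ids of the messages that we will not acknowledge right away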
+    const std::set<int> ids {3, 5, 7};
+
+    // a flag to distinguish the first attempt at message #3
+    bool firstTryNegative = true;
+
+    do {
+        err = consumer->GetNext(group_id, &mm, &data, "default");
+
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            std::cout << "stream finished" << std::endl;
+            break;
+        }
+
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err); // snippet_end_remove
+
+        // acknowledge all the messages except the ones in the set
+        if (ids.find(mm.id) == ids.end()) {
+            std::cout << "Acknowledge the message #" << mm.id << std::endl;
+            consumer->Acknowledge(group_id, mm.id, "default");
+        }
+
+        // for message #3 we issue a negative acknowledgement, which will put it at a later place in the stream
+        // in this case, it will be put at the end of the stream
+        if (mm.id == 3) {
+            if (firstTryNegative) {
+                std::cout << "Negative acknowledgement of the message #" << mm.id << std::endl;
+                // make the negative acknowledgement with a delay of 2 seconds
+                consumer->NegativeAcknowledge(group_id, mm.id, 2000, "default");
+                firstTryNegative = false;
+            } else {
+                // on our second attempt we acknowledge the message
+                std::cout << "Second try of the message #" << mm.id << std::endl;
+                consumer->Acknowledge(group_id, mm.id, "default");
+            }
+        }
+    } while (1);
+    // consume snippet_end
+
+    // print snippet_start
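+    // get the ids of the messages that were not acknowledged (0, 0 is assumed to mean the whole id range)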
+    auto unacknowledgedMessages = consumer->GetUnacknowledgedMessages(group_id, 0, 0, "default", &err);
+    exit_if_error("Could not get list of messages", err); // snippet_end_remove
+
+    for (int i = 0; i < unacknowledgedMessages.size(); i++) {
+        err = consumer->GetById(unacknowledgedMessages[i], &mm, &data, "default");
+        exit_if_error("Cannot get message", err); // snippet_end_remove
+
+        std::cout << "Unacknowledged message: " << reinterpret_cast<char const*>(data.get()) << std::endl;
+        std::cout << "id: " << mm.id << std::endl;
+        std::cout << "file name: " << mm.name << std::endl;
+    }
+    // print snippet_end
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/consume.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/consume.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f13db95c0520395cd781660459a4557374d1460b
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/consume.cpp
@@ -0,0 +1,98 @@
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << std::endl << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+// create snippet_start
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    // test token. In production it is created during the start of the beamtime
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    // set it according to your configuration.
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials
+            {
+                asapo::SourceType::kProcessed, // should be kProcessed or kRaw, kProcessed writes to the core FS
+                beamtime,                      // the folder should exist
+                "",                            // can be empty or "auto", if beamtime_id is given
+                "test_source",                 // source
+                token                          // authorization token
+            };
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer
+        (endpoint,
+         path_to_files,
+         true,             // True if the path_to_files is accessible locally, False otherwise
+         credentials,      // same as for producer
+         &err);
+// create snippet_end
+    exit_if_error("Cannot create consumer", err);
+    consumer->SetTimeout(5000); // how long to wait for a message on a non-finished stream
+
+// list snippet_start
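+    // print all streams in the beamtime together with their basic properties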
+    for (const auto& stream : consumer->GetStreamList("", asapo::StreamFilter::kAllStreams, &err))
+    {
+        std::cout << "Stream name: " << stream.name << std::endl;
+        std::cout << "LastId: " << stream.last_id << std::endl;
+        std::cout << "Stream finished: " << stream.finished << std::endl;
+        std::cout << "Next stream: " << stream.next_stream << std::endl;
+    }
+// list snippet_end
+
+// consume snippet_start
+    // Several consumers can use the same group_id to process messages in parallel
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err); // snippet_end_remove
+
+    asapo::MessageMeta mm;
+    asapo::MessageData data;
+
+    do {
+        // GetNext is the main function to get messages from streams. You would normally call it in a loop.
+        // you can either manually compare the mm.id to the stream.last_id, or wait for the error to happen
+        err = consumer->GetNext(group_id, &mm, &data, "default");
+
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            // all the messages in the stream were processed
+            std::cout << "stream finished" << std::endl;
+            break;
+        }
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            // not-finished stream timeout, or wrong or empty stream
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err); // snippet_end_remove
+
+        std::cout << "id: " << mm.id << std::endl;
+        std::cout << "file name: " << mm.name << std::endl;
+        std::cout << "message content: " << reinterpret_cast<char const*>(data.get()) << std::endl;
+    } while (1);
+// consume snippet_end
+
+// delete snippet_start
+    // you can delete the stream after consuming
+    err = consumer->DeleteStream("default", asapo::DeleteStreamOptions{true, true});
+    exit_if_error("Cannot delete stream", err); // snippet_end_remove
+    std::cout << "stream deleted" << std::endl;
+// delete snippet_end
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/consume_dataset.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/consume_dataset.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8b11aebedd40f84ff2a80b5b55df110e6858f068
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/consume_dataset.cpp
@@ -0,0 +1,71 @@
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << std::endl << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot create consumer", err);
+    consumer->SetTimeout((uint64_t) 5000);
+
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err);
+
+    // dataset snippet_start
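+    // GetNextDataset works like GetNext, but returns the metadata for a whole dataset at once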
+    asapo::DataSet ds;
+    asapo::MessageData data;
+
+    do {
+        ds = consumer->GetNextDataset(group_id, 0, "default", &err);
+
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            std::cout << "stream finished" << std::endl;
+            break;
+        }
+
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err); // snippet_end_remove
+
+        std::cout << "Dataset Id: " << ds.id << std::endl;
+
+        for(int i = 0; i < ds.content.size(); i++)
+        {
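+            // the dataset record only holds metadata; the actual content of each part is retrieved separately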
+            err = consumer->RetrieveData(&ds.content[i], &data);
+            exit_if_error("Cannot get dataset content", err); // snippet_end_remove
+
+            std::cout << "Part " << ds.content[i].dataset_substream << " out of " << ds.expected_size << std:: endl;
+            std::cout << "message content: " << reinterpret_cast<char const*>(data.get()) << std::endl;
+        }
+    } while (1);
+    // dataset snippet_end
+
+    err = consumer->DeleteStream("default", asapo::DeleteStreamOptions{true, true});
+    exit_if_error("Cannot delete stream", err);
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/metadata.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/metadata.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f63eb1d420f4d89705041e22823a1869e59d48ce
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/metadata.cpp
@@ -0,0 +1,163 @@
+#include "asapo/asapo_producer.h"
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+    producer->SetLogLevel(asapo::LogLevel::Error);
+
+    // beamtime_set snippet_start
+    // sample beamtime metadata. You can add any data you want, with any level of complexity
+    // in this example we use strings and ints, and one nested structure
+    auto beamtime_metadata = "{"
+    "   \"name\": \"beamtime name\","
+    "   \"condition\": \"beamtime condition\","
+    "   \"intvalue1\": 5,"
+    "   \"intvalue2\": 10,"
+    "   \"structure\": {"
+    "       \"structint1\": 20,"
+    "       \"structint2\": 30"
+    "   }"
+    "}";
+
+    // send the metadata
+    // with this call the new metadata will completely replace the one that's already there
+    err = producer->SendBeamtimeMetadata(beamtime_metadata, asapo::MetaIngestMode{asapo::MetaIngestOp::kReplace, true}, &ProcessAfterSend);
+    // beamtime_set snippet_end
+    exit_if_error("Cannot send metadata", err);
+
+    // beamtime_update snippet_start
+    // we can update the existing metadata if we want, by modifying the existing fields, or adding new ones
+    auto beamtime_metadata_update = "{"
+    "    \"condition\": \"updated beamtime condition\","
+    "    \"newintvalue\": 15"
+    "}";
+
+    // send the metadata in the 'kUpdate' mode
+    err = producer->SendBeamtimeMetadata(beamtime_metadata_update, asapo::MetaIngestMode{asapo::MetaIngestOp::kUpdate, true}, &ProcessAfterSend);
+    // beamtime_update snippet_end
+    exit_if_error("Cannot send metadata", err);
+
+    // stream_set snippet_start
+    // sample stream metadata
+    auto stream_metadata = "{"
+    "    \"name\": \"stream name\","
+    "    \"condition\": \"stream condition\","
+    "    \"intvalue\": 44"
+    "}";
+
+    // works the same way: for the initial set we use 'kReplace' for the stream metadata, but an update is also possible
+    // update works exactly the same as for beamtime, but here we will only do 'kReplace'
+    err = producer->SendStreamMetadata(stream_metadata, asapo::MetaIngestMode{asapo::MetaIngestOp::kReplace, true}, "default", &ProcessAfterSend);
+    // stream_set snippet_end
+    exit_if_error("Cannot send metadata", err);
+
+    // message_set snippet_start
+    // sample message metadata
+    auto message_metadata = "{"
+    "    \"name\": \"message name\","
+    "    \"condition\": \"message condition\","
+    "    \"somevalue\": 55"
+    "}";
+
+    std::string data_string = "hello";
+    auto send_size = data_string.size() + 1;
+    auto buffer = asapo::MessageData(new uint8_t[send_size]);
+    memcpy(buffer.get(), data_string.c_str(), send_size);
+
+    // the message metadata is sent together with the message itself
+    // in case of datasets each part has its own metadata
+    asapo::MessageHeader message_header{1, send_size, "processed/test_file", message_metadata};
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+    // message_set snippet_end
+    exit_if_error("Cannot send message", err);
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot start consumer", err);
+
+    // beamtime_get snippet_start
+    // read the beamtime metadata
+    auto beamtime_metadata_read = consumer->GetBeamtimeMeta(&err);
+    exit_if_error("Cannot get metadata", err); // snippet_end_remove
+
+    std::cout << "Updated beamtime metadata:" << std::endl << beamtime_metadata_read << std::endl;
+    // beamtime_get snippet_end
+
+    // stream_get snippet_start
+    // read the stream metadata
+    auto stream_metadata_read = consumer->GetStreamMeta("default", &err);
+    exit_if_error("Cannot get metadata", err);
+
+    std::cout << "Stream metadata:" << std::endl << stream_metadata_read << std::endl;
+    // stream_get snippet_end
+
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err);
+
+    asapo::MessageMeta mm;
+    asapo::MessageData data;
+
+    do {
+        // message_get snippet_start
+        err = consumer->GetNext(group_id, &mm, &data, "default");
+
+        // message_get snippet_start_remove
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            std::cout << "stream finished" << std::endl;
+            break;
+        }
+
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err);
+        // message_get snippet_end_remove
+
+        std::cout << "Message #" << mm.id << std::endl;
+        // our custom metadata is stored inside the message metadata
+        std::cout << "Message metadata:" << std::endl << mm.metadata << std::endl;
+        // message_get snippet_end
+    } while (1);
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/next_stream.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/next_stream.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..41ecf9f5f68c0b32841efae24e85ee9cb06ff578
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/next_stream.cpp
@@ -0,0 +1,127 @@
+#include "asapo/asapo_producer.h"
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+
+    producer->SetLogLevel(asapo::LogLevel::Error);
+
+    // let's start with producing a sample of 10 simple messages
+    for (uint64_t i = 1; i <= 10; i++) {
+        std::string to_send = "content of the message #" + std::to_string(i);
+        auto send_size = to_send.size() + 1;
+        auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+        memcpy(buffer.get(), to_send.c_str(), send_size);
+
+        asapo::MessageHeader message_header{i, send_size, "processed/test_file_" + std::to_string(i)};
+        err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+        exit_if_error("Cannot send message", err);
+    }
+
+    // next_stream_set snippet_start
+    // finish the stream and set the next stream to be called 'next'
+    producer->SendStreamFinishedFlag("default", 10, "next", &ProcessAfterSend);
+    // next_stream_set snippet_end
+
+    // populate the 'next' stream as well
+    for (uint64_t i = 1; i <= 5; i++) {
+        std::string to_send = "content of the message #" + std::to_string(i);
+        auto send_size = to_send.size() + 1;
+        auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+        memcpy(buffer.get(), to_send.c_str(), send_size);
+
+        asapo::MessageHeader message_header{i, send_size, "processed/test_file_next_" + std::to_string(i)};
+        err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "next", &ProcessAfterSend);
+        exit_if_error("Cannot send message", err);
+    }
+
+    // we leave the 'next' stream unfinished, but the chain of streams can be of any length
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot start consumer", err);
+    consumer->SetTimeout(5000);
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err);
+
+    asapo::MessageMeta mm;
+    asapo::MessageData data;
+
+    // read_stream snippet_start
+    // we start with the 'default' stream (the first one)
+    std::string stream_name = "default";
+
+    do {
+        err = consumer->GetNext(group_id, &mm, &data, stream_name);
+
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            // when the stream finishes, we look for the info on the next stream
+            auto streams = consumer->GetStreamList("", asapo::StreamFilter::kAllStreams, &err);
+            // first, we find the stream with our name in the list of streams
+            auto stream = std::find_if(streams.begin(), streams.end(), [&stream_name](const asapo::StreamInfo & s) {
+                return s.name == stream_name;
+            });
+
+            // then we check whether the 'next_stream' field is set and not empty
+            if (stream != streams.end() && !stream->next_stream.empty()) {
+                // if it is set, we continue with the next stream
+                stream_name = stream->next_stream;
+                std::cout << "Changing stream to the next one: " << stream_name << std::endl;
+                continue;
+            } else {
+                // otherwise we stop
+                std::cout << "stream finished" << std::endl;
+                break;
+            }
+        }
+
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err); // snippet_end_remove
+
+        std::cout << "Message #" << mm.id << ", message content: " << reinterpret_cast<char const*>(data.get()) << std::endl;
+    } while (1);
+    // read_stream snippet_end
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/pipeline.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/pipeline.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c63c7ee5f41348d3726cc4715f015c3b61840f24
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/pipeline.cpp
@@ -0,0 +1,100 @@
+#include "asapo/asapo_producer.h"
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot start consumer", err);
+    consumer->SetTimeout(5000);
+    auto group_id = consumer->GenerateNewGroupId(&err);
+    exit_if_error("Cannot create group id", err);
+
+    // pipeline snippet_start
+    // put the processed message into the new stream
+    auto pipelined_stream_name = "pipelined";
+
+    asapo::MessageMeta mm;
+    asapo::MessageData data;
+
+    do {
+        // we expect the message to be in the 'default' stream already
+        err = consumer->GetNext(group_id, &mm, &data, "default");
+
+        if (err && err == asapo::ConsumerErrorTemplates::kStreamFinished) {
+            std::cout << "stream finished" << std::endl;
+            break;
+        }
+
+        if (err && err == asapo::ConsumerErrorTemplates::kEndOfStream) {
+            std::cout << "stream ended" << std::endl;
+            break;
+        }
+        exit_if_error("Cannot get next record", err); // snippet_end_remove
+
+        // work on our data
+        auto processed_string = std::string(reinterpret_cast<char const*>(data.get())) + " processed";
+        auto send_size = processed_string.size() + 1;
+        auto buffer = asapo::MessageData(new uint8_t[send_size]);
+        memcpy(buffer.get(), processed_string.c_str(), send_size);
+
+        // you may use the same filename if you want to rewrite the source file. This will result in a warning, but it is a valid use case
+        asapo::MessageHeader message_header{mm.id, send_size, std::string("processed/test_file_") + std::to_string(mm.id)};
+        err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, pipelined_stream_name, &ProcessAfterSend);
+        exit_if_error("Cannot send message", err); // snippet_end_remove
+    } while (1);
+    // pipeline snippet_end
+
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    // finish snippet_start
+    // the meta from the last iteration corresponds to the last message
+    auto last_id = mm.id;
+
+    err = producer->SendStreamFinishedFlag("pipelined",last_id, "", &ProcessAfterSend);
+    // finish snippet_end
+    exit_if_error("Cannot finish stream", err);
+
+    // you can remove the source stream if you do not need it anymore
+    err = consumer->DeleteStream("default", asapo::DeleteStreamOptions{true, true});
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/produce.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/produce.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..157d53d060d731827ec42c751f54f7aa310f268f
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/produce.cpp
@@ -0,0 +1,86 @@
+#include "asapo/asapo_producer.h"
+#include <iostream>
+
+// callback snippet_start
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        // the data was not sent. Something is terribly wrong.
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        // The data was sent, but there was some unexpected problem, e.g. the file was overwritten.
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        // all fine
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+// callback snippet_end
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+// create snippet_start
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto credentials = asapo::SourceCredentials
+            {
+                asapo::SourceType::kProcessed, // should be kProcessed or kRaw, kProcessed writes to the core FS
+                beamtime,                      // the folder should exist
+                "",                            // can be empty or "auto", if beamtime_id is given
+                "test_source",                 // source
+                ""                             // athorization token
+            };
+
+    auto producer = asapo::Producer::Create(endpoint,
+                                            1,                               // number of threads. Increase if the sending speed seems slow
+                                            asapo::RequestHandlerType::kTcp, // Use kTcp. Use kFilesystem for direct storage of files
+                                            credentials,
+                                            60000,                           // timeout. Do not change.
+                                            &err);
+// create snippet_end
+    exit_if_error("Cannot start producer", err);
+
+// send snippet_start
+    // the message must be manually copied into a buffer of the appropriate size
+    std::string to_send = "hello";
+    auto send_size = to_send.size() + 1;
+    auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+    memcpy(buffer.get(), to_send.c_str(), send_size);
+
+    // we are sending a message with index 1. The filename must start with processed/
+    asapo::MessageHeader message_header{1, send_size, "processed/test_file"};
+    // use the default stream
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+// send snippet_end
+    exit_if_error("Cannot send message", err);
+
+    // send data in a loop
+
+    // add the following at the end of the script
+
+// finish snippet_start
+    err = producer->WaitRequestsFinished(2000); // will synchronously wait for all the data to be sent.
+                                                // Use it when no more data is expected.
+    exit_if_error("Producer exit on timeout", err); // snippet_end_remove
+
+    // you may want to mark the stream as finished
+    err = producer->SendStreamFinishedFlag("default",          // name of the stream.
+                                           1,                  // the number of the last message in the stream
+                                           "",                 // next stream or empty
+                                           &ProcessAfterSend);
+    exit_if_error("Cannot finish stream", err); // snippet_end_remove
+    std::cout << "stream finished" << std::endl;
+// finish snippet_end
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/produce_dataset.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/produce_dataset.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3de79fa422f994b8aeaf5eacc27f6903380cfa91
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/produce_dataset.cpp
@@ -0,0 +1,75 @@
+#include "asapo/asapo_producer.h"
+#include <iostream>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", ""};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+
+    // dataset snippet_start
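+    // all parts of a dataset share the same message id; each part gets its own substream number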
+    std::string to_send = "hello dataset 1";
+    auto send_size = to_send.size() + 1;
+    auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+    memcpy(buffer.get(), to_send.c_str(), send_size);
+
+    // add the additional parameters to the header: part number in the dataset and the total number of parts
+    asapo::MessageHeader message_header{1, send_size, "processed/test_file_dataset_1", "", 1, 3};
+
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+    exit_if_error("Cannot send message", err); // snippet_end_remove
+
+    // this can be done from different producers in any order
+    // we do not recalculate send_size since we know it to be the same
+    // we reuse the header to shorten the code
+    to_send = "hello dataset 2";
+    buffer =  asapo::MessageData(new uint8_t[send_size]);
+    memcpy(buffer.get(), to_send.c_str(), send_size);
+
+    message_header.dataset_substream = 2;
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+    exit_if_error("Cannot send message", err); // snippet_end_remove
+
+    to_send = "hello dataset 3";
+    buffer =  asapo::MessageData(new uint8_t[send_size]);
+    memcpy(buffer.get(), to_send.c_str(), send_size);
+
+    message_header.dataset_substream = 3;
+    err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+    exit_if_error("Cannot send message", err); // snippet_end_remove
+    // dataset snippet_end
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    // the dataset parts are not counted towards the number of messages in the stream
+    // the last message id in this example is still 1
+    err = producer->SendStreamFinishedFlag("default", 1, "", &ProcessAfterSend);
+    exit_if_error("Cannot finish stream", err);
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/cpp/query.cpp b/docs/site/versioned_examples/version-21.12.0/cpp/query.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78370839e74ee95eb0b9f55fadfc5e7a77e2044e
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/cpp/query.cpp
@@ -0,0 +1,128 @@
+#include "asapo/asapo_producer.h"
+#include "asapo/asapo_consumer.h"
+#include <iostream>
+#include <chrono>
+
+void ProcessAfterSend(asapo::RequestCallbackPayload payload, asapo::Error err) {
+    if (err && err != asapo::ProducerErrorTemplates::kServerWarning) {
+        std::cerr << "error during send: " << err << std::endl;
+        return;
+    } else if (err) {
+        std::cout << "warning during send: " << err << std::endl;
+    } else {
+        std::cout << "successfuly send " << payload.original_header.Json() << std::endl;
+        return;
+    }
+}
+
+void PrintMessages(asapo::MessageMetas metas, std::unique_ptr<asapo::Consumer>& consumer) {
+    asapo::MessageData data;
+    asapo::Error err;
+    for (int i = 0; i < metas.size(); i++) {
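+        // fetch the actual message content for each metadata record returned by the query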
+        err = consumer->RetrieveData(&metas[i], &data);
+        std::cout << "Message #" << metas[i].id
+                  << ", content: " << reinterpret_cast<char const*>(data.get())
+                  << ", user metadata: " << metas[i].metadata << std::endl;
+    }
+}
+
+void exit_if_error(std::string error_string, const asapo::Error& err) {
+    if (err) {
+        std::cerr << error_string << err << std::endl;
+        exit(EXIT_FAILURE);
+    }
+}
+
+int main(int argc, char* argv[]) {
+    asapo::Error err;
+
+    auto endpoint = "localhost:8400";
+    auto beamtime = "asapo_test";
+
+    auto token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJl"
+                 "eHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmN"
+                 "DNhbGZwOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdG"
+                 "VzdCIsIkV4dHJhQ2xhaW1zIjp7IkFjY2Vzc1R5cGV"
+                 "zIjpbIndyaXRlIiwicmVhZCJdfX0.dkWupPO-ysI4"
+                 "t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU";
+
+    auto path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test";
+
+    auto credentials = asapo::SourceCredentials{asapo::SourceType::kProcessed, beamtime, "", "test_source", token};
+
+    auto producer = asapo::Producer::Create(endpoint, 1, asapo::RequestHandlerType::kTcp, credentials, 60000, &err);
+    exit_if_error("Cannot start producer", err);
+
+    producer->SetLogLevel(asapo::LogLevel::Error);
+
+    err = producer->DeleteStream("default", 0, asapo::DeleteStreamOptions{true, true});
+    exit_if_error("Cannot delete stream", err);
+
+    // let's start with producing some messages with metadata
+    for (uint64_t i = 1; i <= 10; i++) {
+        auto message_metadata = "{"
+        "    \"condition\": \"condition #" + std::to_string(i) + "\","
+        "    \"somevalue\": " + std::to_string(i * 10) +
+        "}";
+
+        std::string to_send = "message#" + std::to_string(i);
+        auto send_size = to_send.size() + 1;
+        auto buffer =  asapo::MessageData(new uint8_t[send_size]);
+        memcpy(buffer.get(), to_send.c_str(), send_size);
+
+        asapo::MessageHeader message_header{i, send_size, "processed/test_file_" + std::to_string(i), message_metadata};
+        err = producer->Send(message_header, std::move(buffer), asapo::kDefaultIngestMode, "default", &ProcessAfterSend);
+        exit_if_error("Cannot send message", err);
+    }
+
+    err = producer->WaitRequestsFinished(2000);
+    exit_if_error("Producer exit on timeout", err);
+
+    auto consumer = asapo::ConsumerFactory::CreateConsumer(endpoint, path_to_files, true, credentials, &err);
+    exit_if_error("Cannot create group id", err);
+    consumer->SetTimeout(5000);
+
+    // by_id snippet_start
+    // simple query, same as GetById
+    auto metadatas = consumer->QueryMessages("_id = 1", "default", &err);
+    // by_id snippet_end
+    exit_if_error("Cannot query messages", err);
+    std::cout << "Message with ID = 1" << std::endl;
+    PrintMessages(metadatas, consumer);
+
+    // by_ids snippet_start
+    // the query that requests a range of IDs
+    metadatas = consumer->QueryMessages("_id >= 8", "default", &err);
+    // by_ids snippet_end
+    exit_if_error("Cannot query messages", err);
+    std::cout << "essages with ID >= 8" << std::endl;
+    PrintMessages(metadatas, consumer);
+
+    // string_equal snippet_start
+    // the query that has some specific requirement for message metadata
+    metadatas = consumer->QueryMessages("meta.condition = \"condition #7\"", "default", &err);
+    // string_equal snippet_end
+    exit_if_error("Cannot query messages", err);
+    std::cout << "Message with condition = 'condition #7'" << std::endl;
+    PrintMessages(metadatas, consumer);
+
+    // int_compare snippet_start
+    // the query that has several requirements for user metadata
+    metadatas = consumer->QueryMessages("meta.somevalue > 30 AND meta.somevalue < 60", "default", &err);
+    // int_compare snippet_end
+    exit_if_error("Cannot query messages", err);
+    std::cout << "Message with 30 < somevalue < 60" << std::endl;
+    PrintMessages(metadatas, consumer);
+
+    // timestamp snippet_start
+    // the query that is based on the message's timestamp
+    auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
+    auto fifteen_minutes_ago = std::chrono::duration_cast<std::chrono::nanoseconds>((std::chrono::system_clock::now() - std::chrono::minutes(15)).time_since_epoch()).count();
+    metadatas = consumer->QueryMessages("timestamp < " + std::to_string(now) + " AND timestamp > " + std::to_string(fifteen_minutes_ago), "default", &err);
+    // timestamp snippet_end
+    exit_if_error("Cannot query messages", err);
+    std::cout << "Messages in the last 15 minutes" << std::endl;
+    PrintMessages(metadatas, consumer);
+
+    return EXIT_SUCCESS;
+}
diff --git a/docs/site/versioned_examples/version-21.12.0/install_cpp_clients.sh b/docs/site/versioned_examples/version-21.12.0/install_cpp_clients.sh
new file mode 100644
index 0000000000000000000000000000000000000000..de3e64b161af6f0169168858722a905e7aa5e0e9
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/install_cpp_clients.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# you can also install Linux/Windows packages if you have root access (or install locally).
+# take a look at http://nims.desy.de/extra/asapo/linux_packages/ or http://nims.desy.de/extra/asapo/windows10 for your OS. E.g. for Debian 10.7
+wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo-dev-21.12.0-debian10.7.x86_64.deb
+sudo apt install ./asapo-dev-21.12.0-debian10.7.x86_64.deb
+
+
diff --git a/docs/site/versioned_examples/version-21.12.0/install_python_clients_pip.sh b/docs/site/versioned_examples/version-21.12.0/install_python_clients_pip.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7adbdcdaa459dc34c5d96ec1a8db4844627dccc3
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/install_python_clients_pip.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_producer==21.12.0
+pip3 install --user --trusted-host nims.desy.de --find-links=http://nims.desy.de/extra/asapo/linux_wheels asapo_consumer==21.12.0
+# you might need to update pip if the above commands error: pip3 install --upgrade pip
+
+# if that does not work (ABI incompatibility, etc.) you may try to install the source packages
+# take a look at http://nims.desy.de/extra/asapo/linux_packages/ or http://nims.desy.de/extra/asapo/windows10 for your OS. E.g. for Debian 10.7
+# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_producer-21.12.0.tar.gz
+# wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/asapo_consumer-21.12.0.tar.gz
+
+# pip3 install asapo_producer-21.12.0.tar.gz
+# pip3 install asapo_consumer-21.12.0.tar.gz
diff --git a/docs/site/versioned_examples/version-21.12.0/install_python_clients_pkg.sh b/docs/site/versioned_examples/version-21.12.0/install_python_clients_pkg.sh
new file mode 100644
index 0000000000000000000000000000000000000000..159ff2a8449f41e9c06662b164697d5cab50846b
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/install_python_clients_pkg.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+# you can also install Linux/Windows packages if you have root access (or install locally).
+# take a look at http://nims.desy.de/extra/asapo/linux_packages/ or http://nims.desy.de/extra/asapo/windows10 for your OS. E.g. for Debian 10.7
+wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-producer_21.12.0-debian10.7_amd64.deb
+wget http://nims.desy.de/extra/asapo/linux_packages/debian10.7/python-asapo-consumer_21.12.0-debian10.7_amd64.deb
+
+sudo apt install ./python-asapo-producer_21.12.0-debian10.7_amd64.deb
+sudo apt install ./python-asapo-consumer_21.12.0-debian10.7_amd64.deb
diff --git a/docs/site/versioned_examples/version-21.12.0/python/acknowledgements.py b/docs/site/versioned_examples/version-21.12.0/python/acknowledgements.py
new file mode 100644
index 0000000000000000000000000000000000000000..32d3055d059dd0e37d926dbf812bdf05694ef62d
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/acknowledgements.py
@@ -0,0 +1,74 @@
+import asapo_consumer
+import asapo_producer
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+producer.set_log_level('error')
+
+# let's start with producing a sample of 10 simple messages
+for i in range(1, 11):
+    producer.send(i, "processed/test_file_ack_" + str(i), ('message #' + str(i)).encode(), stream = "default", callback = callback)
+
+producer.wait_requests_finished(2000)
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+group_id = consumer.generate_group_id()
+
+# a flag to distinguish the first attempt at message #3
+firstTryNegative = True
+
+# consume snippet_start
+try:
+    while True:
+        data, meta = consumer.get_next(group_id, meta_only = False)
+        text_data = data.tobytes().decode("utf-8")
+        message_id = meta['_id']
+
+        # acknowledge all the messages except these
+        if message_id not in [3,5,7]:
+            print('Acknowledge the message #', message_id)
+            consumer.acknowledge(group_id, message_id)
+
+        # for message #3 we issue a negative acknowledgement, which will put it at a later place in the stream
+        # in this case, it will be put at the end of the stream
+        if message_id == 3:
+            if firstTryNegative:
+                print('Negative acknowledgement of the message #', message_id)
+                # make the negative acknowledgement with a delay of 2 seconds
+                consumer.neg_acknowledge(group_id, message_id, delay_ms=2000)
+                firstTryNegative = False
+            else:
+                # on our second attempt we acknowledge the message
+                print('Second try of the message #', message_id)
+                consumer.acknowledge(group_id, message_id)
+
+except asapo_consumer.AsapoStreamFinishedError:
+    print('stream finished')
+
+except asapo_consumer.AsapoEndOfStreamError:
+    print('stream ended')
+# consume snippet_end
+
+# print snippet_start
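+# list and print the messages that were left unacknowledged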
+for message_id in consumer.get_unacknowledged_messages(group_id):
+    data, meta = consumer.get_by_id(message_id, meta_only = False)
+    print('Unacknowledged message:', data.tobytes().decode("utf-8"), meta)
+# print snippet_end
diff --git a/docs/site/versioned_examples/version-21.12.0/python/consume.py b/docs/site/versioned_examples/version-21.12.0/python/consume.py
new file mode 100644
index 0000000000000000000000000000000000000000..6180fef9ee83e0a37d85ae338d322fcc5248a41a
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/consume.py
@@ -0,0 +1,55 @@
+import asapo_consumer
+
+#create snippet_start
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+# test token. In production it is created during the start of the beamtime
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+# set it according to your configuration.
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+
+consumer = asapo_consumer \
+                .create_consumer(endpoint,
+                                 path_to_files,
+                                 True,           # True if the path_to_files is accessible locally, False otherwise
+                                 beamtime,       # Same as for the producer
+                                 "test_source",  # Same as for the producer
+                                 token,          # Access token
+                                 5000)           # Timeout: how long to wait for a message on a non-finished stream.
+#create snippet_end
+
+#list snippet_start
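+# print all streams in the beamtime together with their basic properties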
+for stream in consumer.get_stream_list():
+    print("Stream name: ", stream['name'], "\n",
+          "LastId: ", stream['lastId'], "\n",
+          "Stream finished: ", stream['finished'], "\n",
+          "Next stream: ", stream['nextStream'])
+#list snippet_end
+
+#consume snippet_start
+group_id = consumer.generate_group_id() # Several consumers can use the same group_id to process messages in parallel
+
+try:
+    # get_next is the main function to get messages from streams. You would normally call it in a loop.
+    # you can either manually compare the meta['_id'] to the stream['lastId'], or wait for the exception to happen
+    while True:
+        data, meta = consumer.get_next(group_id, meta_only = False)
+        print(data.tobytes().decode("utf-8"), meta)
+
+except asapo_consumer.AsapoStreamFinishedError:
+    print('stream finished') # all the messages in the stream were processed
+
+except asapo_consumer.AsapoEndOfStreamError:
+    print('stream ended')    # not-finished stream timeout, or wrong or empty stream
+#consume snippet_end
+
+#delete snippet_start
+consumer.delete_stream(error_on_not_exist = True) # you can delete the stream after consuming
+#delete snippet_end
diff --git a/docs/site/versioned_examples/version-21.12.0/python/consume_dataset.py b/docs/site/versioned_examples/version-21.12.0/python/consume_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ed7711d784c5e8b5ef65f99cdeed5846d5ebbec
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/consume_dataset.py
@@ -0,0 +1,39 @@
+import asapo_consumer
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+
+group_id = consumer.generate_group_id()
+
+# dataset snippet_start
+try:
+    # get_next_dataset behaves similarly to the regular get_next
+    while True:
+        dataset = consumer.get_next_dataset(group_id, stream = 'default')
+        print ('Dataset Id:', dataset['id'])
+        # the initial response only contains the metadata
+        # the actual content should be retrieved separately
+        for metadata in dataset['content']:
+            data = consumer.retrieve_data(metadata)
+            print ('Part ' + str(metadata['dataset_substream']) + ' out of ' + str(dataset['expected_size']))
+            print (data.tobytes().decode("utf-8"), metadata)
+
+except asapo_consumer.AsapoStreamFinishedError:
+    print('stream finished')
+
+except asapo_consumer.AsapoEndOfStreamError:
+    print('stream ended')
+# dataset snippet_end
diff --git a/docs/site/versioned_examples/version-21.12.0/python/metadata.py b/docs/site/versioned_examples/version-21.12.0/python/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..11346dc0890c59e9950655fc2e8e1fec0c6331a6
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/metadata.py
@@ -0,0 +1,128 @@
+import asapo_consumer
+import asapo_producer
+
+import json
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+producer.set_log_level('error')
+
+# beamtime_set snippet_start
+# sample beamtime metadata. You can add any data you want, with any level of complexity
+# in this example we use strings and ints, and one nested structure
+beamtime_metadata = {
+    'name': 'beamtime name',
+    'condition': 'beamtime condition',
+    'intvalue1': 5,
+    'intvalue2': 10,
+    'structure': {
+        'structint1': 20,
+        'structint2': 30
+    }
+}
+
+# send the metadata
+# by default the new metadata will completely replace the one that's already there
+producer.send_beamtime_meta(json.dumps(beamtime_metadata), callback = callback)
+# beamtime_set snippet_end
+
+# beamtime_update snippet_start
+# we can update the existing metadata if we want, by modifying the existing fields, or adding new ones
+beamtime_metadata_update = {
+    'condition': 'updated beamtime condition',
+    'newintvalue': 15
+}
+
+# send the metadata in the 'update' mode
+producer.send_beamtime_meta(json.dumps(beamtime_metadata_update), mode = 'update', callback = callback)
+# beamtime_update snippet_end
+
+# stream_set snippet_start
+# sample stream metadata
+stream_metadata = {
+    'name': 'stream name',
+    'condition': 'stream condition',
+    'intvalue': 44
+}
+
+# works the same way: by default we replace the stream metadata, but update is also possible
+# update works exactly the same as for beamtime, but here we will only do 'replace'
+producer.send_stream_meta(json.dumps(stream_metadata), callback = callback)
+# stream_set snippet_end
+
+# message_set snippet_start
+# sample message metadata
+message_metadata = {
+    'name': 'message name',
+    'condition': 'message condition',
+    'somevalue': 55
+}
+
+# the message metadata is sent together with the message itself
+# in case of datasets each part has its own metadata
+producer.send(1, "processed/test_file", b'hello', user_meta = json.dumps(message_metadata), stream = "default", callback = callback)
+# message_set snippet_end
+
+producer.wait_requests_finished(2000)
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+
+# beamtime_get snippet_start
+# read the beamtime metadata
+beamtime_metadata_read = consumer.get_beamtime_meta()
+
+# the structure is the same as the one that was sent, and the updated values are already there
+print('Name:', beamtime_metadata_read['name'])
+print('Condition:', beamtime_metadata_read['condition'])
+print('Updated value exists:', 'newintvalue' in beamtime_metadata_read)
+print('Sum of int values:', beamtime_metadata_read['intvalue1'] + beamtime_metadata_read['intvalue2'])
+print('Nested structure value', beamtime_metadata_read['structure']['structint1'])
+# beamtime_get snippet_end
+
+# stream_get snippet_start
+# read the stream metadata
+stream_metadata_read = consumer.get_stream_meta(stream = 'default')
+
+# access various fields from it
+print('Stream Name:', stream_metadata_read['name'])
+print('Stream Condition:', stream_metadata_read['condition'])
+print('Stream int value:', stream_metadata_read['intvalue'])
+# stream_get snippet_end
+
+group_id = consumer.generate_group_id()
+try:
+    while True:
+        # message_get snippet_start
+        # right now we are only interested in metadata
+        data, meta = consumer.get_next(group_id, meta_only = True)
+        print('Message #', meta['_id'])
+
+        # our custom metadata is stored inside the message metadata
+        message_metadata_read = meta['meta']
+        print('Message Name:', message_metadata_read['name'])
+        print('Message Condition:', message_metadata_read['condition'])
+        print('Message int value:', message_metadata_read['somevalue'])
+        # message_get snippet_end
+except asapo_consumer.AsapoStreamFinishedError:
+    print('stream finished')
+
+except asapo_consumer.AsapoEndOfStreamError:
+    print('stream ended')
diff --git a/docs/site/versioned_examples/version-21.12.0/python/next_stream.py b/docs/site/versioned_examples/version-21.12.0/python/next_stream.py
new file mode 100644
index 0000000000000000000000000000000000000000..d88638185b1d64f81023461d59111409c2c40af6
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/next_stream.py
@@ -0,0 +1,72 @@
+import asapo_consumer
+import asapo_producer
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+producer.set_log_level('error')
+
+# let's start with producing a sample of 10 simple messages
+for i in range(1, 11):
+    producer.send(i, "processed/test_file_" + str(i), ('content of the message #' + str(i)).encode(), stream = 'default', callback = callback)
+
+# next_stream_set snippet_start
+# finish the stream and set the next stream to be called 'next'
+producer.send_stream_finished_flag('default', i, next_stream = 'next', callback = callback)
+# next_stream_set snippet_end
+
+# populate the 'next' stream as well
+for i in range(1, 6):
+    producer.send(i, "processed/test_file_next_" + str(i), ('content of the message #' + str(i)).encode(), stream = 'next', callback = callback)
+
+# we leave the 'next' stream unfinished, but the chain of streams can be of any length
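+# as a sketch only (not executed here), the chain could be extended by finishing
+# 'next' as well and pointing it to yet another, hypothetical stream:
+#   producer.send_stream_finished_flag('next', i, next_stream = 'another_stream', callback = callback)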
+
+producer.wait_requests_finished(2000)
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+group_id = consumer.generate_group_id()
+
+# read_stream snippet_start
+# we start with the 'default' stream (the first one)
+stream_name = 'default'
+
+while True:
+    try:
+        data, meta = consumer.get_next(group_id, meta_only = False, stream = stream_name)
+        text_data = data.tobytes().decode("utf-8")
+        message_id = meta['_id']
+        print('Message #', message_id, ':', text_data)
+    except asapo_consumer.AsapoStreamFinishedError:
+        # when the stream finishes, we look for the info on the next stream
+        # first, we find the stream with our name in the list of streams
+        stream = next(s for s in consumer.get_stream_list() if s['name'] == stream_name)
+        # then we check whether the field 'nextStream' is set and not empty
+        if 'nextStream' in stream and stream['nextStream']:
+            # if it is, we continue with the next stream
+            stream_name = stream['nextStream']
+            print('Changing stream to the next one:', stream_name)
+            continue
+        # otherwise we stop
+        print('stream finished')
+        break
+    except asapo_consumer.AsapoEndOfStreamError:
+        print('stream ended')
+        break
+# read_stream snippet_end
diff --git a/docs/site/versioned_examples/version-21.12.0/python/pipeline.py b/docs/site/versioned_examples/version-21.12.0/python/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2f8152b09dc6951db8a46dcb6396944e6cb1a71
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/pipeline.py
@@ -0,0 +1,62 @@
+import asapo_consumer
+import asapo_producer
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+
+group_id = consumer.generate_group_id()
+# pipeline snippet_start
+# put the processed message into the new stream
+pipelined_stream_name = 'pipelined'
+
+try:
+    while True:
+        # we expect the message to be in the 'default' stream already
+        data, meta = consumer.get_next(group_id, meta_only = False)
+        message_id = meta['_id']
+        
+        # work on our data
+        text_data = data.tobytes().decode("utf-8")
+        pipelined_message = (text_data + ' processed').encode()
+        
+        # you may use the same filename if you want to rewrite the source file. This will result in a warning, but it is a valid use case
+        producer.send(message_id, "processed/test_file_" + str(message_id), pipelined_message, stream = pipelined_stream_name, callback = callback)
+        
+
+except asapo_consumer.AsapoStreamFinishedError:
+    print('stream finished')
+        
+except asapo_consumer.AsapoEndOfStreamError:
+    print('stream ended')
+# pipeline snippet_end
+producer.wait_requests_finished(2000)
+
+# finish snippet_start
+# the meta from the last iteration corresponds to the last message
+last_id = meta['_id']
+
+producer.send_stream_finished_flag("pipelined", last_id)
+# finish snippet_end
+
+# you can remove the source stream if you do not need it anymore
+consumer.delete_stream(stream = 'default', error_on_not_exist = True)
diff --git a/docs/site/versioned_examples/version-21.12.0/python/produce.py b/docs/site/versioned_examples/version-21.12.0/python/produce.py
new file mode 100644
index 0000000000000000000000000000000000000000..262015b25fd99be947f1756222d6a74a1bb54acb
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/produce.py
@@ -0,0 +1,51 @@
+import asapo_producer
+
+# callback snippet_start
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        # the data was not sent. Something is terribly wrong.
+        print("could not send: ",payload,err)
+    elif err is not None:
+        # The data was sent, but there was some unexpected problem, e.g. the file was overwritten.
+        print("sent with warning: ",payload,err)
+    else:
+        # all fine
+        print("successfuly sent: ",payload)
+# callback snippet_end
+
+# create snippet_start
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+producer = asapo_producer \
+                .create_producer(endpoint,
+                                 'processed',    # should be 'processed' or 'raw'; 'processed' writes to the core FS
+                                 beamtime,       # the folder should exist
+                                 'auto',         # can be 'auto' if beamtime_id is given
+                                 'test_source',  # source
+                                 '',             # authorization token
+                                 1,              # number of threads; increase if the sending speed seems slow
+                                 60000)          # timeout. Do not change.
+
+producer.set_log_level("error") # other values are "warning", "info" or "debug".
+# create snippet_end
+
+# send snippet_start
+# we are sending a message with index 1 to the default stream. The filename must start with processed/
+producer.send(1,                     # message number. Should be unique and ordered.
+              "processed/test_file", # name of the file. Should be unique, or it will be overwritten
+              b"hello",              # binary data
+              callback = callback)   # callback
+# send snippet_end
+# send data in loop
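+# for example, a minimal sketch of sending several messages in a loop
+# (the ids and file names below are made up for illustration):
+#   for i in range(2, 11):
+#       producer.send(i, "processed/test_file_" + str(i),
+#                     ('message #' + str(i)).encode(), callback = callback)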
+
+# add the following at the end of the script
+
+# finish snippet_start
+producer.wait_requests_finished(2000) # will synchronously wait for all the data to be sent.
+                                      # Use it when no more data is expected.
+
+# you may want to mark the stream as finished
+producer.send_stream_finished_flag("default", # name of the stream. If you didn't specify the stream in 'send', it would be 'default'
+                                   1)         # the number of the last message in the stream
+# finish snippet_end
diff --git a/docs/site/versioned_examples/version-21.12.0/python/produce_dataset.py b/docs/site/versioned_examples/version-21.12.0/python/produce_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..106229c430b979bba1e547fcc1fca4c1de4a7eb1
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/produce_dataset.py
@@ -0,0 +1,29 @@
+import asapo_producer
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+
+# dataset snippet_start
+# assuming we have three different producers for a single dataset
+
+# add the additional 'dataset' parameter, which should be (<part_number>, <total_parts_in_dataset>)
+producer.send(1, "processed/test_file_dataset_1", b"hello dataset 1", dataset = (1,3), callback = callback)
+# this can be done from different producers in any order
+producer.send(1, "processed/test_file_dataset_2", b"hello dataset 2", dataset = (2,3), callback = callback)
+producer.send(1, "processed/test_file_dataset_3", b"hello dataset 3", dataset = (3,3), callback = callback)
+# dataset snippet_end
+
+producer.wait_requests_finished(2000)
+# the dataset parts are not counted towards the number of messages in the stream
+# the last message id in this example is still 1
+producer.send_stream_finished_flag("default", 1)
diff --git a/docs/site/versioned_examples/version-21.12.0/python/query.py b/docs/site/versioned_examples/version-21.12.0/python/query.py
new file mode 100644
index 0000000000000000000000000000000000000000..83cfb51c9ef8dc00cd8e518fd5869f6e557453f5
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/python/query.py
@@ -0,0 +1,86 @@
+import asapo_consumer
+import asapo_producer
+
+import json
+from datetime import datetime, timedelta
+
+def callback(payload,err):
+    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
+        print("could not send: ",payload,err)
+    elif err is not None:
+        print("sent with warning: ",payload,err)
+    else:
+        print("successfuly sent: ",payload)
+
+endpoint = "localhost:8400"
+beamtime = "asapo_test"
+
+token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
+"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
+"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
+"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
+"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
+
+path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
+
+producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
+producer.set_log_level('error')
+
+# let's start with producing some messages with metadata
+for i in range(1, 11):
+    metadata = {
+        'condition': 'condition #' + str(i),
+        'somevalue': i * 10
+    }
+    producer.send(i, "processed/test_file_" + str(i), ('message #' + str(i)).encode(), user_meta = json.dumps(metadata), stream = "default", callback = callback)
+
+producer.wait_requests_finished(2000)
+
+consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
+
+# helper function to print messages
+def print_messages(metadatas):
+    # the query will return the list of metadatas
+    for meta in metadatas:
+        # for each metadata we need to obtain the actual message first
+        data = consumer.retrieve_data(meta)
+        print('Message #', meta['_id'], ', content:', data.tobytes().decode("utf-8"), ', usermetadata:', meta['meta'])
+
+# by_id snippet_start
+# simple query, same as get_by_id
+metadatas = consumer.query_messages('_id = 1')
+# by_id snippet_end
+print('Message with ID = 1')
+print_messages(metadatas)
+
+# by_ids snippet_start
+# the query that requests the range of IDs
+metadatas = consumer.query_messages('_id >= 8')
+# by_ids snippet_end
+print('Messages with ID >= 8')
+print_messages(metadatas)
+
+# string_equal snippet_start
+# the query that has some specific requirement for message metadata
+metadatas = consumer.query_messages('meta.condition = "condition #7"')
+# string_equal snippet_end
+print('Message with condition = "condition #7"')
+print_messages(metadatas)
+
+# int_compare snippet_start
+# the query that has several requirements for user metadata
+metadatas = consumer.query_messages('meta.somevalue > 30 AND meta.somevalue < 60')
+# int_compare snippet_end
+print('Message with 30 < somevalue < 60')
+print_messages(metadatas)
+
+# timestamp snippet_start
+# the query that is based on the message's timestamp
+now = datetime.now()
+fifteen_minutes_ago = now - timedelta(minutes = 15)
+# Python uses timestamps in seconds, while ASAP::O uses nanoseconds, so we need to multiply by a billion (10**9)
+metadatas = consumer.query_messages('timestamp < {} AND timestamp > {}'.format(now.timestamp() * 10**9, fifteen_minutes_ago.timestamp() * 10**9))
+# timestamp snippet_end
+print('Messages in the last 15 minutes')
+print_messages(metadatas)
+
diff --git a/docs/site/versioned_examples/version-21.12.0/start_asapo_socket.sh b/docs/site/versioned_examples/version-21.12.0/start_asapo_socket.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aa1d4e344e619a63ecfd91e94589a933245ab4f7
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/start_asapo_socket.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+set -e
+
+ASAPO_HOST_DIR=/var/tmp/asapo # you can change this if needed; make sure there is enough space (>3 GB on disk)
+
+NOMAD_ALLOC_HOST_SHARED=$ASAPO_HOST_DIR/container_host_shared/nomad_alloc
+SERVICE_DATA_CLUSTER_SHARED=$ASAPO_HOST_DIR/asapo_cluster_shared/service_data
+DATA_GLOBAL_SHARED=$ASAPO_HOST_DIR/global_shared/data
+DATA_GLOBAL_SHARED_ONLINE=$ASAPO_HOST_DIR/global_shared/online_data
+MONGO_DIR=$SERVICE_DATA_CLUSTER_SHARED/mongodb
+
+ASAPO_USER=`id -u`:`id -g`
+
+mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
+chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
+
+cd $SERVICE_DATA_CLUSTER_SHARED
+mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
+chmod 777 *
+
+docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
+  -u $ASAPO_USER \
+  --group-add `getent group docker | cut -d: -f3` \
+  -v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
+  -v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
+  -v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
+  -e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
+  -e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
+  -e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
+  -e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
+  -e TF_VAR_mongo_dir=$MONGO_DIR \
+  -e TF_VAR_asapo_user=$ASAPO_USER \
+  -e ACL_ENABLED=true \
+  --name asapo --net=host -d yakser/asapo-cluster:21.12.0
+
+sleep 15
+docker exec asapo jobs-start -var elk_logs=false -var influxdb_version=1.8.4
diff --git a/docs/site/versioned_examples/version-21.12.0/start_asapo_tcp.sh b/docs/site/versioned_examples/version-21.12.0/start_asapo_tcp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..028f23e4ee931fa518fd5a1d5caff7d815b8670c
--- /dev/null
+++ b/docs/site/versioned_examples/version-21.12.0/start_asapo_tcp.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+set -e
+
+ASAPO_HOST_DIR=/var/tmp/asapo # you can change this if needed; make sure there is enough space (>3 GB on disk)
+# change this according to your Docker configuration
+DOCKER_ENDPOINT="127.0.0.1:2376"
+DOCKER_TLS_CA=/usr/local/docker/certs/$USER/ca.pem
+DOCKER_TLS_KEY=/usr/local/docker/certs/$USER/key.pem
+DOCKER_TLS_CERT=/usr/local/docker/certs/$USER/cert.pem
+
+
+NOMAD_ALLOC_HOST_SHARED=$ASAPO_HOST_DIR/container_host_shared/nomad_alloc
+SERVICE_DATA_CLUSTER_SHARED=$ASAPO_HOST_DIR/asapo_cluster_shared/service_data
+DATA_GLOBAL_SHARED=$ASAPO_HOST_DIR/global_shared/data
+DATA_GLOBAL_SHARED_ONLINE=$ASAPO_HOST_DIR/global_shared/online_data
+MONGO_DIR=$SERVICE_DATA_CLUSTER_SHARED/mongodb
+
+ASAPO_USER=`id -u`:`id -g`
+
+mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
+chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
+
+cd $SERVICE_DATA_CLUSTER_SHARED
+mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
+chmod 777 *
+
+docker run --privileged --userns=host --security-opt no-new-privileges --rm \
+  -u $ASAPO_USER \
+  -v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
+  -v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
+  -v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
+  -e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
+  -e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
+  -e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
+  -e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
+  -e TF_VAR_mongo_dir=$MONGO_DIR \
+  -e TF_VAR_asapo_user=$ASAPO_USER \
+  -e ACL_ENABLED=true \
+  -v $DOCKER_TLS_CA:/etc/nomad/ca.pem \
+  -v $DOCKER_TLS_KEY:/etc/nomad/key.pem \
+  -v $DOCKER_TLS_CERT:/etc/nomad/cert.pem \
+  -e DOCKER_ENDPOINT=$DOCKER_ENDPOINT \
+  --name asapo --net=host -d yakser/asapo-cluster:21.12.0
+
+sleep 15
+docker exec asapo jobs-start -var elk_logs=false
diff --git a/docs/site/versioned_sidebars/version-21.12.0-sidebars.json b/docs/site/versioned_sidebars/version-21.12.0-sidebars.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa5dee0d0fdec3d114dcc62a19ad1c084ba40178
--- /dev/null
+++ b/docs/site/versioned_sidebars/version-21.12.0-sidebars.json
@@ -0,0 +1,93 @@
+{
+  "version-21.12.0/docs": [
+    {
+      "type": "doc",
+      "id": "version-21.12.0/getting-started"
+    },
+    {
+      "type": "doc",
+      "id": "version-21.12.0/overview"
+    },
+    {
+      "type": "doc",
+      "id": "version-21.12.0/compare-to-others"
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Concepts And Architecture",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-21.12.0/data-in-asapo"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/producer-clients"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/consumer-clients"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/core-architecture"
+        }
+      ]
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Use Cases",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-21.12.0/p02.1"
+        }
+      ]
+    },
+    {
+      "collapsed": true,
+      "type": "category",
+      "label": "Code Examples",
+      "items": [
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/overview"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/simple-producer"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/simple-consumer"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/simple-pipeline"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/datasets"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/acknowledgements"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/metadata"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/next_stream"
+        },
+        {
+          "type": "doc",
+          "id": "version-21.12.0/cookbook/query"
+        }
+      ]
+    }
+  ]
+}
diff --git a/docs/site/versions.json b/docs/site/versions.json
index 3e46cb3bc2745e5c81f82cfbd62ec61dd3c46bd1..e4f75c466b8295b10f918a5fa14d42e0fc499519 100644
--- a/docs/site/versions.json
+++ b/docs/site/versions.json
@@ -1,4 +1,5 @@
 [
+  "21.12.0",
   "21.09.0",
   "21.06.0"
 ]
diff --git a/file_transfer/src/asapo_file_transfer/server/transfer.go b/file_transfer/src/asapo_file_transfer/server/transfer.go
index 2e0cfb92755eae706c617ecb038d11664ad45c6d..9dffe4b1339814018318f456350db6125fcc44ba 100644
--- a/file_transfer/src/asapo_file_transfer/server/transfer.go
+++ b/file_transfer/src/asapo_file_transfer/server/transfer.go
@@ -11,10 +11,8 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"strconv"
 )
 
-
 type fileTransferRequest struct {
 	Folder   string
 	FileName string
@@ -23,109 +21,113 @@ type fileTransferRequest struct {
 func Exists(name string) bool {
 	f, err := os.Open(name)
 	defer f.Close()
-	return err==nil
+	return err == nil
 }
 
-func checkClaim(r *http.Request,ver utils.VersionNum,request* fileTransferRequest) (int,error) {
+func checkClaim(r *http.Request, ver utils.VersionNum, request *fileTransferRequest) (int, error) {
 	var extraClaim structs.FolderTokenTokenExtraClaim
 	if err := utils.JobClaimFromContext(r, nil, &extraClaim); err != nil {
-		return http.StatusInternalServerError,err
+		return http.StatusInternalServerError, err
 	}
 	if ver.Id > 1 {
 		request.Folder = extraClaim.RootFolder
-		return http.StatusOK,nil
+		return http.StatusOK, nil
 	}
 
-	if extraClaim.RootFolder!=request.Folder {
-		err_txt := "access forbidden for folder "+request.Folder
-		log.Error("cannot transfer file: "+err_txt)
+	if extraClaim.RootFolder != request.Folder {
+		err_txt := "access forbidden for folder " + request.Folder
+		log.Error("cannot transfer file: " + err_txt)
 		return http.StatusUnauthorized, errors.New(err_txt)
 	}
-	return http.StatusOK,nil
+	return http.StatusOK, nil
 }
 
-func checkFileExists(r *http.Request,name string) (int,error) {
+func checkFileExists(r *http.Request, name string) (int, error) {
 	if !Exists(name) {
-		err_txt := "file "+name+" does not exist or cannot be read"
-		log.Error("cannot transfer file: "+err_txt)
-		return http.StatusNotFound,errors.New(err_txt)
+		err_txt := "file " + name + " does not exist or cannot be read"
+		log.Error("cannot transfer file: " + err_txt)
+		return http.StatusNotFound, errors.New(err_txt)
 	}
-	return http.StatusOK,nil
+	return http.StatusOK, nil
 
 }
 
-func checkRequest(r *http.Request, ver utils.VersionNum) (string,int,error) {
+func checkRequest(r *http.Request, ver utils.VersionNum) (string, int, error) {
 	var request fileTransferRequest
-	err := utils.ExtractRequest(r,&request)
+	err := utils.ExtractRequest(r, &request)
 	if err != nil {
-		return "",http.StatusBadRequest,err
+		return "", http.StatusBadRequest, err
 	}
 
-	if status,err := checkClaim(r,ver, &request); err != nil {
-		return "",status,err
+	if status, err := checkClaim(r, ver, &request); err != nil {
+		return "", status, err
 	}
 	var fullName string
 	if ver.Id == 1 { // protocol v0.1
-		fullName = filepath.Clean(request.Folder+string(os.PathSeparator)+request.FileName)
+		fullName = filepath.Clean(request.Folder + string(os.PathSeparator) + request.FileName)
 	} else {
-		fullName = filepath.Clean(request.Folder+string(os.PathSeparator)+request.FileName)
+		fullName = filepath.Clean(request.Folder + string(os.PathSeparator) + request.FileName)
 	}
 
-	if status,err := checkFileExists(r,fullName); err != nil {
-		return "",status,err
+	if status, err := checkFileExists(r, fullName); err != nil {
+		return "", status, err
 	}
-	return fullName,http.StatusOK,nil
+	return fullName, http.StatusOK, nil
 }
 
 func serveFile(w http.ResponseWriter, r *http.Request, fullName string) {
 	_, file := path.Split(fullName)
 	w.Header().Set("Content-Disposition", "attachment; filename=\""+file+"\"")
-	log.Debug("Transferring file " + fullName)
-	http.ServeFile(w,r, fullName)
+
+	log.WithFields(map[string]interface{}{
+		"name": fullName,
+	}).Debug("transferring file")
+
+	http.ServeFile(w, r, fullName)
 }
 
 func serveFileSize(w http.ResponseWriter, r *http.Request, fullName string) {
 	var fsize struct {
-		FileSize int64  `json:"file_size"`
+		FileSize int64 `json:"file_size"`
 	}
 
 	fi, err := os.Stat(fullName)
 	if err != nil {
-		utils.WriteServerError(w,err,http.StatusBadRequest)
-		log.Error("Error getting file size for " + fullName+": "+err.Error())
+		utils.WriteServerError(w, err, http.StatusBadRequest)
+		log.Error("error getting file size for " + fullName + ": " + err.Error())
 	}
-	log.Debug("Sending file size "+strconv.FormatInt(fi.Size(),10)+" for " + fullName)
+
+	log.WithFields(map[string]interface{}{
+		"name": fullName,
+		"size": fi.Size(),
+	}).Debug("sending file size")
 
 	fsize.FileSize = fi.Size()
-	b,_ := json.Marshal(&fsize)
+	b, _ := json.Marshal(&fsize)
 	w.Write(b)
 }
 
-
-func checkFtsApiVersion(w http.ResponseWriter, r *http.Request) (utils.VersionNum,bool) {
+func checkFtsApiVersion(w http.ResponseWriter, r *http.Request) (utils.VersionNum, bool) {
 	return utils.PrecheckApiVersion(w, r, version.GetFtsApiVersion())
 }
 
 func routeFileTransfer(w http.ResponseWriter, r *http.Request) {
-	ver, ok := checkFtsApiVersion(w, r);
+	ver, ok := checkFtsApiVersion(w, r)
 	if !ok {
 		return
 	}
 
-	fullName, status,err := checkRequest(r,ver);
+	fullName, status, err := checkRequest(r, ver)
 	if err != nil {
-		utils.WriteServerError(w,err,status)
+		utils.WriteServerError(w, err, status)
 		return
 	}
 
 	sizeonly := r.URL.Query().Get("sizeonly")
- 	if (sizeonly != "true") {
-		serveFile(w,r,fullName)
+	if sizeonly != "true" {
+		serveFile(w, r, fullName)
 	} else {
-		serveFileSize(w,r,fullName)
+		serveFileSize(w, r, fullName)
 	}
 
-
-
-
 }
diff --git a/producer/api/cpp/include/asapo/producer/producer.h b/producer/api/cpp/include/asapo/producer/producer.h
index f77aae08db7c90be4c523a48128a01670e8269af..6876202b773f64e1ee1115d8c85be60a3207b1bc 100644
--- a/producer/api/cpp/include/asapo/producer/producer.h
+++ b/producer/api/cpp/include/asapo/producer/producer.h
@@ -7,7 +7,7 @@
 #include "asapo/logger/logger.h"
 #include "common.h"
 #include "asapo/common/data_structs.h"
-#include "asapo/preprocessor/definitions.h"
+#include "asapo/preprocessor/deprecated.h"
 
 namespace asapo {
 
@@ -127,7 +127,7 @@ class Producer {
       \param callback - callback function
       \return Error - will be nullptr on success
     */
-    virtual Error DEPRECATED("obsolates 01.07.2022, use SendBeamtimeMetadata instead") SendMetadata(
+    virtual Error ASAPO_DEPRECATED("obsolates 01.07.2022, use SendBeamtimeMetadata instead") SendMetadata(
         const std::string& metadata,
         RequestCallback callback)  = 0;
 
diff --git a/producer/api/cpp/src/receiver_discovery_service.h b/producer/api/cpp/src/receiver_discovery_service.h
index a2949ec1b638905ed61f81a14a6daaa316b584c2..0eef099913e22811046eae8c5227fb279684971f 100644
--- a/producer/api/cpp/src/receiver_discovery_service.h
+++ b/producer/api/cpp/src/receiver_discovery_service.h
@@ -19,11 +19,11 @@ using ReceiversList = std::vector<std::string>;
 class ReceiverDiscoveryService {
   public:
     explicit ReceiverDiscoveryService(std::string endpoint, uint64_t update_frequency_ms);
-    VIRTUAL void StartCollectingData();
-    VIRTUAL ~ReceiverDiscoveryService();
-    VIRTUAL uint64_t MaxConnections();
-    VIRTUAL ReceiversList RotatedUriList(uint64_t nthread);
-    VIRTUAL uint64_t UpdateFrequency();
+    ASAPO_VIRTUAL void StartCollectingData();
+    ASAPO_VIRTUAL ~ReceiverDiscoveryService();
+    ASAPO_VIRTUAL uint64_t MaxConnections();
+    ASAPO_VIRTUAL ReceiversList RotatedUriList(uint64_t nthread);
+    ASAPO_VIRTUAL uint64_t UpdateFrequency();
   public:
     std::unique_ptr<HttpClient> httpclient__;
     const AbstractLogger* log__;
diff --git a/producer/api/cpp/src/request_handler_tcp.cpp b/producer/api/cpp/src/request_handler_tcp.cpp
index 9595e3146b790beae689cb043978678cea7285db..defaa31a51e326d0c4329bf9fba0c6f6212b19e3 100644
--- a/producer/api/cpp/src/request_handler_tcp.cpp
+++ b/producer/api/cpp/src/request_handler_tcp.cpp
@@ -93,22 +93,22 @@ Error RequestHandlerTcp::ReceiveResponse(std::string* response) {
     switch (sendDataResponse.error_code) {
     case kNetAuthorizationError : {
         auto res_err = ProducerErrorTemplates::kWrongInput.Generate();
-        res_err->AddContext("response", sendDataResponse.message);
+        res_err->AddDetails("response", sendDataResponse.message);
         return res_err;
     }
     case kNetErrorNotSupported : {
         auto res_err = ProducerErrorTemplates::kUnsupportedClient.Generate();
-        res_err->AddContext("response", sendDataResponse.message);
+        res_err->AddDetails("response", sendDataResponse.message);
         return res_err;
     }
     case kNetErrorWrongRequest : {
         auto res_err = ProducerErrorTemplates::kWrongInput.Generate();
-        res_err->AddContext("response", sendDataResponse.message);
+        res_err->AddDetails("response", sendDataResponse.message);
         return res_err;
     }
     case kNetErrorWarning: {
         auto res_err = ProducerErrorTemplates::kServerWarning.Generate();
-        res_err->AddContext("response", sendDataResponse.message);
+        res_err->AddDetails("response", sendDataResponse.message);
         return res_err;
     }
     case kNetErrorReauthorize: {
@@ -122,7 +122,7 @@ Error RequestHandlerTcp::ReceiveResponse(std::string* response) {
         return nullptr;
     default:
         auto res_err = ProducerErrorTemplates::kInternalServerError.Generate();
-        res_err->AddContext("response", sendDataResponse.message);
+        res_err->AddDetails("response", sendDataResponse.message);
         return res_err;
     }
 }
diff --git a/producer/api/python/asapo_producer.pyx.in b/producer/api/python/asapo_producer.pyx.in
index 1a02131818ade7a0ff13908c9cc3a6dbbdbcbb5f..059a4c812fb26f684284f916dcab7e7c6df83be0 100644
--- a/producer/api/python/asapo_producer.pyx.in
+++ b/producer/api/python/asapo_producer.pyx.in
@@ -153,9 +153,6 @@ cdef class PyProducer:
         if err:
             throw_exception(err)
         if data is not None:
-          if data.base is not None:
-            Py_XINCREF(<PyObject*>data.base)
-          else:
             Py_XINCREF(<PyObject*>data)
         if callback != None:
             Py_XINCREF(<PyObject*>callback)
@@ -456,9 +453,6 @@ cdef class PyProducer:
     cdef void c_callback_ndarr(self,py_callback,nd_array,RequestCallbackPayload payload, Error err) with gil:
         self.c_callback_python(py_callback,nd_array,payload,err)
         if nd_array is not None:
-          if nd_array.base is not None:
-            Py_XDECREF(<PyObject*>nd_array.base)
-          else:
             Py_XDECREF(<PyObject*>nd_array)
 
     cdef void c_callback_bytesaddr(self,py_callback,bytes_array,RequestCallbackPayload payload, Error err) with gil:
diff --git a/producer/api/python/dist_linux/CMakeLists.txt b/producer/api/python/dist_linux/CMakeLists.txt
index 578201ad36272f1efa751a9a1a290cfbc29feb83..0180f47dbf568844211a643cf7195de4f38e432b 100644
--- a/producer/api/python/dist_linux/CMakeLists.txt
+++ b/producer/api/python/dist_linux/CMakeLists.txt
@@ -4,30 +4,33 @@ configure_file(MANIFEST.in MANIFEST.in @ONLY)
 file(GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/setup.py INPUT ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
 
 if ("rpm" IN_LIST BUILD_PYTHON_PACKAGES)
-	ADD_CUSTOM_TARGET(python-rpm-producer ALL
+	if (BUILD_PYTHON2_PACKAGES)
+		ADD_CUSTOM_TARGET(python-rpm-producer ALL
 		COMMAND PACKAGE_PREFIX=python- python setup.py bdist_rpm --release=1.${PACKAGE_RELEASE_SUFFIX}
 			--requires=numpy --binary-only
 			COMMAND rm -f dist/python*.gz dist/*debuginfo* dist/*debugsource*
 		)
-
+		ADD_DEPENDENCIES(python-rpm-producer python3-rpm-producer)
+	endif()
 	ADD_CUSTOM_TARGET(python3-rpm-producer ALL
 		COMMAND PACKAGE_PREFIX=python3- python3 setup.py bdist_rpm --release=1.${PACKAGE_RELEASE_SUFFIX}
 			--requires=python3-numpy --binary-only
 		COMMAND rm -f dist/python3*.gz dist/*debuginfo* dist/*debugsource*
 		)
-	ADD_DEPENDENCIES(python3-rpm-producer python-rpm-producer)
-	ADD_DEPENDENCIES(python-rpm-producer copy_python_dist-producer)
+	ADD_DEPENDENCIES(python3-rpm-producer copy_python_dist-producer)
 endif()
 
 if ("deb" IN_LIST BUILD_PYTHON_PACKAGES)
-	ADD_CUSTOM_TARGET(python-deb-producer ALL
+	if (BUILD_PYTHON2_PACKAGES)
+		ADD_CUSTOM_TARGET(python-deb-producer ALL
 			COMMAND rm -rf deb_dist/*/
 			COMMAND PACKAGE_PREFIX= python setup.py --command-packages=stdeb.command
 			sdist_dsc --debian-version=${PACKAGE_RELEASE_SUFFIX} --depends=python-numpy bdist_deb
 			COMMAND rm -f deb_dist/*dbgsym*
 			COMMAND cp deb_dist/*.deb dist/
 			)
-
+		ADD_DEPENDENCIES(python-deb-producer python3-deb-producer)
+	endif()
 	ADD_CUSTOM_TARGET(python3-deb-producer ALL
 			COMMAND rm -rf deb_dist/*/
 			COMMAND PACKAGE_PREFIX= python3 setup.py --command-packages=stdeb.command
@@ -35,8 +38,7 @@ if ("deb" IN_LIST BUILD_PYTHON_PACKAGES)
 			COMMAND rm -f deb_dist/*dbgsym*
 			COMMAND cp deb_dist/*.deb dist/
 			)
-	ADD_DEPENDENCIES(python3-deb-producer python-deb-producer)
-	ADD_DEPENDENCIES(python-deb-producer copy_python_dist-producer)
+	ADD_DEPENDENCIES(python3-deb-producer copy_python_dist-producer)
 endif()
 
 if ("source" IN_LIST BUILD_PYTHON_PACKAGES )
diff --git a/producer/event_monitor_producer/src/inotify_linux.h b/producer/event_monitor_producer/src/inotify_linux.h
index 44e82ce3700f86754f5a03e11d82e51f1bc25366..8a5890fdeead61e378274927cf76c435dae436a9 100644
--- a/producer/event_monitor_producer/src/inotify_linux.h
+++ b/producer/event_monitor_producer/src/inotify_linux.h
@@ -11,10 +11,10 @@ namespace asapo {
 
 class Inotify {
   public:
-    VIRTUAL int Init();
-    VIRTUAL int AddWatch(int fd, const char* name, uint32_t mask);
-    VIRTUAL int DeleteWatch(int fd, int wd);
-    VIRTUAL ssize_t Read(int fd, void* buf, size_t nbytes);
+    ASAPO_VIRTUAL int Init();
+    ASAPO_VIRTUAL int AddWatch(int fd, const char* name, uint32_t mask);
+    ASAPO_VIRTUAL int DeleteWatch(int fd, int wd);
+    ASAPO_VIRTUAL ssize_t Read(int fd, void* buf, size_t nbytes);
 };
 
 }
diff --git a/producer/event_monitor_producer/src/system_folder_watch_linux.h b/producer/event_monitor_producer/src/system_folder_watch_linux.h
index 914e1f297c7102ea9845916ce5aa67498ed1cddd..828f4dec22bfdfbb049b3607124aac2b16b7accd 100644
--- a/producer/event_monitor_producer/src/system_folder_watch_linux.h
+++ b/producer/event_monitor_producer/src/system_folder_watch_linux.h
@@ -32,8 +32,8 @@ const uint32_t kInotifyWatchFlags  = IN_CLOSE_WRITE |
 
 class SystemFolderWatch {
   public:
-    VIRTUAL Error StartFolderMonitor(const std::string& root_folder, const std::vector<std::string>& monitored_folders);
-    VIRTUAL FilesToSend GetFileList(Error* err);
+    ASAPO_VIRTUAL Error StartFolderMonitor(const std::string& root_folder, const std::vector<std::string>& monitored_folders);
+    ASAPO_VIRTUAL FilesToSend GetFileList(Error* err);
     SystemFolderWatch();
     std::unique_ptr<IO> io__;
     std::unique_ptr<Inotify> inotify__;
diff --git a/producer/event_monitor_producer/src/system_folder_watch_macos_dummy.h b/producer/event_monitor_producer/src/system_folder_watch_macos_dummy.h
index 3c22b6aa7dfab48d66e2fb9af24bf6b8b24e1f82..0ef23b75a73299a0c3a501fe5bbe926e03769254 100644
--- a/producer/event_monitor_producer/src/system_folder_watch_macos_dummy.h
+++ b/producer/event_monitor_producer/src/system_folder_watch_macos_dummy.h
@@ -12,11 +12,11 @@ namespace asapo {
 
 class SystemFolderWatch {
   public:
-    VIRTUAL ~SystemFolderWatch() = default;
-    VIRTUAL Error StartFolderMonitor(const std::string&, const std::vector<std::string>&) {
+    ASAPO_VIRTUAL ~SystemFolderWatch() = default;
+    ASAPO_VIRTUAL Error StartFolderMonitor(const std::string&, const std::vector<std::string>&) {
         return nullptr;
     };
-    VIRTUAL FilesToSend GetFileList(Error*) {
+    ASAPO_VIRTUAL FilesToSend GetFileList(Error*) {
         return {};
     };
 };
diff --git a/producer/event_monitor_producer/src/system_folder_watch_windows.h b/producer/event_monitor_producer/src/system_folder_watch_windows.h
index 77cfb6fb4a617dfea7e08787136aad5e436a043e..3fdd02c9c8428af80291ff5c3324fb0be657de9c 100644
--- a/producer/event_monitor_producer/src/system_folder_watch_windows.h
+++ b/producer/event_monitor_producer/src/system_folder_watch_windows.h
@@ -18,9 +18,9 @@ namespace asapo {
 class SystemFolderWatch {
   public:
     SystemFolderWatch();
-    VIRTUAL Error StartFolderMonitor(const std::string& root_folder,
+    ASAPO_VIRTUAL Error StartFolderMonitor(const std::string& root_folder,
                                      const std::vector<std::string>& monitored_folders);
-    VIRTUAL FilesToSend GetFileList(Error* err);
+    ASAPO_VIRTUAL FilesToSend GetFileList(Error* err);
     std::unique_ptr<IO> io__;
   private:
     SharedEventList event_list_;
diff --git a/producer/event_monitor_producer/src/watch_io.h b/producer/event_monitor_producer/src/watch_io.h
index c941109898bd2643d0cbec0c094dfb8d9d6e3b83..afa567d5e01fd77379cc7bbe980990a6a7692a5a 100644
--- a/producer/event_monitor_producer/src/watch_io.h
+++ b/producer/event_monitor_producer/src/watch_io.h
@@ -12,9 +12,9 @@ namespace asapo {
 class WatchIO {
   public:
     explicit WatchIO();
-    VIRTUAL HANDLE Init(const char* folder, Error* err);
-    VIRTUAL Error ReadDirectoryChanges(HANDLE handle, LPVOID buffer, DWORD buffer_length, LPDWORD bytes_returned);
-    VIRTUAL bool IsDirectory(const std::string& path);
+    ASAPO_VIRTUAL HANDLE Init(const char* folder, Error* err);
+    ASAPO_VIRTUAL Error ReadDirectoryChanges(HANDLE handle, LPVOID buffer, DWORD buffer_length, LPDWORD bytes_returned);
+    ASAPO_VIRTUAL bool IsDirectory(const std::string& path);
   private:
     std::unique_ptr<IO>io_;
 };
diff --git a/receiver/src/data_cache.h b/receiver/src/data_cache.h
index 0db4fb66e9a0d06adff3853466b6905998e65b01..78264e62285a0abffe98fd1e06114400628ef513 100644
--- a/receiver/src/data_cache.h
+++ b/receiver/src/data_cache.h
@@ -21,10 +21,10 @@ struct CacheMeta {
 class DataCache {
   public:
     explicit DataCache(uint64_t cache_size_gb, float keepunlocked_ratio);
-    VIRTUAL void* GetFreeSlotAndLock(uint64_t size, CacheMeta** meta);
-    VIRTUAL void* GetSlotToReadAndLock(uint64_t id, uint64_t data_size, CacheMeta** meta);
-    VIRTUAL bool UnlockSlot(CacheMeta* meta);
-    VIRTUAL ~DataCache() = default;
+    ASAPO_VIRTUAL void* GetFreeSlotAndLock(uint64_t size, CacheMeta** meta);
+    ASAPO_VIRTUAL void* GetSlotToReadAndLock(uint64_t id, uint64_t data_size, CacheMeta** meta);
+    ASAPO_VIRTUAL bool UnlockSlot(CacheMeta* meta);
+    ASAPO_VIRTUAL ~DataCache() = default;
   private:
     uint64_t cache_size_;
     float keepunlocked_ratio_;
diff --git a/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp b/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp
index a31ab18747093992b53c9665bbfaa5f6906f53ea..4df46716de5beb55056628bdbe6784685b255b9a 100644
--- a/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp
+++ b/receiver/src/receiver_data_server/net_server/rds_fabric_server.cpp
@@ -30,7 +30,7 @@ Error RdsFabricServer::Initialize() {
         return err;
     }
 
-    log__->Info("Started Fabric ReceiverDataServer at '" + server__->GetAddress() + "'");
+    log__->Info(LogMessageWithFields("started fabric data server").Append("address",server__->GetAddress()));
 
     return err;
 }
diff --git a/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp b/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
index fc8efd3fee6b67a12a13681e71832a3be0ba7319..6f24fc62a6555b94540efb41a01d1c11b0794cc2 100644
--- a/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
+++ b/receiver/src/receiver_data_server/net_server/rds_tcp_server.cpp
@@ -1,34 +1,38 @@
 #include "rds_tcp_server.h"
 #include "../receiver_data_server_logger.h"
+#include "../receiver_data_server_error.h"
 
 #include "asapo/io/io_factory.h"
 #include "asapo/common/networking.h"
 
 namespace asapo {
 
-RdsTcpServer::RdsTcpServer(std::string address, const AbstractLogger* logger) : io__{GenerateDefaultIO()}, log__{logger},
-    address_{std::move(address)} {}
+RdsTcpServer::RdsTcpServer(std::string address, const AbstractLogger *logger) : io__{GenerateDefaultIO()},
+                                                                                log__{logger},
+                                                                                address_{std::move(address)} {}
 
 Error RdsTcpServer::Initialize() {
-    Error err;
-    if (master_socket_ == kDisconnectedSocketDescriptor) {
-        master_socket_ = io__->CreateAndBindIPTCPSocketListener(address_, kMaxPendingConnections, &err);
-        if (!err) {
-            log__->Info("Started TCP ReceiverDataServer at '" + address_ + "'");
-        } else {
-            log__->Error("TCP ReceiverDataServer cannot listen on " + address_ + ": " + err->Explain());
-        }
+    if (master_socket_ != kDisconnectedSocketDescriptor) {
+        return GeneralErrorTemplates::kSimpleError.Generate("server was already initialized");
+    }
+    Error io_err;
+    master_socket_ = io__->CreateAndBindIPTCPSocketListener(address_, kMaxPendingConnections, &io_err);
+    if (!io_err) {
+        log__->Info(LogMessageWithFields("started TCP data server").Append("address", address_));
     } else {
-        err = GeneralErrorTemplates::kSimpleError.Generate("Server was already initialized");
+        auto err =
+            ReceiverDataServerErrorTemplates::kServerError.Generate("cannot start TCP data server", std::move(io_err));
+        err->AddDetails("address", address_);
+        return err;
     }
-    return err;
+    return nullptr;
 }
 
-ListSocketDescriptors RdsTcpServer::GetActiveSockets(Error* err) {
+ListSocketDescriptors RdsTcpServer::GetActiveSockets(Error *err) {
     std::vector<std::string> new_connections;
     auto sockets = io__->WaitSocketsActivity(master_socket_, &sockets_to_listen_, &new_connections, err);
-    for (auto& connection : new_connections) {
-        log__->Debug("new connection from " + connection);
+    for (auto &connection: new_connections) {
+        log__->Debug(LogMessageWithFields("new connection").Append("origin", connection));
     }
     return sockets;
 }
@@ -36,42 +40,45 @@ ListSocketDescriptors RdsTcpServer::GetActiveSockets(Error* err) {
 void RdsTcpServer::CloseSocket(SocketDescriptor socket) {
     sockets_to_listen_.erase(std::remove(sockets_to_listen_.begin(), sockets_to_listen_.end(), socket),
                              sockets_to_listen_.end());
-    log__->Debug("connection " + io__->AddressFromSocket(socket) + " closed");
+    log__->Debug(LogMessageWithFields("connection closed").Append("origin", io__->AddressFromSocket(socket)));
     io__->CloseSocket(socket, nullptr);
 }
 
-ReceiverDataServerRequestPtr RdsTcpServer::ReadRequest(SocketDescriptor socket, Error* err) {
+ReceiverDataServerRequestPtr RdsTcpServer::ReadRequest(SocketDescriptor socket, Error *err) {
     GenericRequestHeader header;
+    Error io_err;
+    *err = nullptr;
     io__->Receive(socket, &header,
-                  sizeof(GenericRequestHeader), err);
-    if (*err == GeneralErrorTemplates::kEndOfFile) {
+                  sizeof(GenericRequestHeader), &io_err);
+    if (io_err == GeneralErrorTemplates::kEndOfFile) {
+        *err = std::move(io_err);
         CloseSocket(socket);
         return nullptr;
-    } else if (*err) {
-        log__->Error("error getting next request from " + io__->AddressFromSocket(socket) + ": " + (*err)->
-                     Explain()
-                    );
+    } else if (io_err) {
+        *err = ReceiverDataServerErrorTemplates::kServerError.Generate("error getting next request", std::move(io_err));
+        (*err)->AddDetails("origin", io__->AddressFromSocket(socket));
         return nullptr;
     }
     return ReceiverDataServerRequestPtr{new ReceiverDataServerRequest{header, (uint64_t) socket}};
 }
 
-GenericRequests RdsTcpServer::ReadRequests(const ListSocketDescriptors& sockets) {
+GenericRequests RdsTcpServer::ReadRequests(const ListSocketDescriptors &sockets) {
     GenericRequests requests;
-    for (auto client : sockets) {
+    for (auto client: sockets) {
         Error err;
         auto request = ReadRequest(client, &err);
         if (err) {
             continue;
         }
-        log__->Debug("received request opcode: " + std::to_string(request->header.op_code) + " id: " + std::to_string(
-                         request->header.data_id));
+        log__->Debug(LogMessageWithFields("received request").
+            Append("operation", OpcodeToString(request->header.op_code)).
+            Append("id", request->header.data_id));
         requests.emplace_back(std::move(request));
     }
     return requests;
 }
 
-GenericRequests RdsTcpServer::GetNewRequests(Error* err) {
+GenericRequests RdsTcpServer::GetNewRequests(Error *err) {
     auto sockets = GetActiveSockets(err);
     if (*err) {
         return {};
@@ -82,7 +89,7 @@ GenericRequests RdsTcpServer::GetNewRequests(Error* err) {
 
 RdsTcpServer::~RdsTcpServer() {
     if (!io__) return; // need for test that override io__ to run
-    for (auto client : sockets_to_listen_) {
+    for (auto client: sockets_to_listen_) {
         io__->CloseSocket(client, nullptr);
     }
     io__->CloseSocket(master_socket_, nullptr);
@@ -92,28 +99,31 @@ void RdsTcpServer::HandleAfterError(uint64_t source_id) {
     CloseSocket(static_cast<int>(source_id));
 }
 
-Error RdsTcpServer::SendResponse(const ReceiverDataServerRequest* request, const GenericNetworkResponse* response) {
-    Error err;
-    io__->Send(static_cast<int>(request->source_id), response, sizeof(*response), &err);
-    if (err) {
-        log__->Error("cannot send to consumer" + err->Explain());
+Error RdsTcpServer::SendResponse(const ReceiverDataServerRequest *request, const GenericNetworkResponse *response) {
+    Error io_err, err;
+    auto socket = static_cast<int>(request->source_id);
+    io__->Send(socket, response, sizeof(*response), &io_err);
+    if (io_err) {
+        err = ReceiverDataServerErrorTemplates::kServerError.Generate("error sending response", std::move(io_err));
+        err->AddDetails("origin", io__->AddressFromSocket(socket));
     }
     return err;
 }
 
 Error
-RdsTcpServer::SendResponseAndSlotData(const ReceiverDataServerRequest* request, const GenericNetworkResponse* response,
-                                      const CacheMeta* cache_slot) {
+RdsTcpServer::SendResponseAndSlotData(const ReceiverDataServerRequest *request, const GenericNetworkResponse *response,
+                                      const CacheMeta *cache_slot) {
     Error err;
-
     err = SendResponse(request, response);
     if (err) {
         return err;
     }
-
-    io__->Send(static_cast<int>(request->source_id), cache_slot->addr, cache_slot->size, &err);
-    if (err) {
-        log__->Error("cannot send slot to worker" + err->Explain());
+    Error io_err;
+    auto socket = static_cast<int>(request->source_id);
+    io__->Send(socket, cache_slot->addr, cache_slot->size, &io_err);
+    if (io_err) {
+        err = ReceiverDataServerErrorTemplates::kServerError.Generate("error sending slot data", std::move(io_err));
+        err->AddDetails("origin", io__->AddressFromSocket(socket));
     }
     return err;
 }
diff --git a/receiver/src/receiver_data_server/receiver_data_server.cpp b/receiver/src/receiver_data_server/receiver_data_server.cpp
index 1dd311a908276934e5f0ef80c8baca839d203044..3ac686056c5e58887e388e3711e6a9283db230c9 100644
--- a/receiver/src/receiver_data_server/receiver_data_server.cpp
+++ b/receiver/src/receiver_data_server/receiver_data_server.cpp
@@ -24,12 +24,11 @@ void ReceiverDataServer::Run() {
         if (err == IOErrorTemplates::kTimeout) {
             continue;
         }
-
         if (!err) {
             err = request_pool__->AddRequests(std::move(requests));
         }
         if (err) {
-            log__->Error(std::string("receiver data server stopped: ") + err->Explain());
+            log__->Error(LogMessageWithFields("receiver data server stopped").Append("cause",std::move(err)));
             return;
         }
     }
diff --git a/receiver/src/receiver_data_server/receiver_data_server_error.h b/receiver/src/receiver_data_server/receiver_data_server_error.h
index 3e2e48601aa4b2d886ba216b7d731a15c593bfd7..be9bd21d5311d405f3cf8f1e993cb5a197a6db26 100644
--- a/receiver/src/receiver_data_server/receiver_data_server_error.h
+++ b/receiver/src/receiver_data_server/receiver_data_server_error.h
@@ -7,7 +7,8 @@ namespace asapo {
 
 enum class ReceiverDataServerErrorType {
     kMemoryPool,
-    kWrongRequest
+    kWrongRequest,
+    kServerError
 };
 
 using ReceiverDataServerErrorTemplate = ServiceErrorTemplate<ReceiverDataServerErrorType>;
@@ -21,6 +22,9 @@ auto const kWrongRequest = ReceiverDataServerErrorTemplate {
     "wrong request", ReceiverDataServerErrorType::kWrongRequest
 };
 
+auto const kServerError = ReceiverDataServerErrorTemplate {
+    "server error", ReceiverDataServerErrorType::kServerError
+};
 
 }
 }
diff --git a/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler.cpp b/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler.cpp
index f2409de029b5cb2cfcb49b469ed16a8d5b13f987..3ecec5d8811bb8551e6c671e08dc8befa9a2ecab 100644
--- a/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler.cpp
+++ b/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler.cpp
@@ -92,7 +92,10 @@ void ReceiverDataServerRequestHandler::ProcessRequestTimeoutUnlocked(GenericRequ
 
 void ReceiverDataServerRequestHandler::HandleInvalidRequest(const ReceiverDataServerRequest* receiver_request,
         NetworkErrorCode code) {
-    SendResponse(receiver_request, code);
+    auto err = SendResponse(receiver_request, code);
+    if (err) {
+        log__->Error(err);
+    }
     server_->HandleAfterError(receiver_request->source_id);
     switch (code) {
     case NetworkErrorCode::kNetErrorWrongRequest:
@@ -111,8 +114,8 @@ void ReceiverDataServerRequestHandler::HandleValidRequest(const ReceiverDataServ
         const CacheMeta* meta) {
     auto err = SendResponseAndSlotData(receiver_request, meta);
     if (err) {
-        log__->Error("failed to send slot:" + err->Explain());
         server_->HandleAfterError(receiver_request->source_id);
+        log__->Error(err);
     } else {
         statistics__->IncreaseRequestCounter();
         statistics__->IncreaseRequestDataVolume(receiver_request->header.data_size);
diff --git a/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler_factory.h b/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler_factory.h
index 45550411da798f66067fefec64b0a6d5465e7683..3c49704cf35c3173d2140c63878632aebb247599 100644
--- a/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler_factory.h
+++ b/receiver/src/receiver_data_server/request_handler/receiver_data_server_request_handler_factory.h
@@ -14,7 +14,7 @@ namespace asapo {
 class ReceiverDataServerRequestHandlerFactory : public RequestHandlerFactory {
   public:
     ReceiverDataServerRequestHandlerFactory(RdsNetServer* server, DataCache* data_cache, Statistics* statistics);
-    VIRTUAL std::unique_ptr<RequestHandler> NewRequestHandler(uint64_t thread_id, uint64_t* shared_counter) override;
+    ASAPO_VIRTUAL std::unique_ptr<RequestHandler> NewRequestHandler(uint64_t thread_id, uint64_t* shared_counter) override;
   private:
     RdsNetServer* server_;
     DataCache* data_cache_;
diff --git a/receiver/src/request.cpp b/receiver/src/request.cpp
index 0c4ed721b105dc68bf7a93c044ecc8b71150d212..25172a9abf261044f31f6fc272b5d9ea23aedb3f 100644
--- a/receiver/src/request.cpp
+++ b/receiver/src/request.cpp
@@ -18,7 +18,9 @@ Error Request::PrepareDataBufferAndLockIfNeeded() {
         try {
             data_buffer_.reset(new uint8_t[(size_t)request_header_.data_size]);
         } catch(std::exception& e) {
-            auto err = GeneralErrorTemplates::kMemoryAllocationError.Generate(e.what());
+            auto err = GeneralErrorTemplates::kMemoryAllocationError.Generate(
+                std::string("cannot allocate memory for request"));
+            err->AddDetails("reason", e.what())->AddDetails("size", std::to_string(request_header_.data_size));
             return err;
         }
     } else {
@@ -27,7 +29,9 @@ Error Request::PrepareDataBufferAndLockIfNeeded() {
         if (data_ptr) {
             slot_meta_ = slot;
         } else {
-            return GeneralErrorTemplates::kMemoryAllocationError.Generate("cannot allocate slot in cache");
+            auto err = GeneralErrorTemplates::kMemoryAllocationError.Generate("cannot allocate slot in cache");
+            err->AddDetails("size", std::to_string(request_header_.data_size));
+            return err;
         }
     }
     return nullptr;
diff --git a/receiver/src/request.h b/receiver/src/request.h
index 47e3eacffb43d6aaaebce476aaa8f9146f741ec6..a7d8402e8df7a694fdf263e57b8d5493675932a1 100644
--- a/receiver/src/request.h
+++ b/receiver/src/request.h
@@ -26,56 +26,56 @@ class RequestHandlerDbCheckRequest;
 
 class Request {
   public:
-    VIRTUAL Error Handle(ReceiverStatistics*);
-    VIRTUAL ~Request() = default;
+    ASAPO_VIRTUAL Error Handle(ReceiverStatistics*);
+    ASAPO_VIRTUAL ~Request() = default;
     Request() = delete;
     Request(const GenericRequestHeader& request_header, SocketDescriptor socket_fd, std::string origin_uri,
             DataCache* cache, const RequestHandlerDbCheckRequest* db_check_handler);
-    VIRTUAL void AddHandler(const ReceiverRequestHandler*);
-    VIRTUAL const RequestHandlerList& GetListHandlers() const;
-    VIRTUAL uint64_t GetDataSize() const;
-    VIRTUAL uint64_t GetMetaDataSize() const;
-    VIRTUAL uint64_t GetDataID() const;
-    VIRTUAL std::string GetFileName() const;
-    VIRTUAL std::string GetStream() const;
-    VIRTUAL std::string GetApiVersion() const;
-    VIRTUAL void* GetData() const;
-    VIRTUAL Opcode GetOpCode() const;
-    VIRTUAL const char* GetMessage() const;
-
-    VIRTUAL const std::string& GetOriginUri() const;
-    VIRTUAL const std::string& GetOriginHost() const;
-    VIRTUAL const std::string& GetMetaData() const;
-    VIRTUAL const std::string& GetBeamtimeId() const;
-    VIRTUAL void SetBeamtimeId(std::string beamtime_id);
-    VIRTUAL void SetBeamline(std::string beamline);
-
-    VIRTUAL void SetSourceType(SourceType);
-    VIRTUAL SourceType GetSourceType() const;
-
-    VIRTUAL const std::string& GetDataSource() const;
-    VIRTUAL void SetDataSource(std::string data_source);
-    VIRTUAL void SetMetadata(std::string metadata);
-
-    VIRTUAL void SetOnlinePath(std::string facility);
-    VIRTUAL void SetOfflinePath(std::string path);
-    VIRTUAL const std::string& GetOnlinePath() const;
-    VIRTUAL const std::string& GetOfflinePath() const;
-
-    VIRTUAL const std::string& GetBeamline() const;
-    VIRTUAL const CustomRequestData& GetCustomData() const;
-    VIRTUAL Error PrepareDataBufferAndLockIfNeeded();
-    VIRTUAL void UnlockDataBufferIfNeeded();
-    VIRTUAL  SocketDescriptor GetSocket() const ;
+    ASAPO_VIRTUAL void AddHandler(const ReceiverRequestHandler*);
+    ASAPO_VIRTUAL const RequestHandlerList& GetListHandlers() const;
+    ASAPO_VIRTUAL uint64_t GetDataSize() const;
+    ASAPO_VIRTUAL uint64_t GetMetaDataSize() const;
+    ASAPO_VIRTUAL uint64_t GetDataID() const;
+    ASAPO_VIRTUAL std::string GetFileName() const;
+    ASAPO_VIRTUAL std::string GetStream() const;
+    ASAPO_VIRTUAL std::string GetApiVersion() const;
+    ASAPO_VIRTUAL void* GetData() const;
+    ASAPO_VIRTUAL Opcode GetOpCode() const;
+    ASAPO_VIRTUAL const char* GetMessage() const;
+
+    ASAPO_VIRTUAL const std::string& GetOriginUri() const;
+    ASAPO_VIRTUAL const std::string& GetOriginHost() const;
+    ASAPO_VIRTUAL const std::string& GetMetaData() const;
+    ASAPO_VIRTUAL const std::string& GetBeamtimeId() const;
+    ASAPO_VIRTUAL void SetBeamtimeId(std::string beamtime_id);
+    ASAPO_VIRTUAL void SetBeamline(std::string beamline);
+
+    ASAPO_VIRTUAL void SetSourceType(SourceType);
+    ASAPO_VIRTUAL SourceType GetSourceType() const;
+
+    ASAPO_VIRTUAL const std::string& GetDataSource() const;
+    ASAPO_VIRTUAL void SetDataSource(std::string data_source);
+    ASAPO_VIRTUAL void SetMetadata(std::string metadata);
+
+    ASAPO_VIRTUAL void SetOnlinePath(std::string facility);
+    ASAPO_VIRTUAL void SetOfflinePath(std::string path);
+    ASAPO_VIRTUAL const std::string& GetOnlinePath() const;
+    ASAPO_VIRTUAL const std::string& GetOfflinePath() const;
+
+    ASAPO_VIRTUAL const std::string& GetBeamline() const;
+    ASAPO_VIRTUAL const CustomRequestData& GetCustomData() const;
+    ASAPO_VIRTUAL Error PrepareDataBufferAndLockIfNeeded();
+    ASAPO_VIRTUAL void UnlockDataBufferIfNeeded();
+    ASAPO_VIRTUAL  SocketDescriptor GetSocket() const ;
     std::unique_ptr<IO> io__;
     DataCache* cache__ = nullptr;
-    VIRTUAL uint64_t GetSlotId() const;
-    VIRTUAL bool WasAlreadyProcessed() const;
-    VIRTUAL void SetAlreadyProcessedFlag();
-    VIRTUAL void SetResponseMessage(std::string message, ResponseMessageType type);
-    VIRTUAL ResponseMessageType GetResponseMessageType() const;
-    VIRTUAL const std::string& GetResponseMessage() const;
-    VIRTUAL Error CheckForDuplicates();
+    ASAPO_VIRTUAL uint64_t GetSlotId() const;
+    ASAPO_VIRTUAL bool WasAlreadyProcessed() const;
+    ASAPO_VIRTUAL void SetAlreadyProcessedFlag();
+    ASAPO_VIRTUAL void SetResponseMessage(std::string message, ResponseMessageType type);
+    ASAPO_VIRTUAL ResponseMessageType GetResponseMessageType() const;
+    ASAPO_VIRTUAL const std::string& GetResponseMessage() const;
+    ASAPO_VIRTUAL Error CheckForDuplicates();
   private:
     const GenericRequestHeader request_header_;
     const SocketDescriptor socket_fd_;
diff --git a/receiver/src/request_handler/authorization_client.cpp b/receiver/src/request_handler/authorization_client.cpp
index 183fac7cb8fd1e2d2b9de895704f22f7993c5303..2a4ab650ee005ebc3bc949e60409bed9f96e99f6 100644
--- a/receiver/src/request_handler/authorization_client.cpp
+++ b/receiver/src/request_handler/authorization_client.cpp
@@ -22,15 +22,16 @@ Error ErrorFromAuthorizationServerResponse(Error err, const std::string response
     if (err) {
         return_err = asapo::ReceiverErrorTemplates::kInternalServerError.Generate(
             "cannot authorize request");
+        return_err->AddDetails("response", response);
         return_err->SetCause(std::move(err));
     } else {
         if (code != HttpCode::Unauthorized) {
             return_err = asapo::ReceiverErrorTemplates::kInternalServerError.Generate();
-            return_err->AddContext("response", response)->AddContext("errorCode", std::to_string(int(
-                code)));
         } else {
             return_err = asapo::ReceiverErrorTemplates::kAuthorizationFailure.Generate();
         }
+        return_err->AddDetails("response", response)->AddDetails("errorCode", std::to_string(int(
+            code)));
     }
     return return_err;
 }
@@ -45,7 +46,7 @@ Error CheckAccessType(SourceType source_type, const std::vector<std::string> &ac
         for (size_t i = 0; i < access_types.size(); i++) {
             types += (i > 0 ? "," : "") + access_types[i];
         }
-        err->AddContext("expected", source_type == SourceType::kProcessed ? "write" : "writeraw")->AddContext("have",
+        err->AddDetails("expected", source_type == SourceType::kProcessed ? "write" : "writeraw")->AddDetails("have",
                                                                                                               types);
         return err;
     }
@@ -56,6 +57,7 @@ Error ParseServerResponse(const std::string &response,
                           std::vector<std::string> *access_types,
                           AuthorizationData *data) {
     Error err;
+
     AuthorizationData creds;
     JsonStringParser parser{response};
     std::string stype;
@@ -69,7 +71,7 @@ Error ParseServerResponse(const std::string &response,
         (err = GetSourceTypeFromString(stype, &data->source_type)) ||
         (err = parser.GetString("beamline", &data->beamline));
     if (err) {
-        return ErrorFromAuthorizationServerResponse(std::move(err), "", code);
+        return ErrorFromAuthorizationServerResponse(std::move(err), response, code);
     }
     return nullptr;
 }
@@ -82,7 +84,7 @@ Error UpdateDataFromServerResponse(const std::string &response, HttpCode code, A
     err = ParseServerResponse(response, code, &access_types, data);
     if (err) {
         *data = old_data;
-        return ErrorFromAuthorizationServerResponse(std::move(err), response, code);
+        return err;
     }
 
     err = CheckAccessType(data->source_type, access_types);
diff --git a/receiver/src/request_handler/authorization_client.h b/receiver/src/request_handler/authorization_client.h
index 2312a41dbbad39b757cd6b29cd0e55fd24736ab2..30deb97db0085287d33a4ee8b3dbccb8b70d33e3 100644
--- a/receiver/src/request_handler/authorization_client.h
+++ b/receiver/src/request_handler/authorization_client.h
@@ -14,10 +14,10 @@ class AbstractLogger;
 class AuthorizationClient {
   public:
     AuthorizationClient();
-    VIRTUAL Error Authorize(const Request* request, AuthorizationData* data) const;
+    ASAPO_VIRTUAL Error Authorize(const Request* request, AuthorizationData* data) const;
     const AbstractLogger* log__;
     std::unique_ptr<HttpClient> http_client__;
-    VIRTUAL ~AuthorizationClient() = default;
+    ASAPO_VIRTUAL ~AuthorizationClient() = default;
   private:
     Error DoServerRequest(const std::string& request_string, std::string* response, HttpCode* code) const;
 
diff --git a/receiver/src/request_handler/file_processors/receive_file_processor.cpp b/receiver/src/request_handler/file_processors/receive_file_processor.cpp
index 3ff0d607e6c98e7d30d9f5dcc0e0e4cc12d0ae08..02da8d258b2471bbd524cdd71989eeea9ecacdf2 100644
--- a/receiver/src/request_handler/file_processors/receive_file_processor.cpp
+++ b/receiver/src/request_handler/file_processors/receive_file_processor.cpp
@@ -5,6 +5,7 @@
 #include "../../receiver_error.h"
 #include "../../request.h"
 #include "../../receiver_config.h"
+#include "../../receiver_logger.h"
 
 namespace asapo {
 
@@ -23,7 +24,8 @@ Error ReceiveFileProcessor::ProcessFile(const Request* request, bool overwrite)
     }
     err =  io__->ReceiveDataToFile(socket, root_folder, fname, (size_t) fsize, true, overwrite);
     if (!err) {
-        log__->Debug("received file of size " + std::to_string(fsize) + " to " + root_folder + kPathSeparator + fname);
+        log__->Debug(RequestLog("received file", request).Append("size",std::to_string(fsize)).Append("name",
+                root_folder + kPathSeparator + fname));
     }
     return err;
 }
diff --git a/receiver/src/request_handler/file_processors/write_file_processor.cpp b/receiver/src/request_handler/file_processors/write_file_processor.cpp
index 3dcc5ae0076116e780f2d4ebc76e0bdaebfab68f..e9886926f32c749692b6af7dc8d204144fb5b446 100644
--- a/receiver/src/request_handler/file_processors/write_file_processor.cpp
+++ b/receiver/src/request_handler/file_processors/write_file_processor.cpp
@@ -3,6 +3,7 @@
 #include "asapo/preprocessor/definitions.h"
 #include "../../receiver_error.h"
 #include "../../request.h"
+#include "../../receiver_logger.h"
 
 namespace asapo {
 
@@ -14,7 +15,9 @@ WriteFileProcessor::WriteFileProcessor() : FileProcessor()  {
 Error WriteFileProcessor::ProcessFile(const Request* request, bool overwrite) const {
     auto fsize = request->GetDataSize();
     if (fsize <= 0) {
-        return ReceiverErrorTemplates::kBadRequest.Generate("wrong file size");
+        auto err = ReceiverErrorTemplates::kBadRequest.Generate("wrong file size");
+        err->AddDetails("size",std::to_string(fsize));
+        return err;
     }
 
     auto data = request->GetData();
@@ -27,7 +30,8 @@ Error WriteFileProcessor::ProcessFile(const Request* request, bool overwrite) co
 
     err =  io__->WriteDataToFile(root_folder, fname, (uint8_t*)data, (size_t) fsize, true, overwrite);
     if (!err) {
-        log__->Debug("saved file of size " + std::to_string(fsize) + " to " + root_folder + kPathSeparator + fname);
+        log__->Debug(RequestLog("saved file", request).Append("size",std::to_string(fsize)).Append("name",
+                                                                                                      root_folder + kPathSeparator + fname));
     }
 
     return err;
diff --git a/receiver/src/request_handler/request_handler_authorize.cpp b/receiver/src/request_handler/request_handler_authorize.cpp
index f29681fec5f094fadd82c1394605d7d985d90933..bb158c38e4d168ea1d71292edc79091f196e02a7 100644
--- a/receiver/src/request_handler/request_handler_authorize.cpp
+++ b/receiver/src/request_handler/request_handler_authorize.cpp
@@ -15,7 +15,7 @@ Error RequestHandlerAuthorize::CheckVersion(const Request* request) const {
     int verService = VersionToNumber(GetReceiverApiVersion());
     if (verClient > verService) {
         auto err = asapo::ReceiverErrorTemplates::kUnsupportedClient.Generate();
-        err->AddContext("client",version_from_client)->AddContext("server",GetReceiverApiVersion());
+        err->AddDetails("client", version_from_client)->AddDetails("server", GetReceiverApiVersion());
         return err;
     }
     return nullptr;
diff --git a/receiver/src/request_handler/request_handler_db.cpp b/receiver/src/request_handler/request_handler_db.cpp
index fea6e82467656c132cd83a3300bb24f38f659d05..54b9fadc0c8e1c755cdf4bd8d7342e88325b86dc 100644
--- a/receiver/src/request_handler/request_handler_db.cpp
+++ b/receiver/src/request_handler/request_handler_db.cpp
@@ -43,13 +43,14 @@ Error RequestHandlerDb::GetDatabaseServerUri(std::string* uri) const {
                      Append("origin", GetReceiverConfig()->discovery_server));
         auto err = ReceiverErrorTemplates::kInternalServerError.Generate("http error while discovering database server",
                 std::move(http_err));
-        err->AddContext("discoveryEndpoint",GetReceiverConfig()->discovery_server);
+        err->AddDetails("discoveryEndpoint", GetReceiverConfig()->discovery_server);
         return err;
     }
 
     if (code != HttpCode::OK) {
         auto err =  ReceiverErrorTemplates::kInternalServerError.Generate("error when discover database server");
-        err->AddContext("discoveryEndpoint",GetReceiverConfig()->discovery_server)->AddContext("errorCode",std::to_string((int) code));
+        err->AddDetails("discoveryEndpoint", GetReceiverConfig()->discovery_server)->AddDetails("errorCode",
+                                                                                                std::to_string((int) code));
         return err;
     }
 
@@ -80,9 +81,9 @@ Error RequestHandlerDb::DBErrorToReceiverError(Error err) const {
     Error return_err;
     if (err == DBErrorTemplates::kWrongInput || err == DBErrorTemplates::kNoRecord
             || err == DBErrorTemplates::kJsonParseError) {
-        return_err = ReceiverErrorTemplates::kBadRequest.Generate();
+        return_err = ReceiverErrorTemplates::kBadRequest.Generate("error from database");
     } else {
-        return_err = ReceiverErrorTemplates::kInternalServerError.Generate();
+        return_err = ReceiverErrorTemplates::kInternalServerError.Generate("error from database");
     }
     return_err->SetCause(std::move(err));
     return return_err;
diff --git a/receiver/src/request_handler/request_handler_db_check_request.h b/receiver/src/request_handler/request_handler_db_check_request.h
index d28dc7dffc3f98b749ca6448a20a54862e7bce62..d08d356c651154f8dea2e7db758cb0a2be7f4010 100644
--- a/receiver/src/request_handler/request_handler_db_check_request.h
+++ b/receiver/src/request_handler/request_handler_db_check_request.h
@@ -9,7 +9,7 @@
 
 namespace asapo {
 
-class RequestHandlerDbCheckRequest FINAL : public RequestHandlerDb {
+class RequestHandlerDbCheckRequest ASAPO_FINAL : public RequestHandlerDb {
   public:
     RequestHandlerDbCheckRequest(std::string collection_name_prefix);
     Error ProcessRequest(Request* request) const override;
diff --git a/receiver/src/request_handler/request_handler_receive_data.cpp b/receiver/src/request_handler/request_handler_receive_data.cpp
index 8c840c0266e684acae2eb7f3e4801c710102e44c..e81a5e30402ef70c533930b0b4b662ad42933146 100644
--- a/receiver/src/request_handler/request_handler_receive_data.cpp
+++ b/receiver/src/request_handler/request_handler_receive_data.cpp
@@ -18,8 +18,16 @@ Error RequestHandlerReceiveData::ProcessRequest(Request* request) const {
     if (err) {
         return err;
     }
-    io__->Receive(request->GetSocket(), request->GetData(), (size_t) request->GetDataSize(), &err);
+    Error io_err;
+    io__->Receive(request->GetSocket(), request->GetData(), (size_t) request->GetDataSize(), &io_err);
+    if (io_err) {
+        err = ReceiverErrorTemplates::kProcessingError.Generate("cannot receive data",std::move(io_err));
+    }
     request->UnlockDataBufferIfNeeded();
+    if (err == nullptr) {
+        log__->Debug(RequestLog("received request data", request).Append("size",request->GetDataSize()));
+    }
+
     return err;
 }
 
diff --git a/receiver/src/request_handler/request_handler_receive_metadata.cpp b/receiver/src/request_handler/request_handler_receive_metadata.cpp
index 71aab0b7423f5c767aa03075b2eb5e25bea1f41d..6e0826a15012a7875853109d90cf6933d7c3b93f 100644
--- a/receiver/src/request_handler/request_handler_receive_metadata.cpp
+++ b/receiver/src/request_handler/request_handler_receive_metadata.cpp
@@ -15,9 +15,9 @@ Error RequestHandlerReceiveMetaData::ProcessRequest(Request* request) const {
     auto buf = std::unique_ptr<uint8_t[]> {new uint8_t[meta_size]};
     io__->Receive(request->GetSocket(), (void*) buf.get(), meta_size, &err);
     if (err) {
-        return err;
+        return ReceiverErrorTemplates::kProcessingError.Generate("cannot receive metadata",std::move(err));
     }
-
+    log__->Debug(RequestLog("received request metadata", request).Append("size",meta_size));
     request->SetMetadata(std::string((char*)buf.get(), meta_size));
     return nullptr;
 }
diff --git a/receiver/src/request_handler/requests_dispatcher.h b/receiver/src/request_handler/requests_dispatcher.h
index 5f4bcc4687703da64f7e12511f40ec8d24d836ae..67d9169df8007d87700480acd4d1783ddcad9f2a 100644
--- a/receiver/src/request_handler/requests_dispatcher.h
+++ b/receiver/src/request_handler/requests_dispatcher.h
@@ -15,9 +15,9 @@ namespace asapo {
 class RequestsDispatcher {
   public:
     RequestsDispatcher(SocketDescriptor socket_fd, std::string address, ReceiverStatistics* statistics, SharedCache cache, KafkaClient* kafka_client);
-    VIRTUAL Error ProcessRequest(const std::unique_ptr<Request>& request) const noexcept;
-    VIRTUAL std::unique_ptr<Request> GetNextRequest(Error* err) const noexcept;
-    VIRTUAL ~RequestsDispatcher() = default;
+    ASAPO_VIRTUAL Error ProcessRequest(const std::unique_ptr<Request>& request) const noexcept;
+    ASAPO_VIRTUAL std::unique_ptr<Request> GetNextRequest(Error* err) const noexcept;
+    ASAPO_VIRTUAL ~RequestsDispatcher() = default;
     ReceiverStatistics* statistics__;
     std::unique_ptr<IO> io__;
     const AbstractLogger* log__;
diff --git a/receiver/src/statistics/receiver_statistics.h b/receiver/src/statistics/receiver_statistics.h
index 4bffa21c1c9f0664f4190355934ed541ef873ec9..fed59994b622f125f20600f75f4df26b3b5f83fd 100644
--- a/receiver/src/statistics/receiver_statistics.h
+++ b/receiver/src/statistics/receiver_statistics.h
@@ -17,8 +17,8 @@ static const std::vector<std::string> kStatisticEntityNames = {"db_share", "disk
 class ReceiverStatistics : public Statistics {
   public:
     ReceiverStatistics(unsigned int write_interval = kDefaultStatisticWriteIntervalMs);
-    VIRTUAL void StartTimer(const StatisticEntity& entity) noexcept;
-    VIRTUAL void StopTimer() noexcept;
+    ASAPO_VIRTUAL void StartTimer(const StatisticEntity& entity) noexcept;
+    ASAPO_VIRTUAL void StopTimer() noexcept;
   private:
     StatisticsToSend PrepareStatisticsToSend() const noexcept override;
     void ResetStatistics() noexcept override;
diff --git a/receiver/src/statistics/statistics.h b/receiver/src/statistics/statistics.h
index c6dff99969a4b9b37c165cd1f31bb8450d2d4b2e..6a8ee7c575e9a6a482e2d5421198d7465cff888b 100644
--- a/receiver/src/statistics/statistics.h
+++ b/receiver/src/statistics/statistics.h
@@ -27,10 +27,10 @@ struct StatisticsToSend {
 class Statistics {
   public:
     explicit Statistics(unsigned int write_interval = kDefaultStatisticWriteIntervalMs);
-    VIRTUAL void SendIfNeeded(bool send_always = false) noexcept;
-    VIRTUAL void IncreaseRequestCounter() noexcept;
-    VIRTUAL void IncreaseRequestDataVolume(uint64_t transferred_data_volume) noexcept;
-    VIRTUAL void AddTag(const std::string& name, const std::string& value) noexcept;
+    ASAPO_VIRTUAL void SendIfNeeded(bool send_always = false) noexcept;
+    ASAPO_VIRTUAL void IncreaseRequestCounter() noexcept;
+    ASAPO_VIRTUAL void IncreaseRequestDataVolume(uint64_t transferred_data_volume) noexcept;
+    ASAPO_VIRTUAL void AddTag(const std::string& name, const std::string& value) noexcept;
     void SetWriteInterval(uint64_t interval_ms);
     std::vector<std::unique_ptr<StatisticsSender>> statistics_sender_list__;
     virtual ~Statistics() = default;
diff --git a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
index 22a0aae334f80e050b2d7470f614f2ee59276daa..a80f9f0c86dc125a5c4c33531633bb379dbc80e2 100644
--- a/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
+++ b/receiver/unittests/receiver_data_server/net_server/test_rds_tcp_server.cpp
@@ -126,10 +126,6 @@ void RdsTCPServerTests::MockReceiveRequest(bool ok ) {
             DoAll(SetArgPointee<3>(ok ? nullptr : asapo::IOErrorTemplates::kUnknownIOError.Generate().release()),
                   Return(0))
         );
-        if (!ok) {
-            std::string connected_uri = std::to_string(conn);
-            EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("request"), HasSubstr(connected_uri))));
-        }
     }
 }
 
@@ -163,8 +159,7 @@ void RdsTCPServerTests::ExpectReceiveOk() {
                 A_ReceiveData(asapo::kOpcodeGetBufferData, conn),
                 testing::ReturnArg<2>()
             ));
-        EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("request"), HasSubstr("id: " + std::to_string(conn)),
-                                             HasSubstr("opcode: " + std::to_string(asapo::kOpcodeGetBufferData)))));
+        EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("request"), HasSubstr(std::to_string(conn)))));
     }
 }
 
@@ -249,8 +244,6 @@ TEST_F(RdsTCPServerTests, SendResponse) {
             Return(1)
         ));
 
-    EXPECT_CALL(mock_logger, Error(HasSubstr("cannot send")));
-
     auto err = tcp_server.SendResponse(&expectedRequest, &tmp);
 
     ASSERT_THAT(err, Ne(nullptr));
@@ -272,8 +265,6 @@ TEST_F(RdsTCPServerTests, SendResponseAndSlotData_SendResponseError) {
                   testing::SetArgPointee<3>(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()),
                   Return(0)
               ));
-    EXPECT_CALL(mock_logger, Error(HasSubstr("cannot send")));
-
     auto err = tcp_server.SendResponseAndSlotData(&expectedRequest, &tmp, &expectedMeta);
 
     ASSERT_THAT(err, Ne(nullptr));
@@ -298,8 +289,6 @@ TEST_F(RdsTCPServerTests, SendResponseAndSlotData_SendError) {
             Return(0)
         ));
 
-    EXPECT_CALL(mock_logger, Error(HasSubstr("cannot send")));
-
     auto err = tcp_server.SendResponseAndSlotData(&expectedRequest, &tmp, &expectedMeta);
 
     ASSERT_THAT(err, Ne(nullptr));
diff --git a/receiver/unittests/receiver_data_server/request_handler/test_request_handler.cpp b/receiver/unittests/receiver_data_server/request_handler/test_request_handler.cpp
index fb85f18e56798d664529c1223ad59e39dce1c61d..b6a0951a3ccba60c097091aecabfb2306396c3ca 100644
--- a/receiver/unittests/receiver_data_server/request_handler/test_request_handler.cpp
+++ b/receiver/unittests/receiver_data_server/request_handler/test_request_handler.cpp
@@ -112,7 +112,7 @@ TEST_F(RequestHandlerTests, RequestAlwaysReady) {
 
 TEST_F(RequestHandlerTests, ProcessRequest_WrongOpCode) {
     request.header.op_code = asapo::kOpcodeUnknownOp;
-    MockSendResponse(asapo::kNetErrorWrongRequest, false);
+    MockSendResponse(asapo::kNetErrorWrongRequest, true);
     EXPECT_CALL(mock_net, HandleAfterError_t(expected_source_id));
 
     EXPECT_CALL(mock_logger, Error(HasSubstr("wrong request")));
@@ -124,7 +124,7 @@ TEST_F(RequestHandlerTests, ProcessRequest_WrongOpCode) {
 
 TEST_F(RequestHandlerTests, ProcessRequest_WrongClientVersion) {
     strcpy(request.header.api_version, "v0.2");
-    MockSendResponse(asapo::kNetErrorNotSupported, false);
+    MockSendResponse(asapo::kNetErrorNotSupported, true);
     EXPECT_CALL(mock_net, HandleAfterError_t(expected_source_id));
 
     EXPECT_CALL(mock_logger, Error(HasSubstr("unsupported client")));
diff --git a/receiver/unittests/receiver_data_server/test_receiver_data_server.cpp b/receiver/unittests/receiver_data_server/test_receiver_data_server.cpp
index b3ce88a40f7cc44333e7cac70eed46aa4ad6adb4..004915c1f8694fa9150574ddc61a48e4d491d10e 100644
--- a/receiver/unittests/receiver_data_server/test_receiver_data_server.cpp
+++ b/receiver/unittests/receiver_data_server/test_receiver_data_server.cpp
@@ -94,8 +94,6 @@ TEST_F(ReceiverDataServerTests, TimeoutGetNewRequests) {
     data_server.Run();
 }
 
-
-
 TEST_F(ReceiverDataServerTests, ErrorGetNewRequests) {
     EXPECT_CALL(mock_net, GetNewRequests_t(_)).WillOnce(
         DoAll(SetArgPointee<0>(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()),
@@ -103,9 +101,7 @@ TEST_F(ReceiverDataServerTests, ErrorGetNewRequests) {
              )
     );
 
-    auto errtext = asapo::IOErrorTemplates::kUnknownIOError.Generate()->Explain();
-
-    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("stopped"), HasSubstr(errtext))));
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("stopped"), HasSubstr("unknown error"))));
 
     data_server.Run();
 }
@@ -121,9 +117,7 @@ TEST_F(ReceiverDataServerTests, ErrorAddingRequests) {
         Return(asapo::ReceiverDataServerErrorTemplates::kMemoryPool.Generate("cannot add request to pool").release())
     );
 
-    auto errtext = asapo::ReceiverDataServerErrorTemplates::kMemoryPool.Generate("cannot add request to pool")->Explain();
-
-    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("stopped"), HasSubstr(errtext))));
+    EXPECT_CALL(mock_logger, Error(AllOf(HasSubstr("stopped"), HasSubstr("pool"))));
 
     data_server.Run();
 }
diff --git a/receiver/unittests/request_handler/file_processors/test_receive_file_processor.cpp b/receiver/unittests/request_handler/file_processors/test_receive_file_processor.cpp
index 0fe70c057a2890bc0cb0a01fe5c8d7aabb419014..518ef7d189c458066af15c17d7af883ce99db193 100644
--- a/receiver/unittests/request_handler/file_processors/test_receive_file_processor.cpp
+++ b/receiver/unittests/request_handler/file_processors/test_receive_file_processor.cpp
@@ -27,7 +27,7 @@ class ReceiveFileProcessorTests : public Test {
   public:
     ReceiveFileProcessor processor;
     NiceMock<MockIO> mock_io;
-    std::unique_ptr<MockRequest> mock_request;
+    std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<asapo::MockLogger> mock_logger;
     SocketDescriptor expected_socket_id = SocketDescriptor{1};
     std::string expected_file_name = std::string("processed") + asapo::kPathSeparator + std::string("2");
@@ -54,8 +54,10 @@ class ReceiveFileProcessorTests : public Test {
         asapo::ReceiverConfig test_config;
         asapo::SetReceiverConfig(test_config, "none");
         processor.log__ = &mock_logger;
-        mock_request.reset(new MockRequest{request_header, 1, "", nullptr});
+        mock_request.reset(new NiceMock<MockRequest>{request_header, 1, "", nullptr});
         processor.io__ = std::unique_ptr<asapo::IO> {&mock_io};
+        SetDefaultRequestCalls(mock_request.get(),expected_beamtime_id);
+
     }
     void TearDown() override {
         processor.io__.release();
@@ -118,8 +120,6 @@ TEST_F(ReceiveFileProcessorTests, WritesToLog) {
     .WillOnce(Return(nullptr));
 
     EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("received file"),
-                                         HasSubstr(expected_file_name),
-                                         HasSubstr(expected_beamtime_id),
                                          HasSubstr(std::to_string(expected_file_size))
                                         )
                                   )
diff --git a/receiver/unittests/request_handler/file_processors/test_write_file_processor.cpp b/receiver/unittests/request_handler/file_processors/test_write_file_processor.cpp
index bead654171b2962415c58e6e9c28aa08c6c9d4b8..5a4e77115230a99db29e52e9e3edd96d0179daf4 100644
--- a/receiver/unittests/request_handler/file_processors/test_write_file_processor.cpp
+++ b/receiver/unittests/request_handler/file_processors/test_write_file_processor.cpp
@@ -27,7 +27,7 @@ class WriteFileProcessorTests : public Test {
   public:
     WriteFileProcessor processor;
     NiceMock<MockIO> mock_io;
-    std::unique_ptr<MockRequest> mock_request;
+    std::unique_ptr<NiceMock<MockRequest>> mock_request;
     NiceMock<asapo::MockLogger> mock_logger;
     std::string expected_file_name = std::string("raw") + asapo::kPathSeparator + std::string("2");
     asapo::SourceType expected_source_type = asapo::SourceType::kRaw;
@@ -53,8 +53,10 @@ class WriteFileProcessorTests : public Test {
         asapo::ReceiverConfig test_config;
         asapo::SetReceiverConfig(test_config, "none");
         processor.log__ = &mock_logger;
-        mock_request.reset(new MockRequest{request_header, 1, "", nullptr});
+        mock_request.reset(new NiceMock<MockRequest>{request_header, 1, "", nullptr});
         processor.io__ = std::unique_ptr<asapo::IO> {&mock_io};
+        SetDefaultRequestCalls(mock_request.get(),expected_beamtime_id);
+
     }
     void TearDown() override {
         processor.io__.release();
@@ -114,15 +116,8 @@ TEST_F(WriteFileProcessorTests, WritesToLog) {
 
     ExpectFileWrite(nullptr);
 
-    EXPECT_CALL(mock_logger, Debug(AllOf(HasSubstr("saved file"),
-                                         HasSubstr(expected_file_name),
-                                         HasSubstr(expected_beamtime_id),
-                                         HasSubstr(expected_facility),
-                                         HasSubstr(expected_year),
-                                         HasSubstr(std::to_string(expected_file_size))
-                                        )
-                                  )
-               );
+    EXPECT_CALL(mock_logger, Debug(HasSubstr("saved file")));
+
     auto err = processor.ProcessFile(mock_request.get(), expected_overwrite);
     ASSERT_THAT(err, Eq(nullptr));
 }
diff --git a/receiver/unittests/request_handler/test_request_handler_receive_data.cpp b/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
index d3e679b7c4f57a3459975acf9e0ff18698001b82..8d734e18cad50406bf4e1b06dad2acb6da103e30 100644
--- a/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_receive_data.cpp
@@ -109,7 +109,7 @@ TEST_F(ReceiveDataHandlerTests, HandleDoesNotReceiveDataWhenMetadataOnlyWasSent)
 TEST_F(ReceiveDataHandlerTests, HandleReturnsErrorOnDataReceive) {
     ExpectReceiveData(false);
     auto err = handler.ProcessRequest(request.get());
-    ASSERT_THAT(err, Eq(asapo::IOErrorTemplates::kReadError));
+    ASSERT_THAT(err, Eq(asapo::ReceiverErrorTemplates::kProcessingError));
 }
 
 TEST_F(ReceiveDataHandlerTests, HandleReturnsOK) {
diff --git a/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp b/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
index 99b3e4ed5136293068adacbfa3413e763c858266..0b15844d47183ea15cb24edda92596dafa46030d 100644
--- a/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
+++ b/receiver/unittests/request_handler/test_request_handler_receive_metadata.cpp
@@ -87,7 +87,7 @@ TEST_F(ReceiveMetaDataHandlerTests, CheckStatisticEntity) {
 TEST_F(ReceiveMetaDataHandlerTests, HandleReturnsErrorOnMetaDataReceive) {
     ExpectReceiveMetaData(false);
     auto err = handler.ProcessRequest(request.get());
-    ASSERT_THAT(err, Eq(asapo::IOErrorTemplates::kReadError));
+    ASSERT_THAT(err, Eq(asapo::ReceiverErrorTemplates::kProcessingError));
 }
 
 TEST_F(ReceiveMetaDataHandlerTests, HandleReturnsOK) {
diff --git a/tests/automatic/bug_fixes/CMakeLists.txt b/tests/automatic/bug_fixes/CMakeLists.txt
index 2dba7e68ea370aba06b84e0f85451c68f9432af0..943a450a9f62a60e55a7c2762ef40fe7bbd9b5a7 100644
--- a/tests/automatic/bug_fixes/CMakeLists.txt
+++ b/tests/automatic/bug_fixes/CMakeLists.txt
@@ -5,6 +5,7 @@ if (UNIX)
         add_subdirectory(python_deadlock)
         add_subdirectory(streamlist_python_multithread)
         add_subdirectory(error-sending-data-using-callback-method)
+        add_subdirectory(python_segfault_dataview)
     endif()
 endif()
 
diff --git a/tests/automatic/bug_fixes/python_segfault_dataview/CMakeLists.txt b/tests/automatic/bug_fixes/python_segfault_dataview/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..70a676bbf19d1aa79812d38ff7026cdfbfc23253
--- /dev/null
+++ b/tests/automatic/bug_fixes/python_segfault_dataview/CMakeLists.txt
@@ -0,0 +1,17 @@
+set(TARGET_NAME python_segfault_dataview_producer)
+
+
+find_package (Python3 REQUIRED)
+
+if (UNIX)
+    get_target_property(PYTHON_LIBS_PRODUCER python-lib-producer BINARY_DIR)
+else()
+    get_target_property(PYTHON_LIBS_PRODUCER asapo_producer BINARY_DIR)
+endif()
+
+file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/producer_segfault.py TEST_SCRIPT )
+
+
+add_script_test("${TARGET_NAME}" "${TEST_SCRIPT} ${PYTHON_LIBS_PRODUCER} ${Python3_EXECUTABLE}" nomem)
+
+
diff --git a/tests/automatic/bug_fixes/python_segfault_dataview/check_linux.sh b/tests/automatic/bug_fixes/python_segfault_dataview/check_linux.sh
new file mode 100644
index 0000000000000000000000000000000000000000..409d69f55e9fcabf2928e4506a68d7a3bb80a02d
--- /dev/null
+++ b/tests/automatic/bug_fixes/python_segfault_dataview/check_linux.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+set -e
+
+trap Cleanup EXIT
+
+Cleanup() {
+    echo cleanup
+    rm -rf ${receiver_root_folder}
+    echo "db.dropDatabase()" | mongo ${beamtime_id}_detector
+}
+
+
+
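+# arguments come from add_script_test in CMakeLists.txt:
+# $1 - producer_segfault.py, $2 - producer Python module directory, $3 - Python3 executable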
+export PYTHONPATH=$2:${PYTHONPATH}
+export Python3_EXECUTABLE=$3
+
+beamline=test
+receiver_root_folder=/tmp/asapo/receiver/files
+facility=test_facility
+year=2019
+beamtime_id=asapo_test
+receiver_folder=${receiver_root_folder}/${facility}/gpfs/${beamline}/${year}/data/${beamtime_id}
+endpoint=127.0.0.1:8400
+
+mkdir -p ${receiver_folder}
+echo ${receiver_folder}
+$Python3_EXECUTABLE $1 $endpoint $beamtime_id
+
+
diff --git a/tests/automatic/bug_fixes/python_segfault_dataview/jira_issue.txt b/tests/automatic/bug_fixes/python_segfault_dataview/jira_issue.txt
new file mode 100644
index 0000000000000000000000000000000000000000..29d8a9a0eccbda590ce46ba37c2a0f8d249e27d6
--- /dev/null
+++ b/tests/automatic/bug_fixes/python_segfault_dataview/jira_issue.txt
@@ -0,0 +1 @@
+https://agira.desy.de/browse/ASAPO-159
\ No newline at end of file
diff --git a/tests/automatic/bug_fixes/python_segfault_dataview/producer_segfault.py b/tests/automatic/bug_fixes/python_segfault_dataview/producer_segfault.py
new file mode 100644
index 0000000000000000000000000000000000000000..61ca56fae15873edc4fdd1e9748a3f4a4e502da2
--- /dev/null
+++ b/tests/automatic/bug_fixes/python_segfault_dataview/producer_segfault.py
@@ -0,0 +1,24 @@
+from __future__ import print_function
+
+import asapo_producer
+import time
+import numpy as np
+import sys
+
+endpoint, beamtime = sys.argv[1:]
+
+def callback(payload, err):
+    if isinstance(err, asapo_producer.AsapoServerWarning):
+        print("successfuly sent, but with warning from server: ", payload, err)
+    elif err is not None:
+        print("could not sent: ", payload, err)
+    else:
+        print("successfuly sent: ", payload)
+
+producer = asapo_producer.create_producer(endpoint,'processed', beamtime, 'auto', "data_source", '', 4, 5000)
+data = np.random.random((100, 100))
+
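+# regression test for ASAPO-159 (see jira_issue.txt): sending a numpy view whose
+# memory is owned by another array used to segfault the Python producer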
+producer.send(1, exposed_path="processed/foo.dat", stream="foo", data=data.view(np.int8), callback=callback)
+producer.wait_requests_finished(5000)
diff --git a/tests/automatic/producer/aai/check_windows.bat b/tests/automatic/producer/aai/check_windows.bat
index 28a3e3c9d0e230b2f91e1bba3df56382218b17d9..36373fedab7f1d50403c69f39b8aec901b79a2f5 100644
--- a/tests/automatic/producer/aai/check_windows.bat
+++ b/tests/automatic/producer/aai/check_windows.bat
@@ -33,7 +33,7 @@ echo %NUM% | findstr 3 || goto error
 for /F %%N in ('find /C "reauthorization" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 1 || goto error
 
-for /F %%N in ('find /C "} error: server warning, context: response:duplicated request" ^< "out"') do set NUM=%%N
+for /F %%N in ('find /C "} error: server warning, details: response:duplicated request" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 1 || goto error
 
 goto :clean
diff --git a/tests/automatic/producer/python_api/check_windows.bat b/tests/automatic/producer/python_api/check_windows.bat
index 5920874f07bc788076de2dc13087d312561e8fb8..78389910ead22f0ccb8ac2692679345c9c18ec4a 100644
--- a/tests/automatic/producer/python_api/check_windows.bat
+++ b/tests/automatic/producer/python_api/check_windows.bat
@@ -20,13 +20,13 @@ set NUM=0
 for /F %%N in ('find /C "successfuly sent" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 17 || goto error
 
-for /F %%N in ('find /C "} error: wrong input, context: response:error: Bad request, message: already have record with same id" ^< "out"') do set NUM=%%N
+for /F %%N in ('find /C "} error: wrong input, details: response:error: Bad request, message: already have record with same id" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 2 || goto error
 
-for /F %%N in ('find /C "} error: server warning, context: response:ignoring duplicate record" ^< "out"') do set NUM=%%N
+for /F %%N in ('find /C "} error: server warning, details: response:ignoring duplicate record" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 2 || goto error
 
-for /F %%N in ('find /C "} error: server warning, context: response:duplicated request" ^< "out"') do set NUM=%%N
+for /F %%N in ('find /C "} error: server warning, details: response:duplicated request" ^< "out"') do set NUM=%%N
 echo %NUM% | findstr 1 || goto error
 
 
diff --git a/tests/automatic/system_io/read_file_content/CMakeLists.txt b/tests/automatic/system_io/read_file_content/CMakeLists.txt
index 5f302cdb12e8f9f74b452a88b5370b04dd26761e..c4a37e8638a3f273847a6177be065409fd7098d5 100644
--- a/tests/automatic/system_io/read_file_content/CMakeLists.txt
+++ b/tests/automatic/system_io/read_file_content/CMakeLists.txt
@@ -17,6 +17,6 @@ set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 add_test_setup_cleanup(${TARGET_NAME})
 add_integration_test(${TARGET_NAME} readfile "test/1 123")
 add_integration_test(${TARGET_NAME} readfile_unkown_size "test/2 unknown_size")
-add_integration_test(${TARGET_NAME} filenotfound "test_notexist error:Nosuchfileordirectory,context:name:test_notexist")
-add_integration_test(${TARGET_NAME} filenoaccess "file_noaccess error:Permissiondenied,context:name:file_noaccess")
+add_integration_test(${TARGET_NAME} filenotfound "test_notexist error:nosuchfileordirectory,details:name:test_notexist")
+add_integration_test(${TARGET_NAME} filenoaccess "file_noaccess error:permissiondenied,details:name:file_noaccess")
 
diff --git a/tests/automatic/system_io/read_folder_content/CMakeLists.txt b/tests/automatic/system_io/read_folder_content/CMakeLists.txt
index 42bc1ec48ee536502e96f50e1ae26b59fac96683..c6938f921ac62bc9d31e8fdb0e040ad69fbfd450 100644
--- a/tests/automatic/system_io/read_folder_content/CMakeLists.txt
+++ b/tests/automatic/system_io/read_folder_content/CMakeLists.txt
@@ -27,6 +27,6 @@ ELSE()
 ENDIF(WIN32)
 
 
-add_integration_test(${TARGET_NAME} foldernotfound "test_notexist error:Nosuchfileordirectory,context:name:test_notexist")
-add_integration_test(${TARGET_NAME} foldernoaccess "test_noaccess1 error:Permissiondenied,context:name:test_noaccess1")
+add_integration_test(${TARGET_NAME} foldernotfound "test_notexist error:nosuchfileordirectory,details:name:test_notexist")
+add_integration_test(${TARGET_NAME} foldernoaccess "test_noaccess1 error:permissiondenied,details:name:test_noaccess1")
 
diff --git a/tests/automatic/system_io/read_string_from_file/CMakeLists.txt b/tests/automatic/system_io/read_string_from_file/CMakeLists.txt
index 8cfa3aed8aecbc37b1b8643f8812e9687c5f433d..35645add62b86d0e66e05ca6e7221519efbf7e41 100644
--- a/tests/automatic/system_io/read_string_from_file/CMakeLists.txt
+++ b/tests/automatic/system_io/read_string_from_file/CMakeLists.txt
@@ -16,6 +16,6 @@ set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 
 add_test_setup_cleanup(${TARGET_NAME})
 add_integration_test(${TARGET_NAME} readfile "test/1 123")
-add_integration_test(${TARGET_NAME} filenotfound "test_notexist error:Nosuchfileordirectory,context:name:test_notexist")
-add_integration_test(${TARGET_NAME} filenoaccess "file_noaccess error:Permissiondenied,context:name:file_noaccess")
+add_integration_test(${TARGET_NAME} filenotfound "test_notexist error:nosuchfileordirectory,details:name:test_notexist")
+add_integration_test(${TARGET_NAME} filenoaccess "file_noaccess error:permissiondenied,details:name:file_noaccess")
 
diff --git a/tests/automatic/system_io/read_subdirectories/CMakeLists.txt b/tests/automatic/system_io/read_subdirectories/CMakeLists.txt
index ab9b1908fa01a83bee13dfd7bf19ea9332c19cd4..4ec44441e63fe7601eda52d39450c788fc4a43aa 100644
--- a/tests/automatic/system_io/read_subdirectories/CMakeLists.txt
+++ b/tests/automatic/system_io/read_subdirectories/CMakeLists.txt
@@ -27,6 +27,6 @@ ELSE()
 ENDIF(WIN32)
 
 
-add_integration_test(${TARGET_NAME} foldernotfound "test_notexist error:Nosuchfileordirectory,context:name:test_notexist")
-add_integration_test(${TARGET_NAME} foldernoaccess "test_noaccess1 error:Permissiondenied,context:name:test_noaccess1")
+add_integration_test(${TARGET_NAME} foldernotfound "test_notexist error:nosuchfileordirectory,details:name:test_notexist")
+add_integration_test(${TARGET_NAME} foldernoaccess "test_noaccess1 error:permissiondenied,details:name:test_noaccess1")
 
diff --git a/tests/automatic/system_io/write_data_to_file/CMakeLists.txt b/tests/automatic/system_io/write_data_to_file/CMakeLists.txt
index 3e7a13c469495cc372c2132356ca9f1efc8d3993..c062aa99347424f9b35ec8ec7b87cf102bf9f559 100644
--- a/tests/automatic/system_io/write_data_to_file/CMakeLists.txt
+++ b/tests/automatic/system_io/write_data_to_file/CMakeLists.txt
@@ -22,5 +22,5 @@ else ()
 endif()
 
 add_integration_test(${TARGET_NAME} writetwice "test_file ok dummy" nomem)
-add_integration_test(${TARGET_NAME} dirnoaccess "test_noaccess/test_file error error:Permissiondenied,context:name:test_noaccess/test_file" nomem)
+add_integration_test(${TARGET_NAME} dirnoaccess "test_noaccess/test_file error error:permissiondenied,details:name:test_noaccess/test_file" nomem)