diff --git a/.gitignore b/.gitignore
index 0b81b3f019a93de98db02b261d7068a15713581b..31c1f74c1d71c595b2e3fca4eae400f611cff591 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,9 +19,6 @@
 .dylib
 .dll
 
-# Fortran module files
-*.mod
-*.smod
 
 # Compiled Static libraries
 .lai
@@ -141,3 +138,8 @@ common/go/src/asapo_common/version/version_lib.go
 
 .terraform
 terraform.tfstate*
+
+
+#helm chart
+deploy/asapo_helm_chart/asapo/Chart.lock
+deploy/asapo_helm_chart/asapo/charts/*.tgz
\ No newline at end of file
diff --git a/CMakeModules/coverage_go.sh b/CMakeModules/coverage_go.sh
index 6a92bc97054fbd267fa9a6618ec2954931778d1f..13ddb7a413da1f36d24fd24021a75760a25bc082 100755
--- a/CMakeModules/coverage_go.sh
+++ b/CMakeModules/coverage_go.sh
@@ -1,20 +1,23 @@
 #!/bin/bash
 
-OUT_DIR=$1
-ASAPO_MINIMUM_COVERAGE=$2
+SOURCE_DIR=$1
+OUT_DIR=$2
+ASAPO_MINIMUM_COVERAGE=$3
 
-export GOPATH=$GOPATH:$3
+export GOPATH=$GOPATH:$4
 
 echo $OUT_DIR
 
-mapfile -t PACKAGES < <( find ./src -type d -not -path '*/\.*' )
+touch $OUT_DIR/coverage.out
+
+mapfile -t PACKAGES < <( find $SOURCE_DIR/src -type d -not -path '*/\.*' )
 
 echo "mode: count" > $OUT_DIR/coverage-all.out
 for pkg in ${PACKAGES[@]}
 do
 #	echo $pkg
 	go test -coverprofile=$OUT_DIR/coverage.out -tags test $pkg #>/dev/null 2>&1
-	tail -n +2 $OUT_DIR/coverage.out >> $OUT_DIR/coverage-all.out #2>/dev/null
+	tail -n +2 $OUT_DIR/coverage.out | grep -v kubernetes >> $OUT_DIR/coverage-all.out #2>/dev/null
 done
 
 coverage=`go tool cover -func=$OUT_DIR/coverage-all.out | grep total | cut -d ")" -f 2 | cut -d "." -f 1`
diff --git a/CMakeModules/prepare_asapo.cmake b/CMakeModules/prepare_asapo.cmake
index 6249467e58838f0c041af6b76796ac7a507ff5aa..ce5c33d92ff39fb8ee26332194512844b64c79e7 100644
--- a/CMakeModules/prepare_asapo.cmake
+++ b/CMakeModules/prepare_asapo.cmake
@@ -30,10 +30,16 @@ function(prepare_asapo)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.win.in receiver.json.tpl @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_win.nmd nginx_kill.nmd @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/authorizer_settings.json.tpl.win authorizer.json.tpl COPYONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/start_services.bat start_services.bat COPYONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/stop_services.bat stop_services.bat COPYONLY)
+
     else()
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/receiver.json.tpl.lin.in receiver.json.tpl @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/config/nomad/nginx_kill_lin.nmd nginx_kill.nmd @ONLY)
         configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/settings/authorizer_settings.json.tpl.lin authorizer.json.tpl COPYONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/start_services.sh start_services.sh COPYONLY)
+        configure_file(${CMAKE_SOURCE_DIR}/tests/automatic/common_scripts/stop_services.sh stop_services.sh COPYONLY)
+
     endif()
 
     configure_file(${CMAKE_SOURCE_DIR}/config/nomad/receiver.nmd.in  receiver.nmd @ONLY)
diff --git a/CMakeModules/testing_go.cmake b/CMakeModules/testing_go.cmake
index 45af50ce4cf3e9c49b04276a482c4dff4a444fb4..dbba4bfe2bde4687bc416bf0da08314b87e613d3 100644
--- a/CMakeModules/testing_go.cmake
+++ b/CMakeModules/testing_go.cmake
@@ -9,10 +9,10 @@ if (BUILD_TESTS)
             "--trace-children=yes --leak-check=full --error-exitcode=1 --suppressions=${CMAKE_SOURCE_DIR}/tests/valgrind.suppressions")
 endif ()
 
-function(gotest target test_source_files)
+function(gotest target source_dir test_source_files)
     if (BUILD_TESTS)
                 add_test(NAME test-${target} COMMAND go test ${test_source_files}
-                WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+                WORKING_DIRECTORY ${source_dir})
         set_property(
                 TEST
                 test-${target}
@@ -22,8 +22,8 @@ function(gotest target test_source_files)
         if (CMAKE_COMPILER_IS_GNUCXX)
         add_test(NAME coveragetest-${target}
                         COMMAND ${CMAKE_MODULE_PATH}/coverage_go.sh
-                        ${CMAKE_CURRENT_BINARY_DIR} ${ASAPO_MINIMUM_COVERAGE} ${gopath}
-                        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+                        ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${ASAPO_MINIMUM_COVERAGE} ${gopath}
+                        WORKING_DIRECTORY ${source_dir})
         set_tests_properties(coveragetest-${target} PROPERTIES LABELS "coverage;all")
         endif()
     endif ()
diff --git a/asapo_tools/CMakeLists.txt b/asapo_tools/CMakeLists.txt
index de647a4f819390df275048a7adcec7a0e3ecbc53..16b8d29cf1d5ea4e95f31f64e5707d248ac890e7 100644
--- a/asapo_tools/CMakeLists.txt
+++ b/asapo_tools/CMakeLists.txt
@@ -34,4 +34,4 @@ set_target_properties(${TARGET_NAME} PROPERTIES EXENAME ${CMAKE_CURRENT_BINARY_D
 
 install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} DESTINATION bin)
 
-gotest(${TARGET_NAME} "./...")
+gotest(${TARGET_NAME} "${CMAKE_CURRENT_SOURCE_DIR}" "./...")
diff --git a/authorizer/CMakeLists.txt b/authorizer/CMakeLists.txt
index a6cd2b3c79b20c30b09bf6af01b206e5999d2276..62e4a5c2ba48e9919e9c97a66b3781b5f9335277 100644
--- a/authorizer/CMakeLists.txt
+++ b/authorizer/CMakeLists.txt
@@ -35,6 +35,6 @@ set_target_properties(asapo-authorizer PROPERTIES EXENAME ${CMAKE_CURRENT_BINARY
 
 install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} DESTINATION bin)
 
-gotest(${TARGET_NAME} "./...")
+gotest(${TARGET_NAME} "${CMAKE_CURRENT_SOURCE_DIR}" "./...")
 #go_integration_test(${TARGET_NAME}-connectdb "./..." "MongoDBConnect")
 #go_integration_test(${TARGET_NAME}-nextrecord "./..." "MongoDBNext")
diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt
index c010ae09f84a4e794ebe0524c29b56e3b20b730a..ba444798ec62eb86a006cf964b1c15912dc21694 100644
--- a/broker/CMakeLists.txt
+++ b/broker/CMakeLists.txt
@@ -35,6 +35,6 @@ set_target_properties(asapo-broker PROPERTIES EXENAME ${CMAKE_CURRENT_BINARY_DIR
 
 install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} DESTINATION bin)
 
-gotest(${TARGET_NAME} "./...")
+gotest(${TARGET_NAME} "${CMAKE_CURRENT_SOURCE_DIR}" "./...")
 go_integration_test(${TARGET_NAME}-connectdb "./..." "MongoDBConnect")
 go_integration_test(${TARGET_NAME}-nextrecord "./..." "MongoDBNext")
diff --git a/broker/src/asapo_broker/server/server.go b/broker/src/asapo_broker/server/server.go
index 1f8fe1ce51c14a516e4bd204b708590a41af403c..cc69db83fe47aa379794d2f3edfde231eb992b5b 100644
--- a/broker/src/asapo_broker/server/server.go
+++ b/broker/src/asapo_broker/server/server.go
@@ -42,7 +42,7 @@ type discoveryAPI struct {
 var discoveryService discoveryAPI
 
 func (api *discoveryAPI) GetMongoDbAddress() (string, error) {
-	resp, err := api.Client.Get(api.baseURL + "/mongo")
+	resp, err := api.Client.Get(api.baseURL + "/asapo-mongodb")
 	if err != nil {
 		return "", err
 	}
diff --git a/broker/src/asapo_broker/server/server_test.go b/broker/src/asapo_broker/server/server_test.go
index fc602642e576a439fb14f2a5deb305f24a7413cb..39967ed7591e81cb5c6af5fe0dcb401cb1daaced 100644
--- a/broker/src/asapo_broker/server/server_test.go
+++ b/broker/src/asapo_broker/server/server_test.go
@@ -67,7 +67,7 @@ func TestInitDBWithAutoAddress(t *testing.T) {
 
 	settings.DatabaseServer = "auto"
 	mock_server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		assert.Equal(t, req.URL.String(), "/mongo", "request string")
+		assert.Equal(t, req.URL.String(), "/asapo-mongodb", "request string")
 		rw.Write([]byte(mongo_address))
 	}))
 	defer mock_server.Close()
@@ -85,7 +85,7 @@ func TestInitDBWithAutoAddress(t *testing.T) {
 func TestReconnectDB(t *testing.T) {
 	mongo_address := "0.0.0.0:0000"
 	mock_server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		assert.Equal(t, req.URL.String(), "/mongo", "request string")
+		assert.Equal(t, req.URL.String(), "/asapo-mongodb", "request string")
 		rw.Write([]byte(mongo_address))
 	}))
 	discoveryService = discoveryAPI{mock_server.Client(), mock_server.URL}
diff --git a/common/cpp/src/http_client/curl_http_client.cpp b/common/cpp/src/http_client/curl_http_client.cpp
index ca543c0a69cf7761040cb50699d8465f93f43ee5..829c6a7f104b49c426a5871efe49237fa486b3f4 100644
--- a/common/cpp/src/http_client/curl_http_client.cpp
+++ b/common/cpp/src/http_client/curl_http_client.cpp
@@ -33,7 +33,7 @@ size_t curl_write( void* ptr, size_t size, size_t nmemb, void* data_container) {
         if (container->bytes_received + nbytes > container->array_size) {
             return -1;
         }
-        memcpy(container->p_array->get(), ptr, nbytes);
+        memcpy(container->p_array->get()+container->bytes_received, ptr, nbytes);
         container->bytes_received += nbytes;
         break;
     case CurlDataMode::file:
diff --git a/common/go/src/asapo_common/go.mod b/common/go/src/asapo_common/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..ae7d8cf1e05a99c0ed5c8dc2d5383e570f58ca86
--- /dev/null
+++ b/common/go/src/asapo_common/go.mod
@@ -0,0 +1,3 @@
+module asapo_common
+
+go 1.14
diff --git a/common/go/src/asapo_common/utils/structs.go b/common/go/src/asapo_common/utils/structs.go
new file mode 100644
index 0000000000000000000000000000000000000000..37f7a1f567f2f39b8ff953d8796a19d1e6e8f1bc
--- /dev/null
+++ b/common/go/src/asapo_common/utils/structs.go
@@ -0,0 +1,5 @@
+package utils
+
+type FolderTokenTokenExtraClaim struct {
+	RootFolder string
+}
diff --git a/config/nomad/authorizer.nmd.in b/config/nomad/authorizer.nmd.in
index 8e90882eaf4d9657c05a07466105f70e276133f8..dfd6072465d23f6deff1ba2b1b5694f6c6030be5 100644
--- a/config/nomad/authorizer.nmd.in
+++ b/config/nomad/authorizer.nmd.in
@@ -25,7 +25,7 @@ job "authorizer" {
       }
 
       service {
-        name = "authorizer"
+        name = "asapo-authorizer"
         port = "authorizer"
         check {
           name     = "alive"
diff --git a/config/nomad/discovery.nmd.in b/config/nomad/discovery.nmd.in
index cf39f034849d0e93c1435e3bc4bed6f13cfdfe0f..760aed3c700405ec289b47de69c0dd37c4ba4730 100644
--- a/config/nomad/discovery.nmd.in
+++ b/config/nomad/discovery.nmd.in
@@ -25,12 +25,12 @@ job "discovery" {
       }
 
       service {
-        name = "discovery"
+        name = "asapo-discovery"
         port = "discovery"
         check {
           name     = "alive"
           type     = "http"
-          path     = "/receivers"
+          path     = "/asapo-receiver"
           interval = "10s"
           timeout  = "2s"
           initial_status =   "passing"
diff --git a/config/nomad/file_transfer.nmd.in b/config/nomad/file_transfer.nmd.in
index 2ff798a51180888984ab5b8660817551bb30f7f4..f9cdd712fecd507fc4ccbd38fea5670bb7e8f914 100644
--- a/config/nomad/file_transfer.nmd.in
+++ b/config/nomad/file_transfer.nmd.in
@@ -25,7 +25,7 @@ job "file_transfer" {
       }
 
       service {
-        name = "asapo-fts"
+        name = "asapo-file-transfer"
         port = "file_transfer"
         check {
           name     = "alive"
diff --git a/config/nomad/nginx_kill_win.nmd b/config/nomad/nginx_kill_win.nmd
index 978b98d4b8faf7ed23b79a64deac270003d5cb01..3480ab65238a697acf6668b771a556e4ec61bc2b 100644
--- a/config/nomad/nginx_kill_win.nmd
+++ b/config/nomad/nginx_kill_win.nmd
@@ -11,7 +11,7 @@ job "nginx_kill" {
       driver = "raw_exec"
       config {
         command = "taskkill",
-        args =  ["/f","/im","nginx_exe.exe"]
+        args =  ["/f","/im","nginx.exe"]
       }
    }
   }
diff --git a/consumer/api/cpp/src/server_data_broker.cpp b/consumer/api/cpp/src/server_data_broker.cpp
index 883f32b1e45d86414b6e8c114b30305178b85cce..e7ea682be7bd0f4e213d156208b55f4259d9d949 100644
--- a/consumer/api/cpp/src/server_data_broker.cpp
+++ b/consumer/api/cpp/src/server_data_broker.cpp
@@ -14,8 +14,8 @@ using std::chrono::system_clock;
 
 namespace asapo {
 
-const std::string ServerDataBroker::kBrokerServiceName = "broker";
-const std::string ServerDataBroker::kFileTransferServiceName = "fts";
+const std::string ServerDataBroker::kBrokerServiceName = "asapo-broker";
+const std::string ServerDataBroker::kFileTransferServiceName = "asapo-file-transfer";
 
 Error GetNoDataResponseFromJson(const std::string& json_string, ConsumerErrorData* data) {
     JsonStringParser parser(json_string);
@@ -153,7 +153,7 @@ Error ServerDataBroker::DiscoverService(const std::string& service_name, std::st
     }
     RequestInfo ri;
     ri.host = endpoint_;
-    ri.api = "/discovery/" + service_name;
+    ri.api = "/asapo-discovery/" + service_name;
     RequestOutput output;
     Error err;
     err = ProcessRequest(&output, ri, nullptr);
@@ -598,7 +598,7 @@ Error ServerDataBroker::UpdateFolderTokenIfNeeded(bool ignore_existing) {
 RequestInfo ServerDataBroker::CreateFolderTokenRequest() const {
     RequestInfo ri;
     ri.host = endpoint_;
-    ri.api = "/authorizer/folder";
+    ri.api = "/asapo-authorizer/folder";
     ri.post = true;
     ri.body = "{\"Folder\":\"" + source_path_ + "\",\"BeamtimeId\":\"" + source_credentials_.beamtime_id + "\",\"Token\":\""
               +
diff --git a/consumer/api/cpp/unittests/test_server_broker.cpp b/consumer/api/cpp/unittests/test_server_broker.cpp
index aba9bfe5ec1833e97f44af4de54150b8eade8247..0a9e6e7c66324ee1868b7e68fd033fc93f3ec7e7 100644
--- a/consumer/api/cpp/unittests/test_server_broker.cpp
+++ b/consumer/api/cpp/unittests/test_server_broker.cpp
@@ -63,8 +63,8 @@ class ServerDataBrokerTests : public Test {
     NiceMock<MockNetClient> mock_netclient;
     FileInfo info;
     std::string expected_server_uri = "test:8400";
-    std::string expected_broker_uri = "broker:5005";
-    std::string expected_fts_uri = "fts:5008";
+    std::string expected_broker_uri = "asapo-broker:5005";
+    std::string expected_fts_uri = "asapo-file-transfer:5008";
     std::string expected_token = "token";
     std::string expected_path = "/tmp/beamline/beamtime";
     std::string expected_filename = "filename";
@@ -125,7 +125,7 @@ class ServerDataBrokerTests : public Test {
                 ));
     }
     void MockGetServiceUri(std::string service, std::string result) {
-        EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/discovery/" + service), _, _)).WillOnce(DoAll(
+        EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/" + service), _, _)).WillOnce(DoAll(
                     SetArgPointee<1>(HttpCode::OK),
                     SetArgPointee<2>(nullptr),
                     Return(result)));
@@ -134,7 +134,7 @@ class ServerDataBrokerTests : public Test {
     void MockBeforeFTS(FileData* data);
 
     void MockGetFTSUri() {
-        MockGetServiceUri("fts", expected_fts_uri);
+        MockGetServiceUri("asapo-file-transfer", expected_fts_uri);
     }
 
     void ExpectFolderToken();
@@ -142,7 +142,7 @@ class ServerDataBrokerTests : public Test {
     void ExpectRepeatedFileTransfer();
 
     void MockGetBrokerUri() {
-        MockGetServiceUri("broker", expected_broker_uri);
+        MockGetServiceUri("asapo-broker", expected_broker_uri);
     }
     void MockReadDataFromFile(int times = 1) {
         if (times == 0) {
@@ -320,7 +320,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsWrongResponseFromHttpClient) {
 }
 
 TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerAddressNotFound) {
-    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/discovery/broker"), _,
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
                                                     SetArgPointee<1>(HttpCode::NotFound),
                                                     SetArgPointee<2>(nullptr),
@@ -333,7 +333,7 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerAddressNotFound) {
 }
 
 TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerUriEmpty) {
-    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/discovery/broker"), _,
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
                                                     SetArgPointee<1>(HttpCode::OK),
                                                     SetArgPointee<2>(nullptr),
@@ -353,7 +353,7 @@ TEST_F(ServerDataBrokerTests, GetDoNotCallBrokerUriIfAlreadyFound) {
     data_broker->GetNext(&info, expected_group_id, nullptr);
     Mock::VerifyAndClearExpectations(&mock_http_client);
 
-    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/discovery/broker"), _, _)).Times(0);
-    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _, _)).Times(0);
     MockGet("error_response");
     data_broker->GetNext(&info, expected_group_id, nullptr);
 }
@@ -432,7 +432,7 @@ ACTION(AssignArg2) {
 
 
 TEST_F(ServerDataBrokerTests, GetNextRetriesIfConnectionHttpClientErrorUntilTimeout) {
-    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/discovery/broker"), _,
+    EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _,
                                         _)).Times(AtLeast(2)).WillRepeatedly(DoAll(
                                                     SetArgPointee<1>(HttpCode::OK),
                                                     SetArgPointee<2>(nullptr),
@@ -1054,7 +1054,7 @@ void ServerDataBrokerTests::ExpectFolderToken() {
                                                expected_beamtime_id
                                                + "\",\"Token\":\"" + expected_token + "\"}";
 
-    EXPECT_CALL(mock_http_client, Post_t(HasSubstr(expected_server_uri + "/authorizer/folder"),
+    EXPECT_CALL(mock_http_client, Post_t(HasSubstr(expected_server_uri + "/asapo-authorizer/folder"),
                                          expected_folder_query_string, _, _)).WillOnce(DoAll(
                                                      SetArgPointee<2>(HttpCode::OK),
                                                      SetArgPointee<3>(nullptr),
diff --git a/deploy/asapo_helm_chart/asapo/.helmignore b/deploy/asapo_helm_chart/asapo/.helmignore
new file mode 100644
index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deploy/asapo_helm_chart/asapo/Chart.yaml b/deploy/asapo_helm_chart/asapo/Chart.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a3ecc832ab05c3cb0cb788b781eb4ca493f5e5c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/Chart.yaml
@@ -0,0 +1,38 @@
+apiVersion: v2
+name: asapo
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: 1.16.0
+
+
+dependencies:
+  - name: influxdb
+    version: "~0.4.3"
+    repository: "https://charts.bitnami.com/bitnami"
+#  - name: grafana
+#    version: "~1.3.7"
+#    repository: "https://charts.bitnami.com/bitnami"
+
+#  - name: elasticsearch
+#    version: "~11.0.13"
+#    repository: "https://charts.bitnami.com/bitnami"
+#  - name: kibana
+#    version: "~7.6.1"
+#    repository: "https://helm.elastic.co"
+
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore
new file mode 100755
index 0000000000000000000000000000000000000000..f0c13194444163d1cba5c67d9e79231a62bc8f44
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..7fc07e39bb32bd2820e8f7dfcf780793c9863175
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+appVersion: 7.6.1
+description: A highly scalable open-source full-text search and analytics engine
+engine: gotpl
+home: https://www.elastic.co/products/elasticsearch
+icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-110x117.png
+keywords:
+- elasticsearch
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: elasticsearch
+sources:
+- https://github.com/bitnami/bitnami-docker-elasticsearch
+version: 11.0.13
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..7f202bd4208e8670d9bc118e31d365d4fbf1683d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md
@@ -0,0 +1,527 @@
+# Elasticsearch
+
+[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time.
+
+## TL;DR
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/elasticsearch
+```
+
+## Introduction
+
+This chart bootstraps an [Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.11+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/elasticsearch
+```
+
+These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` release:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release. Remove also the chart using `--purge` option:
+
+```console
+$ helm delete --purge my-release
+```
+
+## Parameters
+
+The following table lists the configurable parameters of the Elasticsearch chart and their default values.
+
+|                     Parameter                     |                                                                        Description                                                                        |                           Default                            |
+|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|
+| `global.imageRegistry`                            | Global Docker image registry                                                                                                                              | `nil`                                                        |
+| `global.imagePullSecrets`                         | Global Docker registry secret names as an array                                                                                                           | `[]` (does not add image pull secrets to deployed pods)      |
+| `global.storageClass`                             | Global storage class for dynamic provisioning                                                                                                             | `nil`                                                        |
+| `global.coordinating.name`                        | Coordinating-only node pod name at global level to be used also in the Kibana subchart                                                                    | `coordinating-only`                                          |
+| `image.registry`                                  | Elasticsearch image registry                                                                                                                              | `docker.io`                                                  |
+| `image.repository`                                | Elasticsearch image repository                                                                                                                            | `bitnami/elasticsearch`                                      |
+| `image.tag`                                       | Elasticsearch image tag                                                                                                                                   | `{TAG_NAME}`                                                 |
+| `image.pullPolicy`                                | Image pull policy                                                                                                                                         | `IfNotPresent`                                               |
+| `image.pullSecrets`                               | Specify docker-registry secret names as an array                                                                                                          | `[]` (does not add image pull secrets to deployed pods)      |
+| `nameOverride`                                    | String to partially override elasticsearch.fullname template with a string (will prepend the release name)                                                | `nil`                                                        |
+| `fullnameOverride`                                | String to fully override elasticsearch.fullname template with a string                                                                                    | `nil`                                                        |
+| `name`                                            | Elasticsearch cluster name                                                                                                                                | `elastic`                                                    |
+| `plugins`                                         | Comma, semi-colon or space separated list of plugins to install at initialization                                                                         | `nil`                                                        |
+| `config`                                          | Elasticsearch node custom configuration                                                                                                                   | ``                                                           |
+| `extraVolumes`                                    | Extra volumes                                                                                                                                             |                                                              |
+| `extraVolumeMounts`                               | Mount extra volume(s),                                                                                                                                    |                                                              |
+| `master.name`                                     | Master-eligible node pod name                                                                                                                             | `master`                                                     |
+| `master.replicas`                                 | Desired number of Elasticsearch master-eligible nodes                                                                                                     | `2`                                                          |
+| `master.updateStrategy.type`                      | Update strategy for Master statefulset                                                                                                                    | `RollingUpdate`                                              |
+| `master.heapSize`                                 | Master-eligible node heap size                                                                                                                            | `128m`                                                       |
+| `master.service.type`                             | Kubernetes Service type (master-eligible nodes)                                                                                                           | `ClusterIP`                                                  |
+| `master.service.port`                             | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes)                                                                          | `9300`                                                       |
+| `master.service.nodePort`                         | Kubernetes Service nodePort (master-eligible nodes)                                                                                                       | `nil`                                                        |
+| `master.service.annotations`                      | Annotations for master-eligible nodes service                                                                                                             | `{}`                                                         |
+| `master.service.loadBalancerIP`                   | loadBalancerIP if master-eligible nodes service type is `LoadBalancer`                                                                                    | `nil`                                                        |
+| `master.resources`                                | CPU/Memory resource requests/limits for master-eligible nodes pods                                                                                        | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `master.podAnnotations`                           | Annotations for master pods.                                                                                                                              | `{}`                                                         |
+| `master.persistence.enabled`                      | Enable persistence using a `PersistentVolumeClaim`                                                                                                        | `true`                                                       |
+| `master.persistence.annotations`                  | Persistent Volume Claim annotations                                                                                                                       | `{}`                                                         |
+| `master.persistence.storageClass`                 | Persistent Volume Storage Class                                                                                                                           | ``                                                           |
+| `master.persistence.accessModes`                  | Persistent Volume Access Modes                                                                                                                            | `[ReadWriteOnce]`                                            |
+| `master.persistence.size`                         | Persistent Volume Size                                                                                                                                    | `8Gi`                                                        |
+| `master.securityContext.enabled`                  | Enable security context for master-eligible pods                                                                                                          | `true`                                                       |
+| `master.securityContext.fsGroup`                  | Group ID for the container for master-eligible pods                                                                                                       | `1001`                                                       |
+| `master.securityContext.runAsUser`                | User ID for the container for master-eligible pods                                                                                                        | `1001`                                                       |
+| `master.livenessProbe.enabled`                    | Enable/disable the liveness probe (master-eligible nodes pod)                                                                                             | `true`                                                       |
+| `master.livenessProbe.initialDelaySeconds`        | Delay before liveness probe is initiated (master-eligible nodes pod)                                                                                      | `90`                                                         |
+| `master.livenessProbe.periodSeconds`              | How often to perform the probe (master-eligible nodes pod)                                                                                                | `10`                                                         |
+| `master.livenessProbe.timeoutSeconds`             | When the probe times out (master-eligible nodes pod)                                                                                                      | `5`                                                          |
+| `master.livenessProbe.successThreshold`           | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod)                                   | `1`                                                          |
+| `master.livenessProbe.failureThreshold`           | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `master.readinessProbe.enabled`                   | Enable/disable the readiness probe (master-eligible nodes pod)                                                                                            | `true`                                                       |
+| `master.readinessProbe.initialDelaySeconds`       | Delay before readiness probe is initiated (master-eligible nodes pod)                                                                                     | `90`                                                         |
+| `master.readinessProbe.periodSeconds`             | How often to perform the probe (master-eligible nodes pod)                                                                                                | `10`                                                         |
+| `master.readinessProbe.timeoutSeconds`            | When the probe times out (master-eligible nodes pod)                                                                                                      | `5`                                                          |
+| `master.readinessProbe.successThreshold`          | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod)                                   | `1`                                                          |
+| `master.readinessProbe.failureThreshold`          | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `master.serviceAccount.create`                    | Enable creation of ServiceAccount for the master node                                                                                                     | `false`                                                      |
+| `master.serviceAccount.name`                      | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.master.fullname` template |
+| `clusterDomain`                                   | Kubernetes cluster domain                                                                                                                                 | `cluster.local`                                              |
+| `discovery.name`                                  | Discovery node pod name                                                                                                                                   | `discovery`                                                  |
+| `coordinating.replicas`                           | Desired number of Elasticsearch coordinating-only nodes                                                                                                   | `2`                                                          |
+| `coordinating.updateStrategy.type`                | Update strategy for Coordinating Deployment                                                                                                               | `RollingUpdate`                                              |
+| `coordinating.heapSize`                           | Coordinating-only node heap size                                                                                                                          | `128m`                                                       |
+| `coordinating.podAnnotations`                     | Annotations for coordinating pods.                                                                                                                        | `{}`                                                         |
+| `coordinating.service.type`                       | Kubernetes Service type (coordinating-only nodes)                                                                                                         | `ClusterIP`                                                  |
+| `coordinating.service.port`                       | Kubernetes Service port for REST API (coordinating-only nodes)                                                                                            | `9200`                                                       |
+| `coordinating.service.nodePort`                   | Kubernetes Service nodePort (coordinating-only nodes)                                                                                                     | `nil`                                                        |
+| `coordinating.service.annotations`                | Annotations for coordinating-only nodes service                                                                                                           | `{}`                                                         |
+| `coordinating.service.loadBalancerIP`             | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer`                                                                                  | `nil`                                                        |
+| `coordinating.resources`                          | CPU/Memory resource requests/limits for coordinating-only nodes pods                                                                                      | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `coordinating.securityContext.enabled`            | Enable security context for coordinating-only pods                                                                                                        | `true`                                                       |
+| `coordinating.securityContext.fsGroup`            | Group ID for the container for coordinating-only pods                                                                                                     | `1001`                                                       |
+| `coordinating.securityContext.runAsUser`          | User ID for the container for coordinating-only pods                                                                                                      | `1001`                                                       |
+| `coordinating.livenessProbe.enabled`              | Enable/disable the liveness probe (coordinating-only nodes pod)                                                                                           | `true`                                                       |
+| `coordinating.livenessProbe.initialDelaySeconds`  | Delay before liveness probe is initiated (coordinating-only nodes pod)                                                                                    | `90`                                                         |
+| `coordinating.livenessProbe.periodSeconds`        | How often to perform the probe (coordinating-only nodes pod)                                                                                              | `10`                                                         |
+| `coordinating.livenessProbe.timeoutSeconds`       | When the probe times out (coordinating-only nodes pod)                                                                                                    | `5`                                                          |
+| `coordinating.livenessProbe.successThreshold`     | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod)                                 | `1`                                                          |
+| `coordinating.livenessProbe.failureThreshold`     | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `coordinating.readinessProbe.enabled`             | Enable/disable the readiness probe (coordinating-only nodes pod)                                                                                          | `true`                                                       |
+| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod)                                                                                   | `90`                                                         |
+| `coordinating.readinessProbe.periodSeconds`       | How often to perform the probe (coordinating-only nodes pod)                                                                                              | `10`                                                         |
+| `coordinating.readinessProbe.timeoutSeconds`      | When the probe times out (coordinating-only nodes pod)                                                                                                    | `5`                                                          |
+| `coordinating.readinessProbe.successThreshold`    | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod)                                 | `1`                                                          |
+| `coordinating.readinessProbe.failureThreshold`    | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `coordinating.serviceAccount.create`              | Enable creation of ServiceAccount for the coordinating-only node                                                                                          | `false`                                                      |
+| `coordinating.serviceAccount.name`                | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.coordinating.fullname`    |
+| `data.name`                                       | Data node pod name                                                                                                                                        | `data`                                                       |
+| `data.replicas`                                   | Desired number of Elasticsearch data nodes                                                                                                                | `3`                                                          |
+| `data.updateStrategy.type`                        | Update strategy for Data statefulset                                                                                                                      | `RollingUpdate`                                              |
+| `data.updateStrategy.rollingUpdatePartition`      | Partition update strategy for Data statefulset                                                                                                            | `nil`                                                        |
+| `data.heapSize`                                   | Data node heap size                                                                                                                                       | `1024m`                                                      |
+| `data.resources`                                  | CPU/Memory resource requests/limits for data nodes                                                                                                        | `requests: { cpu: "25m", memory: "1152Mi" }`                 |
+| `data.persistence.enabled`                        | Enable persistence using a `PersistentVolumeClaim`                                                                                                        | `true`                                                       |
+| `data.persistence.annotations`                    | Persistent Volume Claim annotations                                                                                                                       | `{}`                                                         |
+| `data.persistence.storageClass`                   | Persistent Volume Storage Class                                                                                                                           | ``                                                           |
+| `data.persistence.accessModes`                    | Persistent Volume Access Modes                                                                                                                            | `[ReadWriteOnce]`                                            |
+| `data.persistence.size`                           | Persistent Volume Size                                                                                                                                    | `8Gi`                                                        |
+| `data.securityContext.enabled`                    | Enable security context for data pods                                                                                                                     | `true`                                                       |
+| `data.securityContext.fsGroup`                    | Group ID for the container for data pods                                                                                                                  | `1001`                                                       |
+| `data.securityContext.runAsUser`                  | User ID for the container for data pods                                                                                                                   | `1001`                                                       |
+| `data.livenessProbe.enabled`                      | Enable/disable the liveness probe (data nodes pod)                                                                                                        | `true`                                                       |
+| `data.livenessProbe.initialDelaySeconds`          | Delay before liveness probe is initiated (data nodes pod)                                                                                                 | `90`                                                         |
+| `data.livenessProbe.periodSeconds`                | How often to perform the probe (data nodes pod)                                                                                                           | `10`                                                         |
+| `data.livenessProbe.timeoutSeconds`               | When the probe times out (data nodes pod)                                                                                                                 | `5`                                                          |
+| `data.livenessProbe.successThreshold`             | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod)                                              | `1`                                                          |
+| `data.livenessProbe.failureThreshold`             | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `data.podAnnotations`                             | Annotations for data pods.                                                                                                                                | `{}`                                                         |
+| `data.readinessProbe.enabled`                     | Enable/disable the readiness probe (data nodes pod)                                                                                                       | `true`                                                       |
+| `data.readinessProbe.initialDelaySeconds`         | Delay before readiness probe is initiated (data nodes pod)                                                                                                | `90`                                                         |
+| `data.readinessProbe.periodSeconds`               | How often to perform the probe (data nodes pod)                                                                                                           | `10`                                                         |
+| `data.readinessProbe.timeoutSeconds`              | When the probe times out (data nodes pod)                                                                                                                 | `5`                                                          |
+| `data.readinessProbe.successThreshold`            | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod)                                              | `1`                                                          |
+| `data.readinessProbe.failureThreshold`            | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `data.serviceAccount.create`                      | Enable creation of ServiceAccount for the data node                                                                                                     | `false`                                                        |
+| `data.serviceAccount.name`                        | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.data.fullname` template   |
+| `ingest.enabled`                                  | Enable ingest nodes                                                                                                                                       | `false`                                                      |
+| `ingest.name`                                     | Ingest node pod name                                                                                                                                      | `ingest`                                                     |
+| `ingest.replicas`                                 | Desired number of Elasticsearch ingest nodes                                                                                                              | `2`                                                          |
+| `ingest.heapSize`                                 | Ingest node heap size                                                                                                                                     | `128m`                                                       |
+| `ingest.service.type`                             | Kubernetes Service type (ingest nodes)                                                                                                                    | `ClusterIP`                                                  |
+| `ingest.service.port`                             | Kubernetes Service port (Elasticsearch transport port, ingest nodes)                                                                                      | `9300`                                                       |
+| `ingest.service.nodePort`                         | Kubernetes Service nodePort (ingest nodes)                                                                                                                | `nil`                                                        |
+| `ingest.service.annotations`                      | Annotations for ingest nodes service                                                                                                                      | `{}`                                                         |
+| `ingest.service.loadBalancerIP`                   | loadBalancerIP if ingest nodes service type is `LoadBalancer`                                                                                             | `nil`                                                        |
+| `ingest.resources`                                | CPU/Memory resource requests/limits for ingest nodes pods                                                                                                 | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `ingest.securityContext.enabled`                  | Enable security context for ingest pods                                                                                                                   | `true`                                                       |
+| `ingest.securityContext.fsGroup`                  | Group ID for the container for ingest pods                                                                                                                | `1001`                                                       |
+| `ingest.securityContext.runAsUser`                | User ID for the container for ingest pods                                                                                                                 | `1001`                                                       |
+| `ingest.livenessProbe.enabled`                    | Enable/disable the liveness probe (ingest nodes pod)                                                                                                      | `true`                                                       |
+| `ingest.livenessProbe.initialDelaySeconds`        | Delay before liveness probe is initiated (ingest nodes pod)                                                                                               | `90`                                                         |
+| `ingest.livenessProbe.periodSeconds`              | How often to perform the probe (ingest nodes pod)                                                                                                         | `10`                                                         |
+| `ingest.livenessProbe.timeoutSeconds`             | When the probe times out (ingest nodes pod)                                                                                                               | `5`                                                          |
+| `ingest.livenessProbe.successThreshold`           | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod)                                            | `1`                                                          |
+| `ingest.livenessProbe.failureThreshold`           | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `ingest.podAnnotations`                           | Annotations for ingest pods.                                                                                                                              | `{}`                                                         |
+| `ingest.readinessProbe.enabled`                   | Enable/disable the readiness probe (ingest nodes pod)                                                                                                     | `true`                                                       |
+| `ingest.readinessProbe.initialDelaySeconds`       | Delay before readiness probe is initiated (ingest nodes pod)                                                                                              | `90`                                                         |
+| `ingest.readinessProbe.periodSeconds`             | How often to perform the probe (ingest nodes pod)                                                                                                         | `10`                                                         |
+| `ingest.readinessProbe.timeoutSeconds`            | When the probe times out (ingest nodes pod)                                                                                                               | `5`                                                          |
+| `ingest.readinessProbe.successThreshold`          | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod)                                            | `1`                                                          |
+| `ingest.readinessProbe.failureThreshold`          | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `curator.enabled`                                 | Enable Elasticsearch Curator cron job                                                                                                                     | `false`                                                      |
+| `curator.name`                                    | Elasticsearch Curator pod name                                                                                                                            | `curator`                                                    |
+| `curator.image.registry`                          | Elasticsearch Curator image registry                                                                                                                      | `docker.io`                                                  |
+| `curator.image.repository`                        | Elasticsearch Curator image repository                                                                                                                    | `bitnami/elasticsearch-curator`                              |
+| `curator.image.tag`                               | Elasticsearch Curator image tag                                                                                                                           | `{TAG_NAME}`                                                 |
+| `curator.image.pullPolicy`                        | Elasticsearch Curator image pull policy                                                                                                                   | `IfNotPresent`                                               |
+| `curator.cronjob.schedule`                        | Schedule for the CronJob                                                                                                                                  | `0 1 * * *`                                                  |
+| `curator.cronjob.annotations`                     | Annotations to add to the cronjob                                                                                                                         | `{}`                                                         |
+| `curator.cronjob.concurrencyPolicy`               | `Allow,Forbid,Replace` concurrent jobs                                                                                                                    | `nil`                                                        |
+| `curator.cronjob.failedJobsHistoryLimit`          | Specify the number of failed Jobs to keep                                                                                                                 | `nil`                                                        |
+| `curator.cronjob.successfulJobsHistoryLimit`      | Specify the number of completed Jobs to keep                                                                                                              | `nil`                                                        |
+| `curator.cronjob.jobRestartPolicy`                | Control the Job restartPolicy                                                                                                                             | `Never`                                                      |
+| `curator.podAnnotations`                          | Annotations to add to the pod                                                                                                                             | `{}`                                                         |
+| `curator.rbac.enabled`                            | Enable RBAC resources                                                                                                                                     | `false`                                                      |
+| `curator.serviceAccount.create`                   | Create a default serviceaccount for elasticsearch curator                                                                                                 | `true`                                                       |
+| `curator.serviceAccount.name`                     | Name for elasticsearch curator serviceaccount                                                                                                             | `""`                                                         |
+| `curator.hooks`                                   | Whether to run job on selected hooks                                                                                                                      | `{ "install": false, "upgrade": false }`                     |
+| `curator.psp.create`                              | Create pod security policy resources                                                                                                                      | `false`                                                      |
+| `curator.dryrun`                                  | Run Curator in dry-run mode                                                                                                                               | `false`                                                      |
+| `curator.command`                                 | Command to execute                                                                                                                                        | `["/curator/curator"]`                                       |
+| `curator.env`                                     | Environment variables to add to the cronjob container                                                                                                     | `{}`                                                         |
+| `curator.configMaps.action_file_yml`              | Contents of the Curator action_file.yml                                                                                                                   | See values.yaml                                              |
+| `curator.configMaps.config_yml`                   | Contents of the Curator config.yml (overrides config)                                                                                                     | See values.yaml                                              |
+| `curator.resources`                               | Resource requests and limits                                                                                                                              | `{}`                                                         |
+| `curator.priorityClassName`                       | priorityClassName                                                                                                                                         | `nil`                                                        |
+| `curator.extraVolumes`                            | Extra volumes                                                                                                                                             |                                                              |
+| `curator.extraVolumeMounts`                       | Mount extra volume(s)                                                                                                                                     |                                                              |
+| `curator.extraInitContainers`                     | Init containers to add to the cronjob container                                                                                                           | `{}`                                                         |
+| `curator.envFromSecrets`                          | Environment variables from secrets to the cronjob container                                                                                               | `{}`                                                         |
+| `curator.envFromSecrets.*.from.secret`            | - `secretKeyRef.name` used for environment variable                                                                                                       |                                                              |
+| `curator.envFromSecrets.*.from.key`               | - `secretKeyRef.key` used for environment variable                                                                                                        |                                                              |
+| `metrics.enabled`                                 | Enable prometheus exporter                                                                                                                                | `false`                                                      |
+| `metrics.name`                                    | Metrics pod name                                                                                                                                          | `metrics`                                                    |
+| `metrics.image.registry`                          | Metrics exporter image registry                                                                                                                           | `docker.io`                                                  |
+| `metrics.image.repository`                        | Metrics exporter image repository                                                                                                                         | `bitnami/elasticsearch-exporter`                             |
+| `metrics.image.tag`                               | Metrics exporter image tag                                                                                                                                | `1.0.2`                                                      |
+| `metrics.image.pullPolicy`                        | Metrics exporter image pull policy                                                                                                                        | `IfNotPresent`                                               |
+| `metrics.service.type`                            | Metrics exporter endpoint service type                                                                                                                    | `ClusterIP`                                                  |
+| `metrics.service.annotations`                     | Annotations for metrics service.                                                                                                                          | `{prometheus.io/scrape: "true", prometheus.io/port: "8080"}` |
+| `metrics.resources`                               | Metrics exporter resource requests/limit                                                                                                                  | `requests: { cpu: "25m" }`                                   |
+| `metrics.podAnnotations`                          | Annotations for metrics pods.                                                                                                                             | `{prometheus.io/scrape: "true", prometheus.io/port: "8080"}` |
+| `metrics.serviceMonitor.enabled`                  | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)                                                    | `false`                                                      |
+| `metrics.serviceMonitor.namespace`                | Namespace in which Prometheus is running                                                                                                                  | `nil`                                                        |
+| `metrics.serviceMonitor.interval`                 | Interval at which metrics should be scraped.                                                                                                              | `nil` (Prometheus Operator default value)                    |
+| `metrics.serviceMonitor.scrapeTimeout`            | Timeout after which the scrape is ended                                                                                                                   | `nil` (Prometheus Operator default value)                    |
+| `metrics.serviceMonitor.selector`                 | Prometheus instance selector labels                                                                                                                       | `nil`                                                        |
+| `sysctlImage.enabled`                             | Enable kernel settings modifier image                                                                                                                     | `true`                                                       |
+| `sysctlImage.registry`                            | Kernel settings modifier image registry                                                                                                                   | `docker.io`                                                  |
+| `sysctlImage.repository`                          | Kernel settings modifier image repository                                                                                                                 | `bitnami/minideb`                                            |
+| `sysctlImage.tag`                                 | Kernel settings modifier image tag                                                                                                                        | `buster`                                                     |
+| `sysctlImage.pullPolicy`                          | Kernel settings modifier image pull policy                                                                                                                | `Always`                                                     |
+| `volumePermissions.enabled`                       | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false`                                                      |
+| `volumePermissions.image.registry`                | Init container volume-permissions image registry                                                                                                          | `docker.io`                                                  |
+| `volumePermissions.image.repository`              | Init container volume-permissions image name                                                                                                              | `bitnami/minideb`                                            |
+| `volumePermissions.image.tag`                     | Init container volume-permissions image tag                                                                                                               | `buster`                                                     |
+| `volumePermissions.image.pullPolicy`              | Init container volume-permissions image pull policy                                                                                                       | `Always`                                                     |
+| `volumePermissions.resources`                     | Init container resource requests/limit                                                                                                                    | `nil`                                                        |
+
+### Kibana Parameters
+
+|            Parameter           |                                    Description                                      |                                         Default                                         |
+|--------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|
+| `global.kibanaEnabled`         | Use bundled Kibana                                                                  | `false`                                                                                 |
+| `kibana.elasticsearch.hosts`   | Array containing hostnames for the ES instances. Used to generate the URL           | `{{ include "elasticsearch.coordinating.fullname" . }}` Coordinating service (fullname) |
+| `kibana.elasticsearch.port`    | Port to connect Kibana and ES instance. Used to generate the URL                    | `9200`                                                                                  |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set name=my-elastic,client.service.port=8080 \
+  bitnami/elasticsearch
+```
+
+The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/elasticsearch
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml).
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Init container that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors):
+```diff
+- sysctlImage.enabled: true
++ sysctlImage.enabled: false
+```
+
+- Desired number of Elasticsearch master-eligible nodes:
+```diff
+- master.replicas: 2
++ master.replicas: 3
+```
+
+- Enable the liveness probe (master-eligible nodes pod):
+```diff
+- master.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ master.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (master-eligible nodes pod):
+```diff
+- master.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ master.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the liveness probe (coordinating-only nodes pod):
+```diff
+- coordinating.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ coordinating.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (coordinating-only nodes pod):
+```diff
+- coordinating.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ coordinating.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Desired number of Elasticsearch data nodes:
+```diff
+- data.replicas: 2
++ data.replicas: 3
+```
+
+- Enable the liveness probe (data nodes pod):
+```diff
+- data.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ data.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (data nodes pod):
+```diff
+- data.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ data.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable ingest nodes:
+```diff
+- ingest.enabled: false
++ ingest.enabled: true
+```
+
+- Enable the liveness probe (ingest nodes pod):
+```diff
+- ingest.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ ingest.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (ingest nodes pod):
+```diff
+- ingest.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ ingest.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+- Enable bundled Kibana:
+```diff
+- global.kibanaEnabled: false
++ global.kibanaEnabled: true
+```
+
+### Default kernel settings
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+### Enable bundled Kibana
+
+This Elasticsearch chart contains Kibana as subchart, you can enable it just setting the `global.kibanaEnabled=true` parameter. It is enabled by default using the `values-production.yaml` file.
+To see the notes with some operational instructions from the Kibana chart, please use the `--render-subchart-notes` as part of your `helm install` command, in this way you can see the Kibana and ES notes in your terminal.
+
+## Persistence
+
+The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Notable changes
+
+### 11.0.0
+
+Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
+
+```console
+$ kubectl delete statefulset elasticsearch-master
+$ helm upgrade <DEPLOYMENT_NAME> bitnami/elasticsearch
+```
+
+### 10.0.0
+
+In this version, Kibana was added as dependant chart. More info about how to enable and work with this bundled Kibana in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
+
+### 9.0.0
+
+Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`. Among other things this includes the UUID of the elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if k8s node(s) hosting the es master nodes go down and are scheduled on some other master node. In the event that this happens, the data nodes will no longer be able to join a cluster as the uuid changed resulting in a broken cluster.
+
+To resolve such issues, PVC's are now attached for master node data persistence.
+
+---
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage.
+
+### 7.0.0
+
+This version enabled by default the initContainer that modify some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+## Upgrading
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is elasticsearch:
+
+```console
+$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl delete statefulset elasticsearch-data --cascade=false
+```
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore
new file mode 100755
index 0000000000000000000000000000000000000000..f0c13194444163d1cba5c67d9e79231a62bc8f44
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..cc920bc9555e7eb9beec46ecf0cdb45348b3472c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+appVersion: 7.6.1
+description: Kibana is an open source, browser based analytics and search dashboard
+  for Elasticsearch.
+engine: gotpl
+home: https://www.elastic.co/products/kibana
+icon: https://bitnami.com/assets/stacks/kibana/img/kibana-stack-220x234.png
+keywords:
+- kibana
+- analytics
+- monitoring
+- metrics
+- logs
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: kibana
+sources:
+- https://github.com/bitnami/bitnami-docker-kibana
+version: 5.0.11
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..2d4888f267ef5ea85402ae906c22b0e02d6b2ba6
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md
@@ -0,0 +1,326 @@
+# Kibana
+
+[Kibana](https://www.elastic.co/products/kibana) is an open source, browser based analytics and search dashboard for Elasticsearch.
+
+## TL;DR;
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/kibana --set elasticsearch.hosts[0]=<Hostname of your ES instance> --set elasticsearch.port=<port of your ES instance>
+```
+
+## Introduction
+
+This chart bootstraps a [kibana](https://github.com/bitnami/bitnami-docker-kibana) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.11+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+- ReadWriteMany volumes for deployment scaling
+
+## Installing the Chart
+
+This chart requires an Elasticsearch instance to work. You can use an already existing Elasticsearch instance.
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release \
+  --set elasticsearch.hosts[0]=<Hostname of your ES instance> \
+  --set elasticsearch.port=<port of your ES instance> \
+  bitnami/kibana
+```
+
+These commands deploy kibana on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` statefulset:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release. Use the option `--purge` to delete all history too.
+
+## Parameters
+
+The following tables lists the configurable parameters of the kibana chart and their default values.
+
+|               Parameter                |                                                                        Description                                                                        |                                                 Default                                                 |             |
+|----------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-------------|
+| `global.imageRegistry`                 | Global Docker image registry                                                                                                                              | `nil`                                                                                                   |             |
+| `global.imagePullSecrets`              | Global Docker registry secret names as an array                                                                                                           | `[]` (does not add image pull secrets to deployed pods)                                                 |             |
+| `global.storageClass`                  | Global storage class for dynamic provisioning                                                                                                             | `nil`                                                                                                   |             |
+| `image.registry`                       | Kibana image registry                                                                                                                                     | `docker.io`                                                                                             |             |
+| `image.repository`                     | Kibana image name                                                                                                                                         | `bitnami/kibana`                                                                                        |             |
+| `image.tag`                            | Kibana image tag                                                                                                                                          | `{TAG_NAME}`                                                                                            |             |
+| `image.pullPolicy`                     | Kibana image pull policy                                                                                                                                  | `IfNotPresent`                                                                                          |             |
+| `image.pullSecrets`                    | Specify docker-registry secret names as an array                                                                                                          | `[]` (does not add image pull secrets to deployed pods)                                                 |             |
+| `nameOverride`                         | String to partially override kibana.fullname template with a string (will prepend the release name)                                                       | `nil`                                                                                                   |             |
+| `fullnameOverride`                     | String to fully override kibana.fullname template with a string                                                                                           | `nil`                                                                                                   |             |
+| `replicaCount`                         | Number of replicas of the Kibana Pod                                                                                                                      | `1`                                                                                                     |             |
+| `updateStrategy`                       | Update strategy for deployment (evaluated as a template)                                                                                                  | `{type: "RollingUpdate"}`                                                                               |             |
+| `schedulerName`                        | Alternative scheduler                                                                                                                                     | `nil`                                                                                                   |             |
+| `plugins`                              | Array containing the Kibana plugins to be installed in deployment                                                                                         | `[]`                                                                                                    |             |
+| `savedObjects.urls`                    | Array containing links to NDJSON files to be imported during Kibana initialization                                                                        | `[]`                                                                                                    |             |
+| `savedObjects.configmap`               | Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template)                                                   | `[]`                                                                                                    |             |
+| `extraConfiguration`                   | Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template  | `nil`                                                                                                   |             |
+| `configurationCM`                      | ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml                                                  | `nil`                                                                                                   |             |
+| `extraEnvVars`                         | Array containing extra env vars to configure Kibana                                                                                                       | `nil`                                                                                                   |             |
+| `extraEnvVarsCM`                       | ConfigMap containing extra env vars to configure Kibana                                                                                                   | `nil`                                                                                                   |             |
+| `extraEnvVarsSecret`                   | Secret containing extra env vars to configure Kibana (in case of sensitive data)                                                                          | `nil`                                                                                                   |             |
+| `extraVolumes`                         | Array of extra volumes to be added to the Kibana deployment (evaluated as template). Requires setting `extraVolumeMounts`                                 | `nil`                                                                                                   |             |
+| `extraVolumeMounts`                    | Array of extra volume mounts to be added to the Kibana deployment (evaluated as template). Normally used with `extraVolumes`.                             | `nil`                                                                                                   |             |
+| `volumePermissions.enabled`            | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false`                                                                                                 |             |
+| `volumePermissions.image.registry`     | Init container volume-permissions image registry                                                                                                          | `docker.io`                                                                                             |             |
+| `volumePermissions.image.repository`   | Init container volume-permissions image name                                                                                                              | `bitnami/minideb`                                                                                       |             |
+| `volumePermissions.image.tag`          | Init container volume-permissions image tag                                                                                                               | `buster`                                                                                                |             |
+| `volumePermissions.image.pullPolicy`   | Init container volume-permissions image pull policy                                                                                                       | `Always`                                                                                                |             |
+| `volumePermissions.resources`          | Init container resource requests/limit                                                                                                                    | `nil`                                                                                                   |             |
+| `persistence.enabled`                  | Enable persistence                                                                                                                                        | `true`                                                                                                  |             |
+| `persistence.storageClass`             | Storage class to use with the PVC                                                                                                                         | `nil`                                                                                                   |             |
+| `persistence.accessMode`               | Access mode to the PV                                                                                                                                     | `ReadWriteOnce`                                                                                         |             |
+| `persistence.size`                     | Size for the PV                                                                                                                                           | `10Gi`                                                                                                  |             |
+| `livenessProbe.enabled`                | Enable/disable the Liveness probe                                                                                                                         | `true`                                                                                                  |             |
+| `livenessProbe.initialDelaySeconds`    | Delay before liveness probe is initiated                                                                                                                  | `60`                                                                                                    |             |
+| `livenessProbe.periodSeconds`          | How often to perform the probe                                                                                                                            | `10`                                                                                                    |             |
+| `livenessProbe.timeoutSeconds`         | When the probe times out                                                                                                                                  | `5`                                                                                                     |             |
+| `livenessProbe.successThreshold`       | Minimum consecutive successes for the probe to be considered successful after having failed.                                                              | `1`                                                                                                     |             |
+| `livenessProbe.failureThreshold`       | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                | `6`                                                                                                     |             |
+| `readinessProbe.enabled`               | Enable/disable the Readiness probe                                                                                                                        | `true`                                                                                                  |             |
+| `readinessProbe.initialDelaySeconds`   | Delay before readiness probe is initiated                                                                                                                 | `5`                                                                                                     |             |
+| `readinessProbe.periodSeconds`         | How often to perform the probe                                                                                                                            | `10`                                                                                                    |             |
+| `readinessProbe.timeoutSeconds`        | When the probe times out                                                                                                                                  | `5`                                                                                                     |             |
+| `readinessProbe.failureThreshold`      | Minimum consecutive failures for the probe to be considered failed after having succeeded.                                                                | `6`                                                                                                     |             |
+| `readinessProbe.successThreshold`      | Minimum consecutive successes for the probe to be considered successful after having failed.                                                              | `1`                                                                                                     |             |
+| `service.type`                         | Kubernetes Service type                                                                                                                                   | `ClusterIP`                                                                                             |             |
+| `service.nodePort`                     | Port to bind to for NodePort service type (client port)                                                                                                   | `nil`                                                                                                   |             |
+| `service.annotations`                  | Annotations for Kibana service (evaluated as a template)                                                                                                  | `{}`                                                                                                    |             |
+| `service.externalTrafficPolicy`        | Enable client source IP preservation                                                                                                                      | `Cluster`                                                                                               |             |
+| `service.loadBalancerIP`               | loadBalancerIP if Kibana service type is `LoadBalancer`                                                                                                   | `nil`                                                                                                   |             |
+| `service.extraPorts`                   | Extra ports to expose in the service (normally used with the `sidecar` value). Evaluated as a template.                                                   | `nil`                                                                                                   |             |
+| `forceInitScripts`                     | Force the execution of the init scripts located in `/docker-entrypoint-initdb.d`                                                                          | `false`                                                                                                 |             |
+| `initScriptsCM`                        | ConfigMap containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time (evaluated as a template)                                | `nil`                                                                                                   |             |
+| `initScriptsSecret`                    | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time (that contain sensitive data). Evaluated as a template.     | `nil`                                                                                                   |             |
+| `ingress.enabled`                      | Enable ingress controller resource                                                                                                                        | `false`                                                                                                 |             |
+| `ingress.certManager`                  | Add annotations for cert-manager                                                                                                                          | `false`                                                                                                 |             |
+| `ingress.annotations`                  | Ingress annotations                                                                                                                                       | `[]`                                                                                                    |             |
+| `ingress.hosts[0].name`                | Hostname to your Kibana installation                                                                                                                      | `kibana.local`                                                                                          |             |
+| `ingress.hosts[0].path`                | Path within the url structure                                                                                                                             | `/`                                                                                                     |             |
+| `ingress.hosts[0].tls`                 | Utilize TLS backend in ingress                                                                                                                            | `false`                                                                                                 |             |
+| `ingress.hosts[0].tlsHosts`            | Array of TLS hosts for ingress record (defaults to `ingress.hosts[0].name` if `nil`)                                                                      | `nil`                                                                                                   |             |
+| `ingress.hosts[0].tlsSecret`           | TLS Secret (certificates)                                                                                                                                 | `kibana.local-tls`                                                                                      |             |
+| `securityContext.enabled`              | Enable securityContext on for Kibana deployment                                                                                                           | `true`                                                                                                  |             |
+| `securityContext.runAsUser`            | User for the security context                                                                                                                             | `1001`                                                                                                  |             |
+| `securityContext.fsGroup`              | Group to configure permissions for volumes                                                                                                                | `1001`                                                                                                  |             |
+| `resources`                            | Configure resource requests and limits (evaluated as a template)                                                                                          | `nil`                                                                                                   |             |
+| `nodeSelector`                         | Node labels for pod assignment (evaluated as a template)                                                                                                  | `{}`                                                                                                    |             |
+| `tolerations`                          | Tolerations for pod assignment (evaluated as a template)                                                                                                  | `[]`                                                                                                    |             |
+| `affinity`                             | Affinity for pod assignment (evaluated as a template)                                                                                                     | `{}`                                                                                                    |             |
+| `podAnnotations`                       | Pod annotations (evaluated as a template)                                                                                                                 | `{}`                                                                                                    |             |
+| `sidecars`                             | Attach additional containers to the pod (evaluated as a template)                                                                                         | `nil`                                                                                                   |             |
+| `initContainers`                       | Add additional init containers to the pod (evaluated as a template)                                                                                       | `nil`                                                                                                   |             |
+| `metrics.enabled`                      | Start a side-car prometheus exporter                                                                                                                      | `false`                                                                                                 |             |
+| `metrics.service.annotations`          | Prometheus annotations for the Kibana service                                                                                                             | `{ prometheus.io/scrape: "true", prometheus.io/port: "80", prometheus.io/path: "_prometheus/metrics" }` |             |
+| `metrics.serviceMonitor.enabled`       | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)                                                    | `false`                                                                                                 |             |
+| `metrics.serviceMonitor.namespace`     | Namespace in which Prometheus is running                                                                                                                  | `nil`                                                                                                   |             |
+| `metrics.serviceMonitor.interval`      | Interval at which metrics should be scraped.                                                                                                              | `nil` (Prometheus Operator default value)                                                               |             |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended                                                                                                                   | `nil` (Prometheus Operator default value)                                                               |             |
+| `metrics.serviceMonitor.selector`      | Prometheus instance selector labels                                                                                                                       | `nil`                                                                                                   |             |
+| `elasticsearch.hosts`                  | Array containing the hostnames for the already existing Elasticsearch instances                                                                           | `nil`                                                                                                   |             |
+| `elasticsearch.port`                   | Port for accessing the external Elasticsearch instances                                                                                                   | `nil`                                                                                                   |             |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set admin.user=admin-user bitnami/kibana
+```
+
+The above command sets the Kibana admin user to `admin-user`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/kibana
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Enable metrics scraping
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Using custom configuration
+
+The Bitnami Kibana chart supports using custom configuration settings. For example, to mount a custom `kibana.yml` you can create a ConfigMap like the following:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: myconfig
+data:
+  kibana.yml: |-
+    # Raw text of the file
+```
+
+And now you need to pass the ConfigMap name, to the corresponding parameter: `configurationCM=myconfig`
+
+An alternative is to provide extra configuration settings to the default kibana.yml that the chart deploys. This is done using the `extraConfiguration` value:
+
+```yaml
+extraConfiguration:
+  "server.maxPayloadBytes": 1048576
+  "server.pingTimeout": 1500
+```
+
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: ELASTICSEARCH_VERSION
+    value: "6"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
+
+### Using custom init scripts
+
+For advanced operations, the Bitnami Kibana charts allows using custom init scripts that will be mounted in `/docker-entrypoint.init-db`. You can use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts. Then use the `initScriptsCM` and `initScriptsSecret` values.
+
+```console
+elasticsearch.hosts[0]=elasticsearch-host
+elasticsearch.port=9200
+initScriptsCM=special-scripts
+initScriptsSecret=special-scripts-sensitive
+```
+
+### Installing plugins
+
+The Bitnami Kibana chart allows you to install a set of plugins at deployment time using the `plugins` value:
+
+```console
+elasticsearch.hosts[0]=elasticsearch-host
+elasticsearch.port=9200
+plugins[0]=https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+```
+
+> **NOTE** Make sure that the plugin is available for the Kibana version you are deploying
+
+### Importing saved objects
+
+If you have visualizations and dashboards (in NDJSON format) that you want to import to Kibana, you can create a ConfigMap that includes them and then install the chart with the `savedObjects.configmap` value: `savedObjects.configmap=my-import`
+
+Alternatively, if it is available via URL, you can install the chart as follows: `savedObjects.urls[0]=www.my-site.com/import.ndjson`
+
+### Sidecars and Init Containers
+
+If you have a need for additional containers to run within the same pod as Kibana (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+#### Add a sample Elasticsearch container as sidecar
+
+This chart requires an Elasticsearch instance to work. For production, you can use an already existing Elasticsearch instance or deploy the [Elasticsearch chart](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch) with the [`global.kibanaEnabled=true` parameter](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#enable-bundled-kibana).
+
+For the purpose of testing, you can use a sidecar Elasticsearch container setting the following parameters during the Kibana chart installation:
+
+```
+elasticsearch.hosts[0]=localhost
+elasticsearch.port=9200
+sidecars[0].name=elasticsearch
+sidecars[0].image=bitnami/elasticsearch:latest
+sidecars[0].imagePullPolicy=IfNotPresent
+sidecars[0].ports[0].name=http
+sidecars[0].ports[0].containerPort=9200
+```
+
+## Persistence
+
+The [Bitnami Kibana](https://github.com/bitnami/bitnami-docker-kibana) image can persist data. If enabled, the persisted path is `/bitnami/kibana` by default.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adding extra volumes
+
+The Bitnami Kibana chart supports mounting extra volumes (either PVCs, secrets or configmaps) by using the `extraVolumes` and `extraVolumeMounts` property. This can be combined with advanced operations like adding extra init containers and sidecars.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Notable changes
+
+### 5.0.0
+
+This version does not include Elasticsearch as a bundled dependency. From now on, you should specify an external Elasticsearch instance using the `elasticsearch.hosts[]` and `elasticsearch.port` [parameters](#parameters).
+
+### 3.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the Kubernetes API deprecations, resulting in compatibility breakage.
+
+This major version signifies this change.
+
+### 2.0.0
+
+This version enables by default an initContainer that modifies some kernel settings to meet the Elasticsearch requirements.
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+You can disable the initContainer using the `elasticsearch.sysctlImage.enabled=false` parameter.
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt
new file mode 100755
index 0000000000000000000000000000000000000000..0576c2afbe322a11dc92122c815701a982fbbdaf
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt
@@ -0,0 +1,55 @@
+{{- if or (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+######################################################################################################
+### ERROR: You did not provide the Elasticsearch external host or port in your 'helm install' call ###
+######################################################################################################
+
+Complete your Kibana deployment by running:
+
+  helm upgrade {{ .Release.Name }} bitnami/kibana \
+    --set elasticsearch.hosts[0]=YOUR_ES_HOST,elasticsearch.port=YOUR_ES_PORT
+
+Replacing "YOUR_ES_HOST" and "YOUR_ES_PORT" placeholders by the proper values of your Elasticsearch deployment.
+
+{{- else -}}
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kibana.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kibana.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kibana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kibana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward svc/{{ include "kibana.fullname" . }} 8080:{{ .Values.service.port }}
+{{- end }}
+
+{{- if or .Values.ingress.enabled (contains "NodePort" .Values.service.type) (contains "LoadBalancer" .Values.service.type) }}
+
+WARNING: Kibana is externally accessible from the cluster but the dashboard does not contain authentication mechanisms. Make sure you follow the authentication guidelines in your Elastic stack.
++info https://www.elastic.co/guide/en/elastic-stack-overview/current/setting-up-authentication.html
+{{- end }}
+
+{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+
+WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+WARNING: For Prometheus metrics to work, make sure that the kibana-prometheus-exporter plugin is installed:
++info https://github.com/pjhampton/kibana-prometheus-exporter
+{{- end }}
+
+{{ include "kibana.validateValues" . }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl
new file mode 100755
index 0000000000000000000000000000000000000000..d577a70643703e3f006a9664e443672d1962c8fe
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl
@@ -0,0 +1,274 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kibana.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kibana.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "kibana.imagePullSecrets" -}}
+{{- $imagePullSecrets := coalesce .Values.global.imagePullSecrets .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets -}}
+{{- if $imagePullSecrets }}
+imagePullSecrets:
+{{- range $imagePullSecrets }}
+  - name: {{ . }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if the deployment should include dashboards
+*/}}
+{{- define "kibana.importSavedObjects" -}}
+{{- if or .Values.savedObjects.configmap .Values.savedObjects.urls }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kibana image name
+*/}}
+{{- define "kibana.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kibana.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch URL.
+*/}}
+{{- define "kibana.elasticsearch.url" -}}
+{{- if .Values.elasticsearch.hosts -}}
+{{- $totalHosts := len .Values.elasticsearch.hosts -}}
+{{- range $i, $hostTemplate := .Values.elasticsearch.hosts -}}
+{{- $host := tpl $hostTemplate $ }}
+{{- printf "http://%s:%s" $host (include "kibana.elasticsearch.port" $) -}}
+{{- if (lt ( add1 $i ) $totalHosts ) }}{{- printf "," -}}{{- end }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch Port.
+*/}}
+{{- define "kibana.elasticsearch.port" -}}
+{{- .Values.elasticsearch.port -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch PVC.
+*/}}
+{{- define "kibana.pvc" -}}
+{{- .Values.persistence.existingClaim | default (include "kibana.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts Secret name.
+*/}}
+{{- define "kibana.initScriptsSecret" -}}
+{{- printf "%s" (tpl .Values.initScriptsSecret $) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts configmap name.
+*/}}
+{{- define "kibana.initScriptsCM" -}}
+{{- printf "%s" (tpl .Values.initScriptsCM $) -}}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "kibana.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the saved objects configmap name.
+*/}}
+{{- define "kibana.savedObjectsCM" -}}
+{{- printf "%s" (tpl .Values.savedObjects.configmap $) -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch Port.
+*/}}
+{{- define "kibana.configurationCM" -}}
+{{- .Values.configurationCM | default (printf "%s-conf" (include "kibana.fullname" .)) -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "kibana.labels" -}}
+app.kubernetes.io/name: {{ include "kibana.name" . }}
+helm.sh/chart: {{ include "kibana.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Match labels
+*/}}
+{{- define "kibana.matchLabels" -}}
+app.kubernetes.io/name: {{ include "kibana.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Return  the proper Storage Class
+*/}}
+{{- define "kibana.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
+*/}}
+{{- if .Values.global -}}
+    {{- if .Values.global.storageClass -}}
+        {{- if (eq "-" .Values.global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .Values.persistence.storageClass -}}
+              {{- if (eq "-" .Values.persistence.storageClass) -}}
+                  {{- printf "storageClassName: \"\"" -}}
+              {{- else }}
+                  {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+              {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .Values.persistence.storageClass -}}
+        {{- if (eq "-" .Values.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "kibana.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "kibana.validateValues.noElastic" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.configConflict" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.extraVolumes" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - must provide an ElasticSearch */}}
+{{- define "kibana.validateValues.noElastic" -}}
+{{- if and (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+kibana: no-elasticsearch
+    You did not specify an external Elasticsearch instance.
+    Please set elasticsearch.hosts and elasticsearch.port
+{{- else if and (not .Values.elasticsearch.hosts) .Values.elasticsearch.port }}
+kibana: missing-es-settings-host
+    You specified the external Elasticsearch port but not the host. Please
+    set elasticsearch.hosts
+{{- else if and .Values.elasticsearch.hosts (not .Values.elasticsearch.port) }}
+kibana: missing-es-settings-port
+    You specified the external Elasticsearch hosts but not the port. Please
+    set elasticsearch.port
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - configuration conflict */}}
+{{- define "kibana.validateValues.configConflict" -}}
+{{- if and (.Values.extraConfiguration) (.Values.configurationCM) -}}
+kibana: conflict-configuration
+    You specified a ConfigMap with kibana.yml and a set of settings to be added
+    to the default kibana.yml. Please only set either extraConfiguration or configurationCM
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - Incorrect extra volume settings */}}
+{{- define "kibana.validateValues.extraVolumes" -}}
+{{- if and (.Values.extraVolumes) (not .Values.extraVolumeMounts) -}}
+kibana: missing-extra-volume-mounts
+    You specified extra volumes but not mount points for them. Please set
+    the extraVolumeMounts value
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "kibana.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..d77084bbac41a2e8a503197fdac446a2c23d17ce
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml
@@ -0,0 +1,16 @@
+{{- if and (not .Values.configurationCM) (and .Values.elasticsearch.hosts .Values.elasticsearch.port) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-conf
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  kibana.yml: |
+    pid.file: /opt/bitnami/kibana/tmp/kibana.pid
+    server.host: 0.0.0.0
+    server.port: 5601
+    elasticsearch.hosts: [{{ include "kibana.elasticsearch.url" . }}]
+    {{- if .Values.extraConfiguration }}
+    {{- tpl (toYaml .Values.extraConfiguration) $ | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..647584d1b270f7ef6f8f2d321ae1cbfc919ea6dd
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml
@@ -0,0 +1,188 @@
+{{- if and .Values.elasticsearch.hosts .Values.elasticsearch.port -}}
+apiVersion: {{ template "kibana.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  {{- if .Values.updateStrategy }}
+  strategy: {{- tpl (toYaml .Values.updateStrategy) $ | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "kibana.matchLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels: {{- include "kibana.labels" . | nindent 8 }}
+    spec:
+    {{- if .Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName | quote }}
+    {{- end }}
+{{- include "kibana.imagePullSecrets" . | indent 6 }}
+    {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+    {{- end }}
+    {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }}
+      initContainers:
+      {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
+      - name: volume-permissions
+        image: "{{ template "kibana.volumePermissions.image" . }}"
+        imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+        command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kibana"]
+        securityContext:
+          runAsUser: 0
+        resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }}
+        volumeMounts:
+        - name: kibana-data
+          mountPath: /bitnami/kibana
+      {{- end }}
+      {{- if .Values.initContainers }}
+      {{- tpl (toYaml .Values.initContainers) $ | nindent 8 }}
+      {{- end }}
+    {{- end }}
+      containers:
+        - name: kibana
+          image: {{ include "kibana.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: KIBANA_ELASTICSEARCH_URL
+              value: {{ include "kibana.elasticsearch.url" . | quote }}
+            - name: KIBANA_ELASTICSEARCH_PORT
+              value: {{ include "kibana.elasticsearch.port" . | quote }}
+            - name: KIBANA_FORCE_INITSCRIPTS
+              value: {{ .Values.forceInitScripts | quote }}
+          {{- if .Values.extraEnvVars }}
+          {{- tpl (toYaml .Values.extraEnvVars) $ | nindent 12 }}
+          {{- end }}
+          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+          envFrom:
+          {{- if .Values.extraEnvVarsCM }}
+          - configMapRef:
+              name: {{ .Values.extraEnvVarsCM }}
+          {{- end }}
+          {{- if .Values.extraEnvVarsSecret }}
+          - secretRef:
+              name: {{ .Values.extraEnvVarsSecret }}
+          {{- end }}
+          {{- end }}
+          ports:
+            - name: http
+              containerPort: 5601
+              protocol: TCP
+          {{- if .Values.livenessProbe.enabled }}
+          livenessProbe:
+            httpGet:
+              path: {{ tpl .Values.healthCheckPathTemplate $ }}
+              port: http
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+          {{- end }}
+          {{- if .Values.readinessProbe.enabled }}
+          readinessProbe:
+            httpGet:
+              path: {{ tpl .Values.healthCheckPathTemplate $ }}
+              port: http
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{- tpl (toYaml .Values.resources) $ | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+          - name: kibana-data
+            mountPath: /bitnami/kibana
+          - name: kibana-config
+            mountPath: /bitnami/kibana/conf
+          {{- if .Values.plugins }}
+          - name: plugins-init-scripts
+            mountPath: /docker-entrypoint-initdb.d/plugin-install
+          {{- end }}
+          {{- if (include "kibana.importSavedObjects" .) }}
+          - name: saved-objects-init-scripts
+            mountPath: /docker-entrypoint-initdb.d/saved-objects-import
+          {{- end }}
+          {{- if .Values.savedObjects.configmap }}
+          - name: saved-objects-configmap
+            mountPath: /bitnami/kibana/saved-objects
+          {{- end }}
+          {{- if .Values.initScriptsCM }}
+          - name: custom-init-scripts-cm
+            mountPath: /docker-entrypoint-initdb.d/cm
+          {{- end }}
+          {{- if .Values.initScriptsSecret }}
+          - name: custom-init-scripts-secret
+            mountPath: /docker-entrypoint-initdb.d/secret
+          {{- end }}
+          {{- if .Values.extraVolumeMounts }}
+          {{- tpl (toYaml .Values.extraVolumeMounts) $ | nindent 6 }}
+          {{- end }}
+      {{- if .Values.sidecars }}
+      {{- tpl (toYaml .Values.sidecars) $ | nindent 8 }}
+      {{- end }}
+      volumes:
+        - name: kibana-data
+        {{- if .Values.persistence.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ include "kibana.pvc" . }}
+        {{- else }}
+          emptyDir: {}
+        {{ end }}
+        - name: kibana-config
+          configMap:
+            name: {{ include "kibana.configurationCM" . }}
+        {{- if (include "kibana.importSavedObjects" .) }}
+        - name: saved-objects-init-scripts
+          configMap:
+            name: {{ include "kibana.fullname" . }}-saved-objects
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.plugins }}
+        - name: plugins-init-scripts
+          configMap:
+            name: {{ include "kibana.fullname" . }}-plugins
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsCM }}
+        - name: custom-init-scripts-cm
+          configMap:
+            name: {{ template "kibana.initScriptsCM" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsSecret }}
+        - name: custom-init-scripts-secret
+          secret:
+            name: {{ template "kibana.initScriptsSecret" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.savedObjects.configmap }}
+        - name: saved-objects-configmap
+          configMap:
+            name: {{ template "kibana.savedObjectsCM" . }}
+        {{- end }}
+      {{- if .Values.extraVolumes }}
+      {{- tpl (toYaml .Values.extraVolumes) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector:
+      {{- tpl (toYaml .Values.nodeSelector) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.affinity }}
+      affinity:
+      {{- tpl (toYaml .Values.affinity) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.tolerations }}
+      tolerations:
+      {{- tpl (toYaml .Values.tolerations) $ | nindent 6 }}
+      {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..849a2d2cbe5ad24edeb88f1ad345a225fef84b97
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml
@@ -0,0 +1,40 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "kibana.fullname" . -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+  annotations:
+    {{- if .Values.ingress.certManager }}
+    kubernetes.io/tls-acme: "true"
+    {{- end }}
+    {{- range $key, $value := .Values.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range .Values.ingress.hosts }}
+    - host: "{{ .name }}"
+      http:
+        paths:
+          - path:  {{ tpl .path_template $ }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: http
+  {{- end }}
+  tls:
+  {{- range .Values.ingress.hosts }}
+  {{- if .tls }}
+    - hosts:
+    {{- if .tlsHosts }}
+      {{- range $host := .tlsHosts }}
+        - {{ $host }}
+      {{- end }}
+    {{- else }}
+        - "{{ .name }}"
+    {{- end }}
+      secretName: {{ .tlsSecret }}
+  {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..a1128ac0557438ec509e20607ea89b74a8978b32
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml
@@ -0,0 +1,18 @@
+{{- if .Values.plugins -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-plugins
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  install-plugins.sh: |
+    #!/bin/bash
+    echo "==> Plugin installation"
+    {{- $totalPlugins := len .Values.plugins }}
+    echo "Total plugins defined in chart installation: {{ $totalPlugins }}"
+    {{- range $i, $plugin := .Values.plugins }}
+    echo "Installing plugin {{ add $i 1 }} out of {{ $totalPlugins }}: {{ $plugin }}"
+    kibana-plugin install "{{ $plugin }}"
+    {{- end }}
+    echo "==> End of Plugin installation"
+{{- end -}}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1a6424e0f3a4e94951ff545eb846361c45cd7408
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+  {{ include "kibana.storageClass" . }}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..3f451aa0675b605a3fc52a1fe6cbb844404a1276
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml
@@ -0,0 +1,38 @@
+{{- if (include "kibana.importSavedObjects" .) -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-saved-objects
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  import-saved-objects.sh: |
+    #!/bin/bash
+    echo "==> Saved objects import"
+    {{- if .Values.savedObjects.urls }}
+    {{- $totalURLs := len .Values.savedObjects.urls }}
+    echo "Total saved objects NDJSON URLs to import: {{ $totalURLs }}"
+    {{- range $i, $url := .Values.savedObjects.urls }}
+    echo "Importing saved objects from NDJSON in url {{ add $i 1 }} out of {{ $totalURLs }}: {{ $url }}"
+    download_tmp_file="$(mktemp)"
+    curl "{{$url}}" > "${download_tmp_file}.ndjson"
+    curl -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/saved_objects/_import -H 'kbn-xsrf:true' --form file=@${download_tmp_file}.ndjson
+    {{- end }}
+    {{- end }}
+    {{- if .Values.savedObjects.configmap }}
+    echo "Searching for dashboard NDJSON files from ConfigMap mounted in /bitnami/kibana/saved-objects"
+    ndjson_file_list_tmp="$(mktemp)"
+    find /bitnami/kibana/saved-objects -type f -regex ".*\.ndjson" > $ndjson_file_list_tmp
+    while read -r f; do
+        case "$f" in
+            *.ndjson)
+                echo "Importing $f"
+                curl -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/saved_objects/_import -H 'kbn-xsrf:true' --form file=@${f}
+                ;;
+            *)
+                echo "Ignoring $f"
+                ;;
+        esac
+    done < $ndjson_file_list_tmp
+    {{- end }}
+    echo "==> End of Saved objects import"
+{{- end -}}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..035680b333ce4bc56f3496eda1d9e1dc0cb82db0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+{{- if or (and .Values.metrics.enabled .Values.metrics.service.annotations) .Values.service.annotations }}
+  annotations:
+  {{- if and .Values.metrics.enabled .Values.metrics.service.annotations }}
+    {{- tpl (toYaml .Values.metrics.service.annotations) $ | nindent 4 }}
+  {{- end }}
+  {{- if .Values.service.annotations }}
+    {{- tpl (toYaml .Values.service.annotations) $ | nindent 4 }}
+  {{- end }}
+{{- end }}
+
+spec:
+  type: {{ .Values.service.type }}
+  {{- if eq .Values.service.type "LoadBalancer" }}
+  {{- if .Values.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  {{- end }}
+  {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.service.port }}
+      targetPort: http
+      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)))}}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+{{- if .Values.service.extraPorts }}
+  {{- tpl (toYaml .Values.service.extraPorts) $ | nindent 4 }}
+{{- end }}
+  selector: {{- include "kibana.matchLabels" . | nindent 4 }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..6aa4952237feca05414875b6cb0729ccd1b6a14e
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- end }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+    {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels: {{- include "kibana.matchLabels" . | nindent 6 }}
+  endpoints:
+  - port: http
+    path: "_prometheus/metrics"
+    {{- if .Values.metrics.serviceMonitor.interval }}
+    interval: {{ .Values.metrics.serviceMonitor.interval }}
+    {{- end }}
+    {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+    scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+    {{- end }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..413c7956c682bfd30059d795fb8512b95a2432ca
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "kibana.fullname" . }}-test-connection"
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: bitnami/minideb
+      command: ['wget']
+      args: ['{{ include "kibana.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..b71b1ffcf2b388c1478e2513359d2ad20f919fe0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml
@@ -0,0 +1,344 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+global: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Kibana image version
+## ref: https://hub.docker.com/r/bitnami/kibana/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/kibana
+  tag: 7.6.1-debian-10-r8
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## String to partially override kibana.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override kibana.fullname template
+##
+# fullnameOverride:
+
+## Number of Kibana Pod replicas
+##
+replicaCount: 1
+
+## Set up update strategy for Kibana installation. Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the pods are destroyed first.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+## Example:
+# updateStrategy:
+#  type: RollingUpdate
+#  rollingUpdate:
+#    maxSurge: 25%
+#    maxUnavailable: 25%
+updateStrategy:
+  type: RollingUpdate
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## List of plugins to install
+##
+plugins:
+# - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+
+## Saved objects to import (NDJSON format)
+##
+savedObjects:
+  ## List of saved objects URLs
+  urls:
+  # - www.example.com/dashboard.ndjson
+  ## ConfigMap with saved objects
+  configmap:
+
+## Extra configuration settings
+##
+# extraConfiguration:
+
+## Configuration ConfigMap (for kibana.yml)
+##
+# configurationCM:
+
+## An array to add extra env vars
+## For example:
+## extraEnvVars:
+##  - name: KIBANA_ELASTICSEARCH_URL
+##    value: test
+##
+# extraEnvVars:
+
+## Array to add extra configmaps:
+##
+## extraEnvVarsCM:
+
+## Array to add extra configmaps:
+##
+## extraEnvVarsSecret:
+
+## Array to add extra volumes
+##
+## extraVolumes:
+
+## Array to add extra mounts (normally used with extraVolumes)
+##
+## extraVolumeMounts: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+##
+persistence:
+  enabled: true
+  ## wordpress data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  ##
+  ## If you want to reuse an existing claim, you can pass the name of the PVC using
+  ## the existingClaim variable
+  # existingClaim: your-claim
+  accessMode: ReadWriteOnce
+  size: 10Gi
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Force execution of init scripts
+##
+forceInitScripts: false
+
+## Configmap with init scripts to execute
+##
+# initScriptsCM:
+
+## Secret with init scripts to execute (for sensitive data)
+##
+# initScriptsSecret:
+
+## Service configuration
+##
+service:
+  port: 80
+  type: ClusterIP
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Enable client source IP preservation
+  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+
+  ## loadBalancerIP for the PrestaShop Service (optional, cloud specific)
+  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  # loadBalancerIP:
+  ## Extra ports to expose (normally used with the `sidecar` value)
+  # extraPorts:
+
+## Configure the ingress resource that allows you to access the
+## Kibana web. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Set to true to enable ingress record generation
+  enabled: false
+
+  ## Set this to true in order to add the corresponding annotations for cert-manager
+  certManager: false
+
+  ## Ingress annotations done as key:value pairs
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ##
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+
+  ## The list of hostnames to be covered with this ingress record.
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+  hosts:
+    - name: kibana.local
+      path: /
+
+      ## Set this to true in order to enable TLS on the ingress record
+      tls: false
+
+      ## Optionally specify the TLS hosts for the ingress record
+      ## Useful when the Ingress controller supports www-redirection
+      ## If not specified, the above host name will be used
+      # tlsHosts:
+      #   - www.kibana.local
+      #   - kibana.local
+
+      ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+      tlsSecret: kibana.local-tls
+
+## SecurityContext configuration
+##
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+  runAsNonRoot: true
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Example:
+## resources:
+##   requests:
+##     memory: 512Mi
+##     cpu: 300m
+##
+# resources:
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## Add sidecars to the pod
+##
+sidecars:
+## e.g.
+# - name: your-image-name
+# image: your-image
+# imagePullPolicy: Always
+# ports:
+# - name: portname
+#   containerPort: 1234
+
+## Add init containers to the pod
+##
+initContainers:
+## e.g.
+# - name: your-image-name
+# image: your-image
+# imagePullPolicy: Always
+# ports:
+# - name: portname
+#   containerPort: 1234
+
+## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
+##
+metrics:
+  enabled: true
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "80"
+      prometheus.io/path: "_prometheus/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Properties for Elasticsearch
+##
+elasticsearch:
+  hosts:
+  # - elasticsearch-1
+  # - elasticsearch-2
+  port:
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..5d5e8f04020c8a89823d658b508859f699e8c55a
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml
@@ -0,0 +1,346 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+global: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Kibana image version
+## ref: https://hub.docker.com/r/bitnami/kibana/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/kibana
+  tag: 7.6.1-debian-10-r8
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## String to partially override kibana.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override kibana.fullname template
+##
+# fullnameOverride:
+
+## Number of Kibana Pod replicas
+##
+replicaCount: 1
+
+healthCheckPathTemplate: "/"
+
+## Set up update strategy for Kibana installation. Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the pods are destroyed first.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+## Example:
+# updateStrategy:
+#  type: RollingUpdate
+#  rollingUpdate:
+#    maxSurge: 25%
+#    maxUnavailable: 25%
+updateStrategy:
+  type: RollingUpdate
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## List of plugins to install
+##
+plugins:
+# - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+
+## Saved objects to import (NDJSON format)
+##
+savedObjects:
+  ## List of saved objects URLs
+  urls:
+  # - www.example.com/dashboard.ndjson
+  ## ConfigMap with saved objects
+  configmap:
+
+## Extra configuration settings
+##
+# extraConfiguration:
+
+## Configuration ConfigMap (for kibana.yml)
+##
+# configurationCM:
+
+## An array to add extra env vars
+## For example:
+## extraEnvVars:
+##  - name: KIBANA_ELASTICSEARCH_URL
+##    value: test
+##
+# extraEnvVars:
+
+## Array to add extra configmaps:
+##
+## extraEnvVarsCM:
+
+## Array to add extra configmaps:
+##
+## extraEnvVarsSecret:
+
+## Array to add extra volumes
+##
+## extraVolumes:
+
+## Array to add extra mounts (normally used with extraVolumes)
+##
+## extraVolumeMounts: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+##
+persistence:
+  enabled: true
+  ## wordpress data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  ##
+  ## If you want to reuse an existing claim, you can pass the name of the PVC using
+  ## the existingClaim variable
+  # existingClaim: your-claim
+  accessMode: ReadWriteOnce
+  size: 10Gi
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Force execution of init scripts
+##
+forceInitScripts: false
+
+## Configmap with init scripts to execute
+##
+# initScriptsCM:
+
+## Secret with init scripts to execute (for sensitive data)
+##
+# initScriptsSecret:
+
+## Service configuration
+##
+service:
+  port: 80
+  type: ClusterIP
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Enable client source IP preservation
+  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+
+  ## loadBalancerIP for the PrestaShop Service (optional, cloud specific)
+  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  # loadBalancerIP:
+  ## Extra ports to expose (normally used with the `sidecar` value)
+  # extraPorts:
+
+## Configure the ingress resource that allows you to access the
+## Kibana web. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Set to true to enable ingress record generation
+  enabled: false
+
+  ## Set this to true in order to add the corresponding annotations for cert-manager
+  certManager: false
+
+  ## Ingress annotations done as key:value pairs
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ##
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+
+  ## The list of hostnames to be covered with this ingress record.
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+  hosts:
+    - name: kibana.local
+      path: /
+
+      ## Set this to true in order to enable TLS on the ingress record
+      tls: false
+
+      ## Optionally specify the TLS hosts for the ingress record
+      ## Useful when the Ingress controller supports www-redirection
+      ## If not specified, the above host name will be used
+      # tlsHosts:
+      #   - www.kibana.local
+      #   - kibana.local
+
+      ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+      tlsSecret: kibana.local-tls
+
+## SecurityContext configuration
+##
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+  runAsNonRoot: true
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Example:
+## resources:
+##   requests:
+##     memory: 512Mi
+##     cpu: 300m
+##
+# resources:
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## Add sidecars to the pod
+##
+sidecars:
+## e.g.
+# - name: your-image-name
+# image: your-image
+# imagePullPolicy: Always
+# ports:
+# - name: portname
+#   containerPort: 1234
+
+## Add init containers to the pod
+##
+initContainers:
+## e.g.
+# - name: your-image-name
+# image: your-image
+# imagePullPolicy: Always
+# ports:
+# - name: portname
+#   containerPort: 1234
+
+## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
+##
+metrics:
+  enabled: false
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "80"
+      prometheus.io/path: "_prometheus/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Properties for Elasticsearch
+##
+elasticsearch:
+  hosts:
+  # - elasticsearch-1
+  # - elasticsearch-2
+  port:
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock
new file mode 100755
index 0000000000000000000000000000000000000000..6416920e50b356ada51c9fb7c6edf99959135859
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: kibana
+  repository: https://charts.bitnami.com/bitnami
+  version: 5.0.11
+digest: sha256:4970b5ac3743b773c6608e77e28eb0928d45c3379bbe6660a35d8d4ef07613df
+generated: "2020-03-26T01:45:21.876314703Z"
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..17d1dfff1679eb0d9ddf96d0155a0ffe186bd6ac
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml
@@ -0,0 +1,5 @@
+dependencies:
+  - name: kibana
+    version: 5.x.x
+    repository: https://charts.bitnami.com/bitnami
+    condition: global.kibanaEnabled
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt
new file mode 100755
index 0000000000000000000000000000000000000000..3fba2e4119b2d737fbc2e28244a22cee7608f205
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt
@@ -0,0 +1,100 @@
+{{- if contains .Values.coordinating.service.type "LoadBalancer" }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "coordinating.service.type=LoadBalancer" you have most likely
+    exposed the Elasticsearch service externally.
+
+    Please note that Elasticsearch does not implement an authentication
+    mechanism to secure your cluster. For security reasons, we strongly
+    suggest that you switch to "ClusterIP" or "NodePort".
+-------------------------------------------------------------------------------
+{{- end }}
+{{- if not .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    Elasticsearch requires some changes in the kernel of the host machine to
+    work as expected. If those values are not set in the underlying operating
+    system, the ES containers fail to boot with ERROR messages.
+
+    To check whether the host machine meets the requirements, run the command
+    below:
+
+      kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \
+        pods -l app={{ template "elasticsearch.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
+	elasticsearch
+
+    You can adapt the Kernel parameters on your cluster as described in the
+    official documentation:
+
+      https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
+
+    As an alternative, you can specify "sysctlImage.enabled=true" to use a
+    privileged initContainer to change those settings in the Kernel:
+
+      helm upgrade {{ .Release.Name }} bitnami/elasticsearch \
+        --set sysctlImage.enabled=true
+
+{{- else if .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    Elasticsearch requires some changes in the kernel of the host machine to
+    work as expected. If those values are not set in the underlying operating
+    system, the ES containers fail to boot with ERROR messages.
+
+    More information about these requirements can be found in the links below:
+
+      https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html
+      https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
+
+    This chart uses a privileged initContainer to change those settings in the Kernel
+    by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
+
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.curator.enabled }}
+
+  A CronJob will run with schedule {{ .Values.curator.cronjob.schedule }}.
+
+  The Jobs will not be removed automagically when deleting this Helm chart.
+  To remove these jobs, run the following:
+
+    kubectl -n {{ .Release.Namespace }} delete job -l app={{ template "elasticsearch.name" . }},role=curator
+
+{{- end }}
+
+  Elasticsearch can be accessed within the cluster on port {{ .Values.coordinating.service.port }} at {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+  To access from outside the cluster execute the following commands:
+
+{{- if contains "NodePort" .Values.coordinating.service.type }}
+
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.coordinating.fullname" . }})
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    curl http://$NODE_IP:$NODE_PORT/
+{{- else if contains "LoadBalancer" .Values.coordinating.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "elasticsearch.coordinating.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.coordinating.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+    curl http://$SERVICE_IP:{{ .Values.coordinating.service.port }}/
+{{- else if contains "ClusterIP"  .Values.coordinating.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "elasticsearch.coordinating.fullname" . }} {{ .Values.coordinating.service.port }}:9200 &
+    curl http://127.0.0.1:9200/
+{{- end }}
+
+{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+
+WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl
new file mode 100755
index 0000000000000000000000000000000000000000..42ef48431cd7e82729a6de40c42302096f34b3b0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl
@@ -0,0 +1,407 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "elasticsearch.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "elasticsearch.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "elasticsearch.labels" -}}
+app: {{ include "elasticsearch.name" . }}
+chart: {{ include "elasticsearch.chart" . }}
+release: {{ .Release.Name }}
+heritage: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "elasticsearch.matchLabels" -}}
+app: {{ include "elasticsearch.name" . }}
+release: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Return the proper ES image name
+*/}}
+{{- define "elasticsearch.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified master name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.master.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.master.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified ingest name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.ingest.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.ingest.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified discovery name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.discovery.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.discovery.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified coordinating name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.coordinating.fullname" -}}
+{{- if .Values.global.kibanaEnabled -}}
+{{- printf "%s-%s" .Release.Name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified data name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.data.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.data.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+ Create the name of the master service account to use
+ */}}
+{{- define "elasticsearch.master.serviceAccountName" -}}
+{{- if .Values.master.serviceAccount.create -}}
+    {{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.master.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the coordinating-only service account to use
+ */}}
+{{- define "elasticsearch.coordinating.serviceAccountName" -}}
+{{- if .Values.coordinating.serviceAccount.create -}}
+    {{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.coordinating.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the data service account to use
+ */}}
+{{- define "elasticsearch.data.serviceAccountName" -}}
+{{- if .Values.data.serviceAccount.create -}}
+    {{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.data.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified metrics name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.metrics.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.metrics.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper ES exporter image name
+*/}}
+{{- define "elasticsearch.metrics.image" -}}
+{{- $registryName := .Values.metrics.image.registry -}}
+{{- $repositoryName := .Values.metrics.image.repository -}}
+{{- $tag := .Values.metrics.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper sysctl image name
+*/}}
+{{- define "elasticsearch.sysctl.image" -}}
+{{- $registryName := .Values.sysctlImage.registry -}}
+{{- $repositoryName := .Values.sysctlImage.repository -}}
+{{- $tag := .Values.sysctlImage.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "elasticsearch.imagePullSecrets" -}}
+{{- if .Values.global }}
+{{- if .Values.global.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.global.imagePullSecrets }}
+  - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- else }}
+{{- $imagePullSecrets := coalesce .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.curator.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets -}}
+{{- if $imagePullSecrets }}
+imagePullSecrets:
+{{- range $imagePullSecrets }}
+  - name: {{ . }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "elasticsearch.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
+Usage:
+{{ include "elasticsearch.storageClass" (dict "global" .Values.global "local" .Values.master) }}
+*/}}
+{{- define "elasticsearch.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
+*/}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- if (eq "-" .global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .local.persistence.storageClass -}}
+              {{- if (eq "-" .local.persistence.storageClass) -}}
+                  {{- printf "storageClassName: \"\"" -}}
+              {{- else }}
+                  {{- printf "storageClassName: %s" .local.persistence.storageClass -}}
+              {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .local.persistence.storageClass -}}
+        {{- if (eq "-" .local.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .local.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for cronjob APIs.
+*/}}
+{{- define "cronjob.apiVersion" -}}
+{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "batch/v2alpha1" }}
+{{- else if semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "batch/v1beta1" }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for podsecuritypolicy.
+*/}}
+{{- define "podsecuritypolicy.apiVersion" -}}
+{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1beta1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "elasticsearch.curator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}-curator
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "elasticsearch.curator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.curator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "elasticsearch.curator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "elasticsearch.curator.serviceAccountName" -}}
+{{- if .Values.curator.serviceAccount.create -}}
+    {{ default (include "elasticsearch.curator.fullname" .) .Values.curator.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.curator.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper ES curator image name
+*/}}
+{{- define "elasticsearch.curator.image" -}}
+{{- $registryName := .Values.curator.image.registry -}}
+{{- $repositoryName := .Values.curator.image.repository -}}
+{{- $tag := .Values.curator.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "elasticsearch.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "elasticsearch.tplValue" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..7fc7122ceeff720b9db61123fb6397d60d640edb
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.curator.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.curator.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+data:
+  action_file.yml: {{ required "A valid .Values.curator.configMaps.action_file_yml entry is required!" (toYaml .Values.curator.configMaps.action_file_yml | indent 2) }}
+  config.yml: {{ required "A valid .Values.curator.configMaps.config_yml entry is required!" (tpl (toYaml .Values.curator.configMaps.config_yml | indent 2) $) }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..b6924c1621e9172805802e2da4414106c283d092
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml
@@ -0,0 +1,9 @@
+{{- if .Values.config }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+data:
+  elasticsearch.yml: |- {{- toYaml .Values.config | nindent 4 }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..a62b90fcf1d19324f0485de460e8065fb975812b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.sysctlImage.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.fullname" . }}-initcontainer
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+data:
+  sysctl.sh: |-
+    #!/bin/bash
+    
+    set -o errexit
+    set -o pipefail
+    set -o nounset
+
+    if ! [ -x "$(command -v sysctl)" ]; then
+      echo 'sysctl not installed. Installing it...'
+      distro=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+      case $distro in
+        ol | centos)
+          yum install -y procps
+          rm -rf /var/cache/yum;;
+        ubuntu | debian)
+          apt-get update -qq && apt-get install -y --no-install-recommends procps
+          rm -rf /var/lib/apt/lists /var/cache/apt/archives;;
+      esac
+    fi
+    sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..83a6a369c3d2b39465675e29c0930ae6b36cb521
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml
@@ -0,0 +1,137 @@
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.coordinating.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+spec:
+  strategy:
+    type: {{ .Values.coordinating.updateStrategy.type }}
+    {{- if (eq "Recreate" .Values.coordinating.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: coordinating-only
+  replicas: {{ .Values.coordinating.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: coordinating-only
+      {{- with .Values.coordinating.podAnnotations }}
+      annotations: {{- toYaml . | nindent 10 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.coordinating.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.coordinating.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.coordinating.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
+      {{- if .Values.coordinating.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.coordinating.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.sysctlImage.enabled }}
+      ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
+      initContainers:
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          {{- if .Values.coordinating.securityContext.enabled }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          securityContext:
+            runAsUser: {{ .Values.coordinating.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.coordinating.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "coordinating"
+          ports:
+            - name: http
+              containerPort: 9200
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.coordinating.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.coordinating.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.coordinating.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.coordinating.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.coordinating.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.coordinating.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: http
+          {{- end }}
+          {{- if .Values.coordinating.readinessProbe.enabled}}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.coordinating.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.coordinating.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.coordinating.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.coordinating.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.coordinating.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: http
+          {{- end }}
+          {{- if .Values.coordinating.resources }}
+          resources: {{- toYaml .Values.coordinating.resources | nindent 12 }}
+          {{- end}}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: config
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data/"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
+        - name: data
+          emptyDir: {}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..3f4a516728a4715c7c5a75de089d2c39883950c1
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.coordinating.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.coordinating.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.coordinating.service.type | quote }}
+  {{- if and (eq .Values.coordinating.service.type "LoadBalancer") (not (empty .Values.coordinating.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.coordinating.service.port }}
+      targetPort: http
+      {{- if and (or (eq .Values.coordinating.service.type "NodePort") (eq .Values.coordinating.service.type "LoadBalancer")) (not (empty .Values.coordinating.service.nodePort)) }}
+      nodePort: {{ .Values.coordinating.service.nodePort }}
+      {{- else if eq .Values.coordinating.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: coordinating-only
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..78cd83ed211f90c131607d0870250f84d4cb46d2
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml
@@ -0,0 +1,108 @@
+{{- if .Values.curator.enabled }}
+apiVersion: {{ template "cronjob.apiVersion" . }}
+kind: CronJob
+metadata:
+  name: {{ template "elasticsearch.curator.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+  {{- if .Values.curator.cronjob.annotations }}
+  annotations: {{- toYaml .Values.curator.cronjob.annotations | indent 4 }}
+  {{- end }}
+spec:
+  schedule: "{{ .Values.curator.cronjob.schedule }}"
+  {{- with .Values.curator.cronjob.concurrencyPolicy }}
+  concurrencyPolicy: {{ . }}
+  {{- end }}
+  {{- with .Values.curator.cronjob.failedJobsHistoryLimit }}
+  failedJobsHistoryLimit: {{ . }}
+  {{- end }}
+  {{- with .Values.curator.cronjob.successfulJobsHistoryLimit }}
+  successfulJobsHistoryLimit: {{ . }}
+  {{- end }}
+  jobTemplate:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" . }}
+        release: {{ .Release.Name | quote }}
+    spec:
+      template:
+        metadata:
+          labels:
+            app: {{ template "elasticsearch.name" . }}
+            release: {{ .Release.Name | quote }}
+          {{- if .Values.curator.podAnnotations }}
+          annotations: {{- toYaml .Values.curator.podAnnotations | nindent 12 }}
+          {{- end }}
+        spec:
+          volumes:
+            - name: config-volume
+              configMap:
+                name: {{ template "elasticsearch.curator.fullname" . }}
+            {{- if .Values.curator.extraVolumes }}
+            {{- toYaml .Values.curator.extraVolumes | nindent 12 }}
+            {{- end }}
+          restartPolicy: {{ .Values.curator.cronjob.jobRestartPolicy }}
+          {{- if .Values.curator.priorityClassName }}
+          priorityClassName: {{ .Values.curator.priorityClassName | quote }}
+          {{- end }}
+{{- include "elasticsearch.imagePullSecrets" . | indent 10 }}
+          {{- if .Values.curator.extraInitContainers }}
+          initContainers:
+            {{- range $key, $value := .Values.curator.extraInitContainers }}
+            - name: "{{ $key }}"
+            {{- toYaml $value | nindent 14 }}
+            {{- end }}
+          {{- end }}
+          {{- if .Values.curator.rbac.enabled }}
+          serviceAccountName: {{ include "elasticsearch.curator.serviceAccountName" . }}
+          {{- end }}
+          {{- if .Values.curator.affinity }}
+          affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.affinity "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.nodeSelector }}
+          nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.nodeSelector "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.tolerations }}
+          tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.tolerations "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.securityContext }}
+          securityContext: {{- toYaml .Values.curator.securityContext | nindent 12 }}
+          {{- end }}
+          containers:
+            - name: {{ template "elasticsearch.curator.fullname" . }}
+              image: {{ template "elasticsearch.curator.image" . }}
+              imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }}
+              volumeMounts:
+                - name: config-volume
+                  mountPath: /etc/es-curator
+                {{- if .Values.curator.extraVolumeMounts }}
+                {{- toYaml .Values.curator.extraVolumeMounts | nindent 16 }}
+                {{- end }}
+              {{ if .Values.curator.command }}
+              command: {{ toYaml .Values.curator.command | nindent 16 }}
+              {{- end }}
+              {{- if .Values.curator.dryrun }}
+              args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+              {{- else }}
+              args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+              {{- end }}
+              env:
+                {{- if .Values.curator.env }}
+                {{- range $key,$value := .Values.curator.env }}
+                - name: {{ $key | upper | quote}}
+                  value: {{ $value | quote}}
+                {{- end }}
+                {{- end }}
+                {{- if .Values.curator.envFromSecrets }}
+                {{- range $key,$value := .Values.curator.envFromSecrets }}
+                - name: {{ $key | upper | quote}}
+                  valueFrom:
+                    secretKeyRef:
+                      name: {{ $value.from.secret | quote}}
+                      key: {{ $value.from.key | quote}}
+                {{- end }}
+                {{- end }}
+              {{- if .Values.curator.resources }}
+              resources: {{- toYaml .Values.curator.resources | nindent 16 }}
+              {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..53cbf685857df9fc9b4578251c26e5ba59190b1b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml
@@ -0,0 +1,175 @@
+apiVersion: {{ template "statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "elasticsearch.data.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: data
+spec:
+  updateStrategy:
+    type: {{ .Values.data.updateStrategy.type }}
+    {{- if (eq "OnDelete" .Values.data.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- else if .Values.data.updateStrategy.rollingUpdatePartition }}
+    rollingUpdate:
+      partition: {{ .Values.data.updateStrategy.rollingUpdatePartition }}
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: data
+  serviceName: {{ include "elasticsearch.data.fullname" . }}
+  replicas: {{ .Values.data.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: data
+      {{- with .Values.data.podAnnotations }}
+      annotations: {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.data.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.data.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.data.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }}
+      {{- if .Values.data.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.data.securityContext.fsGroup }}
+      {{- end }}
+      {{- if or .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }}
+      initContainers:
+        {{- if .Values.sysctlImage.enabled }}
+        ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+        {{- end }}
+        {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ include "elasticsearch.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.data.securityContext.runAsUser }}:{{ .Values.data.securityContext.fsGroup }} /bitnami/elasticsearch/data
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data"
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.data.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.data.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.data.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "data"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.data.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.data.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.data.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.data.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.data.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.data.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.data.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.data.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.data.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.data.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.data.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.data.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.data.resources }}
+          resources: {{- toYaml .Values.data.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: "config"
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: "data"
+              mountPath: "/bitnami/elasticsearch/data"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: "config"
+          configMap:
+            name: {{ template "elasticsearch.fullname" . }}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- if not .Values.data.persistence.enabled }}
+        - name: "data"
+          emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: "data"
+        {{- if .Values.data.persistence.annotations }}
+        annotations: {{- toYaml .Values.data.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes: {{- toYaml .Values.data.persistence.accessModes | nindent 10 }}
+        {{ $storage := dict "global" .Values.global "local" .Values.data }}
+        {{ include "elasticsearch.storageClass" $storage }}
+        resources:
+          requests:
+            storage: {{ .Values.data.persistence.size | quote }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..a852bdc64006c11da3241209ae6d1fb6bb6ee076
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.discovery.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+  annotations:
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None  # headless: DNS resolves to the individual pod IPs
+  ports:
+    - port: 9300
+      name: transport
+      targetPort: transport
+  publishNotReadyAddresses: true  # expose not-yet-ready pods so nodes can discover each other while bootstrapping
+  sessionAffinity: None
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..4d552b32a6b4d4845f48159c20ee932330c0c9a7
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml
@@ -0,0 +1,71 @@
+{{- if .Values.curator.enabled }}
+{{- range $kind, $enabled := .Values.curator.hooks }}
+{{- if $enabled }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "elasticsearch.curator.fullname" $ }}-curator-on-{{ $kind }}
+  labels:
+    app: {{ template "elasticsearch.name" $ }}
+    chart: {{ template "elasticsearch.chart" $ }}
+    heritage: {{ $.Release.Service | quote }}
+    release: {{ $.Release.Name | quote }}
+    role: "curator"
+  annotations:
+    "helm.sh/hook": post-{{ $kind }}
+    "helm.sh/hook-weight": "1"
+{{- if $.Values.cronjob.annotations }}
+{{ toYaml $.Values.cronjob.annotations | indent 4 }}
+{{- end }}
+spec:
+  template:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" $ }}
+        release: {{ $.Release.Name | quote }}
+{{- if $.Values.podAnnotations }}
+      annotations:
+{{ toYaml $.Values.podAnnotations | indent 8 }}
+{{- end }}
+    spec:
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ template "elasticsearch.curator.fullname" $ }}
+{{- if $.Values.curator.extraVolumes }}
+{{ toYaml $.Values.curator.extraVolumes | indent 8 }}
+{{- end }}
+      restartPolicy: Never
+{{- if $.Values.curator.priorityClassName }}
+      priorityClassName: "{{ $.Values.curator.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "elasticsearch.curator.fullname" $ }}
+          image: {{ template "elasticsearch.curator.image" $ }}
+          imagePullPolicy: {{ $.Values.curator.image.pullPolicy | quote }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/es-curator
+    {{- if $.Values.curator.extraVolumeMounts }}
+{{ toYaml $.Values.curator.extraVolumeMounts | indent 12 }}
+    {{- end }}
+          command: [ "curator" ]
+          args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+          resources:
+{{ toYaml $.Values.curator.resources | indent 12 }}
+    {{- with $.Values.curator.nodeSelector }}
+      nodeSelector:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with $.Values.curator.affinity }}
+      affinity:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with $.Values.curator.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+{{- end -}}
+{{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..69b3d75a0ded0636657923384e22b0a3e859ec75
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml
@@ -0,0 +1,131 @@
+{{- if .Values.ingest.enabled }}
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.ingest.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: ingest
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: ingest
+  replicas: {{ .Values.ingest.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: ingest
+      {{- with .Values.ingest.podAnnotations }}
+      annotations: {{- toYaml . | nindent 10 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.ingest.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.ingest.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.sysctlImage.enabled }}
+      ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
+      initContainers:
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.ingest.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.ingest.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.ingest.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "ingest"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.ingest.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.ingest.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.ingest.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.ingest.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.ingest.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.ingest.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.ingest.readinessProbe.enabled}}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.ingest.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.ingest.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.ingest.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.ingest.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.ingest.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end}}
+          {{- if .Values.ingest.resources }}
+          resources: {{- toYaml .Values.ingest.resources | nindent 12 }}
+          {{- end}}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: "config"
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: "data"
+              mountPath: "/bitnami/elasticsearch/data/"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
+        - name: data
+          emptyDir: {}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..eeb053dd7b96840d3f72736ca0763d08e4763638
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.ingest.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.ingest.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: ingest
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.ingest.service.type | quote }}
+  {{- if and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: transport
+      port: {{ .Values.ingest.service.port }}
+      targetPort: transport  # routes to the container port named "transport"
+      {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePort)) }}
+      nodePort: {{ .Values.ingest.service.nodePort }}
+      {{- else if eq .Values.ingest.service.type "ClusterIP" }}
+      nodePort: null  # render an explicit null so no nodePort is assigned for ClusterIP
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: ingest
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..109b9cb591eae0a4d0135758defbd668471374a2
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml
@@ -0,0 +1,179 @@
+apiVersion: {{ template "statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "elasticsearch.master.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+spec:
+  updateStrategy:
+    type: {{ .Values.master.updateStrategy.type }}
+    {{- if (eq "OnDelete" .Values.master.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: master
+  serviceName: {{ template "elasticsearch.master.fullname" . }}
+  podManagementPolicy: Parallel
+  replicas: {{ .Values.master.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: master
+      {{- with .Values.master.podAnnotations }}
+      annotations: {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.master.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.master.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.master.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }}
+      {{- if .Values.master.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.master.securityContext.fsGroup }}
+      {{- end }}
+      {{- if or .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }}
+      initContainers:
+        {{- if .Values.sysctlImage.enabled }}
+        ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors)
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+        {{- end }}
+        {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ include "elasticsearch.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} /bitnami/elasticsearch/data
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data"
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.master.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.master.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
+              {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
+              {{- $replicas := int .Values.master.replicas }}
+              value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
+            - name: ELASTICSEARCH_MINIMUM_MASTER_NODES
+              value: {{ add (div .Values.master.replicas 2) 1 | quote }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.master.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "master"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.master.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.master.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.master.resources }}
+          resources: {{- toYaml .Values.master.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: config
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: data
+              mountPath: /bitnami/elasticsearch/data
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- if not .Values.master.persistence.enabled }}
+        - name: "data"
+          emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: "data"
+        {{- if .Values.master.persistence.annotations }}
+        annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes: {{- toYaml .Values.master.persistence.accessModes | nindent 10 }}
+        {{ $storage := dict "global" .Values.global "local" .Values.master }}
+        {{ include "elasticsearch.storageClass" $storage  }}
+        resources:
+          requests:
+            storage: {{ .Values.master.persistence.size | quote }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..eec73578869833e49facf25da42d26b635b76cbf
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.master.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.master.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.master.service.type | quote }}
+  {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: transport
+      port: {{ .Values.master.service.port }}
+      targetPort: transport  # routes to the container port named "transport"
+      {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) (not (empty .Values.master.service.nodePort)) }}
+      nodePort: {{ .Values.master.service.nodePort }}
+      {{- else if eq .Values.master.service.type "ClusterIP" }}
+      nodePort: null  # render an explicit null so no nodePort is assigned for ClusterIP
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: master
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..04bbab65b5b23a9ebc71489283f4f54ec813c18f
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: metrics
+  replicas: 1
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: metrics
+      {{- with .Values.metrics.podAnnotations }}
+      annotations: {{ toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      containers:
+        - name: metrics
+          image: {{ include "elasticsearch.metrics.image" . }}
+          imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+          args:
+            - --es.uri=http://{{ template "elasticsearch.coordinating.fullname" . }}:{{ .Values.coordinating.service.port }}
+            - --es.all
+          ports:
+            - name: metrics
+              containerPort: 9114
+          livenessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: 60
+            timeoutSeconds: 5
+          readinessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: 5
+            timeoutSeconds: 1
+          {{- if .Values.metrics.resources }}
+          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+          {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1bde5fae7b955d2299f9201129778195f9faafe8
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.metrics.service.type }}
+  ports:
+    - name: metrics
+      port: 9114
+      targetPort: metrics  # routes to the container port named "metrics"
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: metrics
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..bc387440339f58b3991bbf36e1b225bd2a338d9d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.curator.enabled .Values.curator.psp.create }}
+apiVersion: {{ include "podsecuritypolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+spec:
+  privileged: true  # NOTE(review): privileged=true sits oddly next to MustRunAsNonRoot below — confirm the curator job really needs it
+  #requiredDropCapabilities:
+  volumes:
+    - 'configMap'
+    - 'secret'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    # Require the container to run without root privileges.
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      # Forbid adding the root group.
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..f0da273f12b084b3e154457c4fe08fa879070018
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+    component: elasticsearch-curator-configmap
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["update", "patch"]
+  {{- if .Values.curator.psp.create }}
+  - apiGroups: ["extensions"]
+    resources: ["podsecuritypolicies"]
+    verbs: ["use"]
+    resourceNames:
+      - {{ include "elasticsearch.curator.fullname" . }}
+  {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..fc9060b063713d5b704d05068e8dfbb52b558a7c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+    component: elasticsearch-curator-configmap
+roleRef:
+  kind: Role
+  name: {{ template "elasticsearch.curator.name" . }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "elasticsearch.curator.serviceAccountName" . }}
+    namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..588cadd87c6ff544302678d95f6c069eb5be22f6
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml
@@ -0,0 +1,35 @@
+{{- if and .Values.curator.enabled .Values.curator.serviceAccount.create .Values.curator.rbac.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "elasticsearch.curator.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+{{- end }}
+---
+{{- if .Values.data.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.data.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: data
+{{- end }}
+---
+{{- if .Values.master.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.master.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+{{- end }}
+---
+{{- if .Values.coordinating.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..4a4ed799d9f389b5542d1fb21e6aab002026522d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml
@@ -0,0 +1,29 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- end }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+    {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: metrics
+  endpoints:
+    - port: metrics
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..ba838ec547f807ab6acac3896f885e450f4ba60b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml
@@ -0,0 +1,786 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  # imageRegistry: myRegistryName
+  # imagePullSecrets:
+  #   - myRegistryKeySecretName
+  # storageClass: myStorageClass
+  ## Coordinating name to be used in the Kibana subchart (service name)
+  ##
+  coordinating:
+    name: coordinating-only
+  kibanaEnabled: true
+
+## Bitnami Elasticsearch image version
+## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/elasticsearch
+  tag: 7.6.1-debian-10-r22
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information on logs
+  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override elasticsearch.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override elasticsearch.fullname template
+##
+# fullnameOverride:
+
+## Bitnami Minideb image version
+## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+##
+sysctlImage:
+  enabled: false
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Cluster domain
+##
+clusterDomain: cluster.local
+
+## Elasticsearch cluster name
+##
+name: elastic
+
+## Elasticsearch discovery node parameters
+##
+discovery:
+  name: discovery
+
+## Comma, semi-colon or space separated list of plugins to install at initialization
+## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables
+##
+# plugins:
+
+## Customize elasticsearch configuration
+## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+##
+# config:
+
+## extraVolumes and extraVolumeMounts allows you to mount other volumes
+## Example Use Case: mount ssl certificates when elasticsearch has tls enabled
+# extraVolumes:
+#   - name: es-certs
+#     secret:
+#       defaultMode: 420
+#       secretName: es-certs
+# extraVolumeMounts:
+#   - name: es-certs
+#     mountPath: /certs
+#     readOnly: true
+
+## Elasticsearch master-eligible node parameters
+##
+master:
+  name: master
+  ## Number of master-eligible node(s) replicas to deploy
+  ##
+  replicas: 3
+
+  ## updateStrategy for ElasticSearch master statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for master-eligible pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for master-eligible pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch master-eligible container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch master-eligible container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim, If false, use emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+
+  ## Service parameters for master-eligible node(s)
+  ##
+  service:
+    ## master-eligible service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the master node
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    # name:
+
+## Elasticsearch coordinating-only node parameters
+##
+coordinating:
+  ## Number of coordinating-only node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch coordinating deployment
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for the coordinating-only pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for coordinating-only pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch coordinating-only container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch coordinating-only container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Service parameters for coordinating-only node(s)
+  ##
+  service:
+    ## coordinating-only service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch REST API port
+    ##
+    port: 9200
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the coordinating node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch data node parameters
+##
+data:
+  name: data
+  ## Number of data node(s) replicas to deploy
+  ##
+  replicas: 3
+  ## updateStrategy for ElasticSearch Data statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+    # rollingUpdatePartition
+  heapSize: 128m
+  ## Provide annotations for the data pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for data pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch data container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 1152Mi
+  ## Elasticsearch data container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim, If false, use emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the data node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch ingest node parameters
+##
+ingest:
+  enabled: true
+  name: ingest
+  ## Number of ingest node(s) replicas to deploy
+  ##
+  replicas: 2
+  heapSize: 128m
+  ## Provide annotations for the ingest pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for ingest pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch ingest container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch ingest container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Service parameters for ingest node(s)
+  ##
+  service:
+    ## ingest service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+
+## Elasticsearch curator parameters
+##
+curator:
+  enabled: false
+  name: curator
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-curator
+    tag: 5.8.1-debian-10-r58
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  cronjob:
+    # At 01:00 every day
+    schedule: "0 1 * * *"
+    annotations: {}
+    concurrencyPolicy: ""
+    failedJobsHistoryLimit: ""
+    successfulJobsHistoryLimit: ""
+    jobRestartPolicy: Never
+
+  podAnnotations: {}
+
+  rbac:
+    # Specifies whether RBAC should be enabled
+    enabled: false
+
+  serviceAccount:
+    # Specifies whether a ServiceAccount should be created
+    create: true
+    # The name of the ServiceAccount to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name:
+
+  psp:
+    # Specifies whether a podsecuritypolicy should be created
+    create: false
+
+  hooks:
+    install: false
+    upgrade: false
+
+  # run curator in dry-run mode
+  dryrun: false
+
+  command: ["curator"]
+  env: {}
+
+  configMaps:
+    # Delete indices older than 90 days
+    action_file_yml: |-
+      ---
+      actions:
+        1:
+          action: delete_indices
+          description: "Clean up ES by deleting old indices"
+          options:
+            timeout_override:
+            continue_if_exception: False
+            disable_action: False
+            ignore_empty_list: True
+          filters:
+          - filtertype: age
+            source: name
+            direction: older
+            timestring: '%Y.%m.%d'
+            unit: days
+            unit_count: 90
+            field:
+            stats_result:
+            epoch:
+            exclude: False
+    # Default config (this value is evaluated as a template)
+    config_yml: |-
+      ---
+      client:
+        hosts:
+          - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+        port: {{ .Values.coordinating.service.port }}
+        # url_prefix:
+        # use_ssl: True
+        # certificate:
+        # client_cert:
+        # client_key:
+        # ssl_no_validate: True
+        # http_auth:
+        # timeout: 30
+        # master_only: False
+      # logging:
+      #   loglevel: INFO
+      #   logfile:
+      #   logformat: default
+      #   blacklist: ['elasticsearch', 'urllib3']
+
+  ## Curator resources requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+    requests: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+
+  priorityClassName: ""
+
+  # extraVolumes and extraVolumeMounts allows you to mount other volumes
+  # Example Use Case: mount ssl certificates when elasticsearch has tls enabled
+  # extraVolumes:
+  #   - name: es-certs
+  #     secret:
+  #       defaultMode: 420
+  #       secretName: es-certs
+  # extraVolumeMounts:
+  #   - name: es-certs
+  #     mountPath: /certs
+  #     readOnly: true
+
+  ## Add your own init container or uncomment and modify the given example.
+  ##
+  extraInitContainers: {}
+  ## Don't configure S3 repository till Elasticsearch is reachable.
+  ## Ensure that it is available at http://elasticsearch:9200
+  ##
+  # elasticsearch-s3-repository:
+  #   image: bitnami/minideb:latest
+  #   imagePullPolicy: "IfNotPresent"
+  #   command:
+  #   - "/bin/bash"
+  #   - "-c"
+  #   args:
+  #   - |
+  #     ES_HOST=elasticsearch
+  #     ES_PORT=9200
+  #     ES_REPOSITORY=backup
+  #     S3_REGION=us-east-1
+  #     S3_BUCKET=bucket
+  #     S3_BASE_PATH=backup
+  #     S3_COMPRESS=true
+  #     S3_STORAGE_CLASS=standard
+  #     install_packages curl && \
+  #     ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
+  #     cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY} \
+  #     {
+  #       "type": "s3",
+  #       "settings": {
+  #         "bucket": "${S3_BUCKET}",
+  #         "base_path": "${S3_BASE_PATH}",
+  #         "region": "${S3_REGION}",
+  #         "compress": "${S3_COMPRESS}",
+  #         "storage_class": "${S3_STORAGE_CLASS}"
+  #       }
+  #     }
+
+## Elasticsearch Prometheus exporter configuration
+## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/
+##
+metrics:
+  enabled: true
+  name: metrics
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-exporter
+    tag: 1.1.0-debian-10-r57
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Elasticsearch Prometheus exporter service type
+  ##
+  service:
+    type: ClusterIP
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9114"
+  ## Elasticsearch Prometheus exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+  ## Metrics exporter pod Annotation and Labels
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "8080"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Bundled Kibana parameters
+##
+kibana:
+  elasticsearch:
+    hosts:
+      - '{{ include "elasticsearch.coordinating.fullname" . }}'
+    port: 9200
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1a8f3bdbe52e3015e1c1491f09b8a7986eaf655e
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml
@@ -0,0 +1,786 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  # imageRegistry: myRegistryName
+  # imagePullSecrets:
+  #   - myRegistryKeySecretName
+  # storageClass: myStorageClass
+  ## Coordinating name to be used in the Kibana subchart (service name)
+  ##
+  coordinating:
+    name: coordinating-only
+  kibanaEnabled: false
+
+## Bitnami Elasticsearch image version
+## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/elasticsearch
+  tag: 7.6.1-debian-10-r22
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information on logs
+  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override elasticsearch.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override elasticsearch.fullname template
+##
+# fullnameOverride:
+
+## Bitnami Minideb image version
+## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+##
+sysctlImage:
+  enabled: true
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Cluster domain
+##
+clusterDomain: cluster.local
+
+## Elasticsearch cluster name
+##
+name: elastic
+
+## Elasticsearch discovery node parameters
+##
+discovery:
+  name: discovery
+
+## Comma, semi-colon or space separated list of plugins to install at initialization
+## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables
+##
+# plugins:
+
+## Customize elasticsearch configuration
+## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+##
+# config:
+
+## extraVolumes and extraVolumeMounts allows you to mount other volumes
+## Example Use Case: mount ssl certificates when elasticsearch has tls enabled
+# extraVolumes:
+#   - name: es-certs
+#     secret:
+#       defaultMode: 420
+#       secretName: es-certs
+# extraVolumeMounts:
+#   - name: es-certs
+#     mountPath: /certs
+#     readOnly: true
+
+## Elasticsearch master-eligible node parameters
+##
+master:
+  name: master
+  ## Number of master-eligible node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch master statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for master-eligible pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for master-eligible pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch master-eligible container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch master-eligible container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim, If false, use emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+
+  ## Service parameters for master-eligible node(s)
+  ##
+  service:
+    ## master-eligible service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the master node
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    # name:
+
+## Elasticsearch coordinating-only node parameters
+##
+coordinating:
+  ## Number of coordinating-only node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch coordinating deployment
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for the coordinating-only pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for coordinating-only pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch coordinating-only container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch coordinating-only container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Service parameters for coordinating-only node(s)
+  ##
+  service:
+    ## coordinating-only service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch REST API port
+    ##
+    port: 9200
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the coordinating node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch data node parameters
+##
+data:
+  name: data
+  ## Number of data node(s) replicas to deploy
+  ##
+  replicas: 2
+  ## updateStrategy for ElasticSearch Data statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+    # rollingUpdatePartition
+  heapSize: 128m
+  ## Provide annotations for the data pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for data pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch data container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 1152Mi
+  ## Elasticsearch data container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim, If false, use emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the data node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch ingest node parameters
+##
+ingest:
+  enabled: false
+  name: ingest
+  ## Number of ingest node(s) replicas to deploy
+  ##
+  replicas: 2
+  heapSize: 128m
+  ## Provide annotations for the ingest pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for ingest pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch ingest container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch ingest container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Service parameters for ingest node(s)
+  ##
+  service:
+    ## ingest service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+
+## Elasticsearch curator parameters
+##
+curator:
+  enabled: false
+  name: curator
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-curator
+    tag: 5.8.1-debian-10-r58
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  cronjob:
+    # At 01:00 every day
+    schedule: "0 1 * * *"
+    annotations: {}
+    concurrencyPolicy: ""
+    failedJobsHistoryLimit: ""
+    successfulJobsHistoryLimit: ""
+    jobRestartPolicy: Never
+
+  podAnnotations: {}
+
+  rbac:
+    # Specifies whether RBAC should be enabled
+    enabled: false
+
+  serviceAccount:
+    # Specifies whether a ServiceAccount should be created
+    create: true
+    # The name of the ServiceAccount to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name:
+
+  psp:
+    # Specifies whether a podsecuritypolicy should be created
+    create: false
+
+  hooks:
+    install: false
+    upgrade: false
+
+  # run curator in dry-run mode
+  dryrun: false
+
+  command: ["curator"]
+  env: {}
+
+  configMaps:
+    # Delete indices older than 90 days
+    action_file_yml: |-
+      ---
+      actions:
+        1:
+          action: delete_indices
+          description: "Clean up ES by deleting old indices"
+          options:
+            timeout_override:
+            continue_if_exception: False
+            disable_action: False
+            ignore_empty_list: True
+          filters:
+          - filtertype: age
+            source: name
+            direction: older
+            timestring: '%Y.%m.%d'
+            unit: days
+            unit_count: 90
+            field:
+            stats_result:
+            epoch:
+            exclude: False
+    # Default config (this value is evaluated as a template)
+    config_yml: |-
+      ---
+      client:
+        hosts:
+          - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+        port: {{ .Values.coordinating.service.port }}
+        # url_prefix:
+        # use_ssl: True
+        # certificate:
+        # client_cert:
+        # client_key:
+        # ssl_no_validate: True
+        # http_auth:
+        # timeout: 30
+        # master_only: False
+      # logging:
+      #   loglevel: INFO
+      #   logfile:
+      #   logformat: default
+      #   blacklist: ['elasticsearch', 'urllib3']
+
+  ## Curator resources requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+    requests: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+
+  priorityClassName: ""
+
+  # extraVolumes and extraVolumeMounts allows you to mount other volumes
+  # Example Use Case: mount ssl certificates when elasticsearch has tls enabled
+  # extraVolumes:
+  #   - name: es-certs
+  #     secret:
+  #       defaultMode: 420
+  #       secretName: es-certs
+  # extraVolumeMounts:
+  #   - name: es-certs
+  #     mountPath: /certs
+  #     readOnly: true
+
+  ## Add your own init container or uncomment and modify the given example.
+  ##
+  extraInitContainers: {}
+  ## Don't configure S3 repository till Elasticsearch is reachable.
+  ## Ensure that it is available at http://elasticsearch:9200
+  ##
+  # elasticsearch-s3-repository:
+  #   image: bitnami/minideb:latest
+  #   imagePullPolicy: "IfNotPresent"
+  #   command:
+  #   - "/bin/bash"
+  #   - "-c"
+  #   args:
+  #   - |
+  #     ES_HOST=elasticsearch
+  #     ES_PORT=9200
+  #     ES_REPOSITORY=backup
+  #     S3_REGION=us-east-1
+  #     S3_BUCKET=bucket
+  #     S3_BASE_PATH=backup
+  #     S3_COMPRESS=true
+  #     S3_STORAGE_CLASS=standard
+  #     install_packages curl && \
+  #     ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
+  #     cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY} \
+  #     {
+  #       "type": "s3",
+  #       "settings": {
+  #         "bucket": "${S3_BUCKET}",
+  #         "base_path": "${S3_BASE_PATH}",
+  #         "region": "${S3_REGION}",
+  #         "compress": "${S3_COMPRESS}",
+  #         "storage_class": "${S3_STORAGE_CLASS}"
+  #       }
+  #     }
+
+## Elasticsearch Prometheus exporter configuration
+## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/
+##
+metrics:
+  enabled: false
+  name: metrics
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-exporter
+    tag: 1.1.0-debian-10-r57
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Elasticsearch Prometheus exporter service type
+  ##
+  service:
+    type: ClusterIP
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9114"
+  ## Elasticsearch Prometheus exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+  ## Metrics exporter pod Annotation and Labels
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "8080"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Bundled Kibana parameters
+##
+kibana:
+  elasticsearch:
+    hosts:
+      - '{{ include "elasticsearch.coordinating.fullname" . }}'
+    port: 9200
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-authorizer.json b/deploy/asapo_helm_chart/asapo/configs/asapo-authorizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3ad3fd52c785b586e2277882f30cbf6289a87d1
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-authorizer.json
@@ -0,0 +1,11 @@
+{
+  "Port": {{ .Values.ownServices.authorizer.port }},
+  "LogLevel":"debug",
+  "AlwaysAllowedBeamtimes":[{"beamtimeId":"asapo_test","beamline":"test","core-path":"{{ .Values.common.offlineDir }}/test_facility/gpfs/test/2019/data/asapo_test"},
+  {"beamtimeId":"asapo_test1","beamline":"test1","core-path":"{{ .Values.common.offlineDir }}/test_facility/gpfs/test1/2019/data/asapo_test1"},
+  {"beamtimeId":"asapo_test2","beamline":"test2","core-path":"{{ .Values.common.offlineDir }}/test_facility/gpfs/test2/2019/data/asapo_test2"}],
+  "RootBeamtimesFolder":"{{ .Values.common.offlineDir }}",
+  "CurrentBeamlinesFolder":"{{ .Values.common.onlineDir }}",
+  "SecretFile":"/etc/authorizer/auth_secret.key",
+  "TokenDurationMin":600
+}
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-broker.json b/deploy/asapo_helm_chart/asapo/configs/asapo-broker.json
new file mode 100644
index 0000000000000000000000000000000000000000..3753878d2cbce843fa612b36b264e83c5a227172
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-broker.json
@@ -0,0 +1,9 @@
+{
+  "DatabaseServer":"asapo-mongodb:{{ .Values.ownServices.mongodb.port }}",
+  "DiscoveryServer": "asapo-discovery:{{ .Values.ownServices.discovery.port }}",
+  "PerformanceDbServer":"{{ .Chart.Name }}-influxdb:{{ .Values.influxdb.influxdb.service.port }}",
+  "PerformanceDbName": "asapo_brokers",
+  "Port": {{ .Values.ownServices.broker.port }},
+  "LogLevel":"debug",
+  "SecretFile":"/etc/broker/auth_secret.key"
+}
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-discovery.json b/deploy/asapo_helm_chart/asapo/configs/asapo-discovery.json
new file mode 100644
index 0000000000000000000000000000000000000000..11d9221013832c7df74b427ba572196e6582f924
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-discovery.json
@@ -0,0 +1,15 @@
+{
+  "Mode": "kubernetes",
+  "Kubernetes":{
+    "Mode": "internal",
+    "Namespace": "{{ .Release.Namespace }}"
+  },
+  "Receiver": {
+    "MaxConnections": 32,
+    "UseIBAddress": false
+  },
+  "Port": {{ .Values.ownServices.discovery.port }},
+  "LogLevel": "debug"
+}
+
+
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-file-transfer.json b/deploy/asapo_helm_chart/asapo/configs/asapo-file-transfer.json
new file mode 100644
index 0000000000000000000000000000000000000000..49cc9bfc3b204c19cd3b9751b930726e651b2c9b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-file-transfer.json
@@ -0,0 +1,5 @@
+{
+  "Port": {{ .Values.ownServices.fileTransfer.port }},
+  "LogLevel":"debug",
+  "SecretFile":"/etc/file-transfer/auth_secret.key"
+}
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf b/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf
new file mode 100644
index 0000000000000000000000000000000000000000..2ba55b940529ab672458db682fe6728eb5c14f40
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf
@@ -0,0 +1,40 @@
+<source>
+@type tail
+path /var/log/containers/{{ .service.serviceName }}*{{ .service.serviceName }}*.log
+pos_file /tmp/{{ .service.serviceName }}.log.pos
+read_from_head true
+tag asapo
+<parse>
+@type json
+</parse>
+</source>
+
+<filter asapo.**>
+@type parser
+key_name log
+format json
+time_format %Y-%m-%d %H:%M:%S.%N
+reserve_data true
+</filter>
+
+<filter asapo.**>
+@type record_transformer
+enable_ruby
+remove_keys ["log","stream"]
+<record>
+source_addr ${hostname}
+</record>
+</filter>
+
+<match asapo.**>
+  @type elasticsearch
+  host asapo-elk-coordinating
+  port {{ .Values.elasticsearch.coordinating.service.port }}
+  flush_interval 5s
+  logstash_format true
+  time_key_format %Y-%m-%dT%H:%M:%S.%N
+  time_key time
+  time_key_exclude_timestamp true
+  buffer_type memory
+</match>
+
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json b/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c9e6b1554fc58141d37736fb46719bbac1db4da
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-receiver.json
@@ -0,0 +1,24 @@
+{
+  "PerformanceDbServer": "{{ .Chart.Name }}-influxdb:{{ .Values.influxdb.influxdb.service.port }}",
+  "PerformanceDbName": "asapo_receivers",
+  "DatabaseServer": "asapo-mongodb:{{ .Values.ownServices.mongodb.port }}",
+  "DiscoveryServer": "asapo-discovery:{{ .Values.ownServices.discovery.port }}",
+  "AuthorizationServer": "asapo-authorizer:{{ .Values.ownServices.authorizer.port }}",
+  "AuthorizationInterval": 10000,
+  "ListenPort": {{ .Values.ownServices.receiver.port }},
+  "DataServer": {
+    "AdvertiseURI": "auto",
+    "NThreads": {{ .Values.ownServices.receiver.dataServer.nThreads }},
+    "ListenPort": {{ .Values.ownServices.receiver.dataServer.port }}
+  },
+  "DataCache": {
+    "Use": {{ .Values.ownServices.receiver.dataCache.enable }},
+    "SizeGB": {{ .Values.ownServices.receiver.dataCache.sizeGb }},
+    "ReservedShare": 10
+  },
+  "Tag": "receiver",
+  "WriteToDisk":true,
+  "ReceiveToDiskThresholdMB": {{ .Values.ownServices.receiver.receiveToDiskThresholdMB }},
+  "WriteToDb":true,
+  "LogLevel": "info"
+}
diff --git a/deploy/asapo_helm_chart/asapo/templates/NOTES.txt b/deploy/asapo_helm_chart/asapo/templates/NOTES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/NOTES.txt
@@ -0,0 +1 @@
+
diff --git a/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl b/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..04692f57c8cb6970532415cab677c67efc9f5090
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl
@@ -0,0 +1,22 @@
+{{/* Generate configmaps */}}
+{{- define "asapo.configmap-fromfile" }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .service.serviceName }}-config
+data:
+  {{ .service.serviceName }}.json:  {{ tpl (.Files.Get (printf "configs/%s.json" .service.serviceName)) . | quote }}
+
+{{- if .service.sidecarLogs }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .service.serviceName }}-fluentd-config
+data:
+  asapo-fluentd.conf:  {{ tpl (.Files.Get  ("configs/asapo-fluentd.conf")) . | quote }}
+{{- end }}
+
+{{- end }}
+
+
diff --git a/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl b/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..88ceda218c8316c3dedae3056401a89dc1223447
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl
@@ -0,0 +1,29 @@
+{{/* Fluentd sidecar container definition */}}
+{{- define "asapo.fluentd.container" }}
+- name: fluentd
+  image: "yakser/fluentd_elastic"
+  command: ["fluentd"]
+  args: ["-c", "/fluentd/etc/asapo-fluentd.conf"]
+  volumeMounts:
+    - mountPath: "/fluentd/etc"
+      name: fluentd-config
+    - mountPath: /var/log/containers
+      name: logs
+    - mountPath: /var
+      name: var
+{{- end }}
+
+{{/* Volumes required by the fluentd sidecar container */}}
+{{- define "asapo.fluentd.volumes" }}
+- name: fluentd-config
+  configMap:
+    name: {{ .serviceName }}-fluentd-config
+- name: logs
+  hostPath:
+    path: /var/log/containers
+    type: Directory
+- name: var
+  hostPath:
+    path: /var
+    type: Directory
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/_ingress.tpl b/deploy/asapo_helm_chart/asapo/templates/_ingress.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..2ef5a7eab873adf81e0233a37ed7a7e6ecf1c0f1
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/_ingress.tpl
@@ -0,0 +1,20 @@
+{{/* Generate ingress */}}
+{{- define "asapo.ingress" }}
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: ingress-{{ .service.serviceName }}
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/whitelist-source-range: 131.169.0.0/16
+spec:
+  rules:
+    - host: "*.desy.de"
+      http:
+        paths:
+          - path: /{{ .Release.Namespace }}/{{ .service.serviceName }}(/|$)(.*)
+            backend:
+              serviceName: {{ .service.serviceName }}
+              servicePort: {{ .service.port }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/_service.tpl b/deploy/asapo_helm_chart/asapo/templates/_service.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..808feac9df9b1c8b8309b3387d74f44eecdc1c43
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/_service.tpl
@@ -0,0 +1,15 @@
+{{/* Generate service */}}
+{{- define "asapo.service" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .service.serviceName }}
+spec:
+  type: {{ if .service._exposeServiceExtrernally }}NodePort{{ else }}ClusterIP{{ end }}
+  {{ if .service._exposeServiceExtrernally }}externalTrafficPolicy: Local{{ end }}
+  ports:
+    - protocol: TCP
+      port: {{ .service.port }}
+  selector:
+    app: {{ .service.appName | default .service.serviceName }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/auth-secret.yaml b/deploy/asapo_helm_chart/asapo/templates/auth-secret.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b90f8bf216f66deb745bc042d044c9e1302bae49
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/auth-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: auth-secret
+type: Opaque
+data:
+  auth_secret.key: {{ .Values.common.authSecret | b64enc | quote }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e923be6acd41da528beaf088b8852125d3b8cf55
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml
@@ -0,0 +1,50 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-authorizer
+  labels:
+    app: asapo-authorizer
+spec:
+  replicas: {{ .Values.ownServices.authorizer.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-authorizer
+  template:
+    metadata:
+      labels:
+        app: asapo-authorizer
+      annotations:
+        checksum/config: {{ .Files.Get "configs/asapo-authorizer.json" | sha256sum  }}
+        checksum/secret: {{ include (print $.Template.BasePath "/auth-secret.yaml") . | sha256sum }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
+    spec:
+      volumes:
+        - name: all-in-one
+          projected:
+            sources:
+              - configMap:
+                  name: asapo-authorizer-config
+              - secret:
+                  name: auth-secret
+        - name: shared-volume-offline
+          persistentVolumeClaim:
+            claimName: asapo-offline-pv
+        - name: shared-volume-online
+          persistentVolumeClaim:
+            claimName: asapo-online-pv
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.authorizer | indent 8 }}
+      containers:
+        - name: asapo-authorizer
+          image: "yakser/asapo-authorizer-dev:{{ .Values.common.asapoVersionTag }}"
+          command: ["/asapo-authorizer"]
+          args: ["-config", "/etc/authorizer/asapo-authorizer.json"]
+          ports:
+            - containerPort: {{ .Values.ownServices.authorizer.port }}
+          volumeMounts:
+            - mountPath: "/etc/authorizer"
+              name: all-in-one
+            - mountPath: {{ .Values.common.offlineDir }}
+              name: shared-volume-offline
+            - mountPath: {{ .Values.common.onlineDir }}
+              name: shared-volume-online
+      {{- include "asapo.fluentd.container" . | indent 8 }}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a48d728c768ba7fe61b6f486b4c0469e14041307
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-broker
+  labels:
+    app: asapo-broker
+spec:
+  replicas: {{ .Values.ownServices.broker.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-broker
+  template:
+    metadata:
+      labels:
+        app: asapo-broker
+      annotations:
+        checksum/config: {{ .Files.Get "configs/asapo-broker.json" | sha256sum  }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
+        checksum/secret: {{ include (print $.Template.BasePath "/auth-secret.yaml") . | sha256sum }}
+    spec:
+      volumes:
+        - name: all-in-one
+          projected:
+            sources:
+              - configMap:
+                  name: asapo-broker-config
+              - secret:
+                  name: auth-secret
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.broker | indent 8 }}
+      initContainers:
+        - name: wait-databases
+          image: busybox:1.28
+          command: ['sh', '-c', "echo initializing;until wget -T 2 asapo-influxdb:8086/ping; do echo waiting for influxdb; sleep 2; done"]
+      containers:
+        - name: asapo-broker
+          image: "yakser/asapo-broker-dev:{{ .Values.common.asapoVersionTag }}"
+          command: ["/asapo-broker"]
+          args: ["-config", "/etc/broker/asapo-broker.json"]
+          ports:
+            - containerPort: {{ .Values.ownServices.broker.port }}
+          volumeMounts:
+            - mountPath: "/etc/broker"
+              name: all-in-one
+        {{- include "asapo.fluentd.container" . | indent 8 }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/configs.yaml b/deploy/asapo_helm_chart/asapo/templates/configs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7497412bbfabcfbdbc6b3389084b1d452d34ea65
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/configs.yaml
@@ -0,0 +1,5 @@
+{{- range .Values.ownServices }}
+  {{ $data := dict "Release" $.Release "Chart" $.Chart "Template" $.Template "Values" $.Values "Files" $.Files "service" . }}
+    {{- template "asapo.configmap-fromfile" $data }}
+---
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/discovery-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/discovery-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7cee3709df13537c2b8e6c36add89907a68c6c69
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/discovery-deployment.yaml
@@ -0,0 +1,35 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-discovery
+  labels:
+    app: asapo-discovery
+spec:
+  replicas: {{ .Values.ownServices.discovery.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-discovery
+  template:
+    metadata:
+      labels:
+        app: asapo-discovery
+      annotations:
+        checksum/config: {{ .Files.Get "configs/asapo-discovery.json" | sha256sum  }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
+    spec:
+      volumes:
+        - name: asapo-discovery-config
+          configMap:
+            name: asapo-discovery-config
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.discovery | indent 8 }}
+      containers:
+        - name: asapo-discovery
+          image: "yakser/asapo-discovery-dev:{{ .Values.common.asapoVersionTag }}"
+          command: ["/asapo-discovery"]
+          args: ["-config", "/etc/discovery/asapo-discovery.json"]
+          ports:
+            - containerPort: {{ .Values.ownServices.discovery.port }}
+          volumeMounts:
+            - mountPath: "/etc/discovery"
+              name: asapo-discovery-config
+      {{- include "asapo.fluentd.container" . | indent 8 }}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/file-transfer-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/file-transfer-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..04e5d502f712e544758fda683a761fa5ff480949
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/file-transfer-deployment.yaml
@@ -0,0 +1,50 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-file-transfer
+  labels:
+    app: asapo-file-transfer
+spec:
+  replicas: {{ .Values.ownServices.fileTransfer.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-file-transfer
+  template:
+    metadata:
+      labels:
+        app: asapo-file-transfer
+      annotations:
+        checksum/config: {{ .Files.Get "configs/asapo-file-transfer.json" | sha256sum  }}
+        checksum/secret: {{ include (print $.Template.BasePath "/auth-secret.yaml") . | sha256sum }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
+    spec:
+      volumes:
+        - name: all-in-one
+          projected:
+            sources:
+              - configMap:
+                  name: asapo-file-transfer-config
+              - secret:
+                  name: auth-secret
+        - name: shared-volume-offline
+          persistentVolumeClaim:
+            claimName: asapo-offline-pv
+        - name: shared-volume-online
+          persistentVolumeClaim:
+            claimName: asapo-online-pv
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.fileTransfer | indent 8 }}
+      containers:
+        - name: asapo-file-transfer
+          image: "yakser/asapo-file-transfer-dev:{{ .Values.common.asapoVersionTag }}"
+          command: ["/asapo-file-transfer"]
+          args: ["-config", "/etc/file-transfer/asapo-file-transfer.json"]
+          ports:
+            - containerPort: {{ .Values.ownServices.fileTransfer.port }}
+          volumeMounts:
+            - mountPath: "/etc/file-transfer"
+              name: all-in-one
+            - mountPath: {{ .Values.common.offlineDir }}
+              name: shared-volume-offline
+            - mountPath: {{ .Values.common.onlineDir }}
+              name: shared-volume-online
+      {{- include "asapo.fluentd.container" . | indent 8 }}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/grafana-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/grafana-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dbc498acf24f14f58781ece47525e0f125a3cb23
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/grafana-deployment.yaml
@@ -0,0 +1,35 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-grafana
+  labels:
+    app: asapo-grafana
+spec:
+  replicas: {{ .Values.ownServices.grafana.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-grafana
+  template:
+    metadata:
+      labels:
+        app: asapo-grafana
+    spec:
+      securityContext:
+        fsGroup: 472
+      volumes:
+        - name: asapo-grafana
+          persistentVolumeClaim:
+            claimName: asapo-grafana
+      containers:
+        - name: asapo-grafana
+          image: "grafana/grafana:6.7.1-ubuntu"
+          ports:
+            - containerPort: {{ .Values.ownServices.grafana.port }}
+          volumeMounts:
+            - mountPath: /var/lib/grafana
+              name: asapo-grafana
+          env:
+            - name: GF_SERVER_ROOT_URL
+              value: "%(protocol)s://%(domain)s/{{ .Release.Namespace }}/asapo-grafana/"
+            - name: GF_SERVER_SERVE_FROM_SUB_PATH
+              value: "true"
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/ingress.yaml b/deploy/asapo_helm_chart/asapo/templates/ingress.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9efb8c0ac77cd634288b7b54ab1648cc7829bfd0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/ingress.yaml
@@ -0,0 +1,7 @@
+{{- range .Values.ownServices }}
+  {{- if not ._exposeServiceExtrernally }}
+  {{- $data := dict  "Release" $.Release "Values" $.Values "service" . }}
+  {{- template "asapo.ingress" $data }}
+  {{- end }}
+---
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/mongodb-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/mongodb-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e012f9fee3b3f5c1d9c3d3dc4c791fac1a6aa4e4
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/mongodb-deployment.yaml
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-mongodb
+  labels:
+    app: asapo-mongodb
+spec:
+  replicas: {{ .Values.ownServices.mongodb.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-mongodb
+  template:
+    metadata:
+      labels:
+        app: asapo-mongodb
+    spec:
+      volumes:
+        - name: asapo-mongodb
+          persistentVolumeClaim:
+            claimName: asapo-mongodb
+      containers:
+        - name: asapo-mongodb
+          image: "mongo:4.0.0"
+          ports:
+            - containerPort: {{ .Values.ownServices.mongodb.port }}
+          volumeMounts:
+            - mountPath: /data/db
+              name: asapo-mongodb
diff --git a/deploy/asapo_helm_chart/asapo/templates/receiver-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/receiver-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..144feeaf716a103185f46ca85274e28384463fd6
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/receiver-deployment.yaml
@@ -0,0 +1,86 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-script
+data:
+  entrypoint.sh: |-
+    #!/bin/bash
+    export TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token`
+    export NODE_PORT=`curl https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces/{{ .Release.Namespace }}/services/asapo-receiver-dataserver --insecure --header "Authorization: Bearer $TOKEN" --silent | jq .spec.ports[0].nodePort`
+    jq ".DataServer.AdvertiseURI = \"$NODE_IP:$NODE_PORT\"" /etc/receiver/init/asapo-receiver.json > /etc/receiver/asapo-receiver.json
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: asapo-receiver
+  labels:
+    app: asapo-receiver
+spec:
+  replicas: {{ .Values.ownServices.receiver.replicaCount }}
+  selector:
+    matchLabels:
+      app: asapo-receiver
+  template:
+    metadata:
+      labels:
+        app: asapo-receiver
+      annotations:
+        checksum/config: {{ .Files.Get "configs/asapo-receiver.json" | sha256sum  }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
+    spec:
+      volumes:
+        - name: config-script
+          configMap:
+            defaultMode: 0700
+            name: config-script
+        - name: asapo-receiver-config
+          emptyDir: {}
+        - name: asapo-receiver-config-init
+          configMap:
+            name: asapo-receiver-config
+        - name: shared-volume-offline
+          persistentVolumeClaim:
+            claimName: asapo-offline-pv
+        - name: shared-volume-online
+          persistentVolumeClaim:
+            claimName: asapo-online-pv
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.receiver | indent 8 }}
+      initContainers:
+        - name: prepare-config
+          image: yakser/alpine_curl_jq
+          command: ['/bin/entrypoint.sh']
+          env:
+            - name: NODE_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          volumeMounts:
+            - mountPath: "/etc/receiver"
+              name: asapo-receiver-config
+            - mountPath: "/etc/receiver/init"
+              name: asapo-receiver-config-init
+            - mountPath: /bin/entrypoint.sh
+              name: config-script
+              readOnly: true
+              subPath: entrypoint.sh
+
+      containers:
+        - name: asapo-receiver
+          image: "yakser/asapo-receiver-dev:{{ .Values.common.asapoVersionTag }}"
+          command: ["/receiver"]
+          args: ["/etc/receiver/asapo-receiver.json"]
+          ports:
+            - containerPort: {{ .Values.ownServices.receiver.port }}
+              name: http
+            - containerPort: {{ .Values.ownServices.receiver.dataServer.port }}
+              name: http-ds
+          volumeMounts:
+            - mountPath: "/etc/receiver"
+              name: asapo-receiver-config
+            - mountPath: {{ .Values.common.offlineDir }}
+              name: shared-volume-offline
+            - mountPath: {{ .Values.common.onlineDir }}
+              name: shared-volume-online
+
+      {{- include "asapo.fluentd.container" . | indent 8 }}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/roles.yaml b/deploy/asapo_helm_chart/asapo/templates/roles.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..748e9d5f10f104068ae251e694221fb1bfa1ab48
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/roles.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  name: role
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - services
+      - pods
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  creationTimestamp: null
+  name: role-bind
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: role
+subjects:
+  - kind: ServiceAccount
+    name: default
diff --git a/deploy/asapo_helm_chart/asapo/templates/services.yaml b/deploy/asapo_helm_chart/asapo/templates/services.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fb7f806615faffb6ad9d3120dc13f88bfd4be47c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/services.yaml
@@ -0,0 +1,7 @@
+{{- range .Values.ownServices }}
+  {{ $data := dict "Values" $.Values "service" . }}
+    {{- template "asapo.service" $data }}
+---
+{{- end }}
+{{ $data := dict "Values" $.Values "service" .Values.ownServices.receiver.dataServer }}
+{{- template "asapo.service" $data }}
diff --git a/deploy/asapo_helm_chart/asapo/templates/storage.yaml b/deploy/asapo_helm_chart/asapo/templates/storage.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6bc55dcdc1ffd5e3353c83b9be53e3bcc2bc4d7
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/storage.yaml
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: asapo-offline-pv
+spec:
+  storageClassName: nfs-storage
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 3Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: asapo-online-pv
+spec:
+  storageClassName: nfs-storage
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: asapo-grafana
+spec:
+  storageClassName: standard
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: asapo-mongodb
+spec:
+  storageClassName: standard
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 20Gi
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/values.yaml b/deploy/asapo_helm_chart/asapo/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fb6a5359dd63f4dbc90d82fdeecaf11e52852a40
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/values.yaml
@@ -0,0 +1,89 @@
+ownServices:
+  authorizer:
+    serviceName: asapo-authorizer
+    replicaCount: 1
+    port: 5006
+    sidecarLogs: true
+    _exposeServiceExtrernally: false
+  broker:
+    serviceName: asapo-broker
+    replicaCount: 1
+    port: 5007
+    sidecarLogs: true
+    _exposeServiceExtrernally: true
+  discovery:
+    serviceName: asapo-discovery
+    replicaCount: 1
+    port: 5008
+    sidecarLogs: true
+    _exposeServiceExtrernally: false
+  receiver:
+    serviceName: asapo-receiver
+    replicaCount: 1
+    port: 5009
+    sidecarLogs: true
+    _exposeServiceExtrernally: true
+    dataServer:
+      appName: asapo-receiver
+      serviceName: asapo-receiver-dataserver
+      port: 5010
+      nThreads: 1
+      _exposeServiceExtrernally: true
+    dataCache:
+      enable: true
+      sizeGb: 1
+    receiveToDiskThresholdMB: 200
+  fileTransfer:
+    serviceName: asapo-file-transfer
+    replicaCount: 1
+    port: 5011
+    sidecarLogs: true
+    _exposeServiceExtrernally: true
+  grafana:
+    serviceName: asapo-grafana
+    replicaCount: 1
+    port: 3000
+    sidecarLogs: false
+    _exposeServiceExtrernally: false
+  mongodb:
+    serviceName: asapo-mongodb
+    replicaCount: 1
+    port: 27017
+    sidecarLogs: false
+    _exposeServiceExtrernally: false
+
+
+common:
+  authSecret: "12ljzgneasfd"
+  offlineDir: "/test_offline"
+  onlineDir: "/test_online"
+  asapoVersionTag: "feature_ASAPO-108-kubernetes-deployment.latest"
+
+influxdb:
+  authEnabled: false
+  influxdb:
+    updateStrategy: Recreate
+    service:
+      port: 8086
+
+elasticsearch:
+  global:
+    kibanaEnabled: true
+    coordinating:
+      name: elk-coordinating
+  kibana:
+    extraConfiguration:
+      "server.basePath": /{{ .Release.Namespace }}/logsview
+      "server.rewriteBasePath": true
+    plugins:
+      - "https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-7.6.1-0.1.31.zip"
+      - "x-pack"
+    healthCheckPathTemplate: "/{{ .Release.Namespace }}/logsview/app/kibana"
+    ingress:
+      enabled: true
+      annotations:
+        kubernetes.io/ingress.class: "nginx"
+        nginx.ingress.kubernetes.io/whitelist-source-range: 131.169.0.0/16
+      hosts:
+        - name: "*.desy.de"
+          path_template: "/{{ .Release.Namespace }}/logsview"
diff --git a/deploy/asapo_services/asap3.tfvars b/deploy/asapo_services/asap3.tfvars
index 392572994f0961d135ef942b19c57ca869865b4b..7ecdf397804d525479303c8f4a85f1ff036fbeee 100644
--- a/deploy/asapo_services/asap3.tfvars
+++ b/deploy/asapo_services/asap3.tfvars
@@ -1,5 +1,7 @@
 elk_logs = true
 
+asapo_image_tag = "feature_ASAPO-108-kubernetes-deployment.latest"
+
 service_dir="/gpfs/asapo/shared/service_dir"
 online_dir="/beamline"
 offline_dir="/asap3"
diff --git a/deploy/asapo_services/copy_asap3.sh.in b/deploy/asapo_services/copy_asap3.sh.in
index 59bd40ce2eb52bae06ec7c37fca76ca73459bb71..e5121101a1ead3f2fee2f5697d4d0c56291bdf4f 100755
--- a/deploy/asapo_services/copy_asap3.sh.in
+++ b/deploy/asapo_services/copy_asap3.sh.in
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
-scp scripts/* asap3-utl01:@NOMAD_INSTALL@/terraform/
-scp asap3.tfvars asap3-utl01:@NOMAD_INSTALL@/terraform/
-ssh asap3-utl01 chown asapo: -R @NOMAD_INSTALL@/terraform/
-ssh asap3-utl01 rm @NOMAD_INSTALL@/terraform/auth_secret.key
-ssh asap3-utl01 ln -s @NOMAD_INSTALL@/access/secret.key @NOMAD_INSTALL@/terraform/auth_secret.key
+scp scripts/* root@asap3-utl01:@NOMAD_INSTALL@/terraform/
+scp asap3.tfvars root@asap3-utl01:@NOMAD_INSTALL@/terraform/
+ssh -l root asap3-utl01 chown asapo: -R @NOMAD_INSTALL@/terraform/
+ssh -l root asap3-utl01 rm @NOMAD_INSTALL@/terraform/auth_secret.key
+ssh -l root asap3-utl01 ln -s @NOMAD_INSTALL@/access/secret.key @NOMAD_INSTALL@/terraform/auth_secret.key
diff --git a/deploy/asapo_services/scripts/asapo-fts.nmd.tpl b/deploy/asapo_services/scripts/asapo-fts.nmd.tpl
index b3bd2c3063eb4f92c3651c99ded205bf4a2571f5..055d567c0da050d0898ca58f72164b7b30065048 100644
--- a/deploy/asapo_services/scripts/asapo-fts.nmd.tpl
+++ b/deploy/asapo_services/scripts/asapo-fts.nmd.tpl
@@ -1,4 +1,4 @@
-job "asapo-fts" {
+job "asapo-file-transfer" {
   datacenters = ["dc1"]
   affinity {
     attribute = "$${meta.node_group}"
@@ -13,7 +13,7 @@ job "asapo-fts" {
     auto_revert = false
   }
 
-  group "fts" {
+  group "file-transfer" {
     count = ${n_fts}
     restart {
       attempts = 2
@@ -22,7 +22,7 @@ job "asapo-fts" {
       mode = "fail"
     }
 
-    task "fts" {
+    task "file-transfer" {
       driver = "docker"
       user = "${asapo_user}"
       config {
@@ -55,9 +55,9 @@ job "asapo-fts" {
 
       service {
         port = "fts"
-        name = "asapo-fts"
+        name = "asapo-file-transfer"
         check {
-          name     = "asapo-fts-alive"
+          name     = "asapo-file-transfer-alive"
           type     = "http"
           path     = "/health-check"
           interval = "10s"
@@ -71,7 +71,7 @@ job "asapo-fts" {
       }
 
       template {
-         source        = "${scripts_dir}/fts.json.tpl"
+         source        = "${scripts_dir}/file-transfer.json.tpl"
          destination   = "local/config.json"
          change_mode   = "restart"
       }
diff --git a/deploy/asapo_services/scripts/asapo-mongo.nmd.tpl b/deploy/asapo_services/scripts/asapo-mongo.nmd.tpl
index cc6389d8b93da7a9b6353dc7f8ee4d79ffac4780..9c06fa7da69beeec0fee5a4f6a0b51725ef36403 100644
--- a/deploy/asapo_services/scripts/asapo-mongo.nmd.tpl
+++ b/deploy/asapo_services/scripts/asapo-mongo.nmd.tpl
@@ -45,7 +45,7 @@ job "asapo-mongo" {
 
       service {
         port = "mongo"
-        name = "mongo"
+        name = "asapo-mongodb"
         check {
           type     = "script"
           name     = "alive"
diff --git a/deploy/asapo_services/scripts/asapo-services.nmd.tpl b/deploy/asapo_services/scripts/asapo-services.nmd.tpl
index 818819dc111c028f7476e39ed0f0e5324c20924c..dae2350c53835c4a4aed16d239a90fb81a8838f9 100644
--- a/deploy/asapo_services/scripts/asapo-services.nmd.tpl
+++ b/deploy/asapo_services/scripts/asapo-services.nmd.tpl
@@ -120,7 +120,7 @@ job "asapo-services" {
         check {
           name     = "alive"
           type     = "http"
-          path     = "/receivers"
+          path     = "/asapo-receiver"
           interval = "10s"
           timeout  = "2s"
           initial_status =   "passing"
diff --git a/deploy/asapo_services/scripts/broker.json.tpl b/deploy/asapo_services/scripts/broker.json.tpl
index 531fdaa7787aba5d6cf02755c4f9822a70b1c340..26aead08bc8f38911278adc11e9af75e4861bec4 100644
--- a/deploy/asapo_services/scripts/broker.json.tpl
+++ b/deploy/asapo_services/scripts/broker.json.tpl
@@ -1,6 +1,6 @@
 {
   "DatabaseServer":"auto",
-  "DiscoveryServer": "localhost:8400/discovery",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
   "PerformanceDbServer":"localhost:8400/influxdb",
   "PerformanceDbName": "asapo_brokers",
   "Port":{{ env "NOMAD_PORT_broker" }},
diff --git a/deploy/asapo_services/scripts/fts.json.tpl b/deploy/asapo_services/scripts/file-transfer.json.tpl
similarity index 100%
rename from deploy/asapo_services/scripts/fts.json.tpl
rename to deploy/asapo_services/scripts/file-transfer.json.tpl
diff --git a/deploy/asapo_services/scripts/fluentd.conf.tpl b/deploy/asapo_services/scripts/fluentd.conf.tpl
index a46d063838dc19462084e5a60a60b886f6f5c659..c97a9048e4be3f3114fd59c225be2ca433910cef 100644
--- a/deploy/asapo_services/scripts/fluentd.conf.tpl
+++ b/deploy/asapo_services/scripts/fluentd.conf.tpl
@@ -17,7 +17,6 @@
 <filter asapo.docker>
   @type parser
   key_name log
-
   format json
   time_format %Y-%m-%d %H:%M:%S.%N
   reserve_data true
diff --git a/deploy/asapo_services/scripts/nginx.conf.tpl b/deploy/asapo_services/scripts/nginx.conf.tpl
index b101534ded41c65dbeb83de86b5ca563826051e1..e0c52d1ed7dfab0791f81cb226e5fa2d81a43d59 100644
--- a/deploy/asapo_services/scripts/nginx.conf.tpl
+++ b/deploy/asapo_services/scripts/nginx.conf.tpl
@@ -46,8 +46,8 @@ http {
             proxy_pass http://$elasticsearch_endpoint:{{ env "NOMAD_META_elasticsearch_port" }}$uri$is_args$args;
           }
 
-          location /discovery/ {
-            rewrite ^/discovery(/.*) $1 break;
+          location /asapo-discovery/ {
+            rewrite ^/asapo-discovery(/.*) $1 break;
             proxy_pass http://$discovery_endpoint:{{ env "NOMAD_META_discovery_port" }}$uri$is_args$args;
           }
 
@@ -68,8 +68,8 @@ http {
             proxy_pass http://$grafana_endpoint:{{ env "NOMAD_META_grafana_port" }}$uri$is_args$args;
           }
 
-          location /authorizer/ {
-             rewrite ^/authorizer(/.*) $1 break;
+          location /asapo-authorizer/ {
+             rewrite ^/asapo-authorizer(/.*) $1 break;
              proxy_pass http://$authorizer_endpoint:{{ env "NOMAD_META_authorizer_port" }}$uri$is_args$args;
           }
 
diff --git a/deploy/asapo_services/scripts/receiver.json.tpl b/deploy/asapo_services/scripts/receiver.json.tpl
index 0120daebf1437598e9b2c95fa76afd3a097db603..a5f8192358812e3af2b281e48db0f2cdef0becb8 100644
--- a/deploy/asapo_services/scripts/receiver.json.tpl
+++ b/deploy/asapo_services/scripts/receiver.json.tpl
@@ -1,13 +1,13 @@
 {
-  "AdvertiseIP": "{{ if or (env "meta.ib_address") "none" | regexMatch "none" }}{{ env "NOMAD_IP_recv" }}{{ else }}{{ env "meta.ib_address" }}{{ end }}",
   "PerformanceDbServer":"localhost:8400/influxdb",
   "PerformanceDbName": "asapo_receivers",
   "DatabaseServer":"auto",
-  "DiscoveryServer": "localhost:8400/discovery",
-  "AuthorizationServer": "localhost:8400/authorizer",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
+  "AuthorizationServer": "localhost:8400/asapo-authorizer",
   "AuthorizationInterval": 10000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
   "DataServer": {
+    "AdvertiseURI": "{{ if or (env "meta.ib_address") "none" | regexMatch "none" }}{{ env "NOMAD_IP_recv" }}{{ else }}{{ env "meta.ib_address" }}{{ end }}:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": {{ env "NOMAD_META_receiver_dataserver_nthreads" }},
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
diff --git a/deploy/asapo_services/scripts/resources_services.tf b/deploy/asapo_services/scripts/resources_services.tf
index 0c2ddf51c8c14fd0b342bca62d73d359ea311015..fa84a79b18e64e5a4a1431b9c8589f08330d2e02 100644
--- a/deploy/asapo_services/scripts/resources_services.tf
+++ b/deploy/asapo_services/scripts/resources_services.tf
@@ -22,7 +22,7 @@ resource "null_resource" "fluentd" {
 
 resource "null_resource" "mongo" {
   provisioner "local-exec" {
-    command = "asapo-wait-service mongo"
+    command = "asapo-wait-service asapo-mongodb"
   }
   depends_on = [nomad_job.asapo-mongo]
 
@@ -53,7 +53,7 @@ resource "null_resource" "asapo-broker" {
 
 resource "null_resource" "asapo-fts" {
   provisioner "local-exec" {
-    command = "asapo-wait-service asapo-fts"
+    command = "asapo-wait-service asapo-file-transfer"
   }
   depends_on = [nomad_job.asapo-fts]
 }
diff --git a/discovery/CMakeLists.txt b/discovery/CMakeLists.txt
index 2c863597936d4ba34e5dd0434f913989e14908a7..a5c3e6616b1b6ffe8ffa585b834852b9e1a62699 100644
--- a/discovery/CMakeLists.txt
+++ b/discovery/CMakeLists.txt
@@ -1,20 +1,8 @@
 set (TARGET_NAME asapo-discovery)
 
-if (NOT "$ENV{GOPATH}" STREQUAL "")
-	set(GOPATH $ENV{GOPATH})
-endif()
-
-if (NOT GOPATH)
-    message (FATAL_ERROR "GOPATH not set")
-endif()
-
-message(STATUS "global gopath ${GOPATH}")
-
 IF(WIN32)
-    set (gopath "${GOPATH}\;${CMAKE_CURRENT_SOURCE_DIR}\;${CMAKE_SOURCE_DIR}/common/go")
     set (exe_name "${TARGET_NAME}.exe")
 ELSE()
-    set (gopath ${GOPATH}:${CMAKE_CURRENT_SOURCE_DIR}:${CMAKE_SOURCE_DIR}/common/go)
     set (exe_name "${TARGET_NAME}")
 ENDIF()
 
@@ -23,8 +11,8 @@ include(testing_go)
 configure_file(docker/Dockerfile . COPYONLY)
 
 add_custom_target(${TARGET_NAME} ALL
-    COMMAND  ${CMAKE_COMMAND} -E env GOPATH=${gopath}
-    go build ${GO_OPTS} -o ${exe_name} asapo_discovery/main
+    COMMAND go build ${GO_OPTS} -o ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} main/discovery.go
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/asapo_discovery
     VERBATIM)
 define_property(TARGET PROPERTY EXENAME
         BRIEF_DOCS <executable name>
@@ -35,4 +23,4 @@ set_target_properties(${TARGET_NAME} PROPERTIES EXENAME ${CMAKE_CURRENT_BINARY_D
 
 install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} DESTINATION bin)
 
-gotest(${TARGET_NAME} "./...")
+gotest(${TARGET_NAME} "${CMAKE_CURRENT_SOURCE_DIR}/src/asapo_discovery" "./...")
diff --git a/discovery/src/asapo_discovery/common/consts.go b/discovery/src/asapo_discovery/common/consts.go
new file mode 100644
index 0000000000000000000000000000000000000000..9789e656f1b7cadca21b370582f815a6e23453bc
--- /dev/null
+++ b/discovery/src/asapo_discovery/common/consts.go
@@ -0,0 +1,8 @@
+package common
+
+const  (
+	NameMongoService = "asapo-mongodb"
+	NameFtsService = "asapo-file-transfer"
+	NameBrokerService = "asapo-broker"
+	NameReceiverService = "asapo-receiver"
+)
diff --git a/common/go/src/asapo_common/utils/stucts.go b/discovery/src/asapo_discovery/common/structs.go
similarity index 80%
rename from common/go/src/asapo_common/utils/stucts.go
rename to discovery/src/asapo_discovery/common/structs.go
index e9c11a59e09cdd64e60cbffc4d996bdf1a036c66..2844d977195a6bfb8a62e49e4150ae5c8c175757 100644
--- a/common/go/src/asapo_common/utils/stucts.go
+++ b/discovery/src/asapo_discovery/common/structs.go
@@ -1,4 +1,4 @@
-package utils
+package common
 
 import "errors"
 
@@ -27,12 +27,17 @@ type Settings struct {
 	FileTransferService  FtsInfo
 	ConsulEndpoints []string
 	Mode			string
+	Kubernetes struct {
+		Mode string
+		ConfigFile string
+		Namespace string
+	}
 	Port            int
 	LogLevel        string
 }
 
 func (settings *Settings) Validate() error {
-	if settings.Mode != "consul"{
+	if settings.Mode == "static"{
 		if len(settings.Receiver.StaticEndpoints) == 0 || len(settings.Broker.StaticEndpoint) == 0 || len(settings.Mongo.StaticEndpoint) == 0{
 			return errors.New("static endpoints not set")
 		}
@@ -50,13 +55,9 @@ func (settings *Settings) Validate() error {
 		return errors.New("Mode not set")
 	}
 
-	if settings.Mode != "static" && settings.Mode != "consul" {
-		return errors.New("wrong mode: "  + settings.Mode+ ", (allowed static|consul)")
+	if settings.Mode != "static" && settings.Mode != "consul" && settings.Mode != "kubernetes" {
+		return errors.New("wrong mode: "  + settings.Mode+ ", (allowed static|consul|kubernetes)")
 	}
 
 	return nil
 }
-
-type FolderTokenTokenExtraClaim struct {
-	RootFolder string
-}
diff --git a/discovery/src/asapo_discovery/go.mod b/discovery/src/asapo_discovery/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..8661775101cee70211a111c2fb3bcf48bfb8f7c8
--- /dev/null
+++ b/discovery/src/asapo_discovery/go.mod
@@ -0,0 +1,16 @@
+module asapo_discovery
+
+go 1.14
+
+replace asapo_common v0.0.0 => ../../../common/go/src/asapo_common
+
+require (
+	asapo_common v0.0.0
+	github.com/gorilla/mux v1.7.4 // indirect
+	github.com/hashicorp/consul/api v1.4.0
+	github.com/sirupsen/logrus v1.5.0 // indirect
+	github.com/stretchr/testify v1.4.0
+	k8s.io/api v0.17.0
+	k8s.io/apimachinery v0.17.0
+	k8s.io/client-go v0.17.0
+)
diff --git a/discovery/src/asapo_discovery/go.sum b/discovery/src/asapo_discovery/go.sum
new file mode 100644
index 0000000000000000000000000000000000000000..859f1d959eb69a677e2ca214af6b005bd2a89e43
--- /dev/null
+++ b/discovery/src/asapo_discovery/go.sum
@@ -0,0 +1,297 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v1.0.2 h1:KPldsxuKGsS2FPWsNeg9ZO18aCrGKujPoWXn2yo+KQM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/consul v1.7.2 h1:pDEnRiUE8jOUlxIqzo8Jw3Zcsz6KSpygk2BjkrsASsk=
+github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY=
+github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
+github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
+github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
+k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
+k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
+k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
+k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo=
+k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
+k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
+k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
+k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
+k8s.io/client-go v1.5.1 h1:XaX/lo2/u3/pmFau8HN+sB5C/b4dc4Dmm2eXjBH4p1E=
+k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
+k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200327001022-6496210b90e8 h1:6JFbaLjRyBz8K2Jvt+pcT+N3vvwMZfg8MfVENwe9aag=
+k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/discovery/src/asapo_discovery/main/discovery.go b/discovery/src/asapo_discovery/main/discovery.go
index e4bd21e4a37f6a11be84fa447dfa9d97ea6b3993..f306154ec24b3e0c72b3cc623199a542185ab111 100644
--- a/discovery/src/asapo_discovery/main/discovery.go
+++ b/discovery/src/asapo_discovery/main/discovery.go
@@ -17,6 +17,8 @@ func NewDefaultHandler() request_handler.Agent {
 		return new(request_handler.StaticRequestHandler)
 	case "consul":
 		return new(request_handler.ConsulRequestHandler)
+	case "kubernetes":
+		return new(request_handler.KubernetesRequestHandler)
 	default:
 		log.Fatal("wrong handler")
 		return nil
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler.go b/discovery/src/asapo_discovery/request_handler/request_handler.go
index 8949bf179280329cc83a36bd17e0a5c4a8a416e0..85fac562f472a43416ece4c361af2ac281479072 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler.go
@@ -1,12 +1,16 @@
 package request_handler
 
-import "asapo_common/utils"
+import (
+	"asapo_discovery/common"
+)
 
 type Agent interface {
 	GetReceivers(bool) ([]byte, error)
-	GetBroker() ([]byte, error)
-	GetMongo() ([]byte, error)
-	GetFts() ([]byte, error)
-	Init(settings utils.Settings) error
+	GetSingleService(service string) ([]byte, error)
+	Init(settings common.Settings) error
 }
 
+type Responce struct {
+	MaxConnections int
+	Uris           []string
+}
\ No newline at end of file
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
index 3fa6862e3c944fe5cf5efbe52ffe913840ad49af..9db9cb5eecb1e8b466f2a72c7ba3e42225bcbe93 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
@@ -1,12 +1,13 @@
 package request_handler
 
 import (
-	"asapo_common/utils"
+	"asapo_discovery/common"
 	"github.com/hashicorp/consul/api"
 	"strconv"
 	"errors"
 	"sort"
 	"sync"
+	"asapo_common/utils"
 )
 
 type ConsulRequestHandler struct {
@@ -15,10 +16,6 @@ type ConsulRequestHandler struct {
 	staticHandler *StaticRequestHandler
 }
 
-type Responce struct {
-	MaxConnections int
-	Uris           []string
-}
 
 type SafeCounter struct {
 	counter   int
@@ -77,15 +74,15 @@ func (rh *ConsulRequestHandler) GetReceivers(use_ib bool) ([]byte, error) {
 	return utils.MapToJson(&response)
 }
 
-func (rh *ConsulRequestHandler) GetBroker() ([]byte, error) {
-	if len(rh.staticHandler.broker)>0 {
-		return rh.staticHandler.GetBroker()
+func (rh *ConsulRequestHandler) GetSingleService(name string) ([]byte, error) {
+	if len(rh.staticHandler.singleServices[name])>0 {
+		return rh.staticHandler.GetSingleService(name)
 	}
 
 	if (rh.client == nil) {
 		return nil, errors.New("consul client not connected")
 	}
-	response, err := rh.GetServices("asapo-broker",false)
+	response, err := rh.GetServices(name,false)
 	if err != nil {
 		return nil, err
 	}
@@ -96,48 +93,7 @@ func (rh *ConsulRequestHandler) GetBroker() ([]byte, error) {
 		return []byte(response[counter.Next(size)]),nil
 	}
 	return nil, nil
-}
 
-func (rh *ConsulRequestHandler) GetMongo() ([]byte, error) {
-	if len(rh.staticHandler.mongo)>0 {
-		return rh.staticHandler.GetMongo()
-	}
-
-	if (rh.client == nil) {
-		return nil, errors.New("consul client not connected")
-	}
-	response, err := rh.GetServices("mongo",false)
-	if err != nil {
-		return nil, err
-	}
-	size := len(response)
-	if size ==0 {
-		return []byte(""),nil
-	}else {
-		return []byte(response[counter.Next(size)]),nil
-	}
-	return nil, nil
-}
-
-func (rh *ConsulRequestHandler) GetFts() ([]byte, error) {
-	if len(rh.staticHandler.fts)>0 {
-		return rh.staticHandler.GetFts()
-	}
-
-	if (rh.client == nil) {
-		return nil, errors.New("consul client not connected")
-	}
-	response, err := rh.GetServices("asapo-fts",false)
-	if err != nil {
-		return nil, err
-	}
-	size := len(response)
-	if size ==0 {
-		return []byte(""),nil
-	}else {
-		return []byte(response[counter.Next(size)]),nil
-	}
-	return nil, nil
 }
 
 func (rh *ConsulRequestHandler) connectClient(uri string) (client *api.Client, err error) {
@@ -155,7 +111,7 @@ func (rh *ConsulRequestHandler) connectClient(uri string) (client *api.Client, e
 	return
 }
 
-func (rh *ConsulRequestHandler) Init(settings utils.Settings) (err error) {
+func (rh *ConsulRequestHandler) Init(settings common.Settings) (err error) {
 	rh.staticHandler = new(StaticRequestHandler)
 	rh.staticHandler.Init(settings)
 	rh.MaxConnections = settings.Receiver.MaxConnections
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
index 234e17192a496021cf3eef4c52f21b0ba03818b3..516928011dba75e384b3196d10203a7972bd3a80 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"github.com/hashicorp/consul/api"
 	"strconv"
-	"asapo_common/utils"
+	"asapo_discovery/common"
 )
 
 type ConsulHandlerTestSuite struct {
@@ -18,7 +18,7 @@ func TestConsulHandlerTestSuite(t *testing.T) {
 	suite.Run(t, new(ConsulHandlerTestSuite))
 }
 
-var consul_settings utils.Settings
+var consul_settings common.Settings
 
 func (suite *ConsulHandlerTestSuite) registerAgents(name string) {
 	for i := 1234; i < 1236; i++ {
@@ -42,7 +42,7 @@ func (suite *ConsulHandlerTestSuite) registerAgents(name string) {
 
 func (suite *ConsulHandlerTestSuite) SetupTest() {
 	var err error
-	consul_settings = utils.Settings{Receiver: utils.ReceiverInfo{MaxConnections: 10, StaticEndpoints: []string{}}}
+	consul_settings = common.Settings{Receiver: common.ReceiverInfo{MaxConnections: 10, StaticEndpoints: []string{}}}
 
 	suite.client, err = api.NewClient(api.DefaultConfig())
 	if err != nil {
@@ -51,8 +51,6 @@ func (suite *ConsulHandlerTestSuite) SetupTest() {
 
 	suite.registerAgents("asapo-receiver")
 	suite.registerAgents("asapo-broker")
-	suite.registerAgents("asapo-fts")
-	suite.registerAgents("mongo")
 }
 
 func (suite *ConsulHandlerTestSuite) TearDownTest() {
@@ -60,11 +58,6 @@ func (suite *ConsulHandlerTestSuite) TearDownTest() {
 	suite.client.Agent().ServiceDeregister("asapo-receiver1235")
 	suite.client.Agent().ServiceDeregister("asapo-broker1234")
 	suite.client.Agent().ServiceDeregister("asapo-broker1235")
-	suite.client.Agent().ServiceDeregister("mongo1234")
-	suite.client.Agent().ServiceDeregister("mongo1235")
-	suite.client.Agent().ServiceDeregister("asapo-fts1234")
-	suite.client.Agent().ServiceDeregister("asapo-fts1235")
-
 }
 
 func (suite *ConsulHandlerTestSuite) TestInitDefaultUri() {
@@ -128,61 +121,34 @@ func (suite *ConsulHandlerTestSuite) TestGetReceiversWhenNotConnected() {
 func (suite *ConsulHandlerTestSuite) TestGetBrokerWhenNotConnected() {
 	consul_settings.ConsulEndpoints = []string{"blabla"}
 	suite.handler.Init(consul_settings)
-	_, err := suite.handler.GetBroker()
+	_, err := suite.handler.GetSingleService(common.NameBrokerService)
 	suite.Error(err, "")
 }
 
 func (suite *ConsulHandlerTestSuite) TestGetBrokerRoundRobin() {
 	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetBroker()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1234", string(res), "uris")
-
-	res, err = suite.handler.GetBroker()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1235", string(res), "uris")
-
-	res, err = suite.handler.GetBroker()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1234", string(res), "uris")
-
-}
-
-func (suite *ConsulHandlerTestSuite) TestGetMongoRoundRobin() {
-	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetMongo()
+	res, err := suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("127.0.0.1:1234", string(res), "uris")
 
-	res, err = suite.handler.GetMongo()
+	res, err = suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("127.0.0.1:1235", string(res), "uris")
 
-	res, err = suite.handler.GetMongo()
+	res, err = suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("127.0.0.1:1234", string(res), "uris")
-}
-
-func (suite *ConsulHandlerTestSuite) TestGetMongoStatic() {
-	consul_settings.Mongo.StaticEndpoint="127.0.0.1:0000"
-	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetMongo()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:0000", string(res), "uris")
 
-	res, err = suite.handler.GetMongo()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:0000", string(res), "uris")
 }
 
 func (suite *ConsulHandlerTestSuite) TestGetBrokerStatic() {
 	consul_settings.Broker.StaticEndpoint="127.0.0.1:0000"
 	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetBroker()
+	res, err := suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("127.0.0.1:0000", string(res), "uris")
 
-	res, err = suite.handler.GetBroker()
+	res, err = suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("127.0.0.1:0000", string(res), "uris")
 }
@@ -192,36 +158,8 @@ func (suite *ConsulHandlerTestSuite) TestGetBrokerEmpty() {
 	suite.client.Agent().ServiceDeregister("asapo-broker1235")
 
 	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetBroker()
+	res, err := suite.handler.GetSingleService(common.NameBrokerService)
 	suite.NoError(err, "")
 	suite.Equal("", string(res), "uris")
 }
 
-func (suite *ConsulHandlerTestSuite) TestGetFtsRoundRobin() {
-	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetFts()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1235", string(res), "uris")
-
-	res, err = suite.handler.GetFts()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1234", string(res), "uris")
-
-	res, err = suite.handler.GetFts()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:1235", string(res), "uris")
-}
-
-func (suite *ConsulHandlerTestSuite) TestGetFtsStatic() {
-	consul_settings.FileTransferService.StaticEndpoint="127.0.0.1:0000"
-	suite.handler.Init(consul_settings)
-	res, err := suite.handler.GetFts()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:0000", string(res), "uris")
-
-	res, err = suite.handler.GetFts()
-	suite.NoError(err, "")
-	suite.Equal("127.0.0.1:0000", string(res), "uris")
-}
-
-
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes.go b/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes.go
new file mode 100644
index 0000000000000000000000000000000000000000..6173f77e8a6fea62c74cc07029f5dd7d0129e51d
--- /dev/null
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes.go
@@ -0,0 +1,134 @@
+package request_handler
+
+import (
+	"asapo_common/utils"
+	"asapo_discovery/common"
+	"errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+)
+
+type KubernetesRequestHandler struct {
+	MaxConnections int
+	client         *kubernetes.Clientset
+	staticHandler  *StaticRequestHandler
+	namespace string
+}
+
+func (rh *KubernetesRequestHandler) GetServices(name string, use_ib bool) ([]string, error) {
+	pods, err := rh.client.CoreV1().Pods(rh.namespace).List(metav1.ListOptions{LabelSelector:"app="+name,FieldSelector:"status.phase=Running"})
+	if err != nil {
+		return []string{},err
+	}
+
+	var result = make([]string, 0)
+
+	services, err := rh.client.CoreV1().Services(rh.namespace).List(metav1.ListOptions{FieldSelector:"metadata.name="+name})
+	if err != nil {
+		return []string{},err
+	}
+
+	if len(services.Items) != 1 || len(services.Items[0].Spec.Ports)!=1 {
+		return []string{},errors.New("cannot find kubernetes service or port")
+	}
+
+	port := strconv.Itoa(int(services.Items[0].Spec.Ports[0].NodePort))
+	for _,pod:=range pods.Items {
+		result = append(result, pod.Status.HostIP+":"+port)
+	}
+	sort.Strings(result)
+	return result, nil
+}
+
+func (rh *KubernetesRequestHandler) GetSingleService(name string) ([]byte, error) {
+	if len(rh.staticHandler.singleServices[name]) > 0 {
+		return rh.staticHandler.GetSingleService(name)
+	}
+
+	if rh.client == nil {
+		return nil, errors.New("Kubernetes client not initialized")
+	}
+	response, err := rh.GetServices(name, false)
+	if err != nil {
+		return nil, err
+	}
+	size := len(response)
+	if size == 0 {
+		return []byte(""), nil
+	} else {
+		return []byte(response[counter.Next(size)]), nil
+	}
+	return nil, nil
+
+}
+
+func (rh *KubernetesRequestHandler) GetReceivers(use_ib bool) ([]byte, error) {
+	if len(rh.staticHandler.receiverResponce.Uris)>0 {
+		return rh.staticHandler.GetReceivers(false)
+	}
+
+	var response Responce
+	response.MaxConnections = rh.MaxConnections
+	if (rh.client == nil) {
+		return nil, errors.New("kubernetes client not initialized")
+	}
+	var err error
+	response.Uris, err = rh.GetServices("asapo-receiver",use_ib)
+	if err != nil {
+		return nil, err
+	}
+	return utils.MapToJson(&response)
+}
+
+func (rh *KubernetesRequestHandler) createExternalConfig(settings common.Settings) (config *rest.Config, err error) {
+	var kubeconfig string
+	if len(settings.Kubernetes.ConfigFile) == 0 {
+		if home := homeDir(); home != "" {
+			kubeconfig = filepath.Join(home, ".kube", "config")
+		} else {
+			return nil,errors.New("cannot set default kubeconfig file")
+		}
+	} else {
+		kubeconfig = settings.Kubernetes.ConfigFile
+	}
+
+	config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
+	return config,err
+}
+
+func (rh *KubernetesRequestHandler) createInternalConfig() (config *rest.Config, err error) {
+	return rest.InClusterConfig()
+}
+
+func (rh *KubernetesRequestHandler) Init(settings common.Settings) (err error) {
+	rh.staticHandler = new(StaticRequestHandler)
+	rh.staticHandler.Init(settings)
+	rh.MaxConnections = settings.Receiver.MaxConnections
+
+	var config *rest.Config
+	if  settings.Kubernetes.Mode=="external" {
+		config, err = rh.createExternalConfig(settings)
+	} else {
+		config, err = rh.createInternalConfig()
+	}
+	if err != nil {
+		return err
+	}
+
+	rh.client, err = kubernetes.NewForConfig(config)
+	rh.namespace = settings.Kubernetes.Namespace
+	return err
+}
+
+func homeDir() string {
+	if h := os.Getenv("HOME"); h != "" {
+		return h
+	}
+	return os.Getenv("USERPROFILE") // windows
+}
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f1b7ae0b280a46e4cf22a1ac91d7e4484ca9efd8
--- /dev/null
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_kubernetes_test.go
@@ -0,0 +1,80 @@
+//+build kubernetes_accessible
+
+// for manual tests - kubernetes cluster should be accessible in $HOME/.kube/config
+
+package request_handler
+
+import (
+	"asapo_discovery/common"
+	"fmt"
+	"github.com/stretchr/testify/suite"
+	"k8s.io/client-go/kubernetes"
+	"testing"
+)
+
+type KubernetesHandlerTestSuite struct {
+	suite.Suite
+	client  *kubernetes.Clientset
+	handler KubernetesRequestHandler
+}
+
+func TestKubernetesHandlerTestSuite(t *testing.T) {
+	suite.Run(t, new(KubernetesHandlerTestSuite))
+}
+
+var Kubernetes_settings common.Settings
+
+func (suite *KubernetesHandlerTestSuite) SetupTest() {
+//	var err error
+	Kubernetes_settings = common.Settings{Receiver: common.ReceiverInfo{MaxConnections: 10, StaticEndpoints: []string{}}}
+	Kubernetes_settings.Kubernetes.Mode="external"
+}
+
+func (suite *KubernetesHandlerTestSuite) TearDownTest() {
+
+}
+
+func (suite *KubernetesHandlerTestSuite) TestInit() {
+	err := suite.handler.Init(Kubernetes_settings)
+	suite.NoError(err, "init ok")
+}
+
+func (suite *KubernetesHandlerTestSuite) TestRoundRobinBroker() {
+	err := suite.handler.Init(Kubernetes_settings)
+	suite.handler.Init(consul_settings)
+	suite.NoError(err, "")
+
+	for i:=0;i<4;i++ {
+		res, err := suite.handler.GetSingleService(common.NameBrokerService)
+		suite.NoError(err, "")
+		fmt.Println(string(res))
+	}
+}
+
+func (suite *KubernetesHandlerTestSuite) TestWrongServiceName() {
+	err := suite.handler.Init(Kubernetes_settings)
+	suite.handler.Init(consul_settings)
+
+	_, err = suite.handler.GetSingleService("bla")
+	suite.Error(err, "")
+}
+
+func (suite *KubernetesHandlerTestSuite) TestNoRunningInstances() {
+// set fts replicas to zero before running it
+	err := suite.handler.Init(Kubernetes_settings)
+	suite.handler.Init(consul_settings)
+
+	res, err := suite.handler.GetSingleService(common.NameFtsService)
+	suite.NoError(err, "")
+	suite.Empty(res, "")
+}
+
+
+func (suite *KubernetesHandlerTestSuite) TestGerReceiversBroker() {
+	err := suite.handler.Init(Kubernetes_settings)
+	suite.NoError(err, "")
+
+	res, err := suite.handler.GetReceivers(false)
+	suite.NoError(err, "")
+	fmt.Println(string(res))
+}
\ No newline at end of file
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_static.go b/discovery/src/asapo_discovery/request_handler/request_handler_static.go
index b603c85939882e700c3a7affb5c06e80ef07eb4a..a874bc522f22754fe79ada97ea6d2ab88ae049db 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_static.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_static.go
@@ -2,38 +2,33 @@ package request_handler
 
 import (
 	"asapo_common/utils"
+	"errors"
+	"asapo_discovery/common"
 )
 
 type StaticRequestHandler struct {
 	receiverResponce Responce
-	broker string
-	mongo string
-	fts string
+	singleServices map[string]string
 }
 
+func (rh *StaticRequestHandler) GetSingleService(service string) ([]byte, error) {
+	uri,ok := rh.singleServices[service]
+	if !ok {
+		return nil, errors.New("wrong service: " + service)
+	}
+	return  []byte(uri),nil
+}
 
 func (rh *StaticRequestHandler) GetReceivers(bool) ([]byte, error) {
 	return utils.MapToJson(&rh.receiverResponce)
 }
 
-func (rh *StaticRequestHandler) GetBroker() ([]byte, error) {
-	return []byte(rh.broker),nil
-}
-
-func (rh *StaticRequestHandler) GetMongo() ([]byte, error) {
-	return []byte(rh.mongo),nil
-}
-
-func (rh *StaticRequestHandler) GetFts() ([]byte, error) {
-	return []byte(rh.fts),nil
-}
-
-
-func (rh *StaticRequestHandler) Init(settings utils.Settings) error {
+func (rh *StaticRequestHandler) Init(settings common.Settings) error {
 	rh.receiverResponce.MaxConnections = settings.Receiver.MaxConnections
 	rh.receiverResponce.Uris = settings.Receiver.StaticEndpoints
-	rh.broker = settings.Broker.StaticEndpoint
-	rh.mongo = settings.Mongo.StaticEndpoint
-	rh.fts = settings.FileTransferService.StaticEndpoint
+	rh.singleServices = make(map[string]string)
+	rh.singleServices[common.NameBrokerService] = settings.Broker.StaticEndpoint
+	rh.singleServices[common.NameMongoService] = settings.Mongo.StaticEndpoint
+	rh.singleServices[common.NameFtsService] = settings.FileTransferService.StaticEndpoint
 	return nil
 }
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
index 7409996fa663b31d4c0ff6fe746091852ad6fe3d..2f5cc8c6d314e05c9a7b3737ccfde59c26b8059d 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_static_test.go
@@ -3,15 +3,15 @@ package request_handler
 import (
 	"github.com/stretchr/testify/assert"
 	"testing"
-    "asapo_common/utils"
+	"asapo_discovery/common"
 )
 
 
 var uris = []string{"ip1","ip2"}
 const max_conn = 1
 
-var static_settings utils.Settings= utils.Settings{Receiver:utils.ReceiverInfo{MaxConnections:max_conn,StaticEndpoints:uris},Broker:utils.BrokerInfo{
-	StaticEndpoint:"ip_broker"}, Mongo:utils.MongoInfo{StaticEndpoint:"ip_mongo"}, FileTransferService:utils.FtsInfo{StaticEndpoint:"ip_fts"}}
+var static_settings common.Settings= common.Settings{Receiver:common.ReceiverInfo{MaxConnections:max_conn,StaticEndpoints:uris},Broker:common.BrokerInfo{
+	StaticEndpoint:"ip_broker"}, Mongo:common.MongoInfo{StaticEndpoint:"ip_mongo"}, FileTransferService:common.FtsInfo{StaticEndpoint:"ip_fts"}}
 
 
 
@@ -31,14 +31,14 @@ func TestStaticHandlerGetReceviersOK(t *testing.T) {
 
 func TestStaticHandlerGetBrokerOK(t *testing.T) {
 	rh.Init(static_settings)
-	res,err := rh.GetBroker()
+	res,err := rh.GetSingleService(common.NameBrokerService)
 	assert.Equal(t,string(res), "ip_broker")
 	assert.Nil(t, err)
 }
 
 func TestStaticHandlerGetMongoOK(t *testing.T) {
 	rh.Init(static_settings)
-	res,err := rh.GetMongo()
+	res,err := rh.GetSingleService(common.NameMongoService)
 	assert.Equal(t,string(res), "ip_mongo")
 	assert.Nil(t, err)
 }
@@ -46,7 +46,7 @@ func TestStaticHandlerGetMongoOK(t *testing.T) {
 
 func TestStaticHandlerGetFtsOK(t *testing.T) {
 	rh.Init(static_settings)
-	res,err := rh.GetFts()
+	res,err := rh.GetSingleService(common.NameFtsService)
 	assert.Equal(t,string(res), "ip_fts")
 	assert.Nil(t, err)
 }
\ No newline at end of file
diff --git a/discovery/src/asapo_discovery/server/get_receivers.go b/discovery/src/asapo_discovery/server/get_receivers.go
index 056d303705207bc88c8955368a090ca63ee1e4a7..5fed8a36aa8be5856be7bad3f700b37933ea2760 100644
--- a/discovery/src/asapo_discovery/server/get_receivers.go
+++ b/discovery/src/asapo_discovery/server/get_receivers.go
@@ -3,28 +3,17 @@ package server
 import (
 	"net/http"
 	"asapo_common/logger"
-	"errors"
+	"asapo_discovery/common"
 )
 
 func getService(service string) (answer []byte, code int) {
 	var err error
-	switch service {
-	case "receivers":
+	if (service == "asapo-receiver") {
 		answer, err = requestHandler.GetReceivers(settings.Receiver.UseIBAddress)
-		break
-	case "broker":
-		answer, err = requestHandler.GetBroker()
-		break
-	case "mongo":
-		answer, err = requestHandler.GetMongo()
-		break
-	case "fts":
-		answer, err = requestHandler.GetFts()
-		break
-	default:
-		err = errors.New("wrong request: "+service)
-	}
+	} else {
+		answer, err = requestHandler.GetSingleService(service)
 
+	}
 	log_str := "processing get "+service
 	if err != nil {
 		logger.Error(log_str + " - " + err.Error())
@@ -37,28 +26,28 @@ func getService(service string) (answer []byte, code int) {
 
 func routeGetReceivers(w http.ResponseWriter, r *http.Request) {
 	r.Header.Set("Content-type", "application/json")
-	answer,code := getService("receivers")
+	answer,code := getService(common.NameReceiverService)
 	w.WriteHeader(code)
 	w.Write(answer)
 }
 
 func routeGetBroker(w http.ResponseWriter, r *http.Request) {
 	r.Header.Set("Content-type", "application/json")
-	answer,code := getService("broker")
+	answer,code := getService(common.NameBrokerService)
 	w.WriteHeader(code)
 	w.Write(answer)
 }
 
 func routeGetMongo(w http.ResponseWriter, r *http.Request) {
 	r.Header.Set("Content-type", "application/json")
-	answer,code := getService("mongo")
+	answer,code := getService(common.NameMongoService)
 	w.WriteHeader(code)
 	w.Write(answer)
 }
 
 func routeGetFileTransferService(w http.ResponseWriter, r *http.Request) {
 	r.Header.Set("Content-type", "application/json")
-	answer,code := getService("fts")
+	answer,code := getService(common.NameFtsService)
 	w.WriteHeader(code)
 	w.Write(answer)
 }
\ No newline at end of file
diff --git a/discovery/src/asapo_discovery/server/listroutes.go b/discovery/src/asapo_discovery/server/listroutes.go
index 6ae466fa6445b8dd3f14f83d6d93a3e37f6bc63e..ec6ae17371a6f688925c8a05614855a9d7f248a1 100644
--- a/discovery/src/asapo_discovery/server/listroutes.go
+++ b/discovery/src/asapo_discovery/server/listroutes.go
@@ -2,32 +2,32 @@ package server
 
 import (
 	"asapo_common/utils"
+	"asapo_discovery/common"
 )
 
 var listRoutes = utils.Routes{
 	utils.Route{
 		"GetReceivers",
 		"Get",
-		"/receivers",
+		"/" + common.NameReceiverService,
 		routeGetReceivers,
 	},
 	utils.Route{
 		"GetBroker",
 		"Get",
-		"/broker",
+		"/asapo-broker",
 		routeGetBroker,
 	},
 	utils.Route{
 		"GetMongo",
 		"Get",
-		"/mongo",
+		"/" + common.NameMongoService,
 		routeGetMongo,
 	},
 	utils.Route{
 		"GetFTS",
 		"Get",
-		"/fts",
+		"/" + common.NameFtsService,
 		routeGetFileTransferService,
 	},
-
 }
diff --git a/discovery/src/asapo_discovery/server/routes_test.go b/discovery/src/asapo_discovery/server/routes_test.go
index eeac5da5567e184b7f23a6a04f3f4b7b6d67b5fb..f15fb60704580489017776c1ef40916eb669c892 100644
--- a/discovery/src/asapo_discovery/server/routes_test.go
+++ b/discovery/src/asapo_discovery/server/routes_test.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 	"testing"
 	"asapo_discovery/request_handler"
+	"asapo_discovery/common"
 )
 
 func containsMatcher(substr string) func(str string) bool {
@@ -24,27 +25,27 @@ func doRequest(path string) *httptest.ResponseRecorder {
 	return w
 }
 
-type GetReceiversTestSuite struct {
+type GetServicesTestSuite struct {
 	suite.Suite
 }
 
-func (suite *GetReceiversTestSuite) SetupTest() {
+func (suite *GetServicesTestSuite) SetupTest() {
 	requestHandler = new(request_handler.StaticRequestHandler)
-	var s utils.Settings= utils.Settings{Receiver:utils.ReceiverInfo{MaxConnections:10,StaticEndpoints:[]string{"ip1","ip2"}},
-	Broker:utils.BrokerInfo{StaticEndpoint:"ip_broker"},Mongo:utils.MongoInfo{StaticEndpoint:"ip_mongo"},
-		FileTransferService:utils.FtsInfo{StaticEndpoint:"ip_fts"}}
+	var s common.Settings= common.Settings{Receiver:common.ReceiverInfo{MaxConnections:10,StaticEndpoints:[]string{"ip1","ip2"}},
+	Broker:common.BrokerInfo{StaticEndpoint:"ip_broker"},Mongo:common.MongoInfo{StaticEndpoint:"ip_mongo"},
+		FileTransferService:common.FtsInfo{StaticEndpoint:"ip_fts"}}
 
 	requestHandler.Init(s)
 	logger.SetMockLog()
 }
 
-func (suite *GetReceiversTestSuite) TearDownTest() {
+func (suite *GetServicesTestSuite) TearDownTest() {
 	logger.UnsetMockLog()
 	requestHandler = nil
 }
 
-func TestGetReceiversTestSuite(t *testing.T) {
-	suite.Run(t, new(GetReceiversTestSuite))
+func TestGetServicesTestSuite(t *testing.T) {
+	suite.Run(t, new(GetServicesTestSuite))
 }
 
 func assertExpectations(t *testing.T) {
@@ -52,15 +53,15 @@ func assertExpectations(t *testing.T) {
 	logger.MockLog.ExpectedCalls = nil
 }
 
-func (suite *GetReceiversTestSuite) TestWrongPath() {
+func (suite *GetServicesTestSuite) TestWrongPath() {
 	w := doRequest("/blabla")
 	suite.Equal(http.StatusNotFound, w.Code, "wrong path")
 }
 
-func (suite *GetReceiversTestSuite) TestGetReceivers() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get receivers")))
+func (suite *GetServicesTestSuite) TestGetReceivers() {
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameReceiverService)))
 
-	w := doRequest("/receivers")
+	w := doRequest("/asapo-receiver")
 
 	suite.Equal(http.StatusOK, w.Code, "code ok")
 	suite.Equal(w.Body.String(), "{\"MaxConnections\":10,\"Uris\":[\"ip1\",\"ip2\"]}", "result")
@@ -68,30 +69,30 @@ func (suite *GetReceiversTestSuite) TestGetReceivers() {
 }
 
 
-func (suite *GetReceiversTestSuite) TestGetBroker() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get broker")))
+func (suite *GetServicesTestSuite) TestGetBroker() {
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameBrokerService)))
 
-	w := doRequest("/broker")
+	w := doRequest("/asapo-broker")
 
 	suite.Equal(http.StatusOK, w.Code, "code ok")
 	suite.Equal(w.Body.String(), "ip_broker", "result")
 	assertExpectations(suite.T())
 }
 
-func (suite *GetReceiversTestSuite) TestGetMongo() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get mongo")))
+func (suite *GetServicesTestSuite) TestGetMongo() {
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameMongoService)))
 
-	w := doRequest("/mongo")
+	w := doRequest("/asapo-mongodb")
 
 	suite.Equal(http.StatusOK, w.Code, "code ok")
 	suite.Equal(w.Body.String(), "ip_mongo", "result")
 	assertExpectations(suite.T())
 }
 
-func (suite *GetReceiversTestSuite) TestGetFts() {
-	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get fts")))
+func (suite *GetServicesTestSuite) TestGetFts() {
+	logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing get "+common.NameFtsService)))
 
-	w := doRequest("/fts")
+	w := doRequest("/asapo-file-transfer")
 
 	suite.Equal(http.StatusOK, w.Code, "code ok")
 	suite.Equal(w.Body.String(), "ip_fts", "result")
diff --git a/discovery/src/asapo_discovery/server/server.go b/discovery/src/asapo_discovery/server/server.go
index 63379b876c2525776bdafe5749fc9bfd76f0455a..b1292d7a182c298f9b89bf41c367768eaa72b0be 100644
--- a/discovery/src/asapo_discovery/server/server.go
+++ b/discovery/src/asapo_discovery/server/server.go
@@ -2,13 +2,13 @@ package server
 
 import (
 	"asapo_discovery/request_handler"
-	"asapo_common/utils"
+	"asapo_discovery/common"
 )
 
 var requestHandler request_handler.Agent
 
 
-var settings utils.Settings
+var settings common.Settings
 
 func SetHandler(rh request_handler.Agent) error {
 	requestHandler = rh
@@ -20,3 +20,4 @@ func SetHandler(rh request_handler.Agent) error {
 func GetHandlerMode()string {
 	return settings.Mode
 }
+
diff --git a/discovery/src/asapo_discovery/server/settings_test.go b/discovery/src/asapo_discovery/server/settings_test.go
index b6025b25a5f681940d7728883abe3c07b427f6e8..e053b71a4c8c5c50391fb84333bbd3cb364f4db4 100644
--- a/discovery/src/asapo_discovery/server/settings_test.go
+++ b/discovery/src/asapo_discovery/server/settings_test.go
@@ -3,11 +3,11 @@ package server
 import (
 	"github.com/stretchr/testify/assert"
 	"testing"
-	"asapo_common/utils"
+	"asapo_discovery/common"
 )
 
-func fillSettings(mode string) utils.Settings {
-	var settings utils.Settings
+func fillSettings(mode string) common.Settings {
+	var settings common.Settings
 	settings.Port = 1
 	settings.Mode = mode
 	settings.Receiver.MaxConnections = 10
@@ -16,6 +16,8 @@ func fillSettings(mode string) utils.Settings {
 	settings.Broker.StaticEndpoint="ip_b"
 	settings.Mongo.StaticEndpoint="ip_m"
 	settings.ConsulEndpoints=[]string{"ipc1","ipc2"}
+	settings.Kubernetes.ConfigFile=""
+	settings.Kubernetes.Mode="external"
 	return settings
 }
 
@@ -65,3 +67,9 @@ func TestGetHandlerMode(t *testing.T) {
 	settings = fillSettings(mode)
 	assert.Equal(t,mode,GetHandlerMode())
 }
+
+func TestSettingsOKKubernetes(t *testing.T) {
+	settings := fillSettings("kubernetes")
+	err := settings.Validate()
+	assert.Nil(t, err)
+}
diff --git a/examples/consumer/getnext_broker/check_windows.bat b/examples/consumer/getnext_broker/check_windows.bat
index 76f25ae90b7090626aada003779aecd5e9090632..6945e1dbade737b50f5f7f8bd1319b3eecbb869f 100644
--- a/examples/consumer/getnext_broker/check_windows.bat
+++ b/examples/consumer/getnext_broker/check_windows.bat
@@ -7,11 +7,7 @@ SET database_name=%beamtime_id%_%stream%
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
@@ -24,8 +20,5 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 echo db.dropDatabase() | %mongo_exe% %database_name%
diff --git a/examples/consumer/getnext_broker_python/check_windows.bat b/examples/consumer/getnext_broker_python/check_windows.bat
index 610c6a4c37c38672d58cefc6c6809860e74f9c8a..44980989369cddff912cad3685b940f2c42ef69b 100644
--- a/examples/consumer/getnext_broker_python/check_windows.bat
+++ b/examples/consumer/getnext_broker_python/check_windows.bat
@@ -7,11 +7,7 @@ SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 set group_id=bif31l2uiddd4r0q6b40
 
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
@@ -48,8 +44,5 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 echo db.dropDatabase() | %mongo_exe% %database_name%
diff --git a/examples/pipeline/in_to_out/check_windows.bat b/examples/pipeline/in_to_out/check_windows.bat
index 673e626835e2d9f8c60e2d29f1d9c05c828bcf12..9b575777b8fb97ab7f155e0e94c414cafa7fe038 100644
--- a/examples/pipeline/in_to_out/check_windows.bat
+++ b/examples/pipeline/in_to_out/check_windows.bat
@@ -18,14 +18,7 @@ SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\d
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
-
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"file%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
 
@@ -64,12 +57,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 
 echo db.dropDatabase() | %mongo_exe% %indatabase_name%
 echo db.dropDatabase() | %mongo_exe% %outdatabase_name%
diff --git a/examples/pipeline/in_to_out_python/check_windows.bat b/examples/pipeline/in_to_out_python/check_windows.bat
index fe83804aa5d7d2fa684ae462c26b5f031f43ef22..b93a7f38c5ac641a5cb51e34b678d1f0debbaa2e 100644
--- a/examples/pipeline/in_to_out_python/check_windows.bat
+++ b/examples/pipeline/in_to_out_python/check_windows.bat
@@ -20,13 +20,7 @@ SET timeout=2
 SET timeout_producer=25
 SET nthreads=4
 
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 3) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"file%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %indatabase_name%  || goto :error
 
@@ -58,13 +52,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop authorizer
-
+call stop_services.bat
 echo db.dropDatabase() | %mongo_exe% %indatabase_name%
 echo db.dropDatabase() | %mongo_exe% %outdatabase_name%
 rmdir /S /Q %receiver_root_folder%
diff --git a/file_transfer/CMakeLists.txt b/file_transfer/CMakeLists.txt
index b420835f5162dedcedb6c7e80ceb560644e1a166..7f8b5bead6c43f4317c22befafa3780445011fef 100644
--- a/file_transfer/CMakeLists.txt
+++ b/file_transfer/CMakeLists.txt
@@ -35,4 +35,4 @@ set_target_properties(asapo-file-transfer PROPERTIES EXENAME ${CMAKE_CURRENT_BIN
 
 install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${exe_name} DESTINATION bin)
 
-gotest(${TARGET_NAME} "./...")
+gotest(${TARGET_NAME}  "${CMAKE_CURRENT_SOURCE_DIR}" "./...")
diff --git a/producer/api/cpp/src/receiver_discovery_service.cpp b/producer/api/cpp/src/receiver_discovery_service.cpp
index 2f10a3152a80d9f53b2cf675be504a9096c23c77..1aa99fce9b5d2408a6f05bd8b54d675072553230 100644
--- a/producer/api/cpp/src/receiver_discovery_service.cpp
+++ b/producer/api/cpp/src/receiver_discovery_service.cpp
@@ -9,7 +9,7 @@
 
 namespace  asapo {
 
-const std::string ReceiverDiscoveryService::kServiceEndpointSuffix = "/discovery/receivers";
+const std::string ReceiverDiscoveryService::kServiceEndpointSuffix = "/asapo-discovery/asapo-receiver";
 
 ReceiverDiscoveryService::ReceiverDiscoveryService(std::string endpoint, uint64_t update_frequency_ms): httpclient__{DefaultHttpClient()},
     log__{GetDefaultProducerLogger()},
diff --git a/producer/api/cpp/unittests/test_receiver_discovery_service.cpp b/producer/api/cpp/unittests/test_receiver_discovery_service.cpp
index ddf55532fccb3dde4cf93292b9c57d677457502f..3f813ce511ee0c2776bd5505c9274082f22ad6df 100644
--- a/producer/api/cpp/unittests/test_receiver_discovery_service.cpp
+++ b/producer/api/cpp/unittests/test_receiver_discovery_service.cpp
@@ -48,7 +48,7 @@ class ReceiversStatusTests : public Test {
     NiceMock<asapo::MockLogger> mock_logger;
     NiceMock<MockHttpClient>* mock_http_client;
 
-    std::string expected_endpoint{"endpoint/discovery/receivers"};
+    std::string expected_endpoint{"endpoint/asapo-discovery/asapo-receiver"};
     ReceiverDiscoveryService status{"endpoint", 20};
 
     void SetUp() override {
diff --git a/receiver/src/receiver_config.cpp b/receiver/src/receiver_config.cpp
index f82f44afe1edcd0cd5c493a178affb3127d8c690..d919cfd0eea6616b6eeb35cf9408d6657e6b2c2d 100644
--- a/receiver/src/receiver_config.cpp
+++ b/receiver/src/receiver_config.cpp
@@ -34,7 +34,7 @@ Error ReceiverConfigFactory::SetConfig(std::string file_name) {
     (err = parser.GetString("AuthorizationServer", &config.authorization_server)) ||
     (err = parser.GetUInt64("AuthorizationInterval", &config.authorization_interval_ms)) ||
     (err = parser.GetString("PerformanceDbName", &config.performance_db_name)) ||
-    (err = parser.GetString("AdvertiseIP", &config.advertise_ip)) ||
+    (err = parser.Embedded("DataServer").GetString("AdvertiseURI", &config.dataserver.advertise_uri)) ||
     (err = parser.GetString("LogLevel", &log_level));
 
     if (err) {
diff --git a/receiver/src/receiver_config.h b/receiver/src/receiver_config.h
index c69ff7990d88bec1b8db6f65194d00c25e045cbe..514905eac527d7d29ea57ca3448c533f92ca9ecc 100644
--- a/receiver/src/receiver_config.h
+++ b/receiver/src/receiver_config.h
@@ -23,7 +23,6 @@ struct ReceiverConfig {
     uint64_t receive_to_disk_threshold_mb = 0;
     LogLevel log_level = LogLevel::Info;
     std::string tag;
-    std::string advertise_ip;
     ReceiverDataCenterConfig dataserver;
     std::string discovery_server;
 };
diff --git a/receiver/src/receiver_data_server/receiver_datacenter_config.h b/receiver/src/receiver_data_server/receiver_datacenter_config.h
index ddef216fd26d56db398be4889d95a636da3ec5a2..27b4c401cddab0948dbe6a9dfc4c512e64d4221f 100644
--- a/receiver/src/receiver_data_server/receiver_datacenter_config.h
+++ b/receiver/src/receiver_data_server/receiver_datacenter_config.h
@@ -9,6 +9,7 @@ struct ReceiverDataCenterConfig {
     uint64_t listen_port = 0;
     uint64_t nthreads = 0;
     std::string tag;
+    std::string advertise_uri;
 };
 
 }
diff --git a/receiver/src/request_handler_db.cpp b/receiver/src/request_handler_db.cpp
index 2b64f85c04f2113fa200dd4c35f5a9b4fc61cf72..d543b9dc7c5d1b916533e58a467700cbce2e2da9 100644
--- a/receiver/src/request_handler_db.cpp
+++ b/receiver/src/request_handler_db.cpp
@@ -37,7 +37,7 @@ Error RequestHandlerDb::GetDatabaseServerUri(std::string* uri) const {
 
     HttpCode code;
     Error http_err;
-    *uri = http_client__->Get(GetReceiverConfig()->discovery_server + "/mongo", &code, &http_err);
+    *uri = http_client__->Get(GetReceiverConfig()->discovery_server + "/asapo-mongodb", &code, &http_err);
     if (http_err) {
         log__->Error(std::string{"http error when discover database server "} + " from " + GetReceiverConfig()->discovery_server
                      + " : " + http_err->Explain());
diff --git a/receiver/src/request_handler_db_write.cpp b/receiver/src/request_handler_db_write.cpp
index 6a0e31bcb708d899b38acb7650b1f763ad0488b8..ad0dbf902199ced9911254e16f62c1726b22d537 100644
--- a/receiver/src/request_handler_db_write.cpp
+++ b/receiver/src/request_handler_db_write.cpp
@@ -80,8 +80,7 @@ FileInfo RequestHandlerDbWrite::PrepareFileInfo(const Request* request) const {
     file_info.size = request->GetDataSize();
     file_info.id = request->GetDataID();
     file_info.buf_id = request->GetSlotId();
-    file_info.source = GetReceiverConfig()->advertise_ip + ":" + string_format("%ld",
-                       GetReceiverConfig()->dataserver.listen_port);
+    file_info.source = GetReceiverConfig()->dataserver.advertise_uri;
     file_info.metadata = request->GetMetaData();
     return file_info;
 }
diff --git a/receiver/unittests/mock_receiver_config.cpp b/receiver/unittests/mock_receiver_config.cpp
index 99d2873cea0722cb1f865a7cf06c05c149ffbddb..aacffd49cda8d9559c164a9ac35bb81f45f876e6 100644
--- a/receiver/unittests/mock_receiver_config.cpp
+++ b/receiver/unittests/mock_receiver_config.cpp
@@ -49,6 +49,7 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
     config_string += "," + Key("ListenPort", error_field) + std::to_string(config.listen_port);
     config_string += "," + Key("DataServer", error_field) + "{";
     config_string += Key("ListenPort", error_field) + std::to_string(config.dataserver.listen_port);
+    config_string += "," +  Key("AdvertiseURI", error_field) + "\"" + config.dataserver.advertise_uri + "\"";
     config_string += "," + Key("NThreads", error_field) + std::to_string(config.dataserver.nthreads);
     config_string += "}";
     config_string += "," + Key("DataCache", error_field) + "{";
@@ -63,7 +64,6 @@ Error SetReceiverConfig (const ReceiverConfig& config, std::string error_field)
     config_string += "," +  Key("WriteToDisk", error_field) + (config.write_to_disk ? "true" : "false");
     config_string += "," +  Key("WriteToDb", error_field) + (config.write_to_db ? "true" : "false");
     config_string += "," +  Key("LogLevel", error_field) + "\"" + log_level + "\"";
-    config_string += "," +  Key("AdvertiseIP", error_field) + "\"" + config.advertise_ip + "\"";
     config_string += "," +  Key("Tag", error_field) + "\"" + config.tag + "\"";
     config_string += "}";
 
diff --git a/receiver/unittests/test_config.cpp b/receiver/unittests/test_config.cpp
index 538f6f9dc03ac6791a26df9ae239c62d7c5a6010..d3c81a5003b8203c3c94d9fc9a6b6310b12b929e 100644
--- a/receiver/unittests/test_config.cpp
+++ b/receiver/unittests/test_config.cpp
@@ -58,10 +58,9 @@ class ConfigTests : public Test {
         test_config.use_datacache = false;
         test_config.datacache_reserved_share = 10;
         test_config.datacache_size_gb = 2;
-        test_config.advertise_ip = "host";
         test_config.dataserver.nthreads = 5;
         test_config.discovery_server = "discovery";
-        test_config.advertise_ip = "0.0.0.1";
+        test_config.dataserver.advertise_uri = "0.0.0.1:4201";
         test_config.receive_to_disk_threshold_mb = 50;
 
     }
@@ -94,7 +93,7 @@ TEST_F(ConfigTests, ReadSettings) {
     ASSERT_THAT(config->dataserver.nthreads, Eq(5));
     ASSERT_THAT(config->dataserver.tag, Eq("receiver1_ds"));
     ASSERT_THAT(config->discovery_server, Eq("discovery"));
-    ASSERT_THAT(config->advertise_ip, Eq("0.0.0.1"));
+    ASSERT_THAT(config->dataserver.advertise_uri, Eq("0.0.0.1:4201"));
     ASSERT_THAT(config->receive_to_disk_threshold_mb, Eq(50));
 }
 
@@ -105,7 +104,7 @@ TEST_F(ConfigTests, ErrorReadSettings) {
     std::vector<std::string>fields {"PerformanceDbServer", "ListenPort", "DataServer", "ListenPort", "WriteToDisk",
                                     "WriteToDb", "DataCache", "Use", "SizeGB", "ReservedShare", "DatabaseServer", "Tag",
                                     "AuthorizationServer", "AuthorizationInterval", "PerformanceDbName", "LogLevel",
-                                    "NThreads", "DiscoveryServer", "AdvertiseIP", "ReceiveToDiskThresholdMB"};
+                                    "NThreads", "DiscoveryServer", "AdvertiseURI", "ReceiveToDiskThresholdMB"};
     for (const auto& field : fields) {
         auto err = asapo::SetReceiverConfig(test_config, field);
         ASSERT_THAT(err, Ne(nullptr));
diff --git a/receiver/unittests/test_request_handler_db.cpp b/receiver/unittests/test_request_handler_db.cpp
index 1f4dc691c5eb5570574a722020d45f0cca77519a..f1091c3c2e2bb1328898f4eca4a3f710b4a18a3c 100644
--- a/receiver/unittests/test_request_handler_db.cpp
+++ b/receiver/unittests/test_request_handler_db.cpp
@@ -95,7 +95,7 @@ class DbHandlerTests : public Test {
 
 void DbHandlerTests::MockAuthRequest(bool error, HttpCode code) {
     if (error) {
-        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/mongo",  _, _)).
+        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/asapo-mongodb",  _, _)).
         WillOnce(
             DoAll(SetArgPointee<2>(new asapo::SimpleError("http error")),
                   Return("")
@@ -105,7 +105,7 @@ void DbHandlerTests::MockAuthRequest(bool error, HttpCode code) {
                                              HasSubstr(expected_discovery_server))));
 
     } else {
-        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/mongo", _, _)).
+        EXPECT_CALL(mock_http_client, Get_t(expected_discovery_server + "/asapo-mongodb", _, _)).
         WillOnce(
             DoAll(
                 SetArgPointee<1>(code),
diff --git a/receiver/unittests/test_request_handler_db_check_request.cpp b/receiver/unittests/test_request_handler_db_check_request.cpp
index e40cbfbb4cfe1091924edbc317db2d71f6f06619..4675c3fdd4bb23ce7df028c2ff9bddd271fb285a 100644
--- a/receiver/unittests/test_request_handler_db_check_request.cpp
+++ b/receiver/unittests/test_request_handler_db_check_request.cpp
@@ -75,7 +75,7 @@ class DbCheckRequestHandlerTests : public Test {
     std::string expected_beamtime_id = "beamtime_id";
     std::string expected_default_stream = "detector";
     std::string expected_stream = "stream";
-    std::string expected_host_ip = "127.0.0.1";
+    std::string expected_host_uri = "127.0.0.1:1234";
     uint64_t expected_port = 1234;
     uint64_t expected_buf_id = 18446744073709551615ull;
     std::string expected_file_name = "2";
@@ -96,7 +96,7 @@ class DbCheckRequestHandlerTests : public Test {
         handler.log__ = &mock_logger;
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, "", nullptr});
         config.database_uri = "127.0.0.1:27017";
-        config.advertise_ip = expected_host_ip;
+        config.dataserver.advertise_uri = expected_host_uri;
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
         expected_file_info =  PrepareFileInfo();
@@ -196,7 +196,7 @@ FileInfo DbCheckRequestHandlerTests::PrepareFileInfo() {
     file_info.name = expected_file_name;
     file_info.id = expected_id;
     file_info.buf_id = expected_buf_id;
-    file_info.source = expected_host_ip + ":" + std::to_string(expected_port);
+    file_info.source = expected_host_uri;
     file_info.metadata = expected_metadata;
     return file_info;
 }
diff --git a/receiver/unittests/test_request_handler_db_writer.cpp b/receiver/unittests/test_request_handler_db_writer.cpp
index 4f82656c72d3a2b067cc171f85182f1cd4be7bc7..ec2c75ae355104365ccf372d4363e768ddc3eba8 100644
--- a/receiver/unittests/test_request_handler_db_writer.cpp
+++ b/receiver/unittests/test_request_handler_db_writer.cpp
@@ -94,7 +94,7 @@ class DbWriterHandlerTests : public Test {
         handler.log__ = &mock_logger;
         mock_request.reset(new NiceMock<MockRequest> {request_header, 1, "", &mock_db_check_handler});
         config.database_uri = "127.0.0.1:27017";
-        config.advertise_ip = expected_host_ip;
+        config.dataserver.advertise_uri = expected_host_ip+":"+std::to_string(expected_port);
         config.dataserver.listen_port = expected_port;
         SetReceiverConfig(config, "none");
 
diff --git a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
index e5affaa43e61d5c9d867bb2b4ff4f9fea838ebea..acaf9b1a6fb163dc7daee974c34d21cc1fa17d9f 100644
--- a/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
+++ b/tests/automatic/bug_fixes/error-sending-data-using-callback-method/check_windows.bat
@@ -8,12 +8,7 @@ SET dbname = %beamtime_id%_%stream%
 
 echo db.%dbname%.insert({dummy:1})" | %mongo_exe% %dbname%
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 5 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 
@@ -34,11 +29,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 echo db.dropDatabase() | %mongo_exe% %dbname%
 
diff --git a/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat b/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
index 4b321f8974fc8e051a7dc5f3ce166847bd4d4a86..2421a18add8a7711b43c85fb2319db3b8b07d725 100644
--- a/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
+++ b/tests/automatic/bug_fixes/producer_send_after_restart/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -61,12 +55,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
diff --git a/tests/automatic/common_scripts/start_services.bat b/tests/automatic/common_scripts/start_services.bat
new file mode 100644
index 0000000000000000000000000000000000000000..9383d4aced21329cc23e324589d2af2e34a0cca1
--- /dev/null
+++ b/tests/automatic/common_scripts/start_services.bat
@@ -0,0 +1,26 @@
+c:\opt\consul\nomad run receiver.nmd
+c:\opt\consul\nomad run authorizer.nmd
+c:\opt\consul\nomad run discovery.nmd
+c:\opt\consul\nomad run broker.nmd
+c:\opt\consul\nomad run nginx.nmd
+c:\opt\consul\nomad run file_transfer.nmd
+
+
+ping 1.0.0.0 -n 10 -w 100 > nul
+
+set i=0
+:repeat
+set /a i=%i%+1
+echo %i%
+if %i% EQU 20 (
+    goto :error
+)
+ping 1.0.0.0 -n 3 -w 100  1>nul
+curl --silent --fail 127.0.0.1:8400/asapo-discovery/asapo-receiver --stderr - | findstr 127.0.0.1  || goto :repeat
+curl --silent --fail 127.0.0.1:8400/asapo-discovery/asapo-broker --stderr - | findstr 127.0.0.1 || goto :repeat
+curl --silent --fail 127.0.0.1:8400/asapo-discovery/asapo-file-transfer --stderr -  | findstr 127.0.0.1 || goto :repeat
+echo discovery ready
+goto :eof
+:error
+echo services did not start in time
+exit /b 1
diff --git a/tests/automatic/common_scripts/start_services.sh b/tests/automatic/common_scripts/start_services.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/automatic/common_scripts/stop_services.bat b/tests/automatic/common_scripts/stop_services.bat
new file mode 100644
index 0000000000000000000000000000000000000000..1f75f6970a8ffcac37a7478b7d4672f0c0a532ea
--- /dev/null
+++ b/tests/automatic/common_scripts/stop_services.bat
@@ -0,0 +1,7 @@
+c:\opt\consul\nomad stop receiver
+c:\opt\consul\nomad stop discovery
+c:\opt\consul\nomad stop broker
+c:\opt\consul\nomad stop authorizer
+c:\opt\consul\nomad stop nginx
+c:\opt\consul\nomad stop file_transfer
+c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
diff --git a/tests/automatic/common_scripts/stop_services.sh b/tests/automatic/common_scripts/stop_services.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/automatic/consumer/consumer_api/check_windows.bat b/tests/automatic/consumer/consumer_api/check_windows.bat
index a11f05a2bb4eb9e547d3f213eb0c694819486f77..6619a5c9653f487133012b6ccd3426b9332e64b9 100644
--- a/tests/automatic/consumer/consumer_api/check_windows.bat
+++ b/tests/automatic/consumer/consumer_api/check_windows.bat
@@ -6,13 +6,7 @@ SET database_name=%beamtime_id%_%stream%
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
-::first argument  path to the executable
-
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
@@ -39,10 +33,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 echo db.dropDatabase() | %mongo_exe% %database_name%
 del "1 1_1"
 
diff --git a/tests/automatic/consumer/consumer_api_python/check_windows.bat b/tests/automatic/consumer/consumer_api_python/check_windows.bat
index 86d08b03e970cd32a50f1ff9e9bbe555d67ae7f7..1b0ade3d5327978c3a7c99f6024dfb13ab04fc04 100644
--- a/tests/automatic/consumer/consumer_api_python/check_windows.bat
+++ b/tests/automatic/consumer/consumer_api_python/check_windows.bat
@@ -10,13 +10,7 @@ SET database_name=%beamtime_id%_%stream%
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-c:\opt\consul\nomad run file_transfer.nmd
-c:\opt\consul\nomad run authorizer.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 5) do echo db.data_default.insert({"_id":%%x,"size":6,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
@@ -49,12 +43,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop file_transfer
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 
 echo db.dropDatabase() | %mongo_exe% %database_name%
 del c:\tmp\asapo\consumer_test\files\1
diff --git a/tests/automatic/consumer/next_multithread_broker/CMakeLists.txt b/tests/automatic/consumer/next_multithread_broker/CMakeLists.txt
index 6e0ef20c094b35d6b80e5cb4c422b29386bfe577..cb89f1c4f5f9f2c1272ea9aa72f0c5b9dc1d4e2c 100644
--- a/tests/automatic/consumer/next_multithread_broker/CMakeLists.txt
+++ b/tests/automatic/consumer/next_multithread_broker/CMakeLists.txt
@@ -1,7 +1,6 @@
 set(TARGET_NAME next_multithread_broker)
 set(SOURCE_FILES next_multithread_broker.cpp)
 
-
 ################################
 # Executable and link
 ################################
@@ -11,7 +10,11 @@ target_link_libraries(${TARGET_NAME} test_common asapo-consumer)
 ################################
 # Testing
 ################################
-prepare_asapo()
-add_script_test("${TARGET_NAME}" "$<TARGET_FILE:${TARGET_NAME}>"
-        )
+
+if (UNIX)
+    prepare_asapo()
+    add_script_test("${TARGET_NAME}" "$<TARGET_FILE:${TARGET_NAME}>")
+endif()
+
+
 
diff --git a/tests/automatic/consumer/next_multithread_broker/check_windows.bat b/tests/automatic/consumer/next_multithread_broker/check_windows.bat
index de24971c97cc3a4048d60a8a84914910cfa1a8ae..4c4bb2e685d89f1339fd8aaaea08809181e19df6 100644
--- a/tests/automatic/consumer/next_multithread_broker/check_windows.bat
+++ b/tests/automatic/consumer/next_multithread_broker/check_windows.bat
@@ -2,13 +2,7 @@ SET database_name=test_run_detector
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 set token_test_run=K38Mqc90iRv8fC7prcFHd994mF_wfUiJnWBfIjIzieo=
 
-::first argument  path to the executable
-
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":100,"name":"%%x","lastchange":1,"source":"none","buf_id":0,"meta":{"test":10}}) | %mongo_exe% %database_name%  || goto :error
 
@@ -22,8 +16,6 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
+
 echo db.dropDatabase() | %mongo_exe% %database_name%
diff --git a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
index df24e18c1e33de2109b93fb840fab8acade22c86..93917960d16747306a8189b0e4be975018d12e59 100644
--- a/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
+++ b/tests/automatic/consumer/next_multithread_broker/next_multithread_broker.cpp
@@ -18,8 +18,15 @@ void Assert(std::vector<asapo::FileInfos> file_infos, int nthreads, int nfiles)
         }
     }
     // file names created by setup.sh should be '1','2',... Each thread should access different files.
-    M_AssertEq(nfiles, nfiles_read);
-    M_AssertTrue(std::is_permutation(expect.begin(), expect.end(), result.begin()));
+    if (nfiles != nfiles_read) {
+        std::cout << "nfiles != nfiles_read" << std::endl;
+        exit(EXIT_FAILURE);
+    }
+//    M_AssertEq(nfiles, nfiles_read);
+    if (!std::is_permutation(expect.begin(), expect.end(), result.begin())) {
+        std::cout << "!std::is_permutation" << std::endl;
+        exit(EXIT_FAILURE);
+    }
 }
 
 struct Args {
@@ -64,7 +71,9 @@ void TestAll(const Args& args) {
     }
 
     for (auto& thread : threads) {
-        thread.join();
+        if (thread.joinable()) {
+            thread.join();
+        }
     }
 
     Assert(file_infos, args.nthreads, args.nfiles);
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/check_linux.sh b/tests/automatic/curl_http_client/curl_http_client_command/check_linux.sh
index 989a4562e6b4e5708dc3d000e31b1394c4b38ece..406b111aa9a3989178fd6451b3bbd9d24f39b50e 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/check_linux.sh
+++ b/tests/automatic/curl_http_client/curl_http_client_command/check_linux.sh
@@ -11,7 +11,7 @@ Cleanup() {
   echo cleanup
   nomad stop authorizer
   nomad stop file_transfer
-  rm -rf $file_transfer_folder bbb
+  rm -rf $file_transfer_folder bbb random
 }
 
 nomad run authorizer.nmd
@@ -21,7 +21,10 @@ sleep 1
 mkdir -p $file_transfer_folder
 echo -n hello > $file_transfer_folder/aaa
 
-$1  127.0.0.1:5007 127.0.0.1:5008 $file_transfer_folder aaa
+dd if=/dev/urandom of=$file_transfer_folder/random bs=1 count=100000
+
+$1  127.0.0.1:5007 127.0.0.1:5008 $file_transfer_folder
 cat bbb | tee /dev/stderr | grep hello
+diff -q random $file_transfer_folder/random
 
 
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/check_windows.bat b/tests/automatic/curl_http_client/curl_http_client_command/check_windows.bat
index 1748f6e8c9c643754a6f2c383c12dc154c1ed4f7..7aab2755cce76f6668eba63700022d09ef6bb402 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/check_windows.bat
+++ b/tests/automatic/curl_http_client/curl_http_client_command/check_windows.bat
@@ -13,7 +13,9 @@ ping 1.0.0.0 -n 1 -w 100 > nul
 mkdir %file_transfer_folder%
 echo | set /p dummyName="hello" > %file_transfer_folder%\aaa
 
-"%1"  127.0.0.1:5007 127.0.0.1:5008 %file_transfer_folder% aaa  || goto :error
+python3 -c "import os;fout=open('%file_transfer_folder%\\random', 'wb');fout.write(os.urandom(100000))"
+
+"%1"  127.0.0.1:5007 127.0.0.1:5008 %file_transfer_folder%   || goto :error
 
 type bbb | findstr /c:"hello"  || goto :error
 
@@ -27,5 +29,5 @@ exit /b 1
 c:\opt\consul\nomad stop authorizer
 c:\opt\consul\nomad stop file_transfer
 rmdir /S /Q %file_transfer_folder%
-del /f bbb
+del /f bbb random
 
diff --git a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
index 63d202b67c4c24f0eac1f0678a518b264b6e7b82..75bc3d65bcd9ec7b5cc6b194c888cc669669da35 100644
--- a/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
+++ b/tests/automatic/curl_http_client/curl_http_client_command/curl_httpclient_command.cpp
@@ -4,24 +4,24 @@
 #include "testing.h"
 #include "../../../consumer/api/cpp/src/server_data_broker.h"
 #include "preprocessor/definitions.h"
+#include "io/io_factory.h"
+#include "io/io.h"
 
 struct Args {
     std::string uri_authorizer;
     std::string uri_fts;
     std::string folder;
-    std::string fname;
 };
 
 Args GetArgs(int argc, char* argv[]) {
-    if (argc != 5) {
+    if (argc != 4) {
         std::cout << "Wrong number of arguments" << std::endl;
         exit(EXIT_FAILURE);
     }
     std::string uri_authorizer{argv[1]};
     std::string uri_fts{argv[2]};
     std::string folder{argv[3]};
-    std::string fname{argv[4]};
-    return Args{uri_authorizer, uri_fts, folder, fname};
+    return Args{uri_authorizer, uri_fts, folder};
 }
 
 
@@ -58,14 +58,34 @@ int main(int argc, char* argv[]) {
     auto content = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &code, &err);
     M_AssertEq("hello", content);
     M_AssertTrue(code == asapo::HttpCode::OK);
-
+// with array
     asapo::FileData data;
     err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, 5, &code);
     M_AssertEq( "hello", reinterpret_cast<char const*>(data.get()));
     M_AssertTrue(code == asapo::HttpCode::OK);
 
+    transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"random\"}";
+    auto io = asapo::GenerateDefaultIO();
+    auto fname = args.folder+asapo::kPathSeparator+"random";
+    uint64_t size=0;
+    auto expected_data = io->GetDataFromFile(fname,&size,&err);
+    M_AssertEq(nullptr, err);
+    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, &data, size, &code);
+    M_AssertTrue(code == asapo::HttpCode::OK);
+    for (uint64_t i=0;i<size;i++) {
+        if (expected_data[i] != data[i]) {
+            M_AssertTrue(false,"received array equal to sent array");
+        }
+    }
+
+// with file
+    transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"aaa\"}";
     err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "bbb", &code);
     M_AssertTrue(code == asapo::HttpCode::OK);
 
+    transfer = "{\"Folder\":\"" + args.folder + "\",\"FileName\":\"random\"}";
+    err = server_broker->httpclient__->Post(args.uri_fts + "/transfer", cookie, transfer, "random", &code);
+    M_AssertTrue(code == asapo::HttpCode::OK);
+
     return 0;
 }
diff --git a/tests/automatic/file_transfer_service/rest_api/check_linux.sh b/tests/automatic/file_transfer_service/rest_api/check_linux.sh
index 89f45c4c8440887d845f022b61d7c1296e25e024..b1b61e26255c3a51e2dde0fdfd6c656ee54e1001 100644
--- a/tests/automatic/file_transfer_service/rest_api/check_linux.sh
+++ b/tests/automatic/file_transfer_service/rest_api/check_linux.sh
@@ -25,11 +25,12 @@ token=bnCXpOdBV90wU1zybEw1duQNSORuwaKz6oDHqmL35p0= #token for aaa
 folder_token=`curl --silent --data "{\"Folder\":\"$file_transfer_folder\",\"BeamtimeId\":\"aaa\",\"Token\":\"$token\"}" 127.0.0.1:5007/folder`
 echo $folder_token
 
-echo hello > $file_transfer_folder/aaa
+
+dd if=/dev/urandom of=$file_transfer_folder/aaa bs=1 count=100000
 
 curl -o aaa --silent -H "Authorization: Bearer ${folder_token}" --data "{\"Folder\":\"$file_transfer_folder\",\"FileName\":\"aaa\",\"Token\":\"$folder_token\"}" 127.0.0.1:5008/transfer --stderr - | tee /dev/stderr
 
-cat aaa | grep hello
+diff -q aaa $file_transfer_folder/aaa
 
 dd if=/dev/zero of=$file_transfer_folder/big_file bs=1 count=0 seek=5368709120
 
diff --git a/tests/automatic/full_chain/send_recv_substreams/check_windows.bat b/tests/automatic/full_chain/send_recv_substreams/check_windows.bat
index aa79cf59d7dff753f5127ebbe37b6fc81fd09af5..d89ca68c8e229b56fd09562bfa1712b2463490f6 100644
--- a/tests/automatic/full_chain/send_recv_substreams/check_windows.bat
+++ b/tests/automatic/full_chain/send_recv_substreams/check_windows.bat
@@ -10,12 +10,7 @@ SET beamline=test
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
-
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
+call start_services.bat
 
 "%1" 127.0.0.1:8400 %beamtime_id% %token%
 
@@ -26,11 +21,6 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 
 echo db.dropDatabase() | %mongo_exe% %indatabase_name%
diff --git a/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat b/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat
index aa79cf59d7dff753f5127ebbe37b6fc81fd09af5..475943c379ac9a534bfd8afb91e9616585f9b055 100644
--- a/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat
+++ b/tests/automatic/full_chain/send_recv_substreams_python/check_windows.bat
@@ -10,12 +10,7 @@ SET beamline=test
 
 SET mongo_exe="c:\Program Files\MongoDB\Server\4.2\bin\mongo.exe"
 
-
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
+call start_services.bat
 
 "%1" 127.0.0.1:8400 %beamtime_id% %token%
 
@@ -26,11 +21,5 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop authorizer
-
+call stop_services.bat
 echo db.dropDatabase() | %mongo_exe% %indatabase_name%
diff --git a/tests/automatic/full_chain/simple_chain/check_windows.bat b/tests/automatic/full_chain/simple_chain/check_windows.bat
index 5abab849d9d27b4c14f94f6450cc1aea8df10eff..fd9244556c2a3c0775a069d8d7aac95d63bfaa49 100644
--- a/tests/automatic/full_chain/simple_chain/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain/check_windows.bat
@@ -13,13 +13,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -38,12 +32,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
diff --git a/tests/automatic/full_chain/simple_chain_dataset/check_windows.bat b/tests/automatic/full_chain/simple_chain_dataset/check_windows.bat
index b3b48582f15d77a69aaff2bb707f42b6818d5480..e1273b93924409a1e8e41093276405b4e3927861 100644
--- a/tests/automatic/full_chain/simple_chain_dataset/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_dataset/check_windows.bat
@@ -12,13 +12,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -39,12 +33,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
diff --git a/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
index 5c726fe1548963c9b8ee6bf106c4660dd604d8d6..500e19ccdfdafaaa065ad5b4f4898523fcd0642c 100644
--- a/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -51,12 +45,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
 rmdir /S /Q c:\tmp\asapo\test_in\test2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_batches/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_batches/check_windows.bat
index 831f5eaeb05c74ba6eb08d682a77b47ba68f6af0..5c5e7017ff72cda4c99cce9c5f30753bbdb60bfc 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_batches/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_batches/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -52,12 +46,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
 rmdir /S /Q c:\tmp\asapo\test_in\test2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
index 73c81c8aafb520f989d89c41a910a0a3e906cca2..a8cf1670079f3c700aa5e395cc4dd8d8fbb1f1eb 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_multisource/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 mkdir  c:\tmp\asapo\test_in\test1
@@ -58,12 +52,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
 rmdir /S /Q c:\tmp\asapo\test_in\test2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
index 782fb8460ea5fe9379a4138184887da2eb3e1e67..1e3fcd21031106b6e5fd830611bdfe60ffcd8ea6 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -55,12 +49,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
 rmdir /S /Q c:\tmp\asapo\test_in\test2
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
index 9ec3d1d6aaea0923bd832a15b86b25456f9da5c3..dc674898e8a6de37125f77147c700529e6628394 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_file/check_windows.bat
@@ -16,13 +16,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -55,12 +49,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q c:\tmp\asapo\test_in\test1
 rmdir /S /Q c:\tmp\asapo\test_in\test2
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
index 60fec9f1fe30c00cba5eadf5bccf8ee40aa4ac78..4f1689f50bc5554d932169c3c407dc82939ed281 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
@@ -43,8 +43,8 @@ sleep 1
 
 #producer
 mkdir -p ${receiver_folder}
-$1 localhost:8400 ${beamtime_id} 100 0 1 0 100
+$1 localhost:8400 ${beamtime_id} 100 0 1 0 1000
 
-$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 0 1 > out
+$2 ${proxy_address} ${receiver_folder} ${beamtime_id} 2 $token 1000 1 > out
 cat out
 cat out | grep "dummy_meta"
\ No newline at end of file
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
index 7dc9d9a93d9e32fbfa998768e7e407f565b85b1c..9dbba7db3e8c1d8487c9d0ccc19eb20d0e9226ea 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_windows.bat
@@ -4,7 +4,6 @@ SET beamline=test
 SET receiver_root_folder=c:\tmp\asapo\receiver\files
 SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\data\%beamtime_id%"
 
-
 "%3" token -secret auth_secret.key %beamtime_id% > token
 set /P token=< token
 
@@ -12,20 +11,14 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
-"%1" %proxy_address% %beamtime_id% 100 0 1 0 100
+"%1" %proxy_address% %beamtime_id% 100 0 1 0 1000
 
 REM consumer
-"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 0  1 > out.txt
+"%2" %proxy_address% %receiver_folder% %beamtime_id% 2 %token% 5000  1 > out.txt
 type out.txt
 findstr /i /l /c:"dummy_meta"  out.txt || goto :error
 
@@ -37,12 +30,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
index 76a0fdf19efddbf0e8b03bd03ed56616fad73216..3fc487795810b2546dc28f6a45a2ab93fe69f801 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_windows.bat
@@ -12,13 +12,7 @@ set proxy_address="127.0.0.1:8400"
 
 echo db.%beamtime_id%_detector.insert({dummy:1}) | %mongo_exe% %beamtime_id%_detector
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 20 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -39,12 +33,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
diff --git a/tests/automatic/full_chain/two_beamlines/check_windows.bat b/tests/automatic/full_chain/two_beamlines/check_windows.bat
index cc1f0b2c1af45fd68ed38a78fa7b61c04041855c..b0c5259143f98ad9710033b7a6fc13d6faf70425 100644
--- a/tests/automatic/full_chain/two_beamlines/check_windows.bat
+++ b/tests/automatic/full_chain/two_beamlines/check_windows.bat
@@ -23,13 +23,7 @@ set proxy_address="127.0.0.1:8400"
 echo db.%beamtime_id1%_%stream%.insert({dummy:1}) | %mongo_exe% %beamtime_id1%_%stream%
 echo db.%beamtime_id2%_%stream%.insert({dummy:1}) | %mongo_exe% %beamtime_id2%_%stream%
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder1%
@@ -56,12 +50,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token1
 del /f token2
diff --git a/tests/automatic/full_chain/two_streams/check_windows.bat b/tests/automatic/full_chain/two_streams/check_windows.bat
index 3e47d04b78f43e2003ebef66c17a1e26fb80dc37..fd2b9268da99a3c4ab0f715ae04b05fc0d095895 100644
--- a/tests/automatic/full_chain/two_streams/check_windows.bat
+++ b/tests/automatic/full_chain/two_streams/check_windows.bat
@@ -15,14 +15,7 @@ set proxy_address="127.0.0.1:8400"
 echo db.%beamtime_id%_%stream1%.insert({dummy:1}) | %mongo_exe% %beamtime_id%_%stream1%
 echo db.%beamtime_id%_%stream2%.insert({dummy:1}) | %mongo_exe% %beamtime_id%_%stream2%
 
-
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run broker.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 10 -w 100 > nul
+call start_services.bat
 
 REM producer
 mkdir %receiver_folder%
@@ -48,12 +41,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop broker
-c:\opt\consul\nomad stop authorizer
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 del /f token1
 del /f token2
diff --git a/tests/automatic/producer/aai/check_windows.bat b/tests/automatic/producer/aai/check_windows.bat
index ea077946cbae3ecd7a9f29da0f914ca244bf5a7e..6a38017b8ba4d9af8b51d3106cad22a8fac06a7f 100644
--- a/tests/automatic/producer/aai/check_windows.bat
+++ b/tests/automatic/producer/aai/check_windows.bat
@@ -12,12 +12,7 @@ SET token=-pZmisCNjAbjT2gFBKs3OB2kNOU79SNsfHud0bV8gS4=
 
 echo db.%dbname%.insert({dummy:1})" | %mongo_exe% %dbname%
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 5 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 mkdir %receiver_folder2%
@@ -48,11 +43,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 rmdir /S /Q %receiver_root_folder2%
 echo db.dropDatabase() | %mongo_exe% %dbname%
diff --git a/tests/automatic/producer/python_api/check_windows.bat b/tests/automatic/producer/python_api/check_windows.bat
index dd61b4ea3a132aeff2bf9647f40cda1db18c75eb..aec0dd80126a6b8a868105c866f68d3396f72b06 100644
--- a/tests/automatic/producer/python_api/check_windows.bat
+++ b/tests/automatic/producer/python_api/check_windows.bat
@@ -8,12 +8,7 @@ SET dbname=%beamtime_id%_%stream%
 
 echo db.%dbname%.insert({dummy:1})" | %mongo_exe% %dbname%
 
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 5 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 
@@ -45,11 +40,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 echo db.dropDatabase() | %mongo_exe% %dbname%
 
diff --git a/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat b/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
index 28016c93da8dad3fbc1a55a9f6148ac099756182..c675100ba2f845d2329cb97c658f9aa4e38a34de 100644
--- a/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_datasets/check_windows.bat
@@ -7,13 +7,7 @@ SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\d
 
 echo db.%beamtime_id%_detector.insert({dummy:1})" | %mongo_exe% %beamtime_id%_detector
 
-
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 1 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 
@@ -42,11 +36,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
 
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
index 8c88e139f4f1c423a56bf24168e09bc21f432d45..75c4b4c2a45e4c6b5c3b1421bd6dd0e33f4dc5b9 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
@@ -7,13 +7,7 @@ SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\d
 
 echo db.%beamtime_id%_detector.insert({dummy:1})" | %mongo_exe% %beamtime_id%_detector
 
-
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 1 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 
@@ -33,11 +27,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
 
diff --git a/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_windows.bat b/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_windows.bat
index 2a9fe358fe6c7850419ca759165cdcf800a1c1d7..bc501b842e9d4a4e61aab6630ae8632202cbcdae 100644
--- a/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_single_file_bypass_buffer/check_windows.bat
@@ -7,13 +7,7 @@ SET receiver_folder="%receiver_root_folder%\test_facility\gpfs\%beamline%\2019\d
 
 echo db.%beamtime_id%_detector.insert({dummy:1})" | %mongo_exe% %beamtime_id%_detector
 
-
-c:\opt\consul\nomad run receiver.nmd
-c:\opt\consul\nomad run authorizer.nmd
-c:\opt\consul\nomad run discovery.nmd
-c:\opt\consul\nomad run nginx.nmd
-
-ping 1.0.0.0 -n 1 -w 100 > nul
+call start_services.bat
 
 mkdir %receiver_folder%
 
@@ -36,11 +30,7 @@ call :clean
 exit /b 1
 
 :clean
-c:\opt\consul\nomad stop receiver
-c:\opt\consul\nomad stop discovery
-c:\opt\consul\nomad stop nginx
-c:\opt\consul\nomad run nginx_kill.nmd  && c:\opt\consul\nomad stop -yes -purge nginx_kill
-c:\opt\consul\nomad stop authorizer
+call stop_services.bat
 rmdir /S /Q %receiver_root_folder%
 echo db.dropDatabase() | %mongo_exe% %beamtime_id%_detector
 
diff --git a/tests/automatic/settings/broker_settings.json.tpl b/tests/automatic/settings/broker_settings.json.tpl
index 56694d8538be26718c9e32bc69a538ea20abc987..3edfc9b888491cd19af803f02bdc9dda650bb95c 100644
--- a/tests/automatic/settings/broker_settings.json.tpl
+++ b/tests/automatic/settings/broker_settings.json.tpl
@@ -1,6 +1,6 @@
 {
   "DatabaseServer":"auto",
-  "DiscoveryServer": "localhost:8400/discovery",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
   "PerformanceDbServer": "localhost:8086",
   "PerformanceDbName": "db_test",
   "Port":{{ env "NOMAD_PORT_broker" }},
diff --git a/tests/automatic/settings/nginx.conf.tpl b/tests/automatic/settings/nginx.conf.tpl
index bf598bc5b0f782fb44e7cc127d209aaee5b407ad..09a376250af365dde744bd36ae718a5b3a09c491 100644
--- a/tests/automatic/settings/nginx.conf.tpl
+++ b/tests/automatic/settings/nginx.conf.tpl
@@ -27,8 +27,8 @@ http {
     resolver 127.0.0.1:8600 valid=1s;
     server {
           listen {{ env "NOMAD_PORT_nginx" }};
-          set $discovery_endpoint discovery.service.asapo;
-          set $authorizer_endpoint authorizer.service.asapo;
+          set $discovery_endpoint asapo-discovery.service.asapo;
+          set $authorizer_endpoint asapo-authorizer.service.asapo;
           set $fluentd_endpoint fluentd.service.asapo;
           set $kibana_endpoint kibana.service.asapo;
           set $grafana_endpoint grafana.service.asapo;
@@ -45,8 +45,8 @@ http {
             proxy_pass http://$elasticsearch_endpoint:9200$uri$is_args$args;
           }
 
-          location /discovery/ {
-            rewrite ^/discovery(/.*) $1 break;
+          location /asapo-discovery/ {
+            rewrite ^/asapo-discovery(/.*) $1 break;
             proxy_pass http://$discovery_endpoint:5006$uri$is_args$args;
           }
 
@@ -67,8 +67,8 @@ http {
             proxy_pass http://$grafana_endpoint:3000$uri$is_args$args;
           }
 
-          location /authorizer/ {
-             rewrite ^/authorizer(/.*) $1 break;
+          location /asapo-authorizer/ {
+             rewrite ^/asapo-authorizer(/.*) $1 break;
              proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
           }
 
diff --git a/tests/automatic/settings/receiver.json.tpl.lin.in b/tests/automatic/settings/receiver.json.tpl.lin.in
index b910579330a6849a654cafef00918a119628a8e1..1177861e12ba54dcf0e0a0e0c4c21cb30a079283 100644
--- a/tests/automatic/settings/receiver.json.tpl.lin.in
+++ b/tests/automatic/settings/receiver.json.tpl.lin.in
@@ -1,10 +1,10 @@
 {
-  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"auto",
-  "DiscoveryServer": "localhost:8400/discovery",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
   "DataServer": {
+    "AdvertiseURI": "127.0.0.1:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
@@ -13,7 +13,7 @@
     "SizeGB": 1,
     "ReservedShare": 10
   },
-  "AuthorizationServer": "localhost:8400/authorizer",
+  "AuthorizationServer": "localhost:8400/asapo-authorizer",
   "AuthorizationInterval": 1000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
   "Tag": "{{ env "NOMAD_ADDR_recv" }}",
diff --git a/tests/automatic/settings/receiver.json.tpl.win.in b/tests/automatic/settings/receiver.json.tpl.win.in
index f46bd5bdf5349be12114870036853518059cbd6b..02fdd657bff47c4f8988b677e57092917e5db1e9 100644
--- a/tests/automatic/settings/receiver.json.tpl.win.in
+++ b/tests/automatic/settings/receiver.json.tpl.win.in
@@ -1,13 +1,13 @@
 {
-  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"auto",
-  "DiscoveryServer": "localhost:8400/discovery",
-  "AuthorizationServer": "localhost:8400/authorizer",
+  "DiscoveryServer": "localhost:8400/asapo-discovery",
+  "AuthorizationServer": "localhost:8400/asapo-authorizer",
   "AuthorizationInterval": 1000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
   "DataServer": {
+    "AdvertiseURI": "127.0.0.1:{{ env "NOMAD_PORT_recv_ds" }}",
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
diff --git a/tests/manual/broker_debug_local/receiver.json b/tests/manual/broker_debug_local/receiver.json
index 6a5d12f59d24f354bfba8354a61341fcafbad518..5567105bf672adfedefa41bacd655e2c24d20351 100644
--- a/tests/manual/broker_debug_local/receiver.json
+++ b/tests/manual/broker_debug_local/receiver.json
@@ -3,8 +3,8 @@
   "PerformanceDbName": "db_test",
   "DatabaseServer":"localhost:27017",
   "DiscoveryServer": "localhost:8400/discovery",
-  "AdvertiseIP":"127.0.0.1",
   "DataServer": {
+    "AdvertiseURI":"127.0.0.1",
     "NThreads": 2,
     "ListenPort": 22000
   },
diff --git a/tests/manual/broker_debug_local/receiver.json.tpl b/tests/manual/broker_debug_local/receiver.json.tpl
index 1f7cbc7c1b75af4b38c23fc7fdba9f158c145b8e..a6c0887d9c96c0f22c59808f7c438009704f7896 100644
--- a/tests/manual/broker_debug_local/receiver.json.tpl
+++ b/tests/manual/broker_debug_local/receiver.json.tpl
@@ -1,10 +1,10 @@
 {
-  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"auto",
   "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
+    "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
diff --git a/tests/manual/maxwell/asapo_test/clean.sh b/tests/manual/maxwell/asapo_test/clean.sh
index 3160c9320cd1d85a49365bd683f5ccf2d8fde3ae..83475c376e73deea5a3ef941d3458e6f283670c9 100755
--- a/tests/manual/maxwell/asapo_test/clean.sh
+++ b/tests/manual/maxwell/asapo_test/clean.sh
@@ -1,12 +1,12 @@
 export asapo_host=`cat asapo_host`
 
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream0
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream1
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream2
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream3
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream4
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream5
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream6
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream7
-dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/discovery/mongo` --eval "db.dropDatabase()" asapo_test_stream8
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream0
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream1
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream2
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream3
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream4
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream5
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream6
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream7
+dockerrun -v `pwd`:/tmp/yakubov mongo mongo --host `curl -s $asapo_host:8400/asapo-discovery/asapo-mongodb` --eval "db.dropDatabase()" asapo_test_stream8
diff --git a/tests/manual/python_tests/consumer/consumer_api.py b/tests/manual/python_tests/consumer/consumer_api.py
index c59d022e451abc660e711e36904a6164da7aee43..0aa14cb8dbd215b7fe7538d8815a8180af9c6d39 100644
--- a/tests/manual/python_tests/consumer/consumer_api.py
+++ b/tests/manual/python_tests/consumer/consumer_api.py
@@ -3,15 +3,18 @@ from __future__ import print_function
 import asapo_consumer
 import sys
 
-source, beamtime,path, token = sys.argv[1:]
+source, path,beamtime, token = sys.argv[1:]
 broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
 group_id = broker.generate_group_id()
 
 
-data, meta = broker.get_by_id(1, group_id, meta_only=False)
+_, meta = broker.get_by_id(1,group_id, meta_only=True)
 
-print (meta)
-print (len(data))
+#meta["buf_id"]=0
+data = broker.retrieve_data(meta)
 
+print (meta)
+print (len(data),data[0:100])
+data.tofile("out")
 
 sys.exit(0)
\ No newline at end of file
diff --git a/tests/manual/python_tests/consumer/test_asap3.sh b/tests/manual/python_tests/consumer/test_asap3.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6f43030a3abde5128a9d4ba282437cddd0ab4785
--- /dev/null
+++ b/tests/manual/python_tests/consumer/test_asap3.sh
@@ -0,0 +1,3 @@
+export PYTHONPATH=/Users/yakubov/projects/asapo/cmake-build-debug/consumer/api/python
+export token=KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc=
+python3 consumer_api.py asap3-utl.desy.de:8400 /gpfs/asapo/shared/test_dir/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token
diff --git a/tests/manual/python_tests/consumer/test_k8s.sh b/tests/manual/python_tests/consumer/test_k8s.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3cd69a940ae900b7c389d53eff4922a05bf30eb3
--- /dev/null
+++ b/tests/manual/python_tests/consumer/test_k8s.sh
@@ -0,0 +1,4 @@
+export PYTHONPATH=/Users/yakubov/projects/asapo/cmake-build-debug/consumer/api/python
+export token=IEfwsWa0GXky2S3MkxJSUHJT1sI8DD5teRdjBUXVRxk=
+python3 consumer_api.py gest-k8s-test2.desy.de/yakser /test_offline/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token
+#python3 getnext.py gest-k8s-test2.desy.de/yakser /test_offline/test_facility/gpfs/test/2019/data/asapo_test asapo_test $token new
\ No newline at end of file
diff --git a/tests/manual/python_tests/producer/receiver.json.tpl b/tests/manual/python_tests/producer/receiver.json.tpl
index a235fec0d273fea59a9e8537f5f65324c38498a8..6f80d44bbde2b6531ff5c05406b0a9ded02e11a5 100644
--- a/tests/manual/python_tests/producer/receiver.json.tpl
+++ b/tests/manual/python_tests/producer/receiver.json.tpl
@@ -1,10 +1,10 @@
 {
-  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"localhost:27017",
   "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
+    "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
diff --git a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
index a235fec0d273fea59a9e8537f5f65324c38498a8..6f80d44bbde2b6531ff5c05406b0a9ded02e11a5 100644
--- a/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
+++ b/tests/manual/python_tests/producer_wait_bug_mongo/receiver.json.tpl
@@ -1,10 +1,10 @@
 {
-  "AdvertiseIP": "127.0.0.1",
   "PerformanceDbServer":"localhost:8086",
   "PerformanceDbName": "db_test",
   "DatabaseServer":"localhost:27017",
   "DiscoveryServer": "localhost:8400/discovery",
   "DataServer": {
+    "AdvertiseURI": "127.0.0.1",
     "NThreads": 2,
     "ListenPort": {{ env "NOMAD_PORT_recv_ds" }}
   },
diff --git a/tests/manual/receiver_debug_local/receiver.json b/tests/manual/receiver_debug_local/receiver.json
index 6a5d12f59d24f354bfba8354a61341fcafbad518..5567105bf672adfedefa41bacd655e2c24d20351 100644
--- a/tests/manual/receiver_debug_local/receiver.json
+++ b/tests/manual/receiver_debug_local/receiver.json
@@ -3,8 +3,8 @@
   "PerformanceDbName": "db_test",
   "DatabaseServer":"localhost:27017",
   "DiscoveryServer": "localhost:8400/discovery",
-  "AdvertiseIP":"127.0.0.1",
   "DataServer": {
+    "AdvertiseURI":"127.0.0.1",
     "NThreads": 2,
     "ListenPort": 22000
   },