diff --git a/.gitignore b/.gitignore
index f7e7dd86ded915fa21e6425bcf8f175642da575c..408ed7c05c698edf53182f529d2bb53a6cb2ecf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -145,4 +145,4 @@ terraform.tfstate*
 
 #helm chart
 deploy/asapo_helm_chart/asapo/Chart.lock
-deploy/asapo_helm_chart/asapo/charts/
\ No newline at end of file
+deploy/asapo_helm_chart/asapo/charts/*.tgz
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/Chart.yaml b/deploy/asapo_helm_chart/asapo/Chart.yaml
index 032a6d45bf5ccfff6c17974c8d09d108506a09f4..c6617f16684d5616fb8bc1956ec1146de6408e33 100644
--- a/deploy/asapo_helm_chart/asapo/Chart.yaml
+++ b/deploy/asapo_helm_chart/asapo/Chart.yaml
@@ -25,5 +25,10 @@ dependencies:
   - name: influxdb
     version: "~0.4.3"
     repository: "https://charts.bitnami.com/bitnami"
-
+#  - name: elasticsearch
+#    version: "~11.0.13"
+#    repository: "https://charts.bitnami.com/bitnami"
+#  - name: kibana
+#    version: "~7.6.1"
+#    repository: "https://helm.elastic.co"
 
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore
new file mode 100755
index 0000000000000000000000000000000000000000..f0c13194444163d1cba5c67d9e79231a62bc8f44
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..7fc07e39bb32bd2820e8f7dfcf780793c9863175
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+appVersion: 7.6.1
+description: A highly scalable open-source full-text search and analytics engine
+engine: gotpl
+home: https://www.elastic.co/products/elasticsearch
+icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-110x117.png
+keywords:
+- elasticsearch
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: elasticsearch
+sources:
+- https://github.com/bitnami/bitnami-docker-elasticsearch
+version: 11.0.13
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..7f202bd4208e8670d9bc118e31d365d4fbf1683d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/README.md
@@ -0,0 +1,527 @@
+# Elasticsearch
+
+[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time.
+
+## TL;DR
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/elasticsearch
+```
+
+## Introduction
+
+This chart bootstraps an [Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.11+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/elasticsearch
+```
+
+These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` release:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release. On Helm 2, also remove the release history with the `--purge` option:
+
+```console
+$ helm delete --purge my-release
+```
+
+## Parameters
+
+The following table lists the configurable parameters of the Elasticsearch chart and their default values.
+
+|                     Parameter                     |                                                                        Description                                                                        |                           Default                            |
+|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|
+| `global.imageRegistry`                            | Global Docker image registry                                                                                                                              | `nil`                                                        |
+| `global.imagePullSecrets`                         | Global Docker registry secret names as an array                                                                                                           | `[]` (does not add image pull secrets to deployed pods)      |
+| `global.storageClass`                             | Global storage class for dynamic provisioning                                                                                                             | `nil`                                                        |
+| `global.coordinating.name`                        | Coordinating-only node pod name at global level to be used also in the Kibana subchart                                                                    | `coordinating-only`                                          |
+| `image.registry`                                  | Elasticsearch image registry                                                                                                                              | `docker.io`                                                  |
+| `image.repository`                                | Elasticsearch image repository                                                                                                                            | `bitnami/elasticsearch`                                      |
+| `image.tag`                                       | Elasticsearch image tag                                                                                                                                   | `{TAG_NAME}`                                                 |
+| `image.pullPolicy`                                | Image pull policy                                                                                                                                         | `IfNotPresent`                                               |
+| `image.pullSecrets`                               | Specify docker-registry secret names as an array                                                                                                          | `[]` (does not add image pull secrets to deployed pods)      |
+| `nameOverride`                                    | String to partially override the elasticsearch.fullname template (will prepend the release name)                                                          | `nil`                                                        |
+| `fullnameOverride`                                | String to fully override the elasticsearch.fullname template                                                                                              | `nil`                                                        |
+| `name`                                            | Elasticsearch cluster name                                                                                                                                | `elastic`                                                    |
+| `plugins`                                         | Comma, semi-colon or space separated list of plugins to install at initialization                                                                         | `nil`                                                        |
+| `config`                                          | Elasticsearch node custom configuration                                                                                                                   | ``                                                           |
+| `extraVolumes`                                    | Extra volumes                                                                                                                                             |                                                              |
+| `extraVolumeMounts`                               | Mount extra volume(s)                                                                                                                                     |                                                              |
+| `master.name`                                     | Master-eligible node pod name                                                                                                                             | `master`                                                     |
+| `master.replicas`                                 | Desired number of Elasticsearch master-eligible nodes                                                                                                     | `2`                                                          |
+| `master.updateStrategy.type`                      | Update strategy for Master statefulset                                                                                                                    | `RollingUpdate`                                              |
+| `master.heapSize`                                 | Master-eligible node heap size                                                                                                                            | `128m`                                                       |
+| `master.service.type`                             | Kubernetes Service type (master-eligible nodes)                                                                                                           | `ClusterIP`                                                  |
+| `master.service.port`                             | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes)                                                                          | `9300`                                                       |
+| `master.service.nodePort`                         | Kubernetes Service nodePort (master-eligible nodes)                                                                                                       | `nil`                                                        |
+| `master.service.annotations`                      | Annotations for master-eligible nodes service                                                                                                             | `{}`                                                         |
+| `master.service.loadBalancerIP`                   | loadBalancerIP if master-eligible nodes service type is `LoadBalancer`                                                                                    | `nil`                                                        |
+| `master.resources`                                | CPU/Memory resource requests/limits for master-eligible nodes pods                                                                                        | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `master.podAnnotations`                           | Annotations for master pods.                                                                                                                              | `{}`                                                         |
+| `master.persistence.enabled`                      | Enable persistence using a `PersistentVolumeClaim`                                                                                                        | `true`                                                       |
+| `master.persistence.annotations`                  | Persistent Volume Claim annotations                                                                                                                       | `{}`                                                         |
+| `master.persistence.storageClass`                 | Persistent Volume Storage Class                                                                                                                           | ``                                                           |
+| `master.persistence.accessModes`                  | Persistent Volume Access Modes                                                                                                                            | `[ReadWriteOnce]`                                            |
+| `master.persistence.size`                         | Persistent Volume Size                                                                                                                                    | `8Gi`                                                        |
+| `master.securityContext.enabled`                  | Enable security context for master-eligible pods                                                                                                          | `true`                                                       |
+| `master.securityContext.fsGroup`                  | Group ID for the container for master-eligible pods                                                                                                       | `1001`                                                       |
+| `master.securityContext.runAsUser`                | User ID for the container for master-eligible pods                                                                                                        | `1001`                                                       |
+| `master.livenessProbe.enabled`                    | Enable/disable the liveness probe (master-eligible nodes pod)                                                                                             | `true`                                                       |
+| `master.livenessProbe.initialDelaySeconds`        | Delay before liveness probe is initiated (master-eligible nodes pod)                                                                                      | `90`                                                         |
+| `master.livenessProbe.periodSeconds`              | How often to perform the probe (master-eligible nodes pod)                                                                                                | `10`                                                         |
+| `master.livenessProbe.timeoutSeconds`             | When the probe times out (master-eligible nodes pod)                                                                                                      | `5`                                                          |
+| `master.livenessProbe.successThreshold`           | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod)                                   | `1`                                                          |
+| `master.livenessProbe.failureThreshold`           | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `master.readinessProbe.enabled`                   | Enable/disable the readiness probe (master-eligible nodes pod)                                                                                            | `true`                                                       |
+| `master.readinessProbe.initialDelaySeconds`       | Delay before readiness probe is initiated (master-eligible nodes pod)                                                                                     | `90`                                                         |
+| `master.readinessProbe.periodSeconds`             | How often to perform the probe (master-eligible nodes pod)                                                                                                | `10`                                                         |
+| `master.readinessProbe.timeoutSeconds`            | When the probe times out (master-eligible nodes pod)                                                                                                      | `5`                                                          |
+| `master.readinessProbe.successThreshold`          | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod)                                   | `1`                                                          |
+| `master.readinessProbe.failureThreshold`          | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `master.serviceAccount.create`                    | Enable creation of ServiceAccount for the master node                                                                                                     | `false`                                                      |
+| `master.serviceAccount.name`                      | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.master.fullname` template |
+| `clusterDomain`                                   | Kubernetes cluster domain                                                                                                                                 | `cluster.local`                                              |
+| `discovery.name`                                  | Discovery node pod name                                                                                                                                   | `discovery`                                                  |
+| `coordinating.replicas`                           | Desired number of Elasticsearch coordinating-only nodes                                                                                                   | `2`                                                          |
+| `coordinating.updateStrategy.type`                | Update strategy for Coordinating Deployment                                                                                                               | `RollingUpdate`                                              |
+| `coordinating.heapSize`                           | Coordinating-only node heap size                                                                                                                          | `128m`                                                       |
+| `coordinating.podAnnotations`                     | Annotations for coordinating pods.                                                                                                                        | `{}`                                                         |
+| `coordinating.service.type`                       | Kubernetes Service type (coordinating-only nodes)                                                                                                         | `ClusterIP`                                                  |
+| `coordinating.service.port`                       | Kubernetes Service port for REST API (coordinating-only nodes)                                                                                            | `9200`                                                       |
+| `coordinating.service.nodePort`                   | Kubernetes Service nodePort (coordinating-only nodes)                                                                                                     | `nil`                                                        |
+| `coordinating.service.annotations`                | Annotations for coordinating-only nodes service                                                                                                           | `{}`                                                         |
+| `coordinating.service.loadBalancerIP`             | loadBalancerIP if coordinating-only nodes service type is `LoadBalancer`                                                                                  | `nil`                                                        |
+| `coordinating.resources`                          | CPU/Memory resource requests/limits for coordinating-only nodes pods                                                                                      | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `coordinating.securityContext.enabled`            | Enable security context for coordinating-only pods                                                                                                        | `true`                                                       |
+| `coordinating.securityContext.fsGroup`            | Group ID for the container for coordinating-only pods                                                                                                     | `1001`                                                       |
+| `coordinating.securityContext.runAsUser`          | User ID for the container for coordinating-only pods                                                                                                      | `1001`                                                       |
+| `coordinating.livenessProbe.enabled`              | Enable/disable the liveness probe (coordinating-only nodes pod)                                                                                           | `true`                                                       |
+| `coordinating.livenessProbe.initialDelaySeconds`  | Delay before liveness probe is initiated (coordinating-only nodes pod)                                                                                    | `90`                                                         |
+| `coordinating.livenessProbe.periodSeconds`        | How often to perform the probe (coordinating-only nodes pod)                                                                                              | `10`                                                         |
+| `coordinating.livenessProbe.timeoutSeconds`       | When the probe times out (coordinating-only nodes pod)                                                                                                    | `5`                                                          |
+| `coordinating.livenessProbe.successThreshold`     | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod)                                 | `1`                                                          |
+| `coordinating.livenessProbe.failureThreshold`     | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `coordinating.readinessProbe.enabled`             | Enable/disable the readiness probe (coordinating-only nodes pod)                                                                                          | `true`                                                       |
+| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod)                                                                                   | `90`                                                         |
+| `coordinating.readinessProbe.periodSeconds`       | How often to perform the probe (coordinating-only nodes pod)                                                                                              | `10`                                                         |
+| `coordinating.readinessProbe.timeoutSeconds`      | When the probe times out (coordinating-only nodes pod)                                                                                                    | `5`                                                          |
+| `coordinating.readinessProbe.successThreshold`    | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod)                                 | `1`                                                          |
+| `coordinating.readinessProbe.failureThreshold`    | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `coordinating.serviceAccount.create`              | Enable creation of ServiceAccount for the coordinating-only node                                                                                          | `false`                                                      |
+| `coordinating.serviceAccount.name`                | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.coordinating.fullname` template |
+| `data.name`                                       | Data node pod name                                                                                                                                        | `data`                                                       |
+| `data.replicas`                                   | Desired number of Elasticsearch data nodes                                                                                                                | `3`                                                          |
+| `data.updateStrategy.type`                        | Update strategy for Data statefulset                                                                                                                      | `RollingUpdate`                                              |
+| `data.updateStrategy.rollingUpdatePartition`      | Partition update strategy for Data statefulset                                                                                                            | `nil`                                                        |
+| `data.heapSize`                                   | Data node heap size                                                                                                                                       | `1024m`                                                      |
+| `data.resources`                                  | CPU/Memory resource requests/limits for data nodes                                                                                                        | `requests: { cpu: "25m", memory: "1152Mi" }`                 |
+| `data.persistence.enabled`                        | Enable persistence using a `PersistentVolumeClaim`                                                                                                        | `true`                                                       |
+| `data.persistence.annotations`                    | Persistent Volume Claim annotations                                                                                                                       | `{}`                                                         |
+| `data.persistence.storageClass`                   | Persistent Volume Storage Class                                                                                                                           | ``                                                           |
+| `data.persistence.accessModes`                    | Persistent Volume Access Modes                                                                                                                            | `[ReadWriteOnce]`                                            |
+| `data.persistence.size`                           | Persistent Volume Size                                                                                                                                    | `8Gi`                                                        |
+| `data.securityContext.enabled`                    | Enable security context for data pods                                                                                                                     | `true`                                                       |
+| `data.securityContext.fsGroup`                    | Group ID for the container for data pods                                                                                                                  | `1001`                                                       |
+| `data.securityContext.runAsUser`                  | User ID for the container for data pods                                                                                                                   | `1001`                                                       |
+| `data.livenessProbe.enabled`                      | Enable/disable the liveness probe (data nodes pod)                                                                                                        | `true`                                                       |
+| `data.livenessProbe.initialDelaySeconds`          | Delay before liveness probe is initiated (data nodes pod)                                                                                                 | `90`                                                         |
+| `data.livenessProbe.periodSeconds`                | How often to perform the probe (data nodes pod)                                                                                                           | `10`                                                         |
+| `data.livenessProbe.timeoutSeconds`               | When the probe times out (data nodes pod)                                                                                                                 | `5`                                                          |
+| `data.livenessProbe.successThreshold`             | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod)                                              | `1`                                                          |
+| `data.livenessProbe.failureThreshold`             | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `data.podAnnotations`                             | Annotations for data pods.                                                                                                                                | `{}`                                                         |
+| `data.readinessProbe.enabled`                     | Enable/disable the readiness probe (data nodes pod)                                                                                                       | `true`                                                       |
+| `data.readinessProbe.initialDelaySeconds`         | Delay before readiness probe is initiated (data nodes pod)                                                                                                | `90`                                                         |
+| `data.readinessProbe.periodSeconds`               | How often to perform the probe (data nodes pod)                                                                                                           | `10`                                                         |
+| `data.readinessProbe.timeoutSeconds`              | When the probe times out (data nodes pod)                                                                                                                 | `5`                                                          |
+| `data.readinessProbe.successThreshold`            | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod)                                              | `1`                                                          |
+| `data.readinessProbe.failureThreshold`            | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `data.serviceAccount.create`                      | Enable creation of ServiceAccount for the data node                                                                                                       | `false`                                                      |
+| `data.serviceAccount.name`                        | Name of the created serviceAccount                                                                                                                        | Generated using the `elasticsearch.data.fullname` template   |
+| `ingest.enabled`                                  | Enable ingest nodes                                                                                                                                       | `false`                                                      |
+| `ingest.name`                                     | Ingest node pod name                                                                                                                                      | `ingest`                                                     |
+| `ingest.replicas`                                 | Desired number of Elasticsearch ingest nodes                                                                                                              | `2`                                                          |
+| `ingest.heapSize`                                 | Ingest node heap size                                                                                                                                     | `128m`                                                       |
+| `ingest.service.type`                             | Kubernetes Service type (ingest nodes)                                                                                                                    | `ClusterIP`                                                  |
+| `ingest.service.port`                             | Kubernetes Service port for Elasticsearch transport port (ingest nodes)                                                                                   | `9300`                                                       |
+| `ingest.service.nodePort`                         | Kubernetes Service nodePort (ingest nodes)                                                                                                                | `nil`                                                        |
+| `ingest.service.annotations`                      | Annotations for ingest nodes service                                                                                                                      | `{}`                                                         |
+| `ingest.service.loadBalancerIP`                   | loadBalancerIP if ingest nodes service type is `LoadBalancer`                                                                                             | `nil`                                                        |
+| `ingest.resources`                                | CPU/Memory resource requests/limits for ingest nodes pods                                                                                                 | `requests: { cpu: "25m", memory: "256Mi" }`                  |
+| `ingest.securityContext.enabled`                  | Enable security context for ingest pods                                                                                                                   | `true`                                                       |
+| `ingest.securityContext.fsGroup`                  | Group ID for the container for ingest pods                                                                                                                | `1001`                                                       |
+| `ingest.securityContext.runAsUser`                | User ID for the container for ingest pods                                                                                                                 | `1001`                                                       |
+| `ingest.livenessProbe.enabled`                    | Enable/disable the liveness probe (ingest nodes pod)                                                                                                      | `true`                                                       |
+| `ingest.livenessProbe.initialDelaySeconds`        | Delay before liveness probe is initiated (ingest nodes pod)                                                                                               | `90`                                                         |
+| `ingest.livenessProbe.periodSeconds`              | How often to perform the probe (ingest nodes pod)                                                                                                         | `10`                                                         |
+| `ingest.livenessProbe.timeoutSeconds`             | When the probe times out (ingest nodes pod)                                                                                                               | `5`                                                          |
+| `ingest.livenessProbe.successThreshold`           | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod)                                            | `1`                                                          |
+| `ingest.livenessProbe.failureThreshold`           | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `ingest.podAnnotations`                           | Annotations for ingest pods.                                                                                                                              | `{}`                                                         |
+| `ingest.readinessProbe.enabled`                   | Enable/disable the readiness probe (ingest nodes pod)                                                                                                     | `true`                                                       |
+| `ingest.readinessProbe.initialDelaySeconds`       | Delay before readiness probe is initiated (ingest nodes pod)                                                                                              | `90`                                                         |
+| `ingest.readinessProbe.periodSeconds`             | How often to perform the probe (ingest nodes pod)                                                                                                         | `10`                                                         |
+| `ingest.readinessProbe.timeoutSeconds`            | When the probe times out (ingest nodes pod)                                                                                                               | `5`                                                          |
+| `ingest.readinessProbe.successThreshold`          | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod)                                            | `1`                                                          |
+| `ingest.readinessProbe.failureThreshold`          | Minimum consecutive failures for the probe to be considered failed after having succeeded                                                                 | `5`                                                          |
+| `curator.enabled`                                 | Enable Elasticsearch Curator cron job                                                                                                                     | `false`                                                      |
+| `curator.name`                                    | Elasticsearch Curator pod name                                                                                                                            | `curator`                                                    |
+| `curator.image.registry`                          | Elasticsearch Curator image registry                                                                                                                      | `docker.io`                                                  |
+| `curator.image.repository`                        | Elasticsearch Curator image repository                                                                                                                    | `bitnami/elasticsearch-curator`                              |
+| `curator.image.tag`                               | Elasticsearch Curator image tag                                                                                                                           | `{TAG_NAME}`                                                 |
+| `curator.image.pullPolicy`                        | Elasticsearch Curator image pull policy                                                                                                                   | `IfNotPresent`                                               |
+| `curator.cronjob.schedule`                        | Schedule for the CronJob                                                                                                                                  | `0 1 * * *`                                                  |
+| `curator.cronjob.annotations`                     | Annotations to add to the cronjob                                                                                                                         | `{}`                                                         |
+| `curator.cronjob.concurrencyPolicy`               | Concurrency policy for the CronJob (`Allow`, `Forbid` or `Replace`)                                                                                       | `nil`                                                        |
+| `curator.cronjob.failedJobsHistoryLimit`          | Specify the number of failed Jobs to keep                                                                                                                 | `nil`                                                        |
+| `curator.cronjob.successfulJobsHistoryLimit`      | Specify the number of completed Jobs to keep                                                                                                              | `nil`                                                        |
+| `curator.cronjob.jobRestartPolicy`                | Control the Job restartPolicy                                                                                                                             | `Never`                                                      |
+| `curator.podAnnotations`                          | Annotations to add to the pod                                                                                                                             | `{}`                                                         |
+| `curator.rbac.enabled`                            | Enable RBAC resources                                                                                                                                     | `false`                                                      |
+| `curator.serviceAccount.create`                   | Create a default serviceaccount for elasticsearch curator                                                                                                 | `true`                                                       |
+| `curator.serviceAccount.name`                     | Name for elasticsearch curator serviceaccount                                                                                                             | `""`                                                         |
+| `curator.hooks`                                   | Whether to run job on selected hooks                                                                                                                      | `{ "install": false, "upgrade": false }`                     |
+| `curator.psp.create`                              | Create pod security policy resources                                                                                                                      | `false`                                                      |
+| `curator.dryrun`                                  | Run Curator in dry-run mode                                                                                                                               | `false`                                                      |
+| `curator.command`                                 | Command to execute                                                                                                                                        | `["/curator/curator"]`                                       |
+| `curator.env`                                     | Environment variables to add to the cronjob container                                                                                                     | `{}`                                                         |
+| `curator.configMaps.action_file_yml`              | Contents of the Curator action_file.yml                                                                                                                   | See values.yaml                                              |
+| `curator.configMaps.config_yml`                   | Contents of the Curator config.yml (overrides config)                                                                                                     | See values.yaml                                              |
+| `curator.resources`                               | Resource requests and limits                                                                                                                              | `{}`                                                         |
+| `curator.priorityClassName`                       | priorityClassName                                                                                                                                         | `nil`                                                        |
+| `curator.extraVolumes`                            | Extra volumes                                                                                                                                             |                                                              |
+| `curator.extraVolumeMounts`                       | Mount extra volume(s)                                                                                                                                     |                                                              |
+| `curator.extraInitContainers`                     | Init containers to add to the cronjob container                                                                                                           | `{}`                                                         |
+| `curator.envFromSecrets`                          | Environment variables from secrets to the cronjob container                                                                                               | `{}`                                                         |
+| `curator.envFromSecrets.*.from.secret`            | `secretKeyRef.name` used for the environment variable                                                                                                     |                                                              |
+| `curator.envFromSecrets.*.from.key`               | `secretKeyRef.key` used for the environment variable                                                                                                      |                                                              |
+| `metrics.enabled`                                 | Enable prometheus exporter                                                                                                                                | `false`                                                      |
+| `metrics.name`                                    | Metrics pod name                                                                                                                                          | `metrics`                                                    |
+| `metrics.image.registry`                          | Metrics exporter image registry                                                                                                                           | `docker.io`                                                  |
+| `metrics.image.repository`                        | Metrics exporter image repository                                                                                                                         | `bitnami/elasticsearch-exporter`                             |
+| `metrics.image.tag`                               | Metrics exporter image tag                                                                                                                                | `1.0.2`                                                      |
+| `metrics.image.pullPolicy`                        | Metrics exporter image pull policy                                                                                                                        | `IfNotPresent`                                               |
+| `metrics.service.type`                            | Metrics exporter endpoint service type                                                                                                                    | `ClusterIP`                                                  |
+| `metrics.service.annotations`                     | Annotations for metrics service.                                                                                                                          | `{prometheus.io/scrape: "true", prometheus.io/port: "8080"}` |
+| `metrics.resources`                               | Metrics exporter resource requests/limit                                                                                                                  | `requests: { cpu: "25m" }`                                   |
+| `metrics.podAnnotations`                          | Annotations for metrics pods.                                                                                                                             | `{prometheus.io/scrape: "true", prometheus.io/port: "8080"}` |
+| `metrics.serviceMonitor.enabled`                  | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)                                                    | `false`                                                      |
+| `metrics.serviceMonitor.namespace`                | Namespace in which Prometheus is running                                                                                                                  | `nil`                                                        |
+| `metrics.serviceMonitor.interval`                 | Interval at which metrics should be scraped.                                                                                                              | `nil` (Prometheus Operator default value)                    |
+| `metrics.serviceMonitor.scrapeTimeout`            | Timeout after which the scrape is ended                                                                                                                   | `nil` (Prometheus Operator default value)                    |
+| `metrics.serviceMonitor.selector`                 | Prometheus instance selector labels                                                                                                                       | `nil`                                                        |
+| `sysctlImage.enabled`                             | Enable kernel settings modifier image                                                                                                                     | `true`                                                       |
+| `sysctlImage.registry`                            | Kernel settings modifier image registry                                                                                                                   | `docker.io`                                                  |
+| `sysctlImage.repository`                          | Kernel settings modifier image repository                                                                                                                 | `bitnami/minideb`                                            |
+| `sysctlImage.tag`                                 | Kernel settings modifier image tag                                                                                                                        | `buster`                                                     |
+| `sysctlImage.pullPolicy`                          | Kernel settings modifier image pull policy                                                                                                                | `Always`                                                     |
+| `volumePermissions.enabled`                       | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) | `false`                                                      |
+| `volumePermissions.image.registry`                | Init container volume-permissions image registry                                                                                                          | `docker.io`                                                  |
+| `volumePermissions.image.repository`              | Init container volume-permissions image name                                                                                                              | `bitnami/minideb`                                            |
+| `volumePermissions.image.tag`                     | Init container volume-permissions image tag                                                                                                               | `buster`                                                     |
+| `volumePermissions.image.pullPolicy`              | Init container volume-permissions image pull policy                                                                                                       | `Always`                                                     |
+| `volumePermissions.resources`                     | Init container resource requests/limit                                                                                                                    | `nil`                                                        |
+
+### Kibana Parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.kibanaEnabled` | Use bundled Kibana | `false` |
+| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `{{ include "elasticsearch.coordinating.fullname" . }}` (coordinating service fullname) |
+| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `9200` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set name=my-elastic,client.service.port=8080 \
+  bitnami/elasticsearch
+```
+
+The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/elasticsearch
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml).
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
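+
+For example, assuming you have the chart's `values-production.yaml` file locally, an install sketch could be:
+
+```console
+$ helm install my-release -f values-production.yaml bitnami/elasticsearch
+```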
+
+- Init container that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors):
+```diff
+- sysctlImage.enabled: true
++ sysctlImage.enabled: false
+```
+
+- Desired number of Elasticsearch master-eligible nodes:
+```diff
+- master.replicas: 2
++ master.replicas: 3
+```
+
+- Enable the liveness probe (master-eligible nodes pod):
+```diff
+- master.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ master.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (master-eligible nodes pod):
+```diff
+- master.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ master.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the liveness probe (coordinating-only nodes pod):
+```diff
+- coordinating.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ coordinating.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (coordinating-only nodes pod):
+```diff
+- coordinating.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ coordinating.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Desired number of Elasticsearch data nodes:
+```diff
+- data.replicas: 2
++ data.replicas: 3
+```
+
+- Enable the liveness probe (data nodes pod):
+```diff
+- data.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ data.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (data nodes pod):
+```diff
+- data.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ data.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable ingest nodes:
+```diff
+- ingest.enabled: false
++ ingest.enabled: true
+```
+
+- Enable the liveness probe (ingest nodes pod):
+```diff
+- ingest.livenessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ ingest.livenessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the readiness probe (ingest nodes pod):
+```diff
+- ingest.readinessProbe.enabled: false
+-   #  initialDelaySeconds: 90
+-   #  periodSeconds: 10
+-   #  timeoutSeconds: 5
+-   #  successThreshold: 1
+-   #  failureThreshold: 5
++ ingest.readinessProbe.enabled: true
++   initialDelaySeconds: 90
++   periodSeconds: 10
++   timeoutSeconds: 5
++   successThreshold: 1
++   failureThreshold: 5
+```
+
+- Enable the Prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+- Enable bundled Kibana:
+```diff
+- global.kibanaEnabled: false
++ global.kibanaEnabled: true
+```
+
+### Default kernel settings
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
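+
+If you prefer to manage these settings yourself, a sketch (assuming you can run commands on every Kubernetes node) could be:
+
+```console
+$ sudo sysctl -w vm.max_map_count=262144
+$ sudo sysctl -w fs.file-max=65536
+$ helm install my-release --set sysctlImage.enabled=false bitnami/elasticsearch
+```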
+
+### Enable bundled Kibana
+
+This Elasticsearch chart contains Kibana as a subchart. You can enable it by setting the `global.kibanaEnabled=true` parameter; it is enabled by default when using the `values-production.yaml` file.
+To see the notes with operational instructions from the Kibana chart, pass the `--render-subchart-notes` flag as part of your `helm install` command. This way you can see both the Kibana and Elasticsearch notes in your terminal.
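+
+For example, a minimal sketch enabling the subchart and rendering its notes:
+
+```console
+$ helm install my-release \
+  --set global.kibanaEnabled=true \
+  --render-subchart-notes \
+  bitnami/elasticsearch
+```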
+
+## Persistence
+
+The [Bitnami Elasticsearch](https://github.com/bitnami/bitnami-docker-elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container.
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
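+
+For example:
+
+```console
+$ helm install my-release --set volumePermissions.enabled=true bitnami/elasticsearch
+```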
+
+## Notable changes
+
+### 11.0.0
+
+Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
+
+```console
+$ kubectl delete statefulset elasticsearch-master
+$ helm upgrade <DEPLOYMENT_NAME> bitnami/elasticsearch
+```
+
+### 10.0.0
+
+In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana can be found in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
+
+### 9.0.0
+
+Elasticsearch master nodes store the cluster state at `/bitnami/elasticsearch/data`. Among other things, this includes the UUID of the Elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if the k8s node(s) hosting the ES master pods go down and the pods are rescheduled onto other nodes. If that happens, the data nodes will no longer be able to join the cluster because the UUID changed, resulting in a broken cluster.
+
+To resolve such issues, PVCs are now attached for master node data persistence.
+
+---
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the deprecation of the older APIs, resulting in compatibility breakage.
+
+### 7.0.0
+
+This version enables by default the initContainer that modifies some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+## Upgrading
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions prior to 3.0.0. The following example assumes that the release name is elasticsearch:
+
+```console
+$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+$ kubectl delete statefulset elasticsearch-data --cascade=false
+```
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore
new file mode 100755
index 0000000000000000000000000000000000000000..f0c13194444163d1cba5c67d9e79231a62bc8f44
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..cc920bc9555e7eb9beec46ecf0cdb45348b3472c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+appVersion: 7.6.1
+description: Kibana is an open source, browser based analytics and search dashboard
+  for Elasticsearch.
+engine: gotpl
+home: https://www.elastic.co/products/kibana
+icon: https://bitnami.com/assets/stacks/kibana/img/kibana-stack-220x234.png
+keywords:
+- kibana
+- analytics
+- monitoring
+- metrics
+- logs
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: kibana
+sources:
+- https://github.com/bitnami/bitnami-docker-kibana
+version: 5.0.11
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..2d4888f267ef5ea85402ae906c22b0e02d6b2ba6
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/README.md
@@ -0,0 +1,326 @@
+# Kibana
+
+[Kibana](https://www.elastic.co/products/kibana) is an open source, browser-based analytics and search dashboard for Elasticsearch.
+
+## TL;DR;
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/kibana --set elasticsearch.hosts[0]=<Hostname of your ES instance> --set elasticsearch.port=<port of your ES instance>
+```
+
+## Introduction
+
+This chart bootstraps a [kibana](https://github.com/bitnami/bitnami-docker-kibana) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.11+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+- ReadWriteMany volumes for deployment scaling
+
+## Installing the Chart
+
+This chart requires an Elasticsearch instance to work. You can use an already existing Elasticsearch instance.
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release \
+  --set elasticsearch.hosts[0]=<Hostname of your ES instance> \
+  --set elasticsearch.port=<port of your ES instance> \
+  bitnami/kibana
+```
+
+These commands deploy Kibana on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release. With Helm 2, use the `--purge` option to also delete the release history.
+
+## Parameters
+
+The following table lists the configurable parameters of the Kibana chart and their default values.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+| `image.registry` | Kibana image registry | `docker.io` |
+| `image.repository` | Kibana image name | `bitnami/kibana` |
+| `image.tag` | Kibana image tag | `{TAG_NAME}` |
+| `image.pullPolicy` | Kibana image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `nameOverride` | String to partially override kibana.fullname template with a string (will prepend the release name) | `nil` |
+| `fullnameOverride` | String to fully override kibana.fullname template with a string | `nil` |
+| `replicaCount` | Number of replicas of the Kibana Pod | `1` |
+| `updateStrategy` | Update strategy for deployment (evaluated as a template) | `{type: "RollingUpdate"}` |
+| `schedulerName` | Alternative scheduler | `nil` |
+| `plugins` | Array containing the Kibana plugins to be installed in deployment | `[]` |
+| `savedObjects.urls` | Array containing links to NDJSON files to be imported during Kibana initialization | `[]` |
+| `savedObjects.configmap` | ConfigMap containing NDJSON files to be imported during Kibana initialization (evaluated as a template) | `[]` |
+| `extraConfiguration` | Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template | `nil` |
+| `configurationCM` | ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml | `nil` |
+| `extraEnvVars` | Array containing extra env vars to configure Kibana | `nil` |
+| `extraEnvVarsCM` | ConfigMap containing extra env vars to configure Kibana | `nil` |
+| `extraEnvVarsSecret` | Secret containing extra env vars to configure Kibana (in case of sensitive data) | `nil` |
+| `extraVolumes` | Array of extra volumes to be added to the Kibana deployment (evaluated as a template). Requires setting `extraVolumeMounts` | `nil` |
+| `extraVolumeMounts` | Array of extra volume mounts to be added to the Kibana deployment (evaluated as a template). Normally used with `extraVolumes` | `nil` |
+| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.resources` | Init container resource requests/limit | `nil` |
+| `persistence.enabled` | Enable persistence | `true` |
+| `persistence.storageClass` | Storage class to use with the PVC | `nil` |
+| `persistence.accessMode` | Access mode to the PV | `ReadWriteOnce` |
+| `persistence.size` | Size for the PV | `10Gi` |
+| `livenessProbe.enabled` | Enable/disable the liveness probe | `true` |
+| `livenessProbe.initialDelaySeconds` | Delay before the liveness probe is initiated | `60` |
+| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | `1` |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `6` |
+| `readinessProbe.enabled` | Enable/disable the readiness probe | `true` |
+| `readinessProbe.initialDelaySeconds` | Delay before the readiness probe is initiated | `5` |
+| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
+| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `6` |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | `1` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.nodePort` | Port to bind to for NodePort service type (client port) | `nil` |
+| `service.annotations` | Annotations for Kibana service (evaluated as a template) | `{}` |
+| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
+| `service.loadBalancerIP` | loadBalancerIP if Kibana service type is `LoadBalancer` | `nil` |
+| `service.extraPorts` | Extra ports to expose in the service (normally used with the `sidecar` value). Evaluated as a template | `nil` |
+| `forceInitScripts` | Force the execution of the init scripts located in `/docker-entrypoint-initdb.d` | `false` |
+| `initScriptsCM` | ConfigMap containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time (evaluated as a template) | `nil` |
+| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time (that contain sensitive data). Evaluated as a template | `nil` |
+| `ingress.enabled` | Enable ingress controller resource | `false` |
+| `ingress.certManager` | Add annotations for cert-manager | `false` |
+| `ingress.annotations` | Ingress annotations | `[]` |
+| `ingress.hosts[0].name` | Hostname to your Kibana installation | `kibana.local` |
+| `ingress.hosts[0].path` | Path within the url structure | `/` |
+| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` |
+| `ingress.hosts[0].tlsHosts` | Array of TLS hosts for ingress record (defaults to `ingress.hosts[0].name` if `nil`) | `nil` |
+| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `kibana.local-tls` |
+| `securityContext.enabled` | Enable securityContext for the Kibana deployment | `true` |
+| `securityContext.runAsUser` | User for the security context | `1001` |
+| `securityContext.fsGroup` | Group to configure permissions for volumes | `1001` |
+| `resources` | Configure resource requests and limits (evaluated as a template) | `nil` |
+| `nodeSelector` | Node labels for pod assignment (evaluated as a template) | `{}` |
+| `tolerations` | Tolerations for pod assignment (evaluated as a template) | `[]` |
+| `affinity` | Affinity for pod assignment (evaluated as a template) | `{}` |
+| `podAnnotations` | Pod annotations (evaluated as a template) | `{}` |
+| `sidecars` | Attach additional containers to the pod (evaluated as a template) | `nil` |
+| `initContainers` | Add additional init containers to the pod (evaluated as a template) | `nil` |
+| `metrics.enabled` | Start a sidecar Prometheus exporter | `false` |
+| `metrics.service.annotations` | Prometheus annotations for the Kibana service | `{ prometheus.io/scrape: "true", prometheus.io/port: "80", prometheus.io/path: "_prometheus/metrics" }` |
+| `metrics.serviceMonitor.enabled` | If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `nil` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` |
+| `elasticsearch.hosts` | Array containing the hostnames of the already existing Elasticsearch instances | `nil` |
+| `elasticsearch.port` | Port for accessing the external Elasticsearch instances | `nil` |
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set replicaCount=2 \
+  bitnami/kibana
+```
+
+The above command deploys Kibana with two replicas (`replicaCount=2`). Remember that the chart also requires the `elasticsearch.hosts[0]` and `elasticsearch.port` parameters described above.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/kibana
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Enable metrics scraping
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Using custom configuration
+
+The Bitnami Kibana chart supports using custom configuration settings. For example, to mount a custom `kibana.yml` you can create a ConfigMap like the following:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: myconfig
+data:
+  kibana.yml: |-
+    # Raw text of the file
+```
+
+Then you need to pass the ConfigMap name to the corresponding parameter: `configurationCM=myconfig`
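+
+For example (a sketch, assuming the `myconfig` ConfigMap above exists in the release namespace and using placeholder Elasticsearch settings):
+
+```console
+$ helm install my-release \
+  --set configurationCM=myconfig \
+  --set elasticsearch.hosts[0]=elasticsearch-host,elasticsearch.port=9200 \
+  bitnami/kibana
+```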
+
+An alternative is to provide extra configuration settings to the default kibana.yml that the chart deploys. This is done using the `extraConfiguration` value:
+
+```yaml
+extraConfiguration:
+  "server.maxPayloadBytes": 1048576
+  "server.pingTimeout": 1500
+```
+
+### Adding extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: ELASTICSEARCH_VERSION
+    value: "6"
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
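+
+As a sketch, you could create the ConfigMap with `kubectl` and reference it at install time (the ConfigMap name and variable below are hypothetical):
+
+```console
+$ kubectl create configmap kibana-extra-env --from-literal=MY_EXTRA_SETTING=some-value  # hypothetical name and variable
+$ helm install my-release \
+  --set extraEnvVarsCM=kibana-extra-env \
+  --set elasticsearch.hosts[0]=elasticsearch-host,elasticsearch.port=9200 \
+  bitnami/kibana
+```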
+
+### Using custom init scripts
+
+For advanced operations, the Bitnami Kibana chart allows using custom init scripts that will be mounted in `/docker-entrypoint-initdb.d`. You can use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts, and then use the `initScriptsCM` and `initScriptsSecret` values.
+
+```console
+elasticsearch.hosts[0]=elasticsearch-host
+elasticsearch.port=9200
+initScriptsCM=special-scripts
+initScriptsSecret=special-scripts-sensitive
+```
+
+### Installing plugins
+
+The Bitnami Kibana chart allows you to install a set of plugins at deployment time using the `plugins` value:
+
+```console
+elasticsearch.hosts[0]=elasticsearch-host
+elasticsearch.port=9200
+plugins[0]=https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+```
+
+> **NOTE**: Make sure that the plugin is available for the Kibana version you are deploying.
+
+### Importing saved objects
+
+If you have visualizations and dashboards (in NDJSON format) that you want to import to Kibana, you can create a ConfigMap that includes them and then install the chart with the `savedObjects.configmap` value: `savedObjects.configmap=my-import`
+
+Alternatively, if it is available via URL, you can install the chart as follows: `savedObjects.urls[0]=www.my-site.com/import.ndjson`
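+
+For instance, a full install sketch using the URL import (the Elasticsearch settings are placeholders):
+
+```console
+$ helm install my-release \
+  --set savedObjects.urls[0]=www.my-site.com/import.ndjson \
+  --set elasticsearch.hosts[0]=elasticsearch-host,elasticsearch.port=9200 \
+  bitnami/kibana
+```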
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as Kibana (e.g. an additional metrics or logging exporter), you can add them via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+- name: your-image-name
+  image: your-image
+  imagePullPolicy: Always
+  ports:
+  - name: portname
+    containerPort: 1234
+```
+
+#### Add a sample Elasticsearch container as sidecar
+
+This chart requires an Elasticsearch instance to work. For production, you can use an already existing Elasticsearch instance or deploy the [Elasticsearch chart](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch) with the [`global.kibanaEnabled=true` parameter](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#enable-bundled-kibana).
+
+For the purpose of testing, you can use a sidecar Elasticsearch container setting the following parameters during the Kibana chart installation:
+
+```
+elasticsearch.hosts[0]=localhost
+elasticsearch.port=9200
+sidecars[0].name=elasticsearch
+sidecars[0].image=bitnami/elasticsearch:latest
+sidecars[0].imagePullPolicy=IfNotPresent
+sidecars[0].ports[0].name=http
+sidecars[0].ports[0].containerPort=9200
+```
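+
+Put together, a sketch of the full test install with those same values:
+
+```console
+$ helm install my-release \
+  --set elasticsearch.hosts[0]=localhost,elasticsearch.port=9200 \
+  --set sidecars[0].name=elasticsearch \
+  --set sidecars[0].image=bitnami/elasticsearch:latest \
+  --set sidecars[0].imagePullPolicy=IfNotPresent \
+  --set sidecars[0].ports[0].name=http \
+  --set sidecars[0].ports[0].containerPort=9200 \
+  bitnami/kibana
+```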
+
+## Persistence
+
+The [Bitnami Kibana](https://github.com/bitnami/bitnami-docker-kibana) image can persist data. If enabled, the persisted path is `/bitnami/kibana` by default.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adding extra volumes
+
+The Bitnami Kibana chart supports mounting extra volumes (either PVCs, secrets or configmaps) by using the `extraVolumes` and `extraVolumeMounts` properties. This can be combined with advanced operations like adding extra init containers and sidecars.
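+
+A minimal values sketch (the volume name and mount path are hypothetical):
+
+```yaml
+extraVolumes:
+  - name: my-extra-volume      # hypothetical volume name
+    emptyDir: {}
+extraVolumeMounts:
+  - name: my-extra-volume
+    mountPath: /opt/extra      # hypothetical mount path
+```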
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Notable changes
+
+### 5.0.0
+
+This version does not include Elasticsearch as a bundled dependency. From now on, you should specify an external Elasticsearch instance using the `elasticsearch.hosts[]` and `elasticsearch.port` [parameters](#parameters).
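+
+For example, when upgrading an existing release you might now pass (placeholders as in the chart notes):
+
+```console
+$ helm upgrade my-release bitnami/kibana \
+  --set elasticsearch.hosts[0]=YOUR_ES_HOST,elasticsearch.port=YOUR_ES_PORT
+```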
+
+### 3.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the deprecation of the older APIs, resulting in compatibility breakage.
+
+This major version signifies this change.
+
+### 2.0.0
+
+This version enables by default an initContainer that modifies some kernel settings to meet the Elasticsearch requirements.
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+You can disable the initContainer using the `elasticsearch.sysctlImage.enabled=false` parameter.
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt
new file mode 100755
index 0000000000000000000000000000000000000000..0576c2afbe322a11dc92122c815701a982fbbdaf
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/NOTES.txt
@@ -0,0 +1,55 @@
+{{- if or (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+######################################################################################################
+### ERROR: You did not provide the Elasticsearch external host or port in your 'helm install' call ###
+######################################################################################################
+
+Complete your Kibana deployment by running:
+
+  helm upgrade {{ .Release.Name }} bitnami/kibana \
+    --set elasticsearch.hosts[0]=YOUR_ES_HOST,elasticsearch.port=YOUR_ES_PORT
+
+Replace the "YOUR_ES_HOST" and "YOUR_ES_PORT" placeholders with the proper values of your Elasticsearch deployment.
+
+{{- else -}}
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kibana.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kibana.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kibana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kibana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward svc/{{ include "kibana.fullname" . }} 8080:{{ .Values.service.port }}
+{{- end }}
+
+{{- if or .Values.ingress.enabled (contains "NodePort" .Values.service.type) (contains "LoadBalancer" .Values.service.type) }}
+
+WARNING: Kibana is externally accessible from the cluster but the dashboard does not contain authentication mechanisms. Make sure you follow the authentication guidelines in your Elastic stack.
++info https://www.elastic.co/guide/en/elastic-stack-overview/current/setting-up-authentication.html
+{{- end }}
+
+{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+
+WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+WARNING: For Prometheus metrics to work, make sure that the kibana-prometheus-exporter plugin is installed:
++info https://github.com/pjhampton/kibana-prometheus-exporter
+{{- end }}
+
+{{ include "kibana.validateValues" . }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl
new file mode 100755
index 0000000000000000000000000000000000000000..d577a70643703e3f006a9664e443672d1962c8fe
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/_helpers.tpl
@@ -0,0 +1,274 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kibana.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kibana.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "kibana.imagePullSecrets" -}}
+{{- $imagePullSecrets := coalesce .Values.global.imagePullSecrets .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets -}}
+{{- if $imagePullSecrets }}
+imagePullSecrets:
+{{- range $imagePullSecrets }}
+  - name: {{ . }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if the deployment should include dashboards
+*/}}
+{{- define "kibana.importSavedObjects" -}}
+{{- if or .Values.savedObjects.configmap .Values.savedObjects.urls }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kibana image name
+*/}}
+{{- define "kibana.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kibana.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch URL.
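+Renders all entries of elasticsearch.hosts as a comma-separated list of URLs; for
+example, hosts [elasticsearch-1, elasticsearch-2] with port 9200 render
+"http://elasticsearch-1:9200,http://elasticsearch-2:9200".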
+*/}}
+{{- define "kibana.elasticsearch.url" -}}
+{{- if .Values.elasticsearch.hosts -}}
+{{- $totalHosts := len .Values.elasticsearch.hosts -}}
+{{- range $i, $hostTemplate := .Values.elasticsearch.hosts -}}
+{{- $host := tpl $hostTemplate $ }}
+{{- printf "http://%s:%s" $host (include "kibana.elasticsearch.port" $) -}}
+{{- if (lt ( add1 $i ) $totalHosts ) }}{{- printf "," -}}{{- end }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Set Elasticsearch Port.
+*/}}
+{{- define "kibana.elasticsearch.port" -}}
+{{- .Values.elasticsearch.port -}}
+{{- end -}}
+
+{{/*
+Get the Kibana PVC name.
+*/}}
+{{- define "kibana.pvc" -}}
+{{- .Values.persistence.existingClaim | default (include "kibana.fullname" .) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts Secret name.
+*/}}
+{{- define "kibana.initScriptsSecret" -}}
+{{- printf "%s" (tpl .Values.initScriptsSecret $) -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts configmap name.
+*/}}
+{{- define "kibana.initScriptsCM" -}}
+{{- printf "%s" (tpl .Values.initScriptsCM $) -}}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "kibana.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the saved objects configmap name.
+*/}}
+{{- define "kibana.savedObjectsCM" -}}
+{{- printf "%s" (tpl .Values.savedObjects.configmap $) -}}
+{{- end -}}
+
+{{/*
+Get the configuration ConfigMap name.
+*/}}
+{{- define "kibana.configurationCM" -}}
+{{- .Values.configurationCM | default (printf "%s-conf" (include "kibana.fullname" .)) -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "kibana.labels" -}}
+app.kubernetes.io/name: {{ include "kibana.name" . }}
+helm.sh/chart: {{ include "kibana.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Match labels
+*/}}
+{{- define "kibana.matchLabels" -}}
+app.kubernetes.io/name: {{ include "kibana.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
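+global.storageClass takes precedence over persistence.storageClass; the special
+value "-" renders storageClassName: "" to disable dynamic provisioning.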
+*/}}
+{{- define "kibana.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
+*/}}
+{{- if .Values.global -}}
+    {{- if .Values.global.storageClass -}}
+        {{- if (eq "-" .Values.global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .Values.persistence.storageClass -}}
+              {{- if (eq "-" .Values.persistence.storageClass) -}}
+                  {{- printf "storageClassName: \"\"" -}}
+              {{- else }}
+                  {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+              {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .Values.persistence.storageClass -}}
+        {{- if (eq "-" .Values.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
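+Each validator below returns an empty string when its values are consistent, so any
+non-empty message left after filtering means a check failed and the render aborts.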
+*/}}
+{{- define "kibana.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "kibana.validateValues.noElastic" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.configConflict" .) -}}
+{{- $messages := append $messages (include "kibana.validateValues.extraVolumes" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{-   printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - must provide an ElasticSearch */}}
+{{- define "kibana.validateValues.noElastic" -}}
+{{- if and (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
+kibana: no-elasticsearch
+    You did not specify an external Elasticsearch instance.
+    Please set elasticsearch.hosts and elasticsearch.port
+{{- else if and (not .Values.elasticsearch.hosts) .Values.elasticsearch.port }}
+kibana: missing-es-settings-host
+    You specified the external Elasticsearch port but not the host. Please
+    set elasticsearch.hosts
+{{- else if and .Values.elasticsearch.hosts (not .Values.elasticsearch.port) }}
+kibana: missing-es-settings-port
+    You specified the external Elasticsearch hosts but not the port. Please
+    set elasticsearch.port
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - configuration conflict */}}
+{{- define "kibana.validateValues.configConflict" -}}
+{{- if and (.Values.extraConfiguration) (.Values.configurationCM) -}}
+kibana: conflict-configuration
+    You specified a ConfigMap with kibana.yml and a set of settings to be added
+    to the default kibana.yml. Please only set either extraConfiguration or configurationCM
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kibana - Incorrect extra volume settings */}}
+{{- define "kibana.validateValues.extraVolumes" -}}
+{{- if and (.Values.extraVolumes) (not .Values.extraVolumeMounts) -}}
+kibana: missing-extra-volume-mounts
+    You specified extra volumes but not mount points for them. Please set
+    the extraVolumeMounts value
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
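+Clusters older than v1.14 get the legacy extensions/v1beta1 group; newer clusters use apps/v1.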
+*/}}
+{{- define "kibana.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..d77084bbac41a2e8a503197fdac446a2c23d17ce
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/configmap.yml
@@ -0,0 +1,16 @@
+{{- if and (not .Values.configurationCM) (and .Values.elasticsearch.hosts .Values.elasticsearch.port) }}
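+{{- /* Only rendered when no custom configurationCM is supplied and the Elasticsearch endpoints are set */}}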
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-conf
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  kibana.yml: |
+    pid.file: /opt/bitnami/kibana/tmp/kibana.pid
+    server.host: 0.0.0.0
+    server.port: 5601
+    elasticsearch.hosts: [{{ include "kibana.elasticsearch.url" . }}]
+    {{- if .Values.extraConfiguration }}
+    {{- tpl (toYaml .Values.extraConfiguration) $ | nindent 4 }}
+    {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..647584d1b270f7ef6f8f2d321ae1cbfc919ea6dd
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/deployment.yaml
@@ -0,0 +1,188 @@
+{{- if and .Values.elasticsearch.hosts .Values.elasticsearch.port -}}
+apiVersion: {{ template "kibana.deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  {{- if .Values.updateStrategy }}
+  strategy: {{- tpl (toYaml .Values.updateStrategy) $ | nindent 4 }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "kibana.matchLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels: {{- include "kibana.labels" . | nindent 8 }}
+    spec:
+    {{- if .Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName | quote }}
+    {{- end }}
+{{- include "kibana.imagePullSecrets" . | indent 6 }}
+    {{- if .Values.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+    {{- end }}
+    {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }}
+      initContainers:
+      {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
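+      {{- /* Runs chown as root before Kibana starts so the data volume is writable by runAsUser:fsGroup */}}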
+      - name: volume-permissions
+        image: "{{ template "kibana.volumePermissions.image" . }}"
+        imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+        command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kibana"]
+        securityContext:
+          runAsUser: 0
+        resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }}
+        volumeMounts:
+        - name: kibana-data
+          mountPath: /bitnami/kibana
+      {{- end }}
+      {{- if .Values.initContainers }}
+      {{- tpl (toYaml .Values.initContainers) $ | nindent 8 }}
+      {{- end }}
+    {{- end }}
+      containers:
+        - name: kibana
+          image: {{ include "kibana.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: KIBANA_ELASTICSEARCH_URL
+              value: {{ include "kibana.elasticsearch.url" . | quote }}
+            - name: KIBANA_ELASTICSEARCH_PORT
+              value: {{ include "kibana.elasticsearch.port" . | quote }}
+            - name: KIBANA_FORCE_INITSCRIPTS
+              value: {{ .Values.forceInitScripts | quote }}
+          {{- if .Values.extraEnvVars }}
+          {{- tpl (toYaml .Values.extraEnvVars) $ | nindent 12 }}
+          {{- end }}
+          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
+          envFrom:
+          {{- if .Values.extraEnvVarsCM }}
+          - configMapRef:
+              name: {{ .Values.extraEnvVarsCM }}
+          {{- end }}
+          {{- if .Values.extraEnvVarsSecret }}
+          - secretRef:
+              name: {{ .Values.extraEnvVarsSecret }}
+          {{- end }}
+          {{- end }}
+          ports:
+            - name: http
+              containerPort: 5601
+              protocol: TCP
+          {{- if .Values.livenessProbe.enabled }}
+          livenessProbe:
+            httpGet:
+              path: {{ tpl .Values.healthCheckPathTemplate $ }}
+              port: http
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+          {{- end }}
+          {{- if .Values.readinessProbe.enabled }}
+          readinessProbe:
+            httpGet:
+              path: {{ tpl .Values.healthCheckPathTemplate $ }}
+              port: http
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{- tpl (toYaml .Values.resources) $ | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+          - name: kibana-data
+            mountPath: /bitnami/kibana
+          - name: kibana-config
+            mountPath: /bitnami/kibana/conf
+          {{- if .Values.plugins }}
+          - name: plugins-init-scripts
+            mountPath: /docker-entrypoint-initdb.d/plugin-install
+          {{- end }}
+          {{- if (include "kibana.importSavedObjects" .) }}
+          - name: saved-objects-init-scripts
+            mountPath: /docker-entrypoint-initdb.d/saved-objects-import
+          {{- end }}
+          {{- if .Values.savedObjects.configmap }}
+          - name: saved-objects-configmap
+            mountPath: /bitnami/kibana/saved-objects
+          {{- end }}
+          {{- if .Values.initScriptsCM }}
+          - name: custom-init-scripts-cm
+            mountPath: /docker-entrypoint-initdb.d/cm
+          {{- end }}
+          {{- if .Values.initScriptsSecret }}
+          - name: custom-init-scripts-secret
+            mountPath: /docker-entrypoint-initdb.d/secret
+          {{- end }}
+          {{- if .Values.extraVolumeMounts }}
+          {{- tpl (toYaml .Values.extraVolumeMounts) $ | nindent 6 }}
+          {{- end }}
+      {{- if .Values.sidecars }}
+      {{- tpl (toYaml .Values.sidecars) $ | nindent 8 }}
+      {{- end }}
+      volumes:
+        - name: kibana-data
+        {{- if .Values.persistence.enabled }}
+          persistentVolumeClaim:
+            claimName: {{ include "kibana.pvc" . }}
+        {{- else }}
+          emptyDir: {}
+        {{- end }}
+        - name: kibana-config
+          configMap:
+            name: {{ include "kibana.configurationCM" . }}
+        {{- if (include "kibana.importSavedObjects" .) }}
+        - name: saved-objects-init-scripts
+          configMap:
+            name: {{ include "kibana.fullname" . }}-saved-objects
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.plugins }}
+        - name: plugins-init-scripts
+          configMap:
+            name: {{ include "kibana.fullname" . }}-plugins
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsCM }}
+        - name: custom-init-scripts-cm
+          configMap:
+            name: {{ template "kibana.initScriptsCM" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.initScriptsSecret }}
+        - name: custom-init-scripts-secret
+          secret:
+            name: {{ template "kibana.initScriptsSecret" . }}
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.savedObjects.configmap }}
+        - name: saved-objects-configmap
+          configMap:
+            name: {{ template "kibana.savedObjectsCM" . }}
+        {{- end }}
+      {{- if .Values.extraVolumes }}
+      {{- tpl (toYaml .Values.extraVolumes) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector:
+      {{- tpl (toYaml .Values.nodeSelector) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.affinity }}
+      affinity:
+      {{- tpl (toYaml .Values.affinity) $ | nindent 6 }}
+      {{- end }}
+      {{- if .Values.tolerations }}
+      tolerations:
+      {{- tpl (toYaml .Values.tolerations) $ | nindent 6 }}
+      {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..849a2d2cbe5ad24edeb88f1ad345a225fef84b97
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/ingress.yaml
@@ -0,0 +1,40 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "kibana.fullname" . -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+  annotations:
+    {{- if .Values.ingress.certManager }}
+    kubernetes.io/tls-acme: "true"
+    {{- end }}
+    {{- range $key, $value := .Values.ingress.annotations }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  rules:
+  {{- range .Values.ingress.hosts }}
+    - host: "{{ .name }}"
+      http:
+        paths:
+          - path: {{ tpl .path_template $ }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: http
+  {{- end }}
+  tls:
+  {{- range .Values.ingress.hosts }}
+  {{- if .tls }}
+    - hosts:
+    {{- if .tlsHosts }}
+      {{- range $host := .tlsHosts }}
+        - {{ $host }}
+      {{- end }}
+    {{- else }}
+        - "{{ .name }}"
+    {{- end }}
+      secretName: {{ .tlsSecret }}
+  {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..a1128ac0557438ec509e20607ea89b74a8978b32
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/plugins-configmap.yml
@@ -0,0 +1,18 @@
+{{- if .Values.plugins -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-plugins
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  install-plugins.sh: |
+    #!/bin/bash
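+    # Installs every plugin listed under the "plugins" value with the kibana-plugin CLI;
+    # executed from /docker-entrypoint-initdb.d at container startup.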
+    echo "==> Plugin installation"
+    {{- $totalPlugins := len .Values.plugins }}
+    echo "Total plugins defined in chart installation: {{ $totalPlugins }}"
+    {{- range $i, $plugin := .Values.plugins }}
+    echo "Installing plugin {{ add $i 1 }} out of {{ $totalPlugins }}: {{ $plugin }}"
+    kibana-plugin install "{{ $plugin }}"
+    {{- end }}
+    echo "==> End of Plugin installation"
+{{- end -}}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1a6424e0f3a4e94951ff545eb846361c45cd7408
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/pvc.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size | quote }}
+  {{ include "kibana.storageClass" . }}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml
new file mode 100755
index 0000000000000000000000000000000000000000..3f451aa0675b605a3fc52a1fe6cbb844404a1276
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/saved-objects-configmap.yml
@@ -0,0 +1,38 @@
+{{- if (include "kibana.importSavedObjects" .) -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kibana.fullname" . }}-saved-objects
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+data:
+  import-saved-objects.sh: |
+    #!/bin/bash
+    echo "==> Saved objects import"
+    {{- if .Values.savedObjects.urls }}
+    {{- $totalURLs := len .Values.savedObjects.urls }}
+    echo "Total saved objects NDJSON URLs to import: {{ $totalURLs }}"
+    {{- range $i, $url := .Values.savedObjects.urls }}
+    echo "Importing saved objects from NDJSON in url {{ add $i 1 }} out of {{ $totalURLs }}: {{ $url }}"
+    download_tmp_file="$(mktemp)"
+    curl "{{$url}}" > "${download_tmp_file}.ndjson"
+    curl -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/saved_objects/_import -H 'kbn-xsrf:true' --form file=@${download_tmp_file}.ndjson
+    {{- end }}
+    {{- end }}
+    {{- if .Values.savedObjects.configmap }}
+    echo "Searching for dashboard NDJSON files from ConfigMap mounted in /bitnami/kibana/saved-objects"
+    ndjson_file_list_tmp="$(mktemp)"
+    find /bitnami/kibana/saved-objects -type f -regex ".*\.ndjson" > $ndjson_file_list_tmp
+    while read -r f; do
+        case "$f" in
+            *.ndjson)
+                echo "Importing $f"
+                curl -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/saved_objects/_import -H 'kbn-xsrf:true' --form file=@${f}
+                ;;
+            *)
+                echo "Ignoring $f"
+                ;;
+        esac
+    done < $ndjson_file_list_tmp
+    {{- end }}
+    echo "==> End of Saved objects import"
+{{- end -}}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..035680b333ce4bc56f3496eda1d9e1dc0cb82db0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/service.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+{{- if or (and .Values.metrics.enabled .Values.metrics.service.annotations) .Values.service.annotations }}
+  annotations:
+  {{- if and .Values.metrics.enabled .Values.metrics.service.annotations }}
+    {{- tpl (toYaml .Values.metrics.service.annotations) $ | nindent 4 }}
+  {{- end }}
+  {{- if .Values.service.annotations }}
+    {{- tpl (toYaml .Values.service.annotations) $ | nindent 4 }}
+  {{- end }}
+{{- end }}
+
+spec:
+  type: {{ .Values.service.type }}
+  {{- if eq .Values.service.type "LoadBalancer" }}
+  {{- if .Values.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  {{- end }}
+  {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.service.port }}
+      targetPort: http
+      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)))}}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- else if eq .Values.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+{{- if .Values.service.extraPorts }}
+  {{- tpl (toYaml .Values.service.extraPorts) $ | nindent 4 }}
+{{- end }}
+  selector: {{- include "kibana.matchLabels" . | nindent 4 }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..6aa4952237feca05414875b6cb0729ccd1b6a14e
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/servicemonitor.yaml
@@ -0,0 +1,28 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "kibana.fullname" . }}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- end }}
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+    {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels: {{- include "kibana.matchLabels" . | nindent 6 }}
+  endpoints:
+  - port: http
+    path: "_prometheus/metrics"
+    {{- if .Values.metrics.serviceMonitor.interval }}
+    interval: {{ .Values.metrics.serviceMonitor.interval }}
+    {{- end }}
+    {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+    scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+    {{- end }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..413c7956c682bfd30059d795fb8512b95a2432ca
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/templates/tests/test-connection.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "kibana.fullname" . }}-test-connection"
+  labels: {{- include "kibana.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: bitnami/minideb
+      command: ['wget']
+      args: ['{{ include "kibana.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..b71b1ffcf2b388c1478e2513359d2ad20f919fe0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values-production.yaml
@@ -0,0 +1,344 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+global: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Kibana image version
+## ref: https://hub.docker.com/r/bitnami/kibana/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/kibana
+  tag: 7.6.1-debian-10-r8
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## String to partially override kibana.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override kibana.fullname template
+##
+# fullnameOverride:
+
+## Number of Kibana Pod replicas
+##
+replicaCount: 1
+
+## Path used by the liveness and readiness probes (rendered through tpl)
+healthCheckPathTemplate: "/"
+
+## Set up the update strategy for the Kibana installation. Set to Recreate if you use a persistent volume that cannot be mounted on more than one pod, to make sure the pods are destroyed first.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+## Example:
+# updateStrategy:
+#  type: RollingUpdate
+#  rollingUpdate:
+#    maxSurge: 25%
+#    maxUnavailable: 25%
+updateStrategy:
+  type: RollingUpdate
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## List of plugins to install
+##
+plugins:
+# - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+
+## Saved objects to import (NDJSON format)
+##
+savedObjects:
+  ## List of saved objects URLs
+  urls:
+  # - www.example.com/dashboard.ndjson
+  ## ConfigMap with saved objects
+  configmap:
+
+## Extra configuration settings
+##
+# extraConfiguration:
+
+## Configuration ConfigMap (for kibana.yml)
+##
+# configurationCM:
+
+## An array to add extra env vars
+## For example:
+## extraEnvVars:
+##  - name: KIBANA_ELASTICSEARCH_URL
+##    value: test
+##
+# extraEnvVars:
+
+## ConfigMap containing extra environment variables
+##
+## extraEnvVarsCM:
+
+## Secret containing extra environment variables
+##
+## extraEnvVarsSecret:
+
+## Array to add extra volumes
+##
+## extraVolumes:
+
+## Array to add extra mounts (normally used with extraVolumes)
+##
+## extraVolumeMounts: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  enabled: true
+  ## Kibana data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  ##
+  ## If you want to reuse an existing claim, you can pass the name of the PVC using
+  ## the existingClaim variable
+  # existingClaim: your-claim
+  accessMode: ReadWriteOnce
+  size: 10Gi
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Force execution of init scripts
+##
+forceInitScripts: false
+
+## Configmap with init scripts to execute
+##
+# initScriptsCM:
+
+## Secret with init scripts to execute (for sensitive data)
+##
+# initScriptsSecret:
+
+## Service configuration
+##
+service:
+  port: 80
+  type: ClusterIP
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Enable client source IP preservation
+  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+
+  ## loadBalancerIP for the Kibana Service (optional, cloud specific)
+  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  # loadBalancerIP:
+  ## Extra ports to expose (normally used with the `sidecar` value)
+  # extraPorts:
+
+## Configure the ingress resource that allows you to access the
+## Kibana web UI. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Set to true to enable ingress record generation
+  enabled: false
+
+  ## Set this to true in order to add the corresponding annotations for cert-manager
+  certManager: false
+
+  ## Ingress annotations done as key:value pairs
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ##
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+
+  ## The list of hostnames to be covered with this ingress record.
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+  hosts:
+    - name: kibana.local
+      path_template: /
+
+      ## Set this to true in order to enable TLS on the ingress record
+      tls: false
+
+      ## Optionally specify the TLS hosts for the ingress record
+      ## Useful when the Ingress controller supports www-redirection
+      ## If not specified, the above host name will be used
+      # tlsHosts:
+      #   - www.kibana.local
+      #   - kibana.local
+
+      ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+      tlsSecret: kibana.local-tls
+
+## SecurityContext configuration
+##
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+  runAsNonRoot: true
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Example:
+## resources:
+##   requests:
+##     memory: 512Mi
+##     cpu: 300m
+##
+# resources:
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## Add sidecars to the pod
+##
+sidecars:
+## e.g.
+# - name: your-image-name
+#   image: your-image
+#   imagePullPolicy: Always
+#   ports:
+#     - name: portname
+#       containerPort: 1234
+
+## Add init containers to the pod
+##
+initContainers:
+## e.g.
+# - name: your-image-name
+#   image: your-image
+#   imagePullPolicy: Always
+#   ports:
+#     - name: portname
+#       containerPort: 1234
+
+## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
+##
+metrics:
+  enabled: true
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "80"
+      prometheus.io/path: "_prometheus/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Properties for Elasticsearch
+##
+elasticsearch:
+  hosts:
+  # - elasticsearch-1
+  # - elasticsearch-2
+  port:
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..5d5e8f04020c8a89823d658b508859f699e8c55a
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/charts/kibana/values.yaml
@@ -0,0 +1,346 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+##
+global: {}
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Kibana image version
+## ref: https://hub.docker.com/r/bitnami/kibana/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/kibana
+  tag: 7.6.1-debian-10-r8
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## String to partially override kibana.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override kibana.fullname template
+##
+# fullnameOverride:
+
+## Number of Kibana Pod replicas
+##
+replicaCount: 1
+
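+## Path used by the liveness and readiness probes (rendered through tpl)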
+healthCheckPathTemplate: "/"
+
+## Set up the update strategy for the Kibana installation. Set to Recreate if you use a persistent volume that cannot be mounted on more than one pod, to make sure the pods are destroyed first.
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+## Example:
+# updateStrategy:
+#  type: RollingUpdate
+#  rollingUpdate:
+#    maxSurge: 25%
+#    maxUnavailable: 25%
+updateStrategy:
+  type: RollingUpdate
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## List of plugins to install
+##
+plugins:
+# - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
+
+## Saved objects to import (NDJSON format)
+##
+savedObjects:
+  ## List of saved objects URLs
+  urls:
+  # - www.example.com/dashboard.ndjson
+  ## ConfigMap with saved objects
+  configmap:
+
+## Extra configuration settings
+##
+# extraConfiguration:
+
+## Configuration ConfigMap (for kibana.yml)
+##
+# configurationCM:
+
+## An array to add extra env vars
+## For example:
+## extraEnvVars:
+##  - name: KIBANA_ELASTICSEARCH_URL
+##    value: test
+##
+# extraEnvVars:
+
+## ConfigMap containing extra environment variables
+##
+## extraEnvVarsCM:
+
+## Secret containing extra environment variables
+##
+## extraEnvVarsSecret:
+
+## Array to add extra volumes
+##
+## extraVolumes:
+
+## Array to add extra mounts (normally used with extraVolumes)
+##
+## extraVolumeMounts: {}
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+  # resources:
+  #   requests:
+  #     memory: 128Mi
+  #     cpu: 100m
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  enabled: true
+  ## Kibana data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  ##
+  ## If you want to reuse an existing claim, you can pass the name of the PVC using
+  ## the existingClaim variable
+  # existingClaim: your-claim
+  accessMode: ReadWriteOnce
+  size: 10Gi
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 120
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Force execution of init scripts
+##
+forceInitScripts: false
+
+## Configmap with init scripts to execute
+##
+# initScriptsCM:
+
+## Secret with init scripts to execute (for sensitive data)
+##
+# initScriptsSecret:
+
+## Service configuration
+##
+service:
+  port: 80
+  type: ClusterIP
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Enable client source IP preservation
+  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## Provide any additional annotations which may be required. This can be used to
+  ## set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+
+  ## loadBalancerIP for the Kibana Service (optional, cloud specific)
+  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  # loadBalancerIP:
+  ## Extra ports to expose (normally used with the `sidecar` value)
+  # extraPorts:
+
+## Configure the ingress resource that allows you to access the
+## Kibana web UI. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Set to true to enable ingress record generation
+  enabled: false
+
+  ## Set this to true in order to add the corresponding annotations for cert-manager
+  certManager: false
+
+  ## Ingress annotations done as key:value pairs
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ##
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+
+  ## The list of hostnames to be covered with this ingress record.
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+  hosts:
+    - name: kibana.local
+      path_template: /
+
+      ## Set this to true in order to enable TLS on the ingress record
+      tls: false
+
+      ## Optionally specify the TLS hosts for the ingress record
+      ## Useful when the Ingress controller supports www-redirection
+      ## If not specified, the above host name will be used
+      # tlsHosts:
+      #   - www.kibana.local
+      #   - kibana.local
+
+      ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+      tlsSecret: kibana.local-tls
+
+## SecurityContext configuration
+##
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+  runAsNonRoot: true
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## Example:
+## resources:
+##   requests:
+##     memory: 512Mi
+##     cpu: 300m
+##
+# resources:
+
+## Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Pod annotations
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## Add sidecars to the pod
+##
+sidecars:
+## e.g.
+# - name: your-image-name
+#   image: your-image
+#   imagePullPolicy: Always
+#   ports:
+#     - name: portname
+#       containerPort: 1234
+
+## Add init containers to the pod
+##
+initContainers:
+## e.g.
+# - name: your-image-name
+#   image: your-image
+#   imagePullPolicy: Always
+#   ports:
+#     - name: portname
+#       containerPort: 1234
+
+## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
+##
+metrics:
+  enabled: false
+  service:
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "80"
+      prometheus.io/path: "_prometheus/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Properties for Elasticsearch
+##
+elasticsearch:
+  hosts:
+  # - elasticsearch-1
+  # - elasticsearch-2
+  port:
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock
new file mode 100755
index 0000000000000000000000000000000000000000..6416920e50b356ada51c9fb7c6edf99959135859
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: kibana
+  repository: https://charts.bitnami.com/bitnami
+  version: 5.0.11
+digest: sha256:4970b5ac3743b773c6608e77e28eb0928d45c3379bbe6660a35d8d4ef07613df
+generated: "2020-03-26T01:45:21.876314703Z"
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..17d1dfff1679eb0d9ddf96d0155a0ffe186bd6ac
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/requirements.yaml
@@ -0,0 +1,5 @@
+dependencies:
+  - name: kibana
+    version: 5.x.x
+    repository: https://charts.bitnami.com/bitnami
+    condition: global.kibanaEnabled
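+    # The kibana subchart is only rendered when global.kibanaEnabled is set to true in the parent chart's values.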
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt
new file mode 100755
index 0000000000000000000000000000000000000000..3fba2e4119b2d737fbc2e28244a22cee7608f205
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/NOTES.txt
@@ -0,0 +1,100 @@
+{{- if contains "LoadBalancer" .Values.coordinating.service.type }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "coordinating.service.type=LoadBalancer" you have most likely
+    exposed the Elasticsearch service externally.
+
+    Please note that Elasticsearch does not implement an authentication
+    mechanism to secure your cluster. For security reasons, we strongly
+    suggest that you switch to "ClusterIP" or "NodePort".
+-------------------------------------------------------------------------------
+{{- end }}
+{{- if not .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    Elasticsearch requires some changes in the kernel of the host machine to
+    work as expected. If those values are not set in the underlying operating
+    system, the ES containers fail to boot with ERROR messages.
+
+    To check whether the host machine meets the requirements, run the command
+    below:
+
+      kubectl logs --namespace {{ .Release.Namespace }} $(kubectl get --namespace {{ .Release.Namespace }} \
+        pods -l app={{ template "elasticsearch.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
+        elasticsearch
+
+    You can adapt the Kernel parameters on your cluster as described in the
+    official documentation:
+
+      https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
+
+    As an alternative, you can specify "sysctlImage.enabled=true" to use a
+    privileged initContainer to change those settings in the Kernel:
+
+      helm upgrade {{ .Release.Name }} bitnami/elasticsearch \
+        --set sysctlImage.enabled=true
+
+{{- else if .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+ WARNING
+
+    Elasticsearch requires some changes in the kernel of the host machine to
+    work as expected. If those values are not set in the underlying operating
+    system, the ES containers fail to boot with ERROR messages.
+
+    More information about these requirements can be found in the links below:
+
+      https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html
+      https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
+
+    This chart uses a privileged initContainer to change those settings in the Kernel
+    by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
+
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.curator.enabled }}
+
+  A CronJob will run with schedule {{ .Values.curator.cronjob.schedule }}.
+
+  The Jobs will not be removed automagically when deleting this Helm chart.
+  To remove these jobs, run the following:
+
+    kubectl -n {{ .Release.Namespace }} delete job -l app={{ template "elasticsearch.name" . }},role=curator
+
+{{- end }}
+
+  Elasticsearch can be accessed within the cluster on port {{ .Values.coordinating.service.port }} at {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+  To access from outside the cluster, execute the following commands:
+
+{{- if contains "NodePort" .Values.coordinating.service.type }}
+
+    export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.coordinating.fullname" . }})
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    curl http://$NODE_IP:$NODE_PORT/
+{{- else if contains "LoadBalancer" .Values.coordinating.service.type }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "elasticsearch.coordinating.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch.coordinating.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+    curl http://$SERVICE_IP:{{ .Values.coordinating.service.port }}/
+{{- else if contains "ClusterIP"  .Values.coordinating.service.type }}
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "elasticsearch.coordinating.fullname" . }} {{ .Values.coordinating.service.port }}:9200 &
+    curl http://127.0.0.1:9200/
+{{- end }}
+
+{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+
+WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl
new file mode 100755
index 0000000000000000000000000000000000000000..42ef48431cd7e82729a6de40c42302096f34b3b0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/_helpers.tpl
@@ -0,0 +1,407 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "elasticsearch.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "elasticsearch.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "elasticsearch.labels" -}}
+app: {{ include "elasticsearch.name" . }}
+chart: {{ include "elasticsearch.chart" . }}
+release: {{ .Release.Name }}
+heritage: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
+*/}}
+{{- define "elasticsearch.matchLabels" -}}
+app: {{ include "elasticsearch.name" . }}
+release: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Return the proper ES image name
+*/}}
+{{- define "elasticsearch.image" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- $repositoryName := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
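+
+{{/*
+Example (hypothetical values): image.registry=docker.io,
+image.repository=bitnami/elasticsearch, image.tag=7.6.1 renders
+"docker.io/bitnami/elasticsearch:7.6.1"; a non-empty global.imageRegistry
+replaces only the registry part.
+*/}}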
+
+{{/*
+Create a default fully qualified master name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.master.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.master.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified ingest name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.ingest.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.ingest.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified discovery name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.discovery.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.discovery.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified coordinating name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.coordinating.fullname" -}}
+{{- if .Values.global.kibanaEnabled -}}
+{{- printf "%s-%s" .Release.Name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.global.coordinating.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
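+
+{{/*
+Note: when the Kibana subchart is enabled, the chart name is dropped from the
+coordinating service name, presumably so the name stays predictable for the
+subchart's default Elasticsearch endpoint.
+*/}}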
+
+{{/*
+Create a default fully qualified data name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.data.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.data.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+ Create the name of the master service account to use
+ */}}
+{{- define "elasticsearch.master.serviceAccountName" -}}
+{{- if .Values.master.serviceAccount.create -}}
+    {{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.master.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the coordinating-only service account to use
+ */}}
+{{- define "elasticsearch.coordinating.serviceAccountName" -}}
+{{- if .Values.coordinating.serviceAccount.create -}}
+    {{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.coordinating.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+ Create the name of the data service account to use
+ */}}
+{{- define "elasticsearch.data.serviceAccountName" -}}
+{{- if .Values.data.serviceAccount.create -}}
+    {{ default (include "elasticsearch.data.fullname" .) .Values.data.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.data.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified metrics name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.metrics.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.metrics.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Return the proper ES exporter image name
+*/}}
+{{- define "elasticsearch.metrics.image" -}}
+{{- $registryName := .Values.metrics.image.registry -}}
+{{- $repositoryName := .Values.metrics.image.repository -}}
+{{- $tag := .Values.metrics.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper sysctl image name
+*/}}
+{{- define "elasticsearch.sysctl.image" -}}
+{{- $registryName := .Values.sysctlImage.registry -}}
+{{- $repositoryName := .Values.sysctlImage.repository -}}
+{{- $tag := .Values.sysctlImage.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "elasticsearch.imagePullSecrets" -}}
+{{- if .Values.global }}
+{{- if .Values.global.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.global.imagePullSecrets }}
+  - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- else }}
+{{- $imagePullSecrets := coalesce .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.curator.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets -}}
+{{- if $imagePullSecrets }}
+imagePullSecrets:
+{{- range $imagePullSecrets }}
+  - name: {{ . }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
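+
+{{/*
+Usage sketch: included at pod-spec level, e.g.
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+The helper output starts with a newline, so the `indent` and `nindent`
+call sites below both render a correctly indented block.
+*/}}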
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "elasticsearch.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
+Usage:
+{{ include "elasticsearch.storageClass" (dict "global" .Values.global "local" .Values.master) }}
+*/}}
+{{- define "elasticsearch.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
+*/}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- if (eq "-" .global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .local.persistence.storageClass -}}
+              {{- if (eq "-" .local.persistence.storageClass) -}}
+                  {{- printf "storageClassName: \"\"" -}}
+              {{- else }}
+                  {{- printf "storageClassName: %s" .local.persistence.storageClass -}}
+              {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .local.persistence.storageClass -}}
+        {{- if (eq "-" .local.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .local.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
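+
+{{/*
+Example (hypothetical values): storageClass "-" renders `storageClassName: ""`
+(disables dynamic provisioning), "standard" renders
+`storageClassName: standard`, and leaving it unset emits nothing, so the
+cluster default storage class applies.
+*/}}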
+
+{{/*
+Return the appropriate apiVersion for cronjob APIs.
+*/}}
+{{- define "cronjob.apiVersion" -}}
+{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "batch/v2alpha1" }}
+{{- else if semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "batch/v1beta1" }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for podsecuritypolicy.
+*/}}
+{{- define "podsecuritypolicy.apiVersion" -}}
+{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "policy/v1beta1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "elasticsearch.curator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}-curator
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "elasticsearch.curator.fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.curator.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "elasticsearch.curator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "elasticsearch.curator.serviceAccountName" -}}
+{{- if .Values.curator.serviceAccount.create -}}
+    {{ default (include "elasticsearch.curator.fullname" .) .Values.curator.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.curator.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper ES curator image name
+*/}}
+{{- define "elasticsearch.curator.image" -}}
+{{- $registryName := .Values.curator.image.registry -}}
+{{- $repositoryName := .Values.curator.image.repository -}}
+{{- $tag := .Values.curator.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Renders a value that contains template.
+Usage:
+{{ include "elasticsearch.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "elasticsearch.tplValue" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..7fc7122ceeff720b9db61123fb6397d60d640edb
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-curator.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.curator.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.curator.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+data:
+  action_file.yml: {{ required "A valid .Values.curator.configMaps.action_file_yml entry is required!" (toYaml .Values.curator.configMaps.action_file_yml | indent 2) }}
+  config.yml: {{ required "A valid .Values.curator.configMaps.config_yml entry is required!" (tpl (toYaml .Values.curator.configMaps.config_yml | indent 2) $) }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..b6924c1621e9172805802e2da4414106c283d092
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-es.yaml
@@ -0,0 +1,9 @@
+{{- if .Values.config }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+data:
+  elasticsearch.yml: |- {{- toYaml .Values.config | nindent 4 }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..a62b90fcf1d19324f0485de460e8065fb975812b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/configmap-initcontainer.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.sysctlImage.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "elasticsearch.fullname" . }}-initcontainer
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+data:
+  sysctl.sh: |-
+    #!/bin/bash
+
+    set -o errexit
+    set -o pipefail
+    set -o nounset
+
+    if ! [ -x "$(command -v sysctl)" ]; then
+      echo 'sysctl not installed. Installing it...'
+      distro=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
+      case $distro in
+        ol | centos)
+          yum install -y procps
+          rm -rf /var/cache/yum;;
+        ubuntu | debian)
+          apt-get update -qq && apt-get install -y --no-install-recommends procps
+          rm -rf /var/lib/apt/lists /var/cache/apt/archives;;
+      esac
+    fi
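+    # Elasticsearch's bootstrap checks require vm.max_map_count >= 262144 for
+    # mmap-heavy index storage; fs.file-max raises the system-wide file-handle limit.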
+    sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..83a6a369c3d2b39465675e29c0930ae6b36cb521
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-deploy.yaml
@@ -0,0 +1,137 @@
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.coordinating.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+spec:
+  strategy:
+    type: {{ .Values.coordinating.updateStrategy.type }}
+    {{- if (eq "Recreate" .Values.coordinating.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: coordinating-only
+  replicas: {{ .Values.coordinating.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: coordinating-only
+      {{- with .Values.coordinating.podAnnotations }}
+      annotations: {{- toYaml . | nindent 10 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.coordinating.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.coordinating.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.coordinating.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
+      {{- if .Values.coordinating.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.coordinating.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.sysctlImage.enabled }}
+      ## Image that performs the sysctl operation to modify kernel settings (sometimes needed to avoid boot errors)
+      initContainers:
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.coordinating.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.coordinating.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.coordinating.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "coordinating"
+          ports:
+            - name: http
+              containerPort: 9200
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.coordinating.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.coordinating.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.coordinating.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.coordinating.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.coordinating.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.coordinating.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: http
+          {{- end }}
+          {{- if .Values.coordinating.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.coordinating.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.coordinating.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.coordinating.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.coordinating.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.coordinating.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: http
+          {{- end }}
+          {{- if .Values.coordinating.resources }}
+          resources: {{- toYaml .Values.coordinating.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: config
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data/"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
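+        ## Coordinating-only nodes hold no shard data, so a transient emptyDir
+        ## backs the data path; no PVC is templated for this role.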
+        - name: data
+          emptyDir: {}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..3f4a516728a4715c7c5a75de089d2c39883950c1
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/coordinating-svc.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.coordinating.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.coordinating.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.coordinating.service.type | quote }}
+  {{- if and (eq .Values.coordinating.service.type "LoadBalancer") (not (empty .Values.coordinating.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.coordinating.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.coordinating.service.port }}
+      targetPort: http
+      {{- if and (or (eq .Values.coordinating.service.type "NodePort") (eq .Values.coordinating.service.type "LoadBalancer")) (not (empty .Values.coordinating.service.nodePort)) }}
+      nodePort: {{ .Values.coordinating.service.nodePort }}
+      {{- else if eq .Values.coordinating.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: coordinating-only
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..78cd83ed211f90c131607d0870250f84d4cb46d2
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/cronjob.yaml
@@ -0,0 +1,108 @@
+{{- if .Values.curator.enabled }}
+apiVersion: {{ template "cronjob.apiVersion" . }}
+kind: CronJob
+metadata:
+  name: {{ template "elasticsearch.curator.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+  {{- if .Values.curator.cronjob.annotations }}
+  annotations: {{- toYaml .Values.curator.cronjob.annotations | nindent 4 }}
+  {{- end }}
+spec:
+  schedule: "{{ .Values.curator.cronjob.schedule }}"
+  {{- with .Values.curator.cronjob.concurrencyPolicy }}
+  concurrencyPolicy: {{ . }}
+  {{- end }}
+  {{- with .Values.curator.cronjob.failedJobsHistoryLimit }}
+  failedJobsHistoryLimit: {{ . }}
+  {{- end }}
+  {{- with .Values.curator.cronjob.successfulJobsHistoryLimit }}
+  successfulJobsHistoryLimit: {{ . }}
+  {{- end }}
+  jobTemplate:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" . }}
+        release: {{ .Release.Name | quote }}
+    spec:
+      template:
+        metadata:
+          labels:
+            app: {{ template "elasticsearch.name" . }}
+            release: {{ .Release.Name | quote }}
+          {{- if .Values.curator.podAnnotations }}
+          annotations: {{- toYaml .Values.curator.podAnnotations | nindent 12 }}
+          {{- end }}
+        spec:
+          volumes:
+            - name: config-volume
+              configMap:
+                name: {{ template "elasticsearch.curator.fullname" . }}
+            {{- if .Values.curator.extraVolumes }}
+            {{- toYaml .Values.curator.extraVolumes | nindent 12 }}
+            {{- end }}
+          restartPolicy: {{ .Values.curator.cronjob.jobRestartPolicy }}
+          {{- if .Values.curator.priorityClassName }}
+          priorityClassName: {{ .Values.curator.priorityClassName | quote }}
+          {{- end }}
+{{- include "elasticsearch.imagePullSecrets" . | indent 10 }}
+          {{- if .Values.curator.extraInitContainers }}
+          initContainers:
+            {{- range $key, $value := .Values.curator.extraInitContainers }}
+            - name: "{{ $key }}"
+            {{- toYaml $value | nindent 14 }}
+            {{- end }}
+          {{- end }}
+          {{- if .Values.curator.rbac.enabled }}
+          serviceAccountName: {{ include "elasticsearch.curator.serviceAccountName" . }}
+          {{- end }}
+          {{- if .Values.curator.affinity }}
+          affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.affinity "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.nodeSelector }}
+          nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.nodeSelector "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.tolerations }}
+          tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.curator.tolerations "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.curator.securityContext }}
+          securityContext: {{- toYaml .Values.curator.securityContext | nindent 12 }}
+          {{- end }}
+          containers:
+            - name: {{ template "elasticsearch.curator.fullname" . }}
+              image: {{ template "elasticsearch.curator.image" . }}
+              imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }}
+              volumeMounts:
+                - name: config-volume
+                  mountPath: /etc/es-curator
+                {{- if .Values.curator.extraVolumeMounts }}
+                {{- toYaml .Values.curator.extraVolumeMounts | nindent 16 }}
+                {{- end }}
+              {{ if .Values.curator.command }}
+              command: {{ toYaml .Values.curator.command | nindent 16 }}
+              {{- end }}
+              {{- if .Values.curator.dryrun }}
+              args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+              {{- else }}
+              args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+              {{- end }}
+              env:
+                {{- if .Values.curator.env }}
+                {{- range $key,$value := .Values.curator.env }}
+                - name: {{ $key | upper | quote}}
+                  value: {{ $value | quote}}
+                {{- end }}
+                {{- end }}
+                {{- if .Values.curator.envFromSecrets }}
+                {{- range $key,$value := .Values.curator.envFromSecrets }}
+                - name: {{ $key | upper | quote}}
+                  valueFrom:
+                    secretKeyRef:
+                      name: {{ $value.from.secret | quote}}
+                      key: {{ $value.from.key | quote}}
+                {{- end }}
+                {{- end }}
+              {{- if .Values.curator.resources }}
+              resources: {{- toYaml .Values.curator.resources | nindent 16 }}
+              {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..53cbf685857df9fc9b4578251c26e5ba59190b1b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/data-statefulset.yaml
@@ -0,0 +1,175 @@
+apiVersion: {{ template "statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "elasticsearch.data.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: data
+spec:
+  updateStrategy:
+    type: {{ .Values.data.updateStrategy.type }}
+    {{- if (eq "OnDelete" .Values.data.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- else if .Values.data.updateStrategy.rollingUpdatePartition }}
+    rollingUpdate:
+      partition: {{ .Values.data.updateStrategy.rollingUpdatePartition }}
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: data
+  serviceName: {{ include "elasticsearch.data.fullname" . }}
+  replicas: {{ .Values.data.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: data
+      {{- with .Values.data.podAnnotations }}
+      annotations: {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.data.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.data.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.data.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }}
+      {{- if .Values.data.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.data.securityContext.fsGroup }}
+      {{- end }}
+      {{- if or .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }}
+      initContainers:
+        {{- if .Values.sysctlImage.enabled }}
+        ## Image that performs the sysctl operation to modify kernel settings (sometimes needed to avoid boot errors)
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+        {{- end }}
+        {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }}
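+        ## Optional root init container that chowns the persistent volume so
+        ## the Elasticsearch container can run as the unprivileged user above.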
+        - name: volume-permissions
+          image: {{ include "elasticsearch.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.data.securityContext.runAsUser }}:{{ .Values.data.securityContext.fsGroup }} /bitnami/elasticsearch/data
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data"
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.data.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.data.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.data.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "data"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.data.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.data.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.data.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.data.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.data.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.data.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.data.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.data.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.data.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.data.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.data.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.data.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.data.resources }}
+          resources: {{- toYaml .Values.data.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: "config"
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: "data"
+              mountPath: "/bitnami/elasticsearch/data"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: "config"
+          configMap:
+            name: {{ template "elasticsearch.fullname" . }}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- if not .Values.data.persistence.enabled }}
+        - name: "data"
+          emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: "data"
+        {{- if .Values.data.persistence.annotations }}
+        annotations: {{- toYaml .Values.data.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes: {{- toYaml .Values.data.persistence.accessModes | nindent 10 }}
+        {{ $storage := dict "global" .Values.global "local" .Values.data }}
+        {{ include "elasticsearch.storageClass" $storage }}
+        resources:
+          requests:
+            storage: {{ .Values.data.persistence.size | quote }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..a852bdc64006c11da3241209ae6d1fb6bb6ee076
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/discovery-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.discovery.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+  annotations:
+    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
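+  ## Headless service (clusterIP: None) that also publishes not-ready
+  ## addresses, so nodes can discover each other during initial cluster formation.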
+  ports:
+    - port: 9300
+      name: transport
+      targetPort: transport
+  publishNotReadyAddresses: true
+  sessionAffinity: None
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..4d552b32a6b4d4845f48159c20ee932330c0c9a7
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/hooks/job.install.yaml
@@ -0,0 +1,71 @@
+{{- if .Values.curator.enabled }}
+{{- range $kind, $enabled := .Values.curator.hooks }}
+{{- if $enabled }}
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ template "elasticsearch.curator.fullname" . }}-curator-on-{{ $kind }}
+  labels:
+    app: {{ template "elasticsearch.name" . }}
+    chart: {{ template "elasticsearch.chart" . }}
+    heritage: {{ .Release.Service | quote }}
+    release: {{ .Release.Name | quote }}
+    role: "curator"
+  annotations:
+    "helm.sh/hook": post-{{ $kind }}
+    "helm.sh/hook-weight": "1"
+{{- if $.Values.curator.cronjob.annotations }}
+{{ toYaml $.Values.curator.cronjob.annotations | indent 4 }}
+{{- end }}
+spec:
+  template:
+    metadata:
+      labels:
+        app: {{ template "elasticsearch.name" . }}
+        release: {{ .Release.Name | quote }}
+{{- if $.Values.podAnnotations }}
+      annotations:
+{{ toYaml $.Values.podAnnotations | indent 8 }}
+{{- end }}
+    spec:
+      volumes:
+        - name: config-volume
+          configMap:
+            name: {{ template "elasticsearch.curator.fullname" . }}
+{{- if $.Values.curator.extraVolumes }}
+{{ toYaml $.Values.curator.extraVolumes | indent 8 }}
+{{- end }}
+      restartPolicy: Never
+{{- if $.Values.curator.priorityClassName }}
+      priorityClassName: "{{ $.Values.curator.priorityClassName }}"
+{{- end }}
+      containers:
+        - name: {{ template "elasticsearch.curator.fullname" . }}
+          image: {{ template "elasticsearch.curator.image" . }}
+          imagePullPolicy: {{ .Values.curator.image.pullPolicy | quote }}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/es-curator
+    {{- if $.Values.curator.extraVolumeMounts }}
+{{ toYaml $.Values.curator.extraVolumeMounts | indent 12 }}
+    {{- end }}
+          command: [ "curator" ]
+          args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ]
+          resources:
+{{ toYaml $.Values.curator.resources | indent 12 }}
+    {{- with $.Values.curator.nodeSelector }}
+      nodeSelector:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with $.Values.curator.affinity }}
+      affinity:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+    {{- with $.Values.curator.tolerations }}
+      tolerations:
+{{ toYaml . | indent 8 }}
+    {{- end }}
+{{- end -}}
+{{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..69b3d75a0ded0636657923384e22b0a3e859ec75
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-deploy.yaml
@@ -0,0 +1,133 @@
+{{- if .Values.ingest.enabled }}
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.ingest.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: ingest
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: ingest
+  replicas: {{ .Values.ingest.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: ingest
+      {{- with .Values.ingest.podAnnotations }}
+      annotations: {{- toYaml . | nindent 10 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.ingest.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.ingest.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.ingest.securityContext.fsGroup }}
+      {{- end }}
+      {{- if .Values.sysctlImage.enabled }}
+      ## Image that performs the sysctl operation to modify kernel settings (sometimes needed to avoid boot errors)
+      initContainers:
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.ingest.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.ingest.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.ingest.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "ingest"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.ingest.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.ingest.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.ingest.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.ingest.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.ingest.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.ingest.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.ingest.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.ingest.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.ingest.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.ingest.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.ingest.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.ingest.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.ingest.resources }}
+          resources: {{- toYaml .Values.ingest.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: "config"
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: "data"
+              mountPath: "/bitnami/elasticsearch/data/"
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
+        - name: data
+          emptyDir: {}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..eeb053dd7b96840d3f72736ca0763d08e4763638
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/ingest-svc.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.ingest.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.ingest.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: ingest
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.ingest.service.type | quote }}
+  {{- if and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: transport
+      port: {{ .Values.ingest.service.port }}
+      targetPort: transport
+      {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePort)) }}
+      nodePort: {{ .Values.ingest.service.nodePort }}
+      {{- else if eq .Values.ingest.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: ingest
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..109b9cb591eae0a4d0135758defbd668471374a2
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-statefulset.yaml
@@ -0,0 +1,179 @@
+apiVersion: {{ template "statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "elasticsearch.master.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+spec:
+  updateStrategy:
+    type: {{ .Values.master.updateStrategy.type }}
+    {{- if (eq "OnDelete" .Values.master.updateStrategy.type) }}
+    rollingUpdate: null
+    {{- end }}
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: master
+  serviceName: {{ template "elasticsearch.master.fullname" . }}
+  podManagementPolicy: Parallel
+  replicas: {{ .Values.master.replicas }}
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: master
+      {{- with .Values.master.podAnnotations }}
+      annotations: {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.master.affinity }}
+      affinity: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.affinity "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.master.nodeSelector }}
+      nodeSelector: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.master.tolerations }}
+      tolerations: {{- include "elasticsearch.tplValue" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }}
+      {{- if .Values.master.securityContext.enabled }}
+      securityContext:
+        fsGroup: {{ .Values.master.securityContext.fsGroup }}
+      {{- end }}
+      {{- if or .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }}
+      initContainers:
+        {{- if .Values.sysctlImage.enabled }}
+        ## Image that performs the sysctl operation to modify kernel settings (sometimes needed to avoid boot errors)
+        - name: sysctl
+          image: {{ include "elasticsearch.sysctl.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - /scripts/sysctl.sh
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: initcontainer-script
+              mountPath: /scripts/sysctl.sh
+              subPath: sysctl.sh
+        {{- end }}
+        {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }}
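+        ## Optional root init container that chowns the persistent volume so
+        ## the Elasticsearch container can run as the unprivileged user above.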
+        - name: volume-permissions
+          image: {{ include "elasticsearch.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.master.securityContext.runAsUser }}:{{ .Values.master.securityContext.fsGroup }} /bitnami/elasticsearch/data
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: "/bitnami/elasticsearch/data"
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: elasticsearch
+          image: {{ include "elasticsearch.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.master.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.master.securityContext.runAsUser }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" .Values.image.debug | quote }}
+            - name: ELASTICSEARCH_CLUSTER_NAME
+              value: {{ .Values.name | quote }}
+            - name: ELASTICSEARCH_CLUSTER_HOSTS
+              value: {{ template "elasticsearch.discovery.fullname" . }}
+            - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS
+              {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }}
+              {{- $replicas := int .Values.master.replicas }}
+              value: {{range $i, $e := until $replicas }}{{ $elasticsearchMasterFullname }}-{{ $e }} {{ end }}
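+            ## Quorum: floor(master.replicas / 2) + 1 master-eligible nodes
+            ## (e.g. 3 replicas -> 2), which prevents split-brain.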
+            - name: ELASTICSEARCH_MINIMUM_MASTER_NODES
+              value: {{ add (div .Values.master.replicas 2) 1 | quote }}
+            {{- if .Values.plugins }}
+            - name: ELASTICSEARCH_PLUGINS
+              value: {{ .Values.plugins | quote }}
+            {{- end }}
+            - name: ELASTICSEARCH_HEAP_SIZE
+              value: {{ .Values.master.heapSize | quote }}
+            - name: ELASTICSEARCH_IS_DEDICATED_NODE
+              value: "yes"
+            - name: ELASTICSEARCH_NODE_TYPE
+              value: "master"
+          ports:
+            - name: transport
+              containerPort: 9300
+          {{- if .Values.master.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.master.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
+            httpGet:
+              path: /_cluster/health?local=true
+              port: 9200
+          {{- end }}
+          {{- if .Values.master.resources }}
+          resources: {{- toYaml .Values.master.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.config }}
+            - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml
+              name: config
+              subPath: elasticsearch.yml
+            {{- end }}
+            - name: data
+              mountPath: /bitnami/elasticsearch/data
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+      volumes:
+        {{- if .Values.sysctlImage.enabled }}
+        - name: initcontainer-script
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}-initcontainer
+            defaultMode: 0755
+        {{- end }}
+        {{- if .Values.config }}
+        - name: config
+          configMap:
+            name: {{ include "elasticsearch.fullname" . }}
+        {{- end }}
+        {{- if .Values.extraVolumes }}
+        {{- toYaml .Values.extraVolumes | nindent 8 }}
+        {{- end }}
+{{- if not .Values.master.persistence.enabled }}
+        - name: "data"
+          emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: "data"
+        {{- if .Values.master.persistence.annotations }}
+        annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes: {{- toYaml .Values.master.persistence.accessModes | nindent 10 }}
+        {{ $storage := dict "global" .Values.global "local" .Values.master }}
+        {{ include "elasticsearch.storageClass" $storage  }}
+        resources:
+          requests:
+            storage: {{ .Values.master.persistence.size | quote }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..eec73578869833e49facf25da42d26b635b76cbf
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/master-svc.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.master.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.master.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.master.service.type | quote }}
+  {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }}
+  loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: transport
+      port: {{ .Values.master.service.port }}
+      targetPort: transport
+      {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) (not (empty .Values.master.service.nodePort)) }}
+      nodePort: {{ .Values.master.service.nodePort }}
+      {{- else if eq .Values.master.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: master
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..04bbab65b5b23a9ebc71489283f4f54ec813c18f
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-deploy.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: {{ template "deployment.apiVersion" . }}
+kind: Deployment
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: metrics
+  replicas: 1
+  template:
+    metadata:
+      labels: {{- include "elasticsearch.labels" . | nindent 8 }}
+        role: metrics
+      {{- with .Values.metrics.podAnnotations }}
+      annotations: {{ toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+{{- include "elasticsearch.imagePullSecrets" . | indent 6 }}
+      containers:
+        - name: metrics
+          image: {{ include "elasticsearch.metrics.image" . }}
+          imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}
+          args:
+            - --es.uri=http://{{ template "elasticsearch.coordinating.fullname" . }}:{{ .Values.coordinating.service.port }}
+            - --es.all
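+            # --es.all exports stats for every node in the cluster, not only the node behind --es.uri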
+          ports:
+            - name: metrics
+              containerPort: 9114
+          livenessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: 60
+            timeoutSeconds: 5
+          readinessProbe:
+            httpGet:
+              path: /metrics
+              port: metrics
+            initialDelaySeconds: 5
+            timeoutSeconds: 1
+          {{- if .Values.metrics.resources }}
+          resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+          {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1bde5fae7b955d2299f9201129778195f9faafe8
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/metrics-svc.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+  annotations: {{ include "elasticsearch.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
+spec:
+  type: {{ .Values.metrics.service.type }}
+  ports:
+    - name: metrics
+      port: 9114
+      targetPort: metrics
+  selector: {{- include "elasticsearch.matchLabels" . | nindent 4 }}
+    role: metrics
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..bc387440339f58b3991bbf36e1b225bd2a338d9d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/podsecuritypolicy.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.curator.enabled .Values.curator.psp.create }}
+apiVersion: {{ include "podsecuritypolicy.apiVersion" . }}
+kind: PodSecurityPolicy
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+spec:
+  privileged: true
+  #requiredDropCapabilities:
+  volumes:
+    - 'configMap'
+    - 'secret'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    # Require the container to run without root privileges.
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: false
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..f0da273f12b084b3e154457c4fe08fa879070018
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/role.yaml
@@ -0,0 +1,20 @@
+{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+    component: elasticsearch-curator-configmap
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["update", "patch"]
+  {{- if .Values.curator.psp.create }}
+  - apiGroups: ["extensions"]
+    resources: ["podsecuritypolicies"]
+    verbs: ["use"]
+    resourceNames:
+      - {{ include "elasticsearch.curator.fullname" . }}
+  {{- end }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..fc9060b063713d5b704d05068e8dfbb52b558a7c
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/rolebinding.yaml
@@ -0,0 +1,17 @@
+{{- if and .Values.curator.enabled .Values.curator.rbac.enabled }}
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ include "elasticsearch.curator.name" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+    component: elasticsearch-curator-configmap
+roleRef:
+  kind: Role
+  name: {{ template "elasticsearch.curator.name" . }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "elasticsearch.curator.serviceAccountName" . }}
+    namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..588cadd87c6ff544302678d95f6c069eb5be22f6
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/serviceaccount.yaml
@@ -0,0 +1,35 @@
+{{- if and .Values.curator.enabled .Values.curator.serviceAccount.create .Values.curator.rbac.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "elasticsearch.curator.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: curator
+{{- end }}
+---
+{{- if .Values.data.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.data.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: data
+{{- end }}
+---
+{{- if .Values.master.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.master.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: master
+{{- end }}
+---
+{{- if .Values.coordinating.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "elasticsearch.coordinating.serviceAccountName" . }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: coordinating-only
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..4a4ed799d9f389b5542d1fb21e6aab002026522d
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/templates/servicemonitor.yaml
@@ -0,0 +1,29 @@
+{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "elasticsearch.metrics.fullname" . }}
+  {{- if .Values.metrics.serviceMonitor.namespace }}
+  namespace: {{ .Values.metrics.serviceMonitor.namespace }}
+  {{- end }}
+  labels: {{- include "elasticsearch.labels" . | nindent 4 }}
+    role: metrics
+    {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
+    {{ $key }}: {{ $value | quote }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels: {{- include "elasticsearch.matchLabels" . | nindent 6 }}
+      role: metrics
+  endpoints:
+    - port: metrics
+      {{- if .Values.metrics.serviceMonitor.interval }}
+      interval: {{ .Values.metrics.serviceMonitor.interval }}
+      {{- end }}
+      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
+      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
+      {{- end }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..ba838ec547f807ab6acac3896f885e450f4ba60b
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values-production.yaml
@@ -0,0 +1,786 @@
+## Global Docker image parameters
+## Please note that these values override the image parameters, including those of chart dependencies, that are configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  # imageRegistry: myRegistryName
+  # imagePullSecrets:
+  #   - myRegistryKeySecretName
+  # storageClass: myStorageClass
+  ## Coordinating name to be used in the Kibana subchart (service name)
+  ##
+  coordinating:
+    name: coordinating-only
+  kibanaEnabled: true
+
+## Bitnami Elasticsearch image version
+## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/elasticsearch
+  tag: 7.6.1-debian-10-r22
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information in the logs
+  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override elasticsearch.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override elasticsearch.fullname template
+##
+# fullnameOverride:
+
+## Bitnami Minideb image version
+## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+##
+sysctlImage:
+  enabled: false
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Cluster domain
+##
+clusterDomain: cluster.local
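+## Used to build in-cluster DNS names of the form <service>.<namespace>.svc.<clusterDomain>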
+
+## Elasticsearch cluster name
+##
+name: elastic
+
+## Elasticsearch discovery node parameters
+##
+discovery:
+  name: discovery
+
+## Comma-, semicolon- or space-separated list of plugins to install at initialization
+## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables
+##
+# plugins:
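+## For instance (an illustrative pair of official plugins):
+# plugins: "analysis-icu analysis-kuromoji"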
+
+## Customize elasticsearch configuration
+## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+##
+# config:
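+## For instance, a minimal sketch of a custom elasticsearch.yml (any standard ES settings apply):
+# config:
+#   action.destructive_requires_name: true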
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example use case: mount SSL certificates when Elasticsearch has TLS enabled
+# extraVolumes:
+#   - name: es-certs
+#     secret:
+#       defaultMode: 420
+#       secretName: es-certs
+# extraVolumeMounts:
+#   - name: es-certs
+#     mountPath: /certs
+#     readOnly: true
+
+## Elasticsearch master-eligible node parameters
+##
+master:
+  name: master
+  ## Number of master-eligible node(s) replicas to deploy
+  ##
+  replicas: 3
+
+  ## updateStrategy for ElasticSearch master statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
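+  ## Elastic recommends keeping the JVM heap at or below ~50% of the memory available to the container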
+  ## Provide annotations for master-eligible pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for master-eligible pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch master-eligible container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch master-eligible container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim; if false, use an emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+
+  ## Service parameters for master-eligible node(s)
+  ##
+  service:
+    ## master-eligible service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the master node
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    # name:
+
+## Elasticsearch coordinating-only node parameters
+##
+coordinating:
+  ## Number of coordinating-only node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch coordinating deployment
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for the coordinating-only pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for coordinating-only pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch coordinating-only container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch coordinating-only container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Service parameters for coordinating-only node(s)
+  ##
+  service:
+    ## coordinating-only service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch REST API port
+    ##
+    port: 9200
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the coordinating node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch data node parameters
+##
+data:
+  name: data
+  ## Number of data node(s) replicas to deploy
+  ##
+  replicas: 3
+  ## updateStrategy for ElasticSearch Data statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+    # rollingUpdatePartition
+  heapSize: 128m
+  ## Provide annotations for the data pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for data pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch data container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 1152Mi
+  ## Elasticsearch data container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim; if false, use an emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the data node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch ingest node parameters
+##
+ingest:
+  enabled: true
+  name: ingest
+  ## Number of ingest node(s) replicas to deploy
+  ##
+  replicas: 2
+  heapSize: 128m
+  ## Provide annotations for the ingest pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for ingest pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch ingest container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch ingest container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 90
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## Service parameters for ingest node(s)
+  ##
+  service:
+    ## ingest service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+
+## Elasticsearch curator parameters
+##
+curator:
+  enabled: false
+  name: curator
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-curator
+    tag: 5.8.1-debian-10-r58
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  cronjob:
+    # At 01:00 every day
+    schedule: "0 1 * * *"
+    annotations: {}
+    concurrencyPolicy: ""
+    failedJobsHistoryLimit: ""
+    successfulJobsHistoryLimit: ""
+    jobRestartPolicy: Never
+
+  podAnnotations: {}
+
+  rbac:
+    # Specifies whether RBAC should be enabled
+    enabled: false
+
+  serviceAccount:
+    # Specifies whether a ServiceAccount should be created
+    create: true
+    # The name of the ServiceAccount to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name:
+
+  psp:
+    # Specifies whether a podsecuritypolicy should be created
+    create: false
+
+  hooks:
+    install: false
+    upgrade: false
+
+  # run curator in dry-run mode
+  dryrun: false
+
+  command: ["curator"]
+  env: {}
+
+  configMaps:
+    # Delete indices older than 90 days
+    action_file_yml: |-
+      ---
+      actions:
+        1:
+          action: delete_indices
+          description: "Clean up ES by deleting old indices"
+          options:
+            timeout_override:
+            continue_if_exception: False
+            disable_action: False
+            ignore_empty_list: True
+          filters:
+          - filtertype: age
+            source: name
+            direction: older
+            timestring: '%Y.%m.%d'
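+            # the timestring pattern must appear in the index names, e.g. myindex-2020.03.15 for '%Y.%m.%d'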
+            unit: days
+            unit_count: 90
+            field:
+            stats_result:
+            epoch:
+            exclude: False
+    # Default config (this value is evaluated as a template)
+    config_yml: |-
+      ---
+      client:
+        hosts:
+          - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+        port: {{ .Values.coordinating.service.port }}
+        # url_prefix:
+        # use_ssl: True
+        # certificate:
+        # client_cert:
+        # client_key:
+        # ssl_no_validate: True
+        # http_auth:
+        # timeout: 30
+        # master_only: False
+      # logging:
+      #   loglevel: INFO
+      #   logfile:
+      #   logformat: default
+      #   blacklist: ['elasticsearch', 'urllib3']
+
+  ## Curator resources requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+    requests: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+
+  priorityClassName: ""
+
+  # extraVolumes and extraVolumeMounts allow you to mount other volumes
+  # Example use case: mount SSL certificates when Elasticsearch has TLS enabled
+  # extraVolumes:
+  #   - name: es-certs
+  #     secret:
+  #       defaultMode: 420
+  #       secretName: es-certs
+  # extraVolumeMounts:
+  #   - name: es-certs
+  #     mountPath: /certs
+  #     readOnly: true
+
+  ## Add your own init container or uncomment and modify the given example.
+  ##
+  extraInitContainers: {}
+  ## Don't configure the S3 repository until Elasticsearch is reachable.
+  ## Ensure that it is available at http://elasticsearch:9200
+  ##
+  # elasticsearch-s3-repository:
+  #   image: bitnami/minideb:latest
+  #   imagePullPolicy: "IfNotPresent"
+  #   command:
+  #   - "/bin/bash"
+  #   - "-c"
+  #   args:
+  #   - |
+  #     ES_HOST=elasticsearch
+  #     ES_PORT=9200
+  #     ES_REPOSITORY=backup
+  #     S3_REGION=us-east-1
+  #     S3_BUCKET=bucket
+  #     S3_BASE_PATH=backup
+  #     S3_COMPRESS=true
+  #     S3_STORAGE_CLASS=standard
+  #     install_packages curl && \
+  #     ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
+  #     cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY}
+  #     {
+  #       "type": "s3",
+  #       "settings": {
+  #         "bucket": "${S3_BUCKET}",
+  #         "base_path": "${S3_BASE_PATH}",
+  #         "region": "${S3_REGION}",
+  #         "compress": "${S3_COMPRESS}",
+  #         "storage_class": "${S3_STORAGE_CLASS}"
+  #       }
+  #     }
+  #     EOF
+
+## Elasticsearch Prometheus exporter configuration
+## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/
+##
+metrics:
+  enabled: true
+  name: metrics
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-exporter
+    tag: 1.1.0-debian-10-r57
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Elasticsearch Prometheus exporter service type
+  ##
+  service:
+    type: ClusterIP
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9114"
+  ## Elasticsearch Prometheus exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+  ## Metrics exporter pod annotations and labels
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9114"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Bundled Kibana parameters
+##
+kibana:
+  elasticsearch:
+    hosts:
+      - '{{ include "elasticsearch.coordinating.fullname" . }}'
+    port: 9200
diff --git a/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..1a8f3bdbe52e3015e1c1491f09b8a7986eaf655e
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/charts/elasticsearch/values.yaml
@@ -0,0 +1,786 @@
+## Global Docker image parameters
+## Please note that these values override the image parameters, including those of chart dependencies, that are configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  # imageRegistry: myRegistryName
+  # imagePullSecrets:
+  #   - myRegistryKeySecretName
+  # storageClass: myStorageClass
+  ## Coordinating name to be used in the Kibana subchart (service name)
+  ##
+  coordinating:
+    name: coordinating-only
+  kibanaEnabled: false
+
+## Bitnami Elasticsearch image version
+## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/elasticsearch
+  tag: 7.6.1-debian-10-r22
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information in the logs
+  ## ref:  https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override elasticsearch.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override elasticsearch.fullname template
+##
+# fullnameOverride:
+
+## Bitnami Minideb image version
+## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+##
+sysctlImage:
+  enabled: true
+  registry: docker.io
+  repository: bitnami/minideb
+  tag: buster
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: Always
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Cluster domain
+##
+clusterDomain: cluster.local
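+## Used to build in-cluster DNS names of the form <service>.<namespace>.svc.<clusterDomain>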
+
+## Elasticsearch cluster name
+##
+name: elastic
+
+## Elasticsearch discovery node parameters
+##
+discovery:
+  name: discovery
+
+## Comma-, semicolon- or space-separated list of plugins to install at initialization
+## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables
+##
+# plugins:
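+## For instance (an illustrative pair of official plugins):
+# plugins: "analysis-icu analysis-kuromoji"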
+
+## Customize elasticsearch configuration
+## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+##
+# config:
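+## For instance, a minimal sketch of a custom elasticsearch.yml (any standard ES settings apply):
+# config:
+#   action.destructive_requires_name: true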
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example use case: mount SSL certificates when Elasticsearch has TLS enabled
+# extraVolumes:
+#   - name: es-certs
+#     secret:
+#       defaultMode: 420
+#       secretName: es-certs
+# extraVolumeMounts:
+#   - name: es-certs
+#     mountPath: /certs
+#     readOnly: true
+
+## Elasticsearch master-eligible node parameters
+##
+master:
+  name: master
+  ## Number of master-eligible node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch master statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
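+  ## Elastic recommends keeping the JVM heap at or below ~50% of the memory available to the container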
+  ## Provide annotations for master-eligible pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for master-eligible pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch master-eligible container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch master-eligible container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim; if false, use an emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+
+  ## Service parameters for master-eligible node(s)
+  ##
+  service:
+    ## master-eligible service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the master node
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    # name:
+
+## Elasticsearch coordinating-only node parameters
+##
+coordinating:
+  ## Number of coordinating-only node(s) replicas to deploy
+  ##
+  replicas: 2
+
+  ## updateStrategy for ElasticSearch coordinating deployment
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+  ##
+  updateStrategy:
+    type: RollingUpdate
+
+  heapSize: 128m
+  ## Provide annotations for the coordinating-only pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for coordinating-only pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch coordinating-only container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch coordinating-only container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Service parameters for coordinating-only node(s)
+  ##
+  service:
+    ## coordinating-only service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch REST API port
+    ##
+    port: 9200
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the coordinating node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch data node parameters
+##
+data:
+  name: data
+  ## Number of data node(s) replicas to deploy
+  ##
+  replicas: 2
+  ## updateStrategy for ElasticSearch Data statefulset
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    type: RollingUpdate
+    # rollingUpdatePartition
+  heapSize: 128m
+  ## Provide annotations for the data pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for data pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch data container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 1152Mi
+  ## Elasticsearch data container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## If true, use a Persistent Volume Claim; if false, use an emptyDir
+    ##
+    enabled: true
+    ## Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    ## Persistent Volume Claim annotations
+    ##
+    annotations: {}
+    ## Persistent Volume Access Mode
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## Persistent Volume size
+    ##
+    size: 8Gi
+  ## Provide functionality to use RBAC
+  ##
+  serviceAccount:
+    ## Specifies whether a ServiceAccount should be created for the data node
+    ##
+    create: false
+    ## The name of the ServiceAccount to use.
+    ## If not set and create is true, a name is generated using the fullname template
+    ##
+    # name:
+
+## Elasticsearch ingest node parameters
+##
+ingest:
+  enabled: false
+  name: ingest
+  ## Number of ingest node(s) replicas to deploy
+  ##
+  replicas: 2
+  heapSize: 128m
+  ## Provide annotations for the ingest pods.
+  ##
+  podAnnotations: {}
+  ## Pod Security Context for ingest pods.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: true
+    fsGroup: 1001
+    runAsUser: 1001
+  ## Affinity for pod assignment.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+  ## Node labels for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## Tolerations for pod assignment. Evaluated as a template.
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+  ## Elasticsearch ingest container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    ## We usually recommend not to specify default resources and to leave this as a conscious
+    ## choice for the user. This also increases chances charts run on environments with little
+    ## resources, such as Minikube.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests:
+      cpu: 25m
+      memory: 256Mi
+  ## Elasticsearch ingest container's liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
+  ##
+  livenessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  readinessProbe:
+    enabled: false
+  #  initialDelaySeconds: 90
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  successThreshold: 1
+  #  failureThreshold: 5
+  ## Service parameters for ingest node(s)
+  ##
+  service:
+    ## ingest service type
+    ##
+    type: ClusterIP
+    ## Elasticsearch transport port
+    ##
+    port: 9300
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    ## Set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    # loadBalancerIP:
+
+## Elasticsearch curator parameters
+##
+curator:
+  enabled: false
+  name: curator
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-curator
+    tag: 5.8.1-debian-10-r58
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  cronjob:
+    # At 01:00 every day
+    schedule: "0 1 * * *"
+    annotations: {}
+    concurrencyPolicy: ""
+    failedJobsHistoryLimit: ""
+    successfulJobsHistoryLimit: ""
+    jobRestartPolicy: Never
+
+  podAnnotations: {}
+
+  rbac:
+    # Specifies whether RBAC should be enabled
+    enabled: false
+
+  serviceAccount:
+    # Specifies whether a ServiceAccount should be created
+    create: true
+    # The name of the ServiceAccount to use.
+    # If not set and create is true, a name is generated using the fullname template
+    name:
+
+  psp:
+    # Specifies whether a podsecuritypolicy should be created
+    create: false
+
+  hooks:
+    install: false
+    upgrade: false
+
+  # Run Curator in dry-run mode
+  dryrun: false
+
+  command: ["curator"]
+  env: {}
+
+  configMaps:
+    # Delete indices older than 90 days
+    action_file_yml: |-
+      ---
+      actions:
+        1:
+          action: delete_indices
+          description: "Clean up ES by deleting old indices"
+          options:
+            timeout_override:
+            continue_if_exception: False
+            disable_action: False
+            ignore_empty_list: True
+          filters:
+          - filtertype: age
+            source: name
+            direction: older
+            timestring: '%Y.%m.%d'
+            unit: days
+            unit_count: 90
+            field:
+            stats_result:
+            epoch:
+            exclude: False
+    # Default config (this value is evaluated as a template)
+    config_yml: |-
+      ---
+      client:
+        hosts:
+          - {{ template "elasticsearch.coordinating.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+        port: {{ .Values.coordinating.service.port }}
+        # url_prefix:
+        # use_ssl: True
+        # certificate:
+        # client_cert:
+        # client_key:
+        # ssl_no_validate: True
+        # http_auth:
+        # timeout: 30
+        # master_only: False
+      # logging:
+      #   loglevel: INFO
+      #   logfile:
+      #   logformat: default
+      #   blacklist: ['elasticsearch', 'urllib3']
+
+  ## Curator resources requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not specifying default resources, leaving this as a
+    # conscious choice for the user. This also increases the chances that charts
+    # run in environments with limited resources, such as Minikube. If you do want
+    # to specify resources, uncomment the following lines, adjust them as necessary,
+    # and remove the curly braces after 'resources:'.
+    limits: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+    requests: {}
+    #  cpu: 100m
+    #  memory: 128Mi
+
+  priorityClassName: ""
+
+  # extraVolumes and extraVolumeMounts allow you to mount additional volumes
+  # Example use case: mount SSL certificates when Elasticsearch has TLS enabled
+  # extraVolumes:
+  #   - name: es-certs
+  #     secret:
+  #       defaultMode: 420
+  #       secretName: es-certs
+  # extraVolumeMounts:
+  #   - name: es-certs
+  #     mountPath: /certs
+  #     readOnly: true
+
+  ## Add your own init container or uncomment and modify the given example.
+  ##
+  extraInitContainers: {}
+  ## Don't configure the S3 repository until Elasticsearch is reachable.
+  ## Ensure that it is available at http://elasticsearch:9200
+  ##
+  # elasticsearch-s3-repository:
+  #   image: bitnami/minideb:latest
+  #   imagePullPolicy: "IfNotPresent"
+  #   command:
+  #   - "/bin/bash"
+  #   - "-c"
+  #   args:
+  #   - |
+  #     ES_HOST=elasticsearch
+  #     ES_PORT=9200
+  #     ES_REPOSITORY=backup
+  #     S3_REGION=us-east-1
+  #     S3_BUCKET=bucket
+  #     S3_BASE_PATH=backup
+  #     S3_COMPRESS=true
+  #     S3_STORAGE_CLASS=standard
+  #     install_packages curl && \
+  #     ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
+  #     cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY}
+  #     {
+  #       "type": "s3",
+  #       "settings": {
+  #         "bucket": "${S3_BUCKET}",
+  #         "base_path": "${S3_BASE_PATH}",
+  #         "region": "${S3_REGION}",
+  #         "compress": "${S3_COMPRESS}",
+  #         "storage_class": "${S3_STORAGE_CLASS}"
+  #       }
+  #     }
+  #     EOF
+
+## Elasticsearch Prometheus exporter configuration
+## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/
+##
+metrics:
+  enabled: false
+  name: metrics
+  image:
+    registry: docker.io
+    repository: bitnami/elasticsearch-exporter
+    tag: 1.1.0-debian-10-r57
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Elasticsearch Prometheus exporter service type
+  ##
+  service:
+    type: ClusterIP
+    ## Provide any additional annotations that may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9114"
+  ## Elasticsearch Prometheus exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not specifying default resources, leaving this as a
+    # conscious choice for the user. This also increases the chances that charts
+    # run in environments with limited resources, such as Minikube. If you do want
+    # to specify resources, uncomment the following lines, adjust them as necessary,
+    # and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+  ## Metrics exporter pod annotations and labels
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "8080"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace in which Prometheus is running
+    ##
+    # namespace: monitoring
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+## Bundled Kibana parameters
+##
+kibana:
+  elasticsearch:
+    hosts:
+      - '{{ include "elasticsearch.coordinating.fullname" . }}'
+    port: 9200
diff --git a/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf b/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf
new file mode 100644
index 0000000000000000000000000000000000000000..516f52d8a7f9654aeb0f10aeb112c50472caacd0
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/configs/asapo-fluentd.conf
@@ -0,0 +1,39 @@
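+# Fluentd sidecar pipeline, rendered once per service: tail the service's
+# container log files, parse the JSON payload nested in each Docker log line,
+# drop the raw log/stream fields, stamp every record with the pod hostname,
+# and ship the result to Elasticsearch in logstash format.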
+<source>
+  @type tail
+  path /var/log/containers/{{ .service.serviceName }}*{{ .service.serviceName }}*.log
+  pos_file /tmp/{{ .service.serviceName }}.log.pos
+  tag asapo
+  <parse>
+    @type json
+  </parse>
+</source>
+
+<filter asapo.**>
+  @type parser
+  key_name log
+  format json
+  time_format %Y-%m-%d %H:%M:%S.%N
+  reserve_data true
+</filter>
+
+<filter asapo.**>
+  @type record_transformer
+  enable_ruby
+  remove_keys ["log","stream"]
+  <record>
+    source_addr ${hostname}
+  </record>
+</filter>
+
+<match asapo.**>
+  @type elasticsearch
+  host asapo-elk-coordinating
+  port {{ .Values.elasticsearch.coordinating.service.port }}
+  flush_interval 5s
+  logstash_format true
+  time_key_format %Y-%m-%dT%H:%M:%S.%N
+  time_key time
+  time_key_exclude_timestamp true
+  buffer_type memory
+</match>
+
diff --git a/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl b/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl
index 14851a6c10620c7347b0ed1e9284bc2f67c1d032..04692f57c8cb6970532415cab677c67efc9f5090 100644
--- a/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl
+++ b/deploy/asapo_helm_chart/asapo/templates/_config-map.tpl
@@ -6,5 +6,17 @@ metadata:
   name: {{ .service.serviceName }}-config
 data:
   {{ .service.serviceName }}.json:  {{ tpl (.Files.Get (printf "configs/%s.json" .service.serviceName)) . | quote }}
+
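+{{/* When sidecarLogs is enabled for a service, render the shared fluentd
+     sidecar configuration into its own ConfigMap next to the service config */}}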
+{{- if .service.sidecarLogs }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .service.serviceName }}-fluentd-config
+data:
+  asapo-fluentd.conf: {{ tpl (.Files.Get "configs/asapo-fluentd.conf") . | quote }}
+{{- end }}
+
 {{- end }}
 
+
diff --git a/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl b/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..88ceda218c8316c3dedae3056401a89dc1223447
--- /dev/null
+++ b/deploy/asapo_helm_chart/asapo/templates/_deployment.tpl
@@ -0,0 +1,29 @@
+{{/* Fluentd sidecar container definition */}}
+{{- define "asapo.fluentd.container" }}
+- name: fluentd
+  image: "yakser/fluentd_elastic"
+  command: ["fluentd"]
+  args: ["-c", "/fluentd/etc/asapo-fluentd.conf"]
+  volumeMounts:
+    - mountPath: "/fluentd/etc"
+      name: fluentd-config
+    - mountPath: /var/log/containers
+      name: logs
+    - mountPath: /var
+      name: var
+{{- end }}
+
+{{/* Volumes used by the fluentd sidecar */}}
+{{- define "asapo.fluentd.volumes" }}
+- name: fluentd-config
+  configMap:
+    name: {{ .serviceName }}-fluentd-config
+- name: logs
+  hostPath:
+    path: /var/log/containers
+    type: Directory
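+{{/* Entries in /var/log/containers are typically symlinks into /var (e.g.
+     /var/lib/docker), so the sidecar also mounts the wider /var hostPath
+     in order to follow them */}}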
+- name: var
+  hostPath:
+    path: /var
+    type: Directory
+{{- end }}
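+
+{{/*
+Usage sketch (mirroring authorizer-deployment.yaml and broker-deployment.yaml):
+the volumes helper takes the service entry so it can name the right ConfigMap,
+while the container helper reads nothing from its context:
+  volumes:
+    {{- include "asapo.fluentd.volumes" .Values.ownServices.broker | indent 8 }}
+  containers:
+    {{- include "asapo.fluentd.container" . | indent 8 }}
+*/}}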
diff --git a/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml
index 7ad1bd27e79e57d0b98879d51d5e51c394c5f914..e923be6acd41da528beaf088b8852125d3b8cf55 100644
--- a/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml
+++ b/deploy/asapo_helm_chart/asapo/templates/authorizer-deployment.yaml
@@ -16,6 +16,7 @@ spec:
       annotations:
         checksum/config: {{ .Files.Get "configs/asapo-authorizer.json" | sha256sum  }}
         checksum/secret: {{ include (print $.Template.BasePath "/auth-secret.yaml") . | sha256sum }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
     spec:
       volumes:
         - name: all-in-one
@@ -31,8 +32,9 @@ spec:
         - name: shared-volume-online
           persistentVolumeClaim:
             claimName: asapo-online-pv
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.authorizer | indent 8 }}
       containers:
-        - name: authorizer
+        - name: asapo-authorizer
           image: "yakser/asapo-authorizer-dev:{{ .Values.common.asapoVersionTag }}"
           command: ["/asapo-authorizer"]
           args: ["-config", "/etc/authorizer/asapo-authorizer.json"]
@@ -45,5 +47,4 @@ spec:
               name: shared-volume-offline
             - mountPath: {{ .Values.common.onlineDir }}
               name: shared-volume-online
-
-
+      {{- include "asapo.fluentd.container" . | indent 8 }}
\ No newline at end of file
diff --git a/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml b/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml
index 9552f0cc15a145a924e90feb485635132ab03ecb..8c88d8ea0f53bc669b4dae2500191d3824b6b69e 100644
--- a/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml
+++ b/deploy/asapo_helm_chart/asapo/templates/broker-deployment.yaml
@@ -15,6 +15,7 @@ spec:
         app: broker
       annotations:
         checksum/config: {{ .Files.Get "configs/asapo-broker.json" | sha256sum  }}
+        checksum/fluentd-config: {{ .Files.Get "configs/asapo-fluentd.conf" | sha256sum  }}
         checksum/secret: {{ include (print $.Template.BasePath "/auth-secret.yaml") . | sha256sum }}
     spec:
       volumes:
@@ -25,12 +26,13 @@ spec:
                   name: asapo-broker-config
               - secret:
                   name: auth-secret
+        {{- include "asapo.fluentd.volumes" .Values.ownServices.broker | indent 8 }}
       initContainers:
         - name: wait-databases
           image: busybox:1.28
-          command: ['sh', '-c', "until wget asapo-influxdb:8086/ping; do echo waiting for influxdb; sleep 2; done"]
+          command: ['sh', '-c', "echo initializing; until wget asapo-influxdb:8086/ping; do echo waiting for influxdb; sleep 2; done"]
       containers:
-        - name: broker
+        - name: asapo-broker
           image: "yakser/asapo-broker-dev:{{ .Values.common.asapoVersionTag }}"
           command: ["/asapo-broker"]
           args: ["-config", "/etc/broker/asapo-broker.json"]
@@ -39,4 +41,4 @@ spec:
           volumeMounts:
             - mountPath: "/etc/broker"
               name: all-in-one
-
+        {{- include "asapo.fluentd.container" . | indent 8 }}
diff --git a/deploy/asapo_helm_chart/asapo/values.yaml b/deploy/asapo_helm_chart/asapo/values.yaml
index bcd30a3cf5b18b0c901db77d2a517c0bd8877835..d65c1ee1d3a121752a133d06ec43db530a1da5f4 100644
--- a/deploy/asapo_helm_chart/asapo/values.yaml
+++ b/deploy/asapo_helm_chart/asapo/values.yaml
@@ -3,11 +3,13 @@ ownServices:
     serviceName: asapo-authorizer
     replicaCount: 1
     port: 5006
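+    # run a fluentd logging sidecar next to this service (see templates/_deployment.tpl)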
+    sidecarLogs: true
     _exposeServiceExtrernally: false
   broker:
     serviceName: asapo-broker
     replicaCount: 1
     port: 5007
+    sidecarLogs: true
     _exposeServiceExtrernally: true
   discovery:
     serviceName: asapo-discovery
@@ -20,10 +22,40 @@ common:
   offlineDir: "/test_offline"
   onlineDir: "/test_online"
   asapoVersionTag: "develop.20.03-10-g06dc0be"
-
+  nodesPrefix: g
 influxdb:
   authEnabled: false
   influxdb:
     updateStrategy: Recreate
     service:
       port: 8086
+    ingress:
+      enabled: true
+      annotations:
+        kubernetes.io/ingress.class: "nginx"
+        nginx.ingress.kubernetes.io/whitelist-source-range: 131.169.0.0/16
+      hosts:
+        - name: "*.desy.de"
+          path_template: "/{{ .Release.Namespace }}/perfview"
+
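+# Bundled ELK stack for collecting and viewing service logs. The rendered
+# coordinating service name (presumably the release name plus the value below)
+# must match the host hardcoded in configs/asapo-fluentd.conf
+# (asapo-elk-coordinating).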
+elasticsearch:
+  global:
+    kibanaEnabled: true
+    coordinating:
+      name: elk-coordinating
+  kibana:
+    extraConfiguration:
+      "server.basePath": /{{ .Release.Namespace }}/logsview
+      "server.rewriteBasePath": true
+    plugins:
+      - "https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-7.6.1-0.1.31.zip"
+      - "x-pack"
+    healthCheckPathTemplate: "/{{ .Release.Namespace }}/logsview/app/kibana"
+    ingress:
+      enabled: true
+      annotations:
+        kubernetes.io/ingress.class: "nginx"
+        nginx.ingress.kubernetes.io/whitelist-source-range: 131.169.0.0/16
+      hosts:
+        - name: "*.desy.de"
+          path_template: "/{{ .Release.Namespace }}/logsview"
\ No newline at end of file
diff --git a/deploy/asapo_services/scripts/fluentd.conf.tpl b/deploy/asapo_services/scripts/fluentd.conf.tpl
index a46d063838dc19462084e5a60a60b886f6f5c659..c97a9048e4be3f3114fd59c225be2ca433910cef 100644
--- a/deploy/asapo_services/scripts/fluentd.conf.tpl
+++ b/deploy/asapo_services/scripts/fluentd.conf.tpl
@@ -17,7 +17,6 @@
 <filter asapo.docker>
   @type parser
   key_name log
-
   format json
   time_format %Y-%m-%d %H:%M:%S.%N
   reserve_data true