diff --git a/broker/src/asapo_broker/main/broker.go b/broker/src/asapo_broker/main/broker.go
index 202022471fca930a7915561f433806c6dc9eb3aa..8b935e679f2605b10f14b76c468087ebe799ab58 100644
--- a/broker/src/asapo_broker/main/broker.go
+++ b/broker/src/asapo_broker/main/broker.go
@@ -39,6 +39,8 @@ func main() {
 
 	log.SetLevel(logLevel)
 
+	log.Info("Starting Asapo Broker, version " + version.GetVersion())
+
 	server.CreateDiscoveryService()
 
 	err = server.InitDB(NewDefaultDatabase())
diff --git a/broker/src/asapo_broker/server/server_nottested.go b/broker/src/asapo_broker/server/server_nottested.go
index 990cf1409e6f133ccd48641647a9ea13350d0e35..5a4d32102e14453feddd1b3c86162b4e464222c2 100644
--- a/broker/src/asapo_broker/server/server_nottested.go
+++ b/broker/src/asapo_broker/server/server_nottested.go
@@ -5,7 +5,6 @@ package server
 import (
 	log "asapo_common/logger"
 	"asapo_common/utils"
-	"asapo_common/version"
 	"errors"
 	"net/http"
 	"strconv"
@@ -13,6 +12,7 @@ import (
 
 func StartStatistics() {
 	statistics.Writer = new(StatisticInfluxDbWriter)
+	statistics.Init()
 	statistics.Reset()
 	go statistics.Monitor()
 }
@@ -20,7 +20,6 @@ func StartStatistics() {
 func Start() {
 	StartStatistics()
 	mux := utils.NewRouter(listRoutes)
-	log.Info("Starting Asapo Broker, version " + version.GetVersion())
 	log.Info("Listening on port: " + strconv.Itoa(settings.Port))
 	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(settings.Port), http.HandlerFunc(mux.ServeHTTP)))
 }
diff --git a/broker/src/asapo_broker/server/statistics.go b/broker/src/asapo_broker/server/statistics.go
index 273e24919bf50b44e6b9ef8699fff1220c7c28fa..1dff498f09d58576b1bf175acc60ddc130fcffbf 100644
--- a/broker/src/asapo_broker/server/statistics.go
+++ b/broker/src/asapo_broker/server/statistics.go
@@ -9,6 +9,17 @@ import (
 
 type statisticsWriter interface {
 	Write(*serverStatistics) error
+	Init() error
+}
+
+func (st *serverStatistics) Init() {
+	st.mux.Lock()
+	defer st.mux.Unlock()
+	if err := st.Writer.Init(); err != nil {
+		log.Warning("cannot initialize statistic writer: " + err.Error())
+	} else {
+		log.Debug("initialized statistic at " + settings.PerformanceDbServer + " for " + settings.PerformanceDbName)
+	}
 }
 
 type serverStatistics struct {
diff --git a/broker/src/asapo_broker/server/statistics_test.go b/broker/src/asapo_broker/server/statistics_test.go
index 9e396fd3b133c26ebf817c2982d2649af798b7a8..147c1ff90376ac630f69b3f076d21684e320bcf0 100644
--- a/broker/src/asapo_broker/server/statistics_test.go
+++ b/broker/src/asapo_broker/server/statistics_test.go
@@ -10,6 +10,11 @@ type mockWriter struct {
 	mock.Mock
 }
 
+func (writer *mockWriter) Init() error {
+	args := writer.Called()
+	return args.Error(0)
+}
+
 func (writer *mockWriter) Write(statistics *serverStatistics) error {
 	args := writer.Called(statistics)
 	return args.Error(0)
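The broker side of this change follows a common Go pattern: the statisticsWriter interface grows an Init() error method, serverStatistics.Init() forwards to the writer under the mutex, and the mockWriter above lets tests stub it out. A minimal sketch of how such a test could look with testify; the test case itself is an illustrative assumption, not part of this patch:

```go
// Hypothetical test, placed next to the mockWriter declared above.
// It checks that serverStatistics.Init() forwards to the writer exactly once.
func TestStatisticsInitCallsWriterInit(t *testing.T) {
	writer := new(mockWriter)
	writer.On("Init").Return(nil) // expect one Init call that succeeds

	statistics := serverStatistics{Writer: writer}
	statistics.Init()

	writer.AssertExpectations(t)
}
```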
diff --git a/broker/src/asapo_broker/server/statistics_writers.go b/broker/src/asapo_broker/server/statistics_writers.go
index cdad6dcb3738503ff48fee5fef00f904cb9cdd33..c5cd4a322b05cdba152a5a076568a7f0f94ea0f7 100644
--- a/broker/src/asapo_broker/server/statistics_writers.go
+++ b/broker/src/asapo_broker/server/statistics_writers.go
@@ -3,8 +3,8 @@
 package server
 
 import (
+	"github.com/influxdata/influxdb1-client/v2"
 	"log"
-	"github.com/influxdata/influxdb1-client/v2"
 	"time"
 )
 
@@ -16,12 +16,30 @@ func (writer *StatisticLogWriter) Write(statistics *serverStatistics) error {
 	return nil
 }
 
+func (writer *StatisticLogWriter) Init() error {
+	return nil
+}
+
 type StatisticInfluxDbWriter struct {
 }
 
+func (writer *StatisticInfluxDbWriter) Init() error {
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://" + settings.PerformanceDbServer,
+	})
+	if err != nil {
+		return err
+	}
+	defer c.Close()
+	var query client.Query
+	query.Command = "create database " + settings.PerformanceDbName
+	_, err = c.Query(query)
+	return err
+}
+
 func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error {
 	c, err := client.NewHTTPClient(client.HTTPConfig{
-		Addr: "http://"+ settings.PerformanceDbServer,
+		Addr: "http://" + settings.PerformanceDbServer,
 	})
 	if err != nil {
 		return err
@@ -29,7 +47,7 @@ func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error
 	defer c.Close()
 
 	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
-		Database: settings.PerformanceDbName,
+		Database:  settings.PerformanceDbName,
 	})
 
 	tags := map[string]string{"Group ID": "0"}
diff --git a/deploy/docker/cluster/Dockerfile b/deploy/docker/cluster/Dockerfile
index c0182e934023cdc2fa5056ef5c96469450e3109e..2316d8811393091c18560cb251ae29c33a64846d 100644
--- a/deploy/docker/cluster/Dockerfile
+++ b/deploy/docker/cluster/Dockerfile
@@ -6,7 +6,8 @@ RUN apt-get update && apt-get install -y supervisor apt-transport-https \
     ca-certificates \
     curl \
     gnupg-agent \
-    software-properties-common
+    software-properties-common dnsutils
+
 
 
 RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
@@ -45,5 +46,8 @@ RUN mkdir -p /var/log/supervisord/
 
 COPY scripts/ /var/run/asapo/
 
+RUN cd /var/run/asapo && terraform init
+
+COPY asapo-* /usr/bin/
 
 ENTRYPOINT ["supervisord", "--configuration", "/etc/supervisord.conf"]
diff --git a/deploy/docker/cluster/asapo-start b/deploy/docker/cluster/asapo-start
new file mode 100755
index 0000000000000000000000000000000000000000..21b345a2a562d5216c51a5463ceabece4abfd766
--- /dev/null
+++ b/deploy/docker/cluster/asapo-start
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+cd /var/run/asapo && terraform apply -auto-approve "$@"
\ No newline at end of file
diff --git a/deploy/docker/cluster/asapo-stop b/deploy/docker/cluster/asapo-stop
new file mode 100755
index 0000000000000000000000000000000000000000..e4fad7e65f8014ad71696ea08d4f142a434b1e61
--- /dev/null
+++ b/deploy/docker/cluster/asapo-stop
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+cd /var/run/asapo && terraform destroy -auto-approve "$@"
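The broker's new Init() issues a "create database" statement against InfluxDB on every start. Since "create database" is idempotent in InfluxDB, this replaces the external provisioning that init_influxdb.sh and the test scripts used to do (both removed later in this patch). A standalone sketch of the same call, runnable against a local InfluxDB instance; the address and database name are placeholders:

```go
// Minimal sketch of the Init() pattern above, using the same
// influxdb1-client library; endpoint and database name are assumptions.
package main

import (
	"log"

	client "github.com/influxdata/influxdb1-client/v2"
)

func main() {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: "http://localhost:8086",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "create database" is idempotent in InfluxDB, so running it on every
	// service start is safe; no separate provisioning step is needed.
	response, err := c.Query(client.NewQuery("create database asapo_brokers", "", ""))
	if err != nil {
		log.Fatal(err)
	}
	if response.Error() != nil {
		log.Fatal(response.Error())
	}
}
```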
diff --git a/deploy/docker/cluster/asapo-wait-service b/deploy/docker/cluster/asapo-wait-service
new file mode 100755
index 0000000000000000000000000000000000000000..4b70a1630d4b6a5de0796098b0232ee97be96cb9
--- /dev/null
+++ b/deploy/docker/cluster/asapo-wait-service
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+if [ "$2" == "false" ]; then
+  exit 0
+fi
+
+
+until dig +short @127.0.0.1 -p 8600 $1.service.asapo | grep . ; do
+  sleep 1
+done
+
diff --git a/deploy/docker/cluster/init_influxdb.sh b/deploy/docker/cluster/init_influxdb.sh
deleted file mode 100755
index bda6960f2d1dd592801c9e6076200524a690aa6f..0000000000000000000000000000000000000000
--- a/deploy/docker/cluster/init_influxdb.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-
-influx=`dig +short @127.0.0.1 -p 8600 influxdb.service.asapo | head -1`
-
-databases="asapo_receivers asapo_brokers"
-
-for database in $databases
-do
-    curl -i -XPOST http://${influx}:8086/query --data-urlencode "q=CREATE DATABASE $database"
-done
diff --git a/deploy/docker/cluster/run.sh b/deploy/docker/cluster/run.sh
index f41fc9497d82f4c377f16ae6912c16924da9caf8..a2aa3699b62d7aea6c92af14fff21a047530f542 100755
--- a/deploy/docker/cluster/run.sh
+++ b/deploy/docker/cluster/run.sh
@@ -7,6 +7,18 @@ DATA_GLOBAL_SHARED=/tmp/asapo/global_shared/data
 mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
 chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
 
+cd $SERVICE_DATA_CLUSTER_SHARED
+mkdir esdatadir fluentd grafana influxdb mongodb
+chmod 777 *
+
+mmc=`cat /proc/sys/vm/max_map_count`
+
+if (( mmc < 262144 )); then
+  echo increase max_map_count - needed for elasticsearch
+  exit 1
+fi
+
+
 docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
 	-v /var/lib/docker:/var/lib/docker \
 	-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
diff --git a/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
index 6de159576a01950716d6381d6dafd0a15db0b599..bb23c869541f88f96df4e8806c355d7aabec74f9 100644
--- a/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
+++ b/deploy/docker/cluster/scripts/asapo-brokers.nmd.tpl
@@ -25,7 +25,7 @@ job "asapo-brokers" {
         image = "yakser/asapo-broker${image_suffix}"
         force_pull = true
         volumes = ["local/config.json:/var/lib/broker/config.json"]
-        %{ if fluentd_logs }
+        %{ if elk_logs }
         logging {
           type = "fluentd"
           config {
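The asapo-wait-service script added above blocks until Consul's DNS interface (port 8600) can resolve <name>.service.asapo, and its second argument lets a disabled service (e.g. when elk_logs is off) skip the wait. The same readiness check sketched in Go, under the assumption that a local Consul agent serves DNS on 127.0.0.1:8600; the service name in main() is an example:

```go
// Go sketch of the asapo-wait-service loop: query the local Consul DNS
// resolver until <name>.service.asapo resolves to at least one address.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func waitService(name string) {
	resolver := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "udp", "127.0.0.1:8600") // Consul DNS port
		},
	}
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		addrs, err := resolver.LookupHost(ctx, name+".service.asapo")
		cancel()
		if err == nil && len(addrs) > 0 {
			return
		}
		time.Sleep(time.Second) // same 1s retry as the shell loop
	}
}

func main() {
	waitService("influxdb")
	fmt.Println("influxdb resolvable via Consul DNS")
}
```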
"3m" @@ -93,20 +94,15 @@ job "asapo-logging" { nproc = "8192" } network_mode = "host" - dns_servers = ["127.0.0.1"] - image = "docker.elastic.co/elasticsearch/elasticsearch:6.3.0" - volumes = ["/${meta.shared_storage}/esdatadir:/usr/share/elasticsearch/data"] + image = "docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}" + volumes = ["/${service_dir}/esdatadir:/usr/share/elasticsearch/data"] } resources { - #MHz - cpu = 4000 - #MB - memory = 2048 + memory = "${elasticsearch_total_memory_size}" network { - mbits = 10 port "elasticsearch" { - static = 9200 + static = "${elasticsearch_port}" } } } @@ -134,24 +130,21 @@ job "asapo-logging" { config { network_mode = "host" - dns_servers = ["127.0.0.1"] - image = "docker.elastic.co/kibana/kibana:6.3.0" + image = "docker.elastic.co/kibana/kibana:${kibana_version}" volumes = ["local/kibana.yml:/usr/share/kibana/config/kibana.yml"] } template { - source = "/usr/local/nomad_jobs/kibana.yml" + source = "${scripts_dir}/kibana.yml" destination = "local/kibana.yml" change_mode = "restart" } resources { - cpu = 256 - memory = 1024 + memory = "${kibana_total_memory_size}" network { - mbits = 10 port "kibana" { - static = 5601 + static = "${kibana_port}" } } } diff --git a/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl index b48075b7d5e1e87e918b56dc2a047db97e065780..e303800314505f4f146302b74fcd4596a622b1c6 100644 --- a/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl +++ b/deploy/docker/cluster/scripts/asapo-nginx.nmd.tpl @@ -23,6 +23,17 @@ job "asapo-nginx" { task "nginx" { driver = "docker" + meta { + fluentd_port = "${fluentd_port}" + fluentd_port_stream = "${fluentd_port_stream}" + kibana_port = "${kibana_port}" + elasticsearch_port = "${elasticsearch_port}" + grafana_port = "${grafana_port}" + influxdb_port = "${influxdb_port}" + authorizer_port = "${authorizer_port}" + discovery_port = "${discovery_port}" + } + config { network_mode = "host" image = "nginx:${nginx_version}" diff --git a/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl index 83b5aeb602dedec7f75b29cb843adc8898908d63..5e78594b3898488fe0ac63a49fe51eddfa150ccf 100644 --- a/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl +++ b/deploy/docker/cluster/scripts/asapo-receivers.nmd.tpl @@ -28,7 +28,7 @@ job "asapo-receivers" { force_pull = true volumes = ["local/config.json:/var/lib/receiver/config.json", "${data_dir}:/var/lib/receiver/data"] - %{ if fluentd_logs } + %{ if elk_logs } logging { type = "fluentd" config { diff --git a/deploy/docker/cluster/scripts/asapo-services.nmd.tpl b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl index 3922fb8a5ad75beaafab1ee2b8c22bc12f1cda4a..45eb3c9384c5e48f7fcbf45a43c0b48575fcc4a2 100644 --- a/deploy/docker/cluster/scripts/asapo-services.nmd.tpl +++ b/deploy/docker/cluster/scripts/asapo-services.nmd.tpl @@ -15,7 +15,7 @@ job "asapo-services" { image = "yakser/asapo-authorizer${image_suffix}" force_pull = true volumes = ["local/config.json:/var/lib/authorizer/config.json"] - %{ if fluentd_logs } + %{ if elk_logs } logging { type = "fluentd" config { @@ -28,11 +28,10 @@ job "asapo-services" { } resources { - cpu = 500 # 500 MHz - memory = 256 # 256MB + memory = "${authorizer_total_memory_size}" network { port "authorizer" { - static = "5007" + static = "${authorizer_port}" } } } @@ -79,7 +78,7 @@ job "asapo-services" { image = "yakser/asapo-discovery${image_suffix}" force_pull = true volumes = 
["local/config.json:/var/lib/discovery/config.json"] - %{ if fluentd_logs } + %{ if elk_logs } logging { type = "fluentd" config { @@ -92,11 +91,10 @@ job "asapo-services" { } resources { - cpu = 500 # 500 MHz - memory = 256 # 256MB + memory = "${discovery_total_memory_size}" network { port "discovery" { - static = "5006" + static = "${discovery_port}" } } } diff --git a/deploy/docker/cluster/scripts/asapo.auto.tfvars b/deploy/docker/cluster/scripts/asapo.auto.tfvars index 62a3bf07419046ffb746890bf61a3fe365ff5be0..560ba638de48e0335a3e54b2cea6d2b3e5443b13 100644 --- a/deploy/docker/cluster/scripts/asapo.auto.tfvars +++ b/deploy/docker/cluster/scripts/asapo.auto.tfvars @@ -1,5 +1,7 @@ nginx_version = "1.14" - +elasticsearch_version = "6.3.0" +kibana_version = "6.3.0" +mongo_version = "4.0.0" asapo_imagename_suffix="-dev" diff --git a/deploy/docker/cluster/scripts/fluentd.conf b/deploy/docker/cluster/scripts/fluentd.conf.tpl similarity index 91% rename from deploy/docker/cluster/scripts/fluentd.conf rename to deploy/docker/cluster/scripts/fluentd.conf.tpl index 948c5109d5debb06e0c139ffa4bf0afadf5bad39..de6188765c2a7f0e435fac3c2b817879d1d9e0c5 100644 --- a/deploy/docker/cluster/scripts/fluentd.conf +++ b/deploy/docker/cluster/scripts/fluentd.conf.tpl @@ -1,13 +1,13 @@ <source> @type forward - port 24224 + port {{ env "NOMAD_PORT_fluentd_stream" }} source_hostname_key source_addr bind 0.0.0.0 </source> <source> @type http - port 9880 + port {{ env "NOMAD_PORT_fluentd" }} bind 0.0.0.0 add_remote_addr true format json diff --git a/deploy/docker/cluster/scripts/nginx.conf.tpl b/deploy/docker/cluster/scripts/nginx.conf.tpl index 72a7cefa1e0a1e6f7d5a618ad4cb78363b428a8f..f6cadf53c90bac64c989923fa591fcf6503891ad 100644 --- a/deploy/docker/cluster/scripts/nginx.conf.tpl +++ b/deploy/docker/cluster/scripts/nginx.conf.tpl @@ -27,26 +27,26 @@ http { location /influxdb/ { rewrite ^/influxdb(/.*) $1 break; - proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args; + proxy_pass http://$influxdb_endpoint:{{ env "NOMAD_META_influxdb_port" }}$uri$is_args$args; } location /elasticsearch/ { rewrite ^/elasticsearch(/.*) $1 break; - proxy_pass http://$elasticsearch_endpoint:9200$uri$is_args$args; + proxy_pass http://$elasticsearch_endpoint:{{ env "NOMAD_META_elasticsearch_port" }}$uri$is_args$args; } location /discovery/ { rewrite ^/discovery(/.*) $1 break; - proxy_pass http://$discovery_endpoint:5006$uri$is_args$args; + proxy_pass http://$discovery_endpoint:{{ env "NOMAD_META_discovery_port" }}$uri$is_args$args; } location /logs/ { rewrite ^/logs(/.*) $1 break; - proxy_pass http://$fluentd_endpoint:9880$uri$is_args$args; + proxy_pass http://$fluentd_endpoint:{{ env "NOMAD_META_fluentd_port" }}$uri$is_args$args; } location /logsview/ { - proxy_pass http://$kibana_endpoint:5601$uri$is_args$args; + proxy_pass http://$kibana_endpoint:{{ env "NOMAD_META_kibana_port" }}$uri$is_args$args; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Host $http_host; @@ -54,12 +54,12 @@ http { location /performance/ { rewrite ^/performance(/.*) $1 break; - proxy_pass http://$grafana_endpoint:3000$uri$is_args$args; + proxy_pass http://$grafana_endpoint:{{ env "NOMAD_META_grafana_port" }}$uri$is_args$args; } location /authorizer/ { rewrite ^/authorizer(/.*) $1 break; - proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args; + proxy_pass http://$authorizer_endpoint:{{ env "NOMAD_META_authorizer_port" }}$uri$is_args$args; } location /nginx-health { @@ -77,8 
diff --git a/deploy/docker/cluster/scripts/nginx.conf.tpl b/deploy/docker/cluster/scripts/nginx.conf.tpl
index 72a7cefa1e0a1e6f7d5a618ad4cb78363b428a8f..f6cadf53c90bac64c989923fa591fcf6503891ad 100644
--- a/deploy/docker/cluster/scripts/nginx.conf.tpl
+++ b/deploy/docker/cluster/scripts/nginx.conf.tpl
@@ -27,26 +27,26 @@ http {
 
          location /influxdb/ {
             rewrite ^/influxdb(/.*) $1 break;
-            proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
+            proxy_pass http://$influxdb_endpoint:{{ env "NOMAD_META_influxdb_port" }}$uri$is_args$args;
          }
 
          location /elasticsearch/ {
            rewrite ^/elasticsearch(/.*) $1 break;
-           proxy_pass http://$elasticsearch_endpoint:9200$uri$is_args$args;
+           proxy_pass http://$elasticsearch_endpoint:{{ env "NOMAD_META_elasticsearch_port" }}$uri$is_args$args;
          }
 
          location /discovery/ {
             rewrite ^/discovery(/.*) $1 break;
-            proxy_pass http://$discovery_endpoint:5006$uri$is_args$args;
+            proxy_pass http://$discovery_endpoint:{{ env "NOMAD_META_discovery_port" }}$uri$is_args$args;
          }
 
          location /logs/ {
             rewrite ^/logs(/.*) $1 break;
-            proxy_pass http://$fluentd_endpoint:9880$uri$is_args$args;
+            proxy_pass http://$fluentd_endpoint:{{ env "NOMAD_META_fluentd_port" }}$uri$is_args$args;
          }
 
          location /logsview/ {
-            proxy_pass http://$kibana_endpoint:5601$uri$is_args$args;
+            proxy_pass http://$kibana_endpoint:{{ env "NOMAD_META_kibana_port" }}$uri$is_args$args;
             proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header Host $http_host;
@@ -54,12 +54,12 @@ http {
 
          location /performance/ {
             rewrite ^/performance(/.*) $1 break;
-            proxy_pass http://$grafana_endpoint:3000$uri$is_args$args;
+            proxy_pass http://$grafana_endpoint:{{ env "NOMAD_META_grafana_port" }}$uri$is_args$args;
          }
 
          location /authorizer/ {
             rewrite ^/authorizer(/.*) $1 break;
-            proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
+            proxy_pass http://$authorizer_endpoint:{{ env "NOMAD_META_authorizer_port" }}$uri$is_args$args;
          }
 
          location /nginx-health {
@@ -77,8 +77,6 @@ stream {
 
     server {
         listen 9881;
-        proxy_pass $upstream:24224;
+        proxy_pass $upstream:{{ env "NOMAD_META_fluentd_port_stream" }};
     }
 }
-
-
diff --git a/deploy/docker/cluster/scripts/resources.tf b/deploy/docker/cluster/scripts/resources.tf
index 274425056c0fd89faf27fedf475691ec86702627..0ad2631b9c4abf9bda6109f7214b3dfb11289212 100644
--- a/deploy/docker/cluster/scripts/resources.tf
+++ b/deploy/docker/cluster/scripts/resources.tf
@@ -1,23 +1,31 @@
-resource "nomad_job" "asapo-perfmetrics" {
-  jobspec = "${data.template_file.asapo_perfmetrics.rendered}"
+resource "nomad_job" "asapo-nginx" {
+  jobspec = "${data.template_file.nginx.rendered}"
 }
 
 resource "nomad_job" "asapo-mongo" {
   jobspec = "${data.template_file.asapo_mongo.rendered}"
 }
 
-resource "nomad_job" "asapo-nginx" {
-  jobspec = "${data.template_file.nginx.rendered}"
+resource "nomad_job" "asapo-perfmetrics" {
+  jobspec = "${data.template_file.asapo_perfmetrics.rendered}"
+}
+
+resource "nomad_job" "asapo-logging" {
+  jobspec = "${data.template_file.asapo_logging.rendered}"
+  depends_on = [null_resource.nginx]
 }
 
 resource "nomad_job" "asapo-services" {
   jobspec = "${data.template_file.asapo_services.rendered}"
+  depends_on = [null_resource.nginx,null_resource.mongo,null_resource.influxdb,null_resource.fluentd,null_resource.elasticsearch]
 }
 
 resource "nomad_job" "asapo-receivers" {
   jobspec = "${data.template_file.asapo_receivers.rendered}"
+  depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
 }
 
 resource "nomad_job" "asapo-brokers" {
   jobspec = "${data.template_file.asapo_brokers.rendered}"
+  depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
 }
diff --git a/deploy/docker/cluster/scripts/resources_services.tf b/deploy/docker/cluster/scripts/resources_services.tf
new file mode 100644
index 0000000000000000000000000000000000000000..22499375fdee82d001a0a7285e5b152ae41301d0
--- /dev/null
+++ b/deploy/docker/cluster/scripts/resources_services.tf
@@ -0,0 +1,75 @@
+resource "null_resource" "nginx" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service nginx"
+  }
+  depends_on = [nomad_job.asapo-nginx]
+
+}
+
+resource "null_resource" "influxdb" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service influxdb"
+  }
+  depends_on = [nomad_job.asapo-perfmetrics]
+}
+
+resource "null_resource" "fluentd" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service fluentd ${var.elk_logs}"
+  }
+  depends_on = [nomad_job.asapo-logging]
+}
+
+resource "null_resource" "mongo" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service mongo"
+  }
+  depends_on = [nomad_job.asapo-mongo]
+
+}
+
+resource "null_resource" "asapo-authorizer" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service asapo-authorizer"
+  }
+  depends_on = [nomad_job.asapo-services]
+
+}
+
+resource "null_resource" "asapo-discovery" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service asapo-discovery"
+  }
+  depends_on = [nomad_job.asapo-services]
+}
+
+
+resource "null_resource" "asapo-broker" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service asapo-broker"
+  }
+  depends_on = [nomad_job.asapo-brokers]
+}
+
+resource "null_resource" "asapo-receiver" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service asapo-receiver"
+  }
+  depends_on = [nomad_job.asapo-receivers]
+}
+
+resource "null_resource" "elasticsearch" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service elasticsearch ${var.elk_logs}"
+  }
+  depends_on = [nomad_job.asapo-logging]
+}
+
+resource "null_resource" "kibana" {
+  provisioner "local-exec" {
+    command = "asapo-wait-service kibana ${var.elk_logs}"
+  }
+  depends_on = [nomad_job.asapo-logging]
+}
+
+
diff --git a/deploy/docker/cluster/scripts/templates.tf b/deploy/docker/cluster/scripts/templates.tf
index edf06bbadac4920b4d17f866bac0020c8f24d7eb..f6e87b25e765fb543852646a07e541bf104e5681 100644
--- a/deploy/docker/cluster/scripts/templates.tf
+++ b/deploy/docker/cluster/scripts/templates.tf
@@ -3,6 +3,14 @@ data "template_file" "nginx" {
   vars = {
     scripts_dir = "${var.job_scripts_dir}"
     nginx_version = "${var.nginx_version}"
+    fluentd_port = "${var.fluentd_port}"
+    fluentd_port_stream = "${var.fluentd_port_stream}"
+    kibana_port = "${var.kibana_port}"
+    elasticsearch_port = "${var.elasticsearch_port}"
+    grafana_port = "${var.grafana_port}"
+    influxdb_port = "${var.influxdb_port}"
+    authorizer_port = "${var.authorizer_port}"
+    discovery_port = "${var.discovery_port}"
   }
 }
 
@@ -11,7 +19,12 @@ data "template_file" "asapo_services" {
   vars = {
     scripts_dir = "${var.job_scripts_dir}"
     image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
-    fluentd_logs = "${var.fluentd_logs}"
+    elk_logs = "${var.elk_logs}"
+    authorizer_total_memory_size = "${var.authorizer_total_memory_size}"
+    discovery_total_memory_size = "${var.discovery_total_memory_size}"
+    authorizer_port = "${var.authorizer_port}"
+    discovery_port = "${var.discovery_port}"
+
   }
 }
 
@@ -21,7 +34,7 @@ data "template_file" "asapo_receivers" {
     scripts_dir = "${var.job_scripts_dir}"
     data_dir = "${var.data_dir}"
     image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
-    fluentd_logs = "${var.fluentd_logs}"
+    elk_logs = "${var.elk_logs}"
     receiver_total_memory_size = "${var.receiver_total_memory_size}"
     receiver_dataserver_cache_size = "${var.receiver_dataserver_cache_size}"
   }
@@ -32,7 +45,7 @@ data "template_file" "asapo_brokers" {
   vars = {
     scripts_dir = "${var.job_scripts_dir}"
     image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
-    fluentd_logs = "${var.fluentd_logs}"
+    elk_logs = "${var.elk_logs}"
   }
 }
 
@@ -47,7 +60,6 @@ data "template_file" "asapo_perfmetrics" {
     grafana_port = "${var.grafana_port}"
     influxdb_total_memory_size = "${var.influxdb_total_memory_size}"
     influxdb_port = "${var.influxdb_port}"
-
   }
 }
 
@@ -61,3 +73,22 @@ data "template_file" "asapo_mongo" {
     mongo_port = "${var.mongo_port}"
   }
 }
+
+data "template_file" "asapo_logging" {
+  template = "${file("${var.job_scripts_dir}/asapo-logging.nmd.tpl")}"
+  vars = {
+    service_dir = "${var.service_dir}"
+    scripts_dir = "${var.job_scripts_dir}"
+    elk_logs = "${var.elk_logs}"
+    fluentd_total_memory_size = "${var.fluentd_total_memory_size}"
+    fluentd_port = "${var.fluentd_port}"
+    fluentd_port_stream = "${var.fluentd_port_stream}"
+    kibana_version = "${var.kibana_version}"
+    kibana_total_memory_size = "${var.kibana_total_memory_size}"
+    kibana_port = "${var.kibana_port}"
+    elasticsearch_version = "${var.elasticsearch_version}"
+    elasticsearch_total_memory_size = "${var.elasticsearch_total_memory_size}"
+    elasticsearch_port = "${var.elasticsearch_port}"
+  }
+}
+
"grafana_version" { default = "latest" } +variable "elasticsearch_version" { + default = "latest" +} + +variable "kibana_version" { + default = "latest" +} variable "influxdb_version" { default = "latest" } +variable "mongo_version" { + default = "latest" +} + variable "asapo_imagename_suffix" { default = "" } @@ -33,7 +44,6 @@ variable "service_dir" { variable "data_dir" { } - variable "receiver_total_memory_size" { default = "2000" #mb } @@ -43,11 +53,35 @@ variable "receiver_dataserver_cache_size" { } variable "grafana_total_memory_size" { - default = "2000" #mb + default = "256" #mb } variable "influxdb_total_memory_size" { - default = "2000" #mb + default = "256" #mb +} + +variable "fluentd_total_memory_size" { + default = "256" +} + +variable "elasticsearch_total_memory_size" { + default = "256" +} + +variable "kibana_total_memory_size" { + default = "256" +} + +variable "mongo_total_memory_size" { + default = "300" +} + +variable "authorizer_total_memory_size" { + default = "256" +} + +variable "discovery_total_memory_size" { + default = "256" } variable "grafana_port" { @@ -58,17 +92,30 @@ variable "influxdb_port" { default = "8086" } - variable "mongo_port" { default = "27017" } +variable "fluentd_port" { + default = "9880" +} -variable "mongo_version" { - default = "4.0.0" +variable "fluentd_port_stream" { + default = "24224" } -variable "mongo_total_memory_size" { - default = "300" +variable "elasticsearch_port" { + default = "9200" } +variable "kibana_port" { + default = "5601" +} + +variable "discovery_port" { + default = "5006" +} + +variable "authorizer_port" { + default = "5007" +} diff --git a/deploy/docker/cluster/tf_run.sh b/deploy/docker/cluster/tf_run.sh deleted file mode 100755 index 13bab997ea8dea1d2c26c92ef7dce200c1fd8041..0000000000000000000000000000000000000000 --- a/deploy/docker/cluster/tf_run.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -docker exec -w /var/run/asapo asapo terraform init - -docker exec -w /var/run/asapo asapo terraform apply -auto-approve -var fluentd_logs=false \ No newline at end of file diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go index ceda776a0e2c6c6e79e5a62d37eb2519927cdbe1..8e34fd8f2e0dd5ecb22a3cd39a364722ff0404ab 100644 --- a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go +++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go @@ -97,7 +97,7 @@ func (rh *ConsulRequestHandler) GetMongo() ([]byte, error) { if (rh.client == nil) { return nil, errors.New("consul client not connected") } - response, err := rh.GetServices("asapo-mongo") + response, err := rh.GetServices("mongo") if err != nil { return nil, err } diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go index e1f9d47b0292f86e8c77c95d670fde7f8669088f..af5f08b92c969edcd7b3b5ac3d449f1a677554dc 100644 --- a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go +++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go @@ -51,7 +51,7 @@ func (suite *ConsulHandlerTestSuite) SetupTest() { suite.registerAgents("asapo-receiver") suite.registerAgents("asapo-broker") - suite.registerAgents("asapo-mongo") + suite.registerAgents("mongo") } @@ -60,8 +60,8 @@ func (suite *ConsulHandlerTestSuite) TearDownTest() { 
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
index e1f9d47b0292f86e8c77c95d670fde7f8669088f..af5f08b92c969edcd7b3b5ac3d449f1a677554dc 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
@@ -51,7 +51,7 @@ func (suite *ConsulHandlerTestSuite) SetupTest() {
 
 	suite.registerAgents("asapo-receiver")
 	suite.registerAgents("asapo-broker")
-	suite.registerAgents("asapo-mongo")
+	suite.registerAgents("mongo")
 
 }
 
@@ -60,8 +60,8 @@ func (suite *ConsulHandlerTestSuite) TearDownTest() {
 	suite.client.Agent().ServiceDeregister("asapo-receiver1235")
 	suite.client.Agent().ServiceDeregister("asapo-broker1234")
 	suite.client.Agent().ServiceDeregister("asapo-broker1235")
-	suite.client.Agent().ServiceDeregister("asapo-mongo1234")
-	suite.client.Agent().ServiceDeregister("asapo-mongo1235")
+	suite.client.Agent().ServiceDeregister("mongo1234")
+	suite.client.Agent().ServiceDeregister("mongo1235")
 }
 
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index 31d64038b2eab30ed95b4191de19bf183066198b..871ae2c64597353d8662d9321e3ef81077d84df5 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -98,7 +98,6 @@ std::vector<std::thread> StartThreads(const Args& params,
             }
             if (err) {
                 (*errors)[i] += ProcessError(err);
-                std::cout << "Received: " << (int) err->GetErrorType() << err << std::endl;
                 if (err == asapo::IOErrorTemplates::kTimeout) {
                     break;
                 }
diff --git a/receiver/src/statistics_sender_influx_db.cpp b/receiver/src/statistics_sender_influx_db.cpp
index 3598ca27b57081c72e6237301670f81e04ebe194..66bac257969cc08d84f1873147417ebcd9e813d4 100644
--- a/receiver/src/statistics_sender_influx_db.cpp
+++ b/receiver/src/statistics_sender_influx_db.cpp
@@ -54,6 +54,24 @@ std::string StatisticsSenderInfluxDb::StatisticsToString(const StatisticsToSend&
 }
 
 StatisticsSenderInfluxDb::StatisticsSenderInfluxDb(): httpclient__{DefaultHttpClient()}, log__{GetDefaultReceiverLogger()} {
+    HttpCode code;
+    Error err;
+    auto response = httpclient__->Post(GetReceiverConfig()->performance_db_uri + "/query",
+                                       "q=create database " + GetReceiverConfig()->performance_db_name, &code, &err);
+    std::string msg = "initializing statistics for " + GetReceiverConfig()->performance_db_name + " at " +
+                      GetReceiverConfig()->performance_db_uri;
+    if (err) {
+        log__->Warning(msg + " - " + err->Explain());
+        return;
+    }
+
+    if (code != HttpCode::OK && code != HttpCode::NoContent) {
+        log__->Warning(msg + " - " + response);
+        return;
+    }
+
+    log__->Debug(msg);
+
 };
diff --git a/receiver/src/statistics_sender_influx_db.h b/receiver/src/statistics_sender_influx_db.h
index 65c1a579d5e0f65e29c3135f491c09bed85fa9dc..1c51e80a71beeea95ba41ead6d07958276178fb1 100644
--- a/receiver/src/statistics_sender_influx_db.h
+++ b/receiver/src/statistics_sender_influx_db.h
@@ -13,9 +13,7 @@ class StatisticsSenderInfluxDb : public StatisticsSender {
     virtual void SendStatistics(const StatisticsToSend& statistic) const noexcept override;
     std::unique_ptr<HttpClient> httpclient__;
     const AbstractLogger* log__;
-    ~StatisticsSenderInfluxDb() {
-        printf("Deleting StatisticsSenderInfluxDb\n");
-    };
+    ~StatisticsSenderInfluxDb() {};
   private:
     std::string StatisticsToString(const StatisticsToSend& statistic) const noexcept;
 };
diff --git a/tests/automatic/broker/check_monitoring/check_linux.sh b/tests/automatic/broker/check_monitoring/check_linux.sh
index 7767ebcb4a5d295b6df8987bd2c5f0b445842a47..5945ba27271b04f517f71c23405ab061a21be813 100644
--- a/tests/automatic/broker/check_monitoring/check_linux.sh
+++ b/tests/automatic/broker/check_monitoring/check_linux.sh
@@ -13,11 +13,8 @@ Cleanup() {
 	kill -9 $brokerid
 }
 
-influx -execute "create database ${database_name}"
-
 token=`$2 token -secret auth_secret.key data`
 
-
 $1 -config settings.json &
 
 sleep 0.3
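The receiver gets the same treatment as the broker: its constructor now posts "q=create database ..." to InfluxDB's /query endpoint, which is why the explicit `influx -execute "create database ..."` lines disappear from the test scripts above and below. The equivalent request sketched in Go; the endpoint and database name are placeholders:

```go
// Go sketch of the HTTP request the receiver constructor now issues.
package main

import (
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	form := url.Values{"q": {"create database asapo_receivers"}}
	resp, err := http.PostForm("http://localhost:8086/query", form)
	if err != nil {
		log.Fatal(err) // transport-level failure, like err in the C++ code
	}
	defer resp.Body.Close()

	// Mirror the C++ check: anything but 200 OK / 204 No Content is a warning.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(resp.Body)
		log.Printf("warning: create database failed: %s", body)
		return
	}
	log.Print("statistics database initialized")
}
```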
diff --git a/tests/automatic/full_chain/simple_chain/check_linux.sh b/tests/automatic/full_chain/simple_chain/check_linux.sh
index faafb60cc8abf2ea3bd8aebd12fa35b91ac752de..b350400129e716f6be2bce05d10ff226dd7adde0 100644
--- a/tests/automatic/full_chain/simple_chain/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain/check_linux.sh
@@ -28,7 +28,6 @@ Cleanup() {
     influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
diff --git a/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh b/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
index 95d5b1b515b4741cad10da18a47df4a55a6b8279..b6326b5946ad9f31956366507a4941f87a12ee60 100644
--- a/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_dataset/check_linux.sh
@@ -27,7 +27,6 @@ Cleanup() {
     influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
diff --git a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
index 552d2f0119ac6ee46dc45ddd2f87bdcc05fc1229..569a50d243caff41e7b15021a20cf997fd0f8b90 100644
--- a/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_filegen_readdata_cache/check_linux.sh
@@ -32,9 +32,6 @@ Cleanup() {
     rm out.txt
 }
 
-influx -execute "create database ${monitor_database_name}"
-
-
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
@@ -63,6 +60,6 @@ grep "hello1" out.txt
 grep "hello2" out.txt
 grep "hello3" out.txt
 
-sleep 10
+sleep 12
 
 influx -execute "select sum(n_requests) from statistics where receiver_ds_tag !=''" -database=${monitor_database_name} -format=json | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 3
diff --git a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
index 7496544bb7f140f73033471c9f24ab566b08a66b..26cd262a3f935e773be3fa57b742f4b666a8eebc 100644
--- a/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_metadata/check_linux.sh
@@ -27,7 +27,6 @@ Cleanup() {
     influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
diff --git a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
index 64c35ed36dc0866eca7243ccbaee9437fab26bd9..c965b179656c6f64afdc16bbb504cadadfba07ad 100644
--- a/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
+++ b/tests/automatic/full_chain/simple_chain_usermeta_python/check_linux.sh
@@ -27,7 +27,6 @@ Cleanup() {
     influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
 nomad run nginx.nmd
diff --git a/tests/automatic/full_chain/two_beamlines/check_linux.sh b/tests/automatic/full_chain/two_beamlines/check_linux.sh
index c92aaffe610e552b2d1fecba096e4057d86691b5..2bc02dff355baa00183e0808938f2dd4185b0bf0 100644
--- a/tests/automatic/full_chain/two_beamlines/check_linux.sh
+++ b/tests/automatic/full_chain/two_beamlines/check_linux.sh
@@ -35,7 +35,6 @@ Cleanup() {
    influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
 echo "db.${beamtime_id1}_${stream}.insert({dummy:1})" | mongo ${beamtime_id1}_${stream}
 echo "db.${beamtime_id2}_${stream}.insert({dummy:1})" | mongo ${beamtime_id2}_${stream}
 
diff --git a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
index 86ced0726a978b47a18d27b1d29650c64853474a..a2b10337589d35bb088e1a660ea848ca66d90d4c 100644
--- a/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
+++ b/tests/automatic/high_avail/broker_mongo_restart/check_linux.sh
@@ -52,8 +52,6 @@ Cleanup() {
     kill_mongo
 }
 
-influx -execute "create database ${monitor_database_name}"
-
 sed -i 's/27017/27016/g' receiver.json.tpl
 sed -i 's/27017/27016/g' discovery.json.tpl
 sed -i 's/info/debug/g' broker.json.tpl
diff --git a/tests/automatic/high_avail/services_restart/check_linux.sh b/tests/automatic/high_avail/services_restart/check_linux.sh
index 2b08ee4c3fa639669121530d6a1b5af38035cdc1..c68d798463f82ea41c5c86a90061a6d203db8330 100644
--- a/tests/automatic/high_avail/services_restart/check_linux.sh
+++ b/tests/automatic/high_avail/services_restart/check_linux.sh
@@ -23,8 +23,6 @@ Cleanup() {
     influx -execute "drop database ${monitor_database_name}"
 }
 
-influx -execute "create database ${monitor_database_name}"
-
 sed -i 's/info/debug/g' broker.json.tpl
 
 nomad run nginx.nmd
diff --git a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
index 585fc2ee63b0ed1795a196c6e84840bec10b3ae5..ad320971177cdc2de148191907e04797c81b6750 100644
--- a/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
+++ b/tests/automatic/producer_receiver/check_monitoring/check_linux.sh
@@ -22,8 +22,6 @@ Cleanup() {
 
 mkdir -p ${receiver_folder}
 
-influx -execute "create database ${database_name}"
-
 nomad run authorizer.nmd
 nomad run receiver.nmd
 nomad run discovery.nmd
@@ -36,4 +34,4 @@ $1 localhost:8400 ${beamtime_id} 100 112 4 0 100
 sleep 2
 
 # should be 117 requests (112 data transfers + 5 authorizations (4 + 1 after reconnection due to wrong meta))
-influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 117
+influx -execute "select sum(n_requests) from statistics" -database=${database_name} -format=json | tee /dev/stderr | jq .results[0].series[0].values[0][1] | tee /dev/stderr | grep 117
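These checks still pause for a fixed interval before querying the aggregated counters (sleep 2 here, sleep 12 in the filegen test above), which stays timing-sensitive. A possible alternative, sketched in Go and not part of this patch, is to poll InfluxDB's query endpoint until the expected sum appears; the endpoint, database, and expected value mirror the check_monitoring test, while the helper itself is a hypothetical addition:

```go
// Hypothetical polling helper that could replace the fixed sleeps in the
// test scripts: retry the statistics query until the expected count shows up.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"
)

func statsContain(expected string) bool {
	q := url.Values{
		"db": {"asapo_receivers"},
		"q":  {"select sum(n_requests) from statistics"},
	}
	resp, err := http.Get("http://localhost:8086/query?" + q.Encode())
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	return strings.Contains(string(body), expected)
}

func main() {
	deadline := time.Now().Add(30 * time.Second)
	for time.Now().Before(deadline) {
		if statsContain("117") { // the request count the test expects
			fmt.Println("statistics complete")
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("timed out waiting for statistics")
}
```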
diff --git a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
index 45d21a1fc7fa7ecb0b0a0e1dab731aa4790ca28f..0d3c46ef044b59b1be9409b80d9e95480b11ee08 100644
--- a/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_datasets/check_linux.sh
@@ -24,7 +24,6 @@ Cleanup() {
 
 echo "db.dropDatabase()" | mongo ${beamtime_id}_detector
 
-influx -execute "create database ${database_name}"
 # create db before worker starts reading it. todo: get rid of it
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
index efcce33f963a4709ae539cdef155455255289502..2b44d752171c64de9df817f8015fd3fce0171f21 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
@@ -21,7 +21,6 @@ Cleanup() {
     influx -execute "drop database ${database_name}"
 }
 
-influx -execute "create database ${database_name}"
 # create db before worker starts reading it. todo: get rid of it
 echo "db.${beamtime_id}_detector.insert({dummy:1})" | mongo ${beamtime_id}_detector
 
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index 1786a56a408e31854b8f08190dd502e39ffc63b8..52add6c8d0234fa2b7fda0b72c5c99fac3a99960 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -116,7 +116,6 @@ scp ../../../cmake-build-release/asapo_tools/asapo ${worker_node}:${worker_dir}
 scp ../../../tests/automatic/settings/auth_secret.key ${worker_node}:${worker_dir}/auth_secret.key
 
 #monitoring_start
-ssh ${monitor_node} influx -execute \"create database db_test\"
 #ssh ${monitor_node} docker run -d -p 8086 -p 8086 --name influxdb influxdb
 
 #mongo_start