From 8a8cb76b168e3ecf25fcc1a6c05c2cb3e5e694be Mon Sep 17 00:00:00 2001
From: Sergey Yakubov <sergey.yakubov@desy.de>
Date: Fri, 6 Jul 2018 17:46:32 +0200
Subject: [PATCH] Add more Nomad jobs, start logging

---
 common/cpp/src/logger/spd_logger.cpp          |   2 +-
 config/nomad/broker.nmd.in                    |   2 +-
 config/nomad/receiver.nmd.in                  |   2 +-
 deploy/nomad_jobs/asapo-brokers.nmd.in        |  75 +++++++++++
 deploy/nomad_jobs/asapo-logging.nmd.in        |   4 +
 deploy/nomad_jobs/asapo-mongo.nmd.in          |  57 +++++++++
 deploy/nomad_jobs/asapo-nginx.nmd.in          |   2 +-
 deploy/nomad_jobs/asapo-perfmetrics.nmd.in    |   8 +-
 deploy/nomad_jobs/asapo-receivers.nmd.in      |  17 ++-
 deploy/nomad_jobs/asapo-services.nmd.in       | 121 ++++++++++++++++++
 deploy/nomad_jobs/authorizer.json.tpl         |  11 ++
 deploy/nomad_jobs/broker.json.tpl             |   8 ++
 deploy/nomad_jobs/discovery.json.tpl          |  10 ++
 deploy/nomad_jobs/fluentd.conf                |  25 ++++
 deploy/nomad_jobs/init_influx.sh              |   9 +-
 deploy/nomad_jobs/nginx.conf.tpl              |  29 ++++-
 deploy/nomad_jobs/receiver.json.tpl           |   4 +-
 deploy/nomad_jobs/start_asapo.sh              |   9 ++
 deploy/nomad_jobs/stop_asapo.sh               |   9 ++
 .../request_handler/request_handler_consul.go |   4 +-
 .../request_handler_consul_test.go            |  16 +--
 .../dummy-data-producer/check_linux.sh        |   9 +-
 .../dummy-data-producer/check_windows.bat     |   8 +-
 .../dummy_data_producer.cpp                   |   6 +-
 .../transfer_single_file/check_linux.sh       |   4 +-
 .../transfer_single_file/check_windows.bat    |   2 +-
 .../spd_logger/console/check_linux.sh         |   1 +
 .../spd_logger/console/check_windows.bat      |   1 +
 .../spd_logger/console/spd_logger_console.cpp |   1 +
 29 files changed, 407 insertions(+), 49 deletions(-)
 create mode 100644 deploy/nomad_jobs/asapo-brokers.nmd.in
 create mode 100644 deploy/nomad_jobs/asapo-mongo.nmd.in
 create mode 100644 deploy/nomad_jobs/asapo-services.nmd.in
 create mode 100644 deploy/nomad_jobs/authorizer.json.tpl
 create mode 100644 deploy/nomad_jobs/broker.json.tpl
 create mode 100644 deploy/nomad_jobs/discovery.json.tpl
 create mode 100644 deploy/nomad_jobs/start_asapo.sh
 create mode 100644 deploy/nomad_jobs/stop_asapo.sh

diff --git a/common/cpp/src/logger/spd_logger.cpp b/common/cpp/src/logger/spd_logger.cpp
index 4b66a759c..9228b8f61 100644
--- a/common/cpp/src/logger/spd_logger.cpp
+++ b/common/cpp/src/logger/spd_logger.cpp
@@ -26,7 +26,7 @@ void SpdLogger::SetLogLevel(LogLevel level) {
     }
 }
 std::string EncloseMsg(std::string msg) {
-    if (msg.find(":") == std::string::npos) {
+    if (msg.find("\"") != 0) {
         return std::string(R"("message":")") + msg + "\"";
     } else {
         return msg;
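
The EncloseMsg change above tightens the "already formatted" heuristic: before, any message containing a colon was passed through untouched, so a plain message such as "test : info" ended up as an invalid JSON fragment in the log record; now a message is only passed through when it already starts with a quote, i.e. when it begins with a "key":"value" pair. A minimal, self-contained sketch contrasting the two rules (illustrative only, the surrounding logger plumbing is omitted):

#include <iostream>
#include <string>

// Old rule: any colon meant "already key/value formatted".
std::string EncloseMsgOld(std::string msg) {
    if (msg.find(":") == std::string::npos) {
        return std::string(R"("message":")") + msg + "\"";
    }
    return msg;
}

// New rule: only messages that already start with a quote are passed through.
std::string EncloseMsgNew(std::string msg) {
    if (msg.find("\"") != 0) {
        return std::string(R"("message":")") + msg + "\"";
    }
    return msg;
}

int main() {
    std::cout << EncloseMsgOld("test : info") << std::endl;  // test : info  (leaks through unwrapped)
    std::cout << EncloseMsgNew("test : info") << std::endl;  // "message":"test : info"
    std::cout << EncloseMsgNew(R"("test_int":2,"test_double":1.0)") << std::endl;  // kept as-is
    return 0;
}

The new logger->Info("test : info") call and the matching greps in the spd_logger console tests at the end of this patch exercise exactly this case.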
diff --git a/config/nomad/broker.nmd.in b/config/nomad/broker.nmd.in
index 211c71e21..c0032fca9 100644
--- a/config/nomad/broker.nmd.in
+++ b/config/nomad/broker.nmd.in
@@ -25,7 +25,7 @@ job "broker" {
       }
 
       service {
-        name = "broker"
+        name = "asapo-broker"
         port = "broker"
         check {
           name     = "alive"
diff --git a/config/nomad/receiver.nmd.in b/config/nomad/receiver.nmd.in
index 1a9a6e893..9642abb23 100644
--- a/config/nomad/receiver.nmd.in
+++ b/config/nomad/receiver.nmd.in
@@ -23,7 +23,7 @@ job "receiver" {
       }
 
       service {
-        name = "receiver"
+        name = "asapo-receiver"
         port = "recv"
         check {
           name     = "alive"
diff --git a/deploy/nomad_jobs/asapo-brokers.nmd.in b/deploy/nomad_jobs/asapo-brokers.nmd.in
new file mode 100644
index 000000000..989929ba8
--- /dev/null
+++ b/deploy/nomad_jobs/asapo-brokers.nmd.in
@@ -0,0 +1,75 @@
+job "asapo-brokers" {
+  datacenters = ["dc1"]
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "brokers" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "fail"
+    }
+
+    task "brokers" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-broker:feature_ha"
+        force_pull = true
+        volumes = ["local/config.json:/var/lib/broker/config.json"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        network {
+          port "broker" {}
+        }
+      }
+
+      service {
+        port = "broker"
+        name = "asapo-broker"
+        check {
+          name     = "asapo-broker-alive"
+          type     = "http"
+          path     = "/health"
+          interval = "10s"
+          timeout  = "2s"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "@NOMAD_INSTALL@/broker.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+
+      template {
+        source        = "@NOMAD_INSTALL@/broker_secret.key"
+        destination   = "secrets/secret.key"
+        change_mode   = "restart"
+      }
+   }
+  }
+}
diff --git a/deploy/nomad_jobs/asapo-logging.nmd.in b/deploy/nomad_jobs/asapo-logging.nmd.in
index ef4ce4710..e7c9cdfc1 100644
--- a/deploy/nomad_jobs/asapo-logging.nmd.in
+++ b/deploy/nomad_jobs/asapo-logging.nmd.in
@@ -20,6 +20,10 @@ job "asapo-logging" {
     task "fluentd" {
       driver = "docker"
 
+      meta {
+        change_me_to_restart = 1
+      }
+
       config {
         dns_servers = ["127.0.0.1"]
         network_mode = "host"
diff --git a/deploy/nomad_jobs/asapo-mongo.nmd.in b/deploy/nomad_jobs/asapo-mongo.nmd.in
new file mode 100644
index 000000000..87d25a018
--- /dev/null
+++ b/deploy/nomad_jobs/asapo-mongo.nmd.in
@@ -0,0 +1,57 @@
+job "asapo-mongo" {
+  datacenters = ["dc1"]
+
+  update {
+    max_parallel = 1
+    min_healthy_time = "10s"
+    healthy_deadline = "3m"
+    auto_revert = false
+  }
+
+  group "mongo" {
+    count = 1
+
+    restart {
+      attempts = 2
+      interval = "3m"
+      delay = "15s"
+      mode = "delay"
+    }
+
+    task "mongo" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        image = "mongo:4.0.0"
+        volumes = ["/${meta.shared_storage}/mongodb:/data/db"]
+      }
+
+      resources {
+        network {
+          port "mongo" {
+            static = 27017
+          }
+        }
+      }
+
+      service {
+        port = "mongo"
+        name = "mongo"
+        check {
+          type     = "script"
+          name     = "alive"
+          command  = "mongo"
+          args     = ["--eval","db.version()"]
+          interval = "10s"
+          timeout  = "5s"
+        }
+        check_restart {
+          limit = 2
+          grace = "90s"
+          ignore_warnings = false
+        }
+      }
+    }
+  }
+}
diff --git a/deploy/nomad_jobs/asapo-nginx.nmd.in b/deploy/nomad_jobs/asapo-nginx.nmd.in
index b417ac8fc..b9455b414 100644
--- a/deploy/nomad_jobs/asapo-nginx.nmd.in
+++ b/deploy/nomad_jobs/asapo-nginx.nmd.in
@@ -1,4 +1,4 @@
-job "nginx" {
+job "asapo-nginx" {
   datacenters = ["dc1"]
 
   type = "system"
diff --git a/deploy/nomad_jobs/asapo-perfmetrics.nmd.in b/deploy/nomad_jobs/asapo-perfmetrics.nmd.in
index 3d9ec248d..bb8db1b2b 100644
--- a/deploy/nomad_jobs/asapo-perfmetrics.nmd.in
+++ b/deploy/nomad_jobs/asapo-perfmetrics.nmd.in
@@ -28,8 +28,8 @@ job "asapo-perfmetrics" {
       }
 
       resources {
-        cpu    = 500
-        memory = 256
+        cpu    = 1500
+        memory = 32000
         network {
           mbits = 10
           port "influxdb" {
@@ -74,8 +74,8 @@ job "asapo-perfmetrics" {
       }
 
       resources {
-        cpu    = 500
-        memory = 256
+        cpu    = 1500
+        memory = 2560
         network {
           mbits = 10
           port "grafana" {
diff --git a/deploy/nomad_jobs/asapo-receivers.nmd.in b/deploy/nomad_jobs/asapo-receivers.nmd.in
index effb4e600..c522fa07e 100644
--- a/deploy/nomad_jobs/asapo-receivers.nmd.in
+++ b/deploy/nomad_jobs/asapo-receivers.nmd.in
@@ -1,4 +1,4 @@
-job "receivers" {
+job "asapo-receivers" {
   datacenters = ["dc1"]
 
   update {
@@ -25,9 +25,16 @@ job "receivers" {
         network_mode = "host"
         dns_servers = ["127.0.0.1"]
         image = "yakser/asapo-receiver:feature_ha"
-	force_pull = true
+        force_pull = true
         volumes = ["local/config.json:/var/lib/receiver/config.json",
                    "/bldocuments/support/asapo/data:/var/lib/receiver/data"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                tag = "asapo.docker"
+            }
+        }
       }
 
       resources {
@@ -38,13 +45,12 @@ job "receivers" {
 
       service {
         port = "recv"
-        name = "recv"
+        name = "asapo-receiver"
         check {
-          name     = "alive"
+          name     = "asapo-receiver-alive"
           type     = "tcp"
           interval = "10s"
           timeout  = "2s"
-          initial_status =   "passing"
         }
         check_restart {
           limit = 2
@@ -61,3 +67,4 @@ job "receivers" {
    }
   }
 }
+
diff --git a/deploy/nomad_jobs/asapo-services.nmd.in b/deploy/nomad_jobs/asapo-services.nmd.in
new file mode 100644
index 000000000..4dc887eb4
--- /dev/null
+++ b/deploy/nomad_jobs/asapo-services.nmd.in
@@ -0,0 +1,121 @@
+job "asapo-services" {
+  datacenters = ["dc1"]
+
+  type = "service"
+
+  group "asapo-authorizer" {
+    count = 1
+
+    task "asapo-authorizer" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-authorizer:feature_ha"
+        force_pull = true
+        volumes = ["local/config.json:/var/lib/authorizer/config.json",
+                   "/bldocuments/support/asapo/beamtime_beamline_mapping.txt:/var/lib/authorizer/beamtime_beamline_mapping.txt",
+                   "/bldocuments/support/asapo/ip_beamtime_mapping:/var/lib/authorizer/ip_beamtime_mapping"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "authorizer" {
+            static = "5007"
+          }
+        }
+      }
+
+      service {
+        name = "asapo-authorizer"
+        port = "authorizer"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/health-check"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+      }
+
+      template {
+         source        = "@NOMAD_INSTALL@/authorizer.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+   }
+  } #authorizer
+  group "asapo-discovery" {
+    count = 1
+
+    task "asapo-discovery" {
+      driver = "docker"
+
+      config {
+        network_mode = "host"
+        dns_servers = ["127.0.0.1"]
+        image = "yakser/asapo-discovery:feature_ha"
+        force_pull = true
+        volumes = ["local/config.json:/var/lib/discovery/config.json"]
+        logging {
+            type = "fluentd"
+            config {
+                fluentd-address = "localhost:9881"
+                tag = "asapo.docker"
+            }
+        }
+      }
+
+      resources {
+        cpu    = 500 # 500 MHz
+        memory = 256 # 256MB
+        network {
+          port "discovery" {
+            static = "5006"
+          }
+        }
+      }
+
+      service {
+        name = "asapo-discovery"
+        port = "discovery"
+        check {
+          name     = "alive"
+          type     = "http"
+          path     = "/receivers"
+          interval = "10s"
+          timeout  = "2s"
+          initial_status =   "passing"
+        }
+        check_restart {
+          limit = 2
+          grace = "15s"
+          ignore_warnings = false
+        }
+
+      }
+
+      template {
+         source        = "@NOMAD_INSTALL@/discovery.json.tpl"
+         destination   = "local/config.json"
+         change_mode   = "restart"
+      }
+   }
+  }
+}
diff --git a/deploy/nomad_jobs/authorizer.json.tpl b/deploy/nomad_jobs/authorizer.json.tpl
new file mode 100644
index 000000000..9dc4efbfa
--- /dev/null
+++ b/deploy/nomad_jobs/authorizer.json.tpl
@@ -0,0 +1,11 @@
+{
+  "Port": {{ env "NOMAD_PORT_authorizer" }},
+  "LogLevel":"debug",
+  "AlwaysAllowedBeamtimes":[{"BeamtimeId":"asapo_test","Beamline":"test"},
+  {"BeamtimeId":"asapo_test1","Beamline":"test1"},
+  {"BeamtimeId":"asapo_test2","Beamline":"test2"}],
+  "BeamtimeBeamlineMappingFile":"//var//lib//authorizer//beamtime_beamline_mapping.txt",
+  "IpBeamlineMappingFolder":"//var//lib//authorizer//ip_beamtime_mapping"
+}
+
+
diff --git a/deploy/nomad_jobs/broker.json.tpl b/deploy/nomad_jobs/broker.json.tpl
new file mode 100644
index 000000000..fe73cff79
--- /dev/null
+++ b/deploy/nomad_jobs/broker.json.tpl
@@ -0,0 +1,8 @@
+{
+  "BrokerDbAddress":"mongo.service.asapo:27017",
+  "MonitorDbAddress":"influxdb.service.asapo:8086",
+  "MonitorDbName": "asapo_brokers",
+  "port":{{ env "NOMAD_PORT_broker" }},
+  "LogLevel":"info",
+  "SecretFile":"/secrets/secret.key"
+}
\ No newline at end of file
diff --git a/deploy/nomad_jobs/discovery.json.tpl b/deploy/nomad_jobs/discovery.json.tpl
new file mode 100644
index 000000000..fdb277328
--- /dev/null
+++ b/deploy/nomad_jobs/discovery.json.tpl
@@ -0,0 +1,10 @@
+{
+  "Mode": "consul",
+  "Receiver": {
+    "MaxConnections": 32
+  },
+  "Port": {{ env "NOMAD_PORT_discovery" }},
+  "LogLevel": "{{ keyOrDefault "log_level" "info" }}"
+}
+
+
diff --git a/deploy/nomad_jobs/fluentd.conf b/deploy/nomad_jobs/fluentd.conf
index 53068d69a..3c7d54708 100644
--- a/deploy/nomad_jobs/fluentd.conf
+++ b/deploy/nomad_jobs/fluentd.conf
@@ -1,3 +1,10 @@
+<source>
+  @type forward
+  port 24224
+  source_hostname_key source_addr
+  bind 0.0.0.0
+</source>
+
 <source>
  @type http
  port 9880
@@ -7,6 +14,23 @@
  time_format %Y-%m-%d %H:%M:%S.%N
 </source>
 
+<filter asapo.docker>
+  @type parser
+  key_name log
+  format json
+  time_format %Y-%m-%d %H:%M:%S.%N
+  reserve_data true
+</filter>
+
+<filter asapo.docker>
+  @type record_transformer
+  enable_ruby
+  remove_keys ["log","container_id","container_name"]
+  <record>
+    source_addr ${record["source_addr"].split('.')[0]}
+  </record>
+</filter>
+
 <match asapo.**>
 @type copy
 <store>
@@ -28,3 +52,4 @@
   path /shared/asapo-logs
   </store>
 </match>
+
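
Together with the docker logging blocks in the Nomad jobs and the stream proxy added to nginx.conf.tpl further down, this completes the logging chain: the docker fluentd log driver ships container output to localhost:9881, nginx forwards that TCP traffic to fluentd.service.asapo:24224, the forward source above accepts it, and the asapo.docker filters parse the embedded log field as JSON and drop the container metadata. A rough sketch of the kind of single-line JSON record the parser filter expects to find in the log field (the exact field set written by the services is an assumption; only the level/message fragment is confirmed by the spd_logger tests below):

#include <iostream>
#include <sstream>
#include <string>

// Illustrative only: composes a log line of the shape the json parser above accepts,
// with a timestamp matching time_format %Y-%m-%d %H:%M:%S.%N.
std::string MakeLogLine(const std::string& time, const std::string& level, const std::string& message) {
    std::ostringstream os;
    os << R"({"time":")" << time << R"(","level":")" << level
       << R"(","message":")" << message << R"("})";
    return os.str();
}

int main() {
    // prints {"time":"2018-07-06 17:46:32.000","level":"info","message":"test info"}
    std::cout << MakeLogLine("2018-07-06 17:46:32.000", "info", "test info") << std::endl;
    return 0;
}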
diff --git a/deploy/nomad_jobs/init_influx.sh b/deploy/nomad_jobs/init_influx.sh
index 8163a9325..0319c4c4f 100644
--- a/deploy/nomad_jobs/init_influx.sh
+++ b/deploy/nomad_jobs/init_influx.sh
@@ -1,5 +1,10 @@
 #!/usr/bin/env bash
 
-influx=`dig +short @127.0.0.1  influxdb.service.asapo | head -1`
+influx=`dig +short @127.0.0.1 influxdb.service.asapo | head -1`
 
-curl -i -XPOST http://${influx}:8086/query --data-urlencode "q=CREATE DATABASE asapo_receivers"
\ No newline at end of file
+databases="asapo_receivers asapo_brokers"
+
+for database in $databases
+do
+    curl -i -XPOST http://${influx}:8086/query --data-urlencode "q=CREATE DATABASE $database"
+done
\ No newline at end of file
diff --git a/deploy/nomad_jobs/nginx.conf.tpl b/deploy/nomad_jobs/nginx.conf.tpl
index d6fd1a2ad..854cd7a18 100644
--- a/deploy/nomad_jobs/nginx.conf.tpl
+++ b/deploy/nomad_jobs/nginx.conf.tpl
@@ -17,8 +17,8 @@ http {
     resolver 127.0.0.1:53 valid=1s;
     server {
           listen {{ env "NOMAD_PORT_nginx" }};
-          set $discovery_endpoint discovery.service.asapo;
-          set $authorizer_endpoint authorizer.service.asapo;
+          set $discovery_endpoint asapo-discovery.service.asapo;
+#          set $authorizer_endpoint asapo-authorizer.service.asapo;
           set $fluentd_endpoint fluentd.service.asapo;
           set $kibana_endpoint kibana.service.asapo;
           set $grafana_endpoint grafana.service.asapo;
@@ -35,7 +35,6 @@ http {
 
           location /logsview/ {
             proxy_pass http://$kibana_endpoint:5601$uri$is_args$args;
-            proxy_ignore_client_abort on;
             proxy_set_header  X-Real-IP  $remote_addr;
             proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header  Host $http_host;
@@ -46,13 +45,29 @@ http {
             proxy_pass http://$grafana_endpoint:3000$uri$is_args$args;
           }
 
-          location /authorizer/ {
-             rewrite ^/authorizer(/.*) $1 break;
-             proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
-          }
+#          location /authorizer/ {
+#             rewrite ^/authorizer(/.*) $1 break;
+#             proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
+#          }
 
       	  location /nginx-health {
   	        return 200 "healthy\n";
 	      }
     }
 }
+
+stream {
+    resolver 127.0.0.1:53 valid=1s;
+
+    map $remote_addr $upstream {
+        default fluentd.service.asapo;
+    }
+
+
+    server {
+        listen     9881;
+        proxy_pass $upstream:24224;
+    }
+}
+
+
diff --git a/deploy/nomad_jobs/receiver.json.tpl b/deploy/nomad_jobs/receiver.json.tpl
index 0edea1f14..b4cabe708 100644
--- a/deploy/nomad_jobs/receiver.json.tpl
+++ b/deploy/nomad_jobs/receiver.json.tpl
@@ -1,8 +1,8 @@
 {
   "MonitorDbAddress":"influxdb.service.asapo:8086",
   "MonitorDbName": "asapo_receivers",
-  "BrokerDbAddress":"localhost:27017",
-  "AuthorizationServer": "authorizer.service.asapo:8400",
+  "BrokerDbAddress":"mongo.service.asapo:27017",
+  "AuthorizationServer": "asapo-authorizer.service.asapo:5007",
   "AuthorizationInterval": 10000,
   "ListenPort": {{ env "NOMAD_PORT_recv" }},
   "Tag": "{{ env "NOMAD_ADDR_recv" }}",
diff --git a/deploy/nomad_jobs/start_asapo.sh b/deploy/nomad_jobs/start_asapo.sh
new file mode 100644
index 000000000..a6c349117
--- /dev/null
+++ b/deploy/nomad_jobs/start_asapo.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+nomad run asapo-nginx.nmd
+nomad run asapo-logging.nmd
+nomad run asapo-mongo.nmd
+nomad run asapo-services.nmd
+nomad run asapo-perfmetrics.nmd
+nomad run asapo-receivers.nmd
+nomad run asapo-brokers.nmd
diff --git a/deploy/nomad_jobs/stop_asapo.sh b/deploy/nomad_jobs/stop_asapo.sh
new file mode 100644
index 000000000..9ede6dd7d
--- /dev/null
+++ b/deploy/nomad_jobs/stop_asapo.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+nomad stop asapo-nginx
+nomad stop asapo-logging
+nomad stop asapo-mongo
+nomad stop asapo-services
+nomad stop asapo-perfmetrics
+nomad stop asapo-receivers
+nomad stop asapo-brokers
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
index 12f3dc13c..9c2125045 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul.go
@@ -53,7 +53,7 @@ func (rh *ConsulRequestHandler) GetReceivers() ([]byte, error) {
 	}
 	var response Responce
 	var err error
-	response.Uris, err = rh.GetServices("receiver")
+	response.Uris, err = rh.GetServices("asapo-receiver")
 	if err != nil {
 		return nil, err
 	}
@@ -65,7 +65,7 @@ func (rh *ConsulRequestHandler) GetBroker() ([]byte, error) {
 	if (rh.client == nil) {
 		return nil, errors.New("consul client not connected")
 	}
-	response, err := rh.GetServices("broker")
+	response, err := rh.GetServices("asapo-broker")
 	if err != nil {
 		return nil, err
 	}
diff --git a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
index 716d54531..002afc6e0 100644
--- a/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
+++ b/discovery/src/asapo_discovery/request_handler/request_handler_consul_test.go
@@ -49,16 +49,16 @@ func (suite *ConsulHandlerTestSuite) SetupTest() {
 		panic(err)
 	}
 
-	suite.registerAgents("receiver")
-	suite.registerAgents("broker")
+	suite.registerAgents("asapo-receiver")
+	suite.registerAgents("asapo-broker")
 
 }
 
 func (suite *ConsulHandlerTestSuite) TearDownTest() {
-	suite.client.Agent().ServiceDeregister("receiver1234")
-	suite.client.Agent().ServiceDeregister("receiver1235")
-	suite.client.Agent().ServiceDeregister("broker1234")
-	suite.client.Agent().ServiceDeregister("broker1235")
+	suite.client.Agent().ServiceDeregister("asapo-receiver1234")
+	suite.client.Agent().ServiceDeregister("asapo-receiver1235")
+	suite.client.Agent().ServiceDeregister("asapo-broker1234")
+	suite.client.Agent().ServiceDeregister("asapo-broker1235")
 }
 
 func (suite *ConsulHandlerTestSuite) TestInitDefaultUri() {
@@ -127,8 +127,8 @@ func (suite *ConsulHandlerTestSuite) TestGetBrokerRoundRobin() {
 
 
 func (suite *ConsulHandlerTestSuite) TestGetBrokerEmpty() {
-	suite.client.Agent().ServiceDeregister("broker1234")
-	suite.client.Agent().ServiceDeregister("broker1235")
+	suite.client.Agent().ServiceDeregister("asapo-broker1234")
+	suite.client.Agent().ServiceDeregister("asapo-broker1235")
 
 	suite.handler.Init(consul_settings)
 	res, err := suite.handler.GetBroker()
diff --git a/examples/producer/dummy-data-producer/check_linux.sh b/examples/producer/dummy-data-producer/check_linux.sh
index c9601daa1..f6a290300 100644
--- a/examples/producer/dummy-data-producer/check_linux.sh
+++ b/examples/producer/dummy-data-producer/check_linux.sh
@@ -14,8 +14,7 @@ mkdir files
 
 $@ files beamtime_id 11 4 4 1 10 2>&1 | grep Rate
 
-
-ls -ln files/0.bin | awk '{ print $5 }'| grep 11264
-ls -ln files/1.bin | awk '{ print $5 }'| grep 11264
-ls -ln files/2.bin | awk '{ print $5 }'| grep 11264
-ls -ln files/3.bin | awk '{ print $5 }'| grep 11264
+ls -ln files/0.bin | awk '{ print $5 }'| grep 11000
+ls -ln files/1.bin | awk '{ print $5 }'| grep 11000
+ls -ln files/2.bin | awk '{ print $5 }'| grep 11000
+ls -ln files/3.bin | awk '{ print $5 }'| grep 11000
diff --git a/examples/producer/dummy-data-producer/check_windows.bat b/examples/producer/dummy-data-producer/check_windows.bat
index 6270913bc..016a34346 100644
--- a/examples/producer/dummy-data-producer/check_windows.bat
+++ b/examples/producer/dummy-data-producer/check_windows.bat
@@ -5,16 +5,16 @@ mkdir %folder%
 "%1" %folder% beamtime_id 11 4 4 1 10 2>&1 | findstr "Rate" || goto :error
 
 FOR /F "usebackq" %%A IN ('%folder%\0.bin') DO set size=%%~zA
-if %size% NEQ 11264 goto :error
+if %size% NEQ 11000 goto :error
 
 FOR /F "usebackq" %%A IN ('%folder%\1.bin') DO set size=%%~zA
-if %size% NEQ 11264 goto :error
+if %size% NEQ 11000 goto :error
 
 FOR /F "usebackq" %%A IN ('%folder%\2.bin') DO set size=%%~zA
-if %size% NEQ 11264 goto :error
+if %size% NEQ 11000 goto :error
 
 FOR /F "usebackq" %%A IN ('%folder%\3.bin') DO set size=%%~zA
-if %size% NEQ 11264 goto :error
+if %size% NEQ 11000 goto :error
 
 goto :clean
 
diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index 49769c2f7..87ca3582c 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -25,7 +25,7 @@ struct Args {
 void PrintCommandArguments(const Args& args) {
     std::cout << "receiver_address: " << args.receiver_address << std::endl
               << "beamtime_id: " << args.beamtime_id << std::endl
-              << "Package size: " << args.number_of_bytes / 1024 << "k" << std::endl
+              << "Package size: " << args.number_of_bytes / 1000 << "k" << std::endl
               << "iterations: " << args.iterations << std::endl
               << "nthreads: " << args.nthreads << std::endl
               << "mode: " << args.mode << std::endl
@@ -46,7 +46,7 @@ void ProcessCommandArguments(int argc, char* argv[], Args* args) {
     try {
         args->receiver_address = argv[1];
         args->beamtime_id = argv[2];
-        args->number_of_bytes = std::stoull(argv[3]) * 1024;
+        args->number_of_bytes = std::stoull(argv[3]) * 1000;
         args->iterations = std::stoull(argv[4]);
         args->nthreads = std::stoull(argv[5]);
         args->mode = std::stoull(argv[6]);
@@ -120,7 +120,7 @@ void WaitThreadsFinished(const Args& args) {
 void PrintOutput(const Args& args, const high_resolution_clock::time_point& start) {
     high_resolution_clock::time_point t2 = high_resolution_clock::now();
     double duration_sec = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - start ).count() / 1000.0;
-    double size_gb = double(args.number_of_bytes) * args.iterations / 1024.0  / 1024.0 / 1024.0 * 8.0;
+    double size_gb = double(args.number_of_bytes) * args.iterations / 1000.0  / 1000.0 / 1000.0 * 8.0;
     double rate = args.iterations / duration_sec;
     std::cout << "Rate: " << rate << " Hz" << std::endl;
     std::cout << "Bandwidth " << size_gb / duration_sec << " Gbit/s" << std::endl;
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
index 972324483..433d26bbe 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_linux.sh
@@ -34,7 +34,7 @@ mkdir -p ${receiver_folder}
 
 $1 localhost:8400 ${beamtime_id} 100 1 1  0 30
 
-ls -ln ${receiver_folder}/1.bin | awk '{ print $5 }'| grep 102400
+ls -ln ${receiver_folder}/1.bin | awk '{ print $5 }'| grep 100000
 
 
-$1 localhost:8400 wrong_beamtime_id 100 1 1 0 1 2>1 | grep "authorization failed"
\ No newline at end of file
+$1 localhost:8400 wrong_beamtime_id 100 1 1 0 1 2>&1 | grep "authorization failed"
diff --git a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
index 7a453b7b1..c17b785ea 100644
--- a/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
+++ b/tests/automatic/producer_receiver/transfer_single_file/check_windows.bat
@@ -22,7 +22,7 @@ mkdir %receiver_folder%
 ping 1.0.0.0 -n 1 -w 100 > nul
 
 FOR /F "usebackq" %%A IN ('%receiver_folder%\1.bin') DO set size=%%~zA
-if %size% NEQ 102400 goto :error
+if %size% NEQ 100000 goto :error
 
 "%1" localhost:8400 wrong_id 100 1 1 0 2 2>1 | findstr /c:"authorization failed"  || goto :error
 
diff --git a/tests/automatic/spd_logger/console/check_linux.sh b/tests/automatic/spd_logger/console/check_linux.sh
index bc934a732..efc8f639e 100644
--- a/tests/automatic/spd_logger/console/check_linux.sh
+++ b/tests/automatic/spd_logger/console/check_linux.sh
@@ -5,6 +5,7 @@ set -e
 res=`$@`
 
 echo $res | grep '"level":"info","message":"test info"'
+echo $res | grep '"level":"info","message":"test : info"'
 echo $res | grep '"test_int":2,"test_double":1.0}'
 echo $res | grep '"level":"error","message":"test error"'
 echo $res | grep '"level":"debug","message":"test debug"'
diff --git a/tests/automatic/spd_logger/console/check_windows.bat b/tests/automatic/spd_logger/console/check_windows.bat
index 78ecaf0a0..e136f9132 100644
--- a/tests/automatic/spd_logger/console/check_windows.bat
+++ b/tests/automatic/spd_logger/console/check_windows.bat
@@ -1,6 +1,7 @@
 "%1" > output
 
 findstr /I /L /C:"\"level\":\"info\",\"message\":\"test info\"" output || goto :error
+findstr /I /L /C:"\"level\":\"info\",\"message\":\"test : info\"" output || goto :error
 findstr /I /L /C:"\"level\":\"error\",\"message\":\"test error\"" output || goto :error
 findstr /I /L /C:"\"level\":\"debug\",\"message\":\"test debug\"" output || goto :error
 findstr /I /L /C:"\"level\":\"warning\",\"message\":\"test warning\"" output || goto :error
diff --git a/tests/automatic/spd_logger/console/spd_logger_console.cpp b/tests/automatic/spd_logger/console/spd_logger_console.cpp
index 130e163f8..e14e4bf85 100644
--- a/tests/automatic/spd_logger/console/spd_logger_console.cpp
+++ b/tests/automatic/spd_logger/console/spd_logger_console.cpp
@@ -32,6 +32,7 @@ int main(int argc, char* argv[]) {
     logger->Warning("test warning");
     logger->Debug("test debug");
 
+    logger->Info("test : info");
 
     logger->SetLogLevel(LogLevel::Error);
     logger->Info("test info_errorlev");
-- 
GitLab