Skip to content
Snippets Groups Projects
Commit 34c9ebb5 authored by Sergey Yakubov's avatar Sergey Yakubov
Browse files

initialize influxdb automatically

parent ed7c1f2a
No related branches found
No related tags found
No related merge requests found
Showing
with 136 additions and 68 deletions
......@@ -39,6 +39,8 @@ func main() {
log.SetLevel(logLevel)
log.Info("Starting Asapo Broker, version " + version.GetVersion())
server.CreateDiscoveryService()
err = server.InitDB(NewDefaultDatabase())
......
......@@ -5,7 +5,6 @@ package server
import (
log "asapo_common/logger"
"asapo_common/utils"
"asapo_common/version"
"errors"
"net/http"
"strconv"
......@@ -13,6 +12,7 @@ import (
// StartStatistics wires an InfluxDB-backed writer into the global
// statistics collector, initializes it, clears the counters, and
// launches the background monitoring goroutine.
func StartStatistics() {
	influxWriter := new(StatisticInfluxDbWriter)
	statistics.Writer = influxWriter
	statistics.Init()
	statistics.Reset()
	go statistics.Monitor()
}
......@@ -20,7 +20,6 @@ func StartStatistics() {
// Start launches statistics collection and then serves the broker HTTP
// API. It blocks in ListenAndServe; a server error terminates the
// process via log.Fatal.
//
// Note: the broker version is already logged by main at startup; logging
// it here as well duplicated the message and required the otherwise
// unused version import, so that line is removed.
func Start() {
	StartStatistics()
	mux := utils.NewRouter(listRoutes)
	log.Info("Listening on port: " + strconv.Itoa(settings.Port))
	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(settings.Port), http.HandlerFunc(mux.ServeHTTP)))
}
......
......@@ -9,6 +9,17 @@ import (
type statisticsWriter interface {
Write(*serverStatistics) error
Init() error
}
// Init prepares the underlying statistics writer while holding the
// statistics mutex. A writer failure is only logged as a warning;
// statistics collection itself is not aborted.
func (st *serverStatistics) Init() {
	st.mux.Lock()
	defer st.mux.Unlock()
	err := st.Writer.Init()
	if err != nil {
		log.Warning("cannot initialize statistic writer: " + err.Error())
		return
	}
	log.Debug("initialized statistic at " + settings.PerformanceDbServer + " for " + settings.PerformanceDbName)
}
type serverStatistics struct {
......
......@@ -10,6 +10,11 @@ type mockWriter struct {
mock.Mock
}
// Init records the call on the mock and returns the error configured
// for it in the test setup.
func (writer *mockWriter) Init() error {
	return writer.Called().Error(0)
}
func (writer *mockWriter) Write(statistics *serverStatistics) error {
args := writer.Called(statistics)
return args.Error(0)
......
......@@ -3,8 +3,8 @@
package server
import (
"github.com/influxdata/influxdb1-client/v2"
"log"
"github.com/influxdata/influxdb1-client/v2"
"time"
)
......@@ -16,12 +16,30 @@ func (writer *StatisticLogWriter) Write(statistics *serverStatistics) error {
return nil
}
// Init is a no-op for the log-based writer: writing statistics to the
// log requires no external setup, so it always succeeds.
func (writer *StatisticLogWriter) Init() error {
return nil
}
// StatisticInfluxDbWriter stores server statistics in an InfluxDB
// instance; the server address and database name come from
// settings.PerformanceDbServer and settings.PerformanceDbName.
type StatisticInfluxDbWriter struct {
}
// Init creates the performance database on the configured InfluxDB
// server so that subsequent writes do not fail on a missing database.
// "CREATE DATABASE" is idempotent in InfluxDB, so repeated calls are safe.
func (writer *StatisticInfluxDbWriter) Init() error {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: "http://" + settings.PerformanceDbServer,
	})
	if err != nil {
		return err
	}
	defer c.Close()

	var query client.Query
	query.Command = "create database " + settings.PerformanceDbName
	// c.Query only reports transport-level failures in err; a
	// query-level failure (e.g. a rejected statement) is carried inside
	// the response and must be checked separately.
	resp, err := c.Query(query)
	if err != nil {
		return err
	}
	return resp.Error()
}
func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error {
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: "http://"+ settings.PerformanceDbServer,
Addr: "http://" + settings.PerformanceDbServer,
})
if err != nil {
return err
......@@ -29,7 +47,7 @@ func (writer *StatisticInfluxDbWriter) Write(statistics *serverStatistics) error
defer c.Close()
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: settings.PerformanceDbName,
Database: settings.PerformanceDbName,
})
tags := map[string]string{"Group ID": "0"}
......
......@@ -6,7 +6,8 @@ RUN apt-get update && apt-get install -y supervisor apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
software-properties-common dnsutils
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
......@@ -45,5 +46,8 @@ RUN mkdir -p /var/log/supervisord/
COPY scripts/ /var/run/asapo/
RUN cd /var/run/asapo asapo && terraform init
COPY asapo-* /usr/bin/
ENTRYPOINT ["supervisord", "--configuration", "/etc/supervisord.conf"]
#!/usr/bin/env bash
# Apply the terraform configuration in /var/run/asapo, forwarding any
# extra command-line arguments (e.g. -var overrides) to terraform.
cd /var/run/asapo && terraform apply -auto-approve "$@"
\ No newline at end of file
#!/usr/bin/env bash
# Tear down the terraform-managed ASAPO deployment, forwarding any extra
# command-line arguments to terraform.
cd /var/run/asapo && terraform destroy -auto-approve "$@"
#!/usr/bin/env bash
# Block until the service named in $1 becomes resolvable via Consul DNS.
# $2 == "false" skips the wait entirely (service intentionally not deployed).
if [ "$2" == "false" ]; then
exit 0
fi
# Poll the local Consul DNS interface (port 8600) once per second until
# the service name resolves to at least one address.
until dig +short @127.0.0.1 -p 8600 $1.service.asapo | grep . ; do
sleep 1
done
#!/usr/bin/env bash
# Create the InfluxDB databases used for ASAPO performance metrics.
# The influxdb service address is resolved via the local Consul DNS
# interface (port 8600); fail fast if it does not resolve, instead of
# POSTing to a malformed "http://:8086" URL.
influx=`dig +short @127.0.0.1 -p 8600 influxdb.service.asapo | head -1`
if [ -z "$influx" ]; then
    echo "cannot resolve influxdb.service.asapo" >&2
    exit 1
fi
databases="asapo_receivers asapo_brokers"
for database in $databases
do
    # CREATE DATABASE is idempotent, so re-running this script is safe.
    curl -i -XPOST "http://${influx}:8086/query" --data-urlencode "q=CREATE DATABASE $database"
done
......@@ -7,6 +7,18 @@ DATA_GLOBAL_SHARED=/tmp/asapo/global_shared/data
mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED
cd $SERVICE_DATA_CLUSTER_SHARED
mkdir esdatadir fluentd grafana influxdb mongodb
chmod 777 *
mmc=`cat /proc/sys/vm/max_map_count`
if (( mmc < 262144 )); then
echo increase max_map_count - needed for elasticsearch
exit 1
fi
docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/docker:/var/lib/docker \
-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
......
......@@ -25,7 +25,7 @@ job "asapo-brokers" {
image = "yakser/asapo-broker${image_suffix}"
force_pull = true
volumes = ["local/config.json:/var/lib/broker/config.json"]
%{ if fluentd_logs }
%{ if elk_logs }
logging {
type = "fluentd"
config {
......
......@@ -9,7 +9,8 @@ job "asapo-logging" {
# }
group "fluentd" {
count = 1
count = "%{ if elk_logs }1%{ else }0%{ endif }"
restart {
attempts = 2
interval = "3m"
......@@ -25,20 +26,20 @@ job "asapo-logging" {
}
config {
dns_servers = ["127.0.0.1"]
network_mode = "host"
image = "yakser/fluentd_elastic"
volumes = ["local/fluentd.conf:/fluentd/etc/fluent.conf",
"/${meta.shared_storage}/fluentd:/shared"]
"/${service_dir}/fluentd:/shared"]
}
resources {
cpu = 500
memory = 256
memory = "${fluentd_total_memory_size}"
network {
mbits = 10
port "fluentd" {
static = 9880
static = "${fluentd_port}"
}
port "fluentd_stream" {
static = "${fluentd_port_stream}"
}
}
}
......@@ -61,7 +62,7 @@ job "asapo-logging" {
}
}
template {
source = "/usr/local/nomad_jobs/fluentd.conf"
source = "${scripts_dir}/fluentd.conf.tpl"
destination = "local/fluentd.conf"
change_mode = "restart"
}
......@@ -69,7 +70,7 @@ job "asapo-logging" {
}
#elasticsearch
group "elk" {
count = 1
count = "%{ if elk_logs }1%{ else }0%{ endif }"
restart {
attempts = 2
interval = "3m"
......@@ -93,20 +94,15 @@ job "asapo-logging" {
nproc = "8192"
}
network_mode = "host"
dns_servers = ["127.0.0.1"]
image = "docker.elastic.co/elasticsearch/elasticsearch:6.3.0"
volumes = ["/${meta.shared_storage}/esdatadir:/usr/share/elasticsearch/data"]
image = "docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}"
volumes = ["/${service_dir}/esdatadir:/usr/share/elasticsearch/data"]
}
resources {
#MHz
cpu = 4000
#MB
memory = 2048
memory = "${elasticsearch_total_memory_size}"
network {
mbits = 10
port "elasticsearch" {
static = 9200
static = "${elasticsearch_port}"
}
}
}
......@@ -134,24 +130,21 @@ job "asapo-logging" {
config {
network_mode = "host"
dns_servers = ["127.0.0.1"]
image = "docker.elastic.co/kibana/kibana:6.3.0"
image = "docker.elastic.co/kibana/kibana:${kibana_version}"
volumes = ["local/kibana.yml:/usr/share/kibana/config/kibana.yml"]
}
template {
source = "/usr/local/nomad_jobs/kibana.yml"
source = "${scripts_dir}/kibana.yml"
destination = "local/kibana.yml"
change_mode = "restart"
}
resources {
cpu = 256
memory = 1024
memory = "${kibana_total_memory_size}"
network {
mbits = 10
port "kibana" {
static = 5601
static = "${kibana_port}"
}
}
}
......
......@@ -23,6 +23,17 @@ job "asapo-nginx" {
task "nginx" {
driver = "docker"
meta {
fluentd_port = "${fluentd_port}"
fluentd_port_stream = "${fluentd_port_stream}"
kibana_port = "${kibana_port}"
elasticsearch_port = "${elasticsearch_port}"
grafana_port = "${grafana_port}"
influxdb_port = "${influxdb_port}"
authorizer_port = "${authorizer_port}"
discovery_port = "${discovery_port}"
}
config {
network_mode = "host"
image = "nginx:${nginx_version}"
......
......@@ -28,7 +28,7 @@ job "asapo-receivers" {
force_pull = true
volumes = ["local/config.json:/var/lib/receiver/config.json",
"${data_dir}:/var/lib/receiver/data"]
%{ if fluentd_logs }
%{ if elk_logs }
logging {
type = "fluentd"
config {
......
......@@ -15,7 +15,7 @@ job "asapo-services" {
image = "yakser/asapo-authorizer${image_suffix}"
force_pull = true
volumes = ["local/config.json:/var/lib/authorizer/config.json"]
%{ if fluentd_logs }
%{ if elk_logs }
logging {
type = "fluentd"
config {
......@@ -28,11 +28,10 @@ job "asapo-services" {
}
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
memory = "${authorizer_total_memory_size}"
network {
port "authorizer" {
static = "5007"
static = "${authorizer_port}"
}
}
}
......@@ -79,7 +78,7 @@ job "asapo-services" {
image = "yakser/asapo-discovery${image_suffix}"
force_pull = true
volumes = ["local/config.json:/var/lib/discovery/config.json"]
%{ if fluentd_logs }
%{ if elk_logs }
logging {
type = "fluentd"
config {
......@@ -92,11 +91,10 @@ job "asapo-services" {
}
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
memory = "${discovery_total_memory_size}"
network {
port "discovery" {
static = "5006"
static = "${discovery_port}"
}
}
}
......
nginx_version = "1.14"
elasticsearch_version = "6.3.0"
kibana_version = "6.3.0"
mongo_version = "4.0.0"
asapo_imagename_suffix="-dev"
......
<source>
@type forward
port 24224
port {{ env "NOMAD_PORT_fluentd_stream" }}
source_hostname_key source_addr
bind 0.0.0.0
</source>
<source>
@type http
port 9880
port {{ env "NOMAD_PORT_fluentd" }}
bind 0.0.0.0
add_remote_addr true
format json
......
......@@ -27,26 +27,26 @@ http {
location /influxdb/ {
rewrite ^/influxdb(/.*) $1 break;
proxy_pass http://$influxdb_endpoint:8086$uri$is_args$args;
proxy_pass http://$influxdb_endpoint:{{ env "NOMAD_META_influxdb_port" }}$uri$is_args$args;
}
location /elasticsearch/ {
rewrite ^/elasticsearch(/.*) $1 break;
proxy_pass http://$elasticsearch_endpoint:9200$uri$is_args$args;
proxy_pass http://$elasticsearch_endpoint:{{ env "NOMAD_META_elasticsearch_port" }}$uri$is_args$args;
}
location /discovery/ {
rewrite ^/discovery(/.*) $1 break;
proxy_pass http://$discovery_endpoint:5006$uri$is_args$args;
proxy_pass http://$discovery_endpoint:{{ env "NOMAD_META_discovery_port" }}$uri$is_args$args;
}
location /logs/ {
rewrite ^/logs(/.*) $1 break;
proxy_pass http://$fluentd_endpoint:9880$uri$is_args$args;
proxy_pass http://$fluentd_endpoint:{{ env "NOMAD_META_fluentd_port" }}$uri$is_args$args;
}
location /logsview/ {
proxy_pass http://$kibana_endpoint:5601$uri$is_args$args;
proxy_pass http://$kibana_endpoint:{{ env "NOMAD_META_kibana_port" }}$uri$is_args$args;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
......@@ -54,12 +54,12 @@ http {
location /performance/ {
rewrite ^/performance(/.*) $1 break;
proxy_pass http://$grafana_endpoint:3000$uri$is_args$args;
proxy_pass http://$grafana_endpoint:{{ env "NOMAD_META_grafana_port" }}$uri$is_args$args;
}
location /authorizer/ {
rewrite ^/authorizer(/.*) $1 break;
proxy_pass http://$authorizer_endpoint:5007$uri$is_args$args;
proxy_pass http://$authorizer_endpoint:{{ env "NOMAD_META_authorizer_port" }}$uri$is_args$args;
}
location /nginx-health {
......@@ -77,8 +77,6 @@ stream {
server {
listen 9881;
proxy_pass $upstream:24224;
proxy_pass $upstream:{{ env "NOMAD_META_fluentd_port_stream" }};
}
}
# Reconstructed from the collapsed diff view: the old ordering left two
# unterminated duplicate resource headers. Post-commit ordering is
# nginx -> mongo -> perfmetrics.

# Reverse proxy fronting all ASAPO services.
resource "nomad_job" "asapo-nginx" {
  jobspec = "${data.template_file.nginx.rendered}"
}

# MongoDB backing store.
resource "nomad_job" "asapo-mongo" {
  jobspec = "${data.template_file.asapo_mongo.rendered}"
}

# InfluxDB/Grafana performance-metrics stack.
resource "nomad_job" "asapo-perfmetrics" {
  jobspec = "${data.template_file.asapo_perfmetrics.rendered}"
}
# Logging stack (fluentd/elasticsearch/kibana); waits for nginx so log
# routing is available before the jobs come up.
resource "nomad_job" "asapo-logging" {
jobspec = "${data.template_file.asapo_logging.rendered}"
depends_on = [null_resource.nginx]
}
# Core services (authorizer, discovery); started only after all backing
# services they talk to are resolvable.
resource "nomad_job" "asapo-services" {
jobspec = "${data.template_file.asapo_services.rendered}"
depends_on = [null_resource.nginx,null_resource.mongo,null_resource.influxdb,null_resource.fluentd,null_resource.elasticsearch]
}
# Receivers depend on the core services being registered.
resource "nomad_job" "asapo-receivers" {
jobspec = "${data.template_file.asapo_receivers.rendered}"
depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
}
# Brokers depend on the core services being registered.
resource "nomad_job" "asapo-brokers" {
jobspec = "${data.template_file.asapo_brokers.rendered}"
depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment