Skip to content
Snippets Groups Projects
Commit edc57feb authored by Sergey Yakubov's avatar Sergey Yakubov
Browse files

refactor deploy scripts

parent da51cb6f
No related branches found
No related tags found
No related merge requests found
Showing
with 258 additions and 4 deletions
......@@ -4,8 +4,8 @@ else()
# else-branch of the NOMAD_INSTALL selection (the if-branch is outside this
# hunk): default the Nomad job install location under the install prefix.
SET (NOMAD_INSTALL ${CMAKE_INSTALL_PREFIX}/nomad_jobs)
endif()
# configure_files() is a project helper (defined elsewhere) — presumably runs
# configure_file() over every file in the given directory; TODO confirm.
# Renders the deployment templates into the build tree for both the client
# stack (asapo) and the services stack (asapo_services), plus their scripts.
configure_files(${CMAKE_CURRENT_SOURCE_DIR}/asapo ${CMAKE_CURRENT_BINARY_DIR}/asapo)
configure_files(${CMAKE_CURRENT_SOURCE_DIR}/asapo/scripts ${CMAKE_CURRENT_BINARY_DIR}/asapo/scripts)
configure_files(${CMAKE_CURRENT_SOURCE_DIR}/asapo_services ${CMAKE_CURRENT_BINARY_DIR}/asapo_services)
configure_files(${CMAKE_CURRENT_SOURCE_DIR}/asapo_services/scripts ${CMAKE_CURRENT_BINARY_DIR}/asapo_services/scripts)
......
# asapo client deployment image: bundles the terraform/nomad scripts and
# pre-initializes the terraform working directory at build time.
FROM yakser/asapo-nomad-cluster
# MAINTAINER is deprecated; use a label instead.
LABEL maintainer="DESY IT"
COPY scripts/ /var/run/asapo/
# Fix: original read "cd /var/run/asapo asapo" — a stray second token
# ("cd: too many arguments" in bash), clearly a typo.
RUN cd /var/run/asapo && terraform init
grafana_total_memory_size = 2000 #MB — memory limit for the grafana container (overrides the 256 MB default from the main vars file)
#!/usr/bin/env bash
# Build the asapo client deployment image from the Dockerfile in this directory.
docker build -t yakser/asapo-client .
File moved
#!/usr/bin/env bash
# Start the asapo client deployment container on a workstation.
# Creates the shared host directories, then runs yakser/asapo-client detached
# with the docker socket mounted so it can spawn sibling containers.

# Host directories shared with the container and the nomad allocations.
NOMAD_ALLOC_HOST_SHARED=$HOME/asapo_client/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=$HOME/asapo_client/cluster_shared/service_data
DATA_GLOBAL_SHARED=$HOME/asapo_client/global_shared/data

# Run services as the invoking user (uid:gid).
ASAPO_USER=$(id -u):$(id -g)

# Optional overrides — uncomment and set to pass through to the container.
#ADVERTISE_IP=
#RECURSORS=
#IB_ADDRESS=
#SERVER_ADRESSES=
#N_SERVERS=

# Optional terraform var-file with user overrides.
ASAPO_VAR_FILE=$(pwd)/asapo_client_overwrite_vars.tfvars

mkdir -p "$NOMAD_ALLOC_HOST_SHARED" "$SERVICE_DATA_CLUSTER_SHARED" "$DATA_GLOBAL_SHARED"
chmod 777 "$NOMAD_ALLOC_HOST_SHARED" "$SERVICE_DATA_CLUSTER_SHARED" "$DATA_GLOBAL_SHARED"

# Fix: abort if cd fails so the chmod below cannot run in the wrong directory.
cd "$SERVICE_DATA_CLUSTER_SHARED" || exit 1
mkdir -p grafana   # -p: don't fail when the directory already exists on re-runs
chmod 777 *

if [ -f "$ASAPO_VAR_FILE" ]; then
    chmod 666 "$ASAPO_VAR_FILE"
    MOUNT_VAR_FILE="-v $ASAPO_VAR_FILE:/var/run/asapo/user_vars.tfvars"
fi

# NOTE: $MOUNT_VAR_FILE is intentionally unquoted — it must word-split into
# "-v <spec>" or expand to nothing when no var-file exists.
# SERVER_ADRESSES spelling is kept as-is: the container expects that exact name.
docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
    -u "$ASAPO_USER" \
    --group-add "$(getent group docker | cut -d: -f3)" \
    -v /var/lib/docker:/var/lib/docker \
    -v "$NOMAD_ALLOC_HOST_SHARED":"$NOMAD_ALLOC_HOST_SHARED" \
    -v "$SERVICE_DATA_CLUSTER_SHARED":"$SERVICE_DATA_CLUSTER_SHARED" \
    -v "$DATA_GLOBAL_SHARED":"$DATA_GLOBAL_SHARED" \
    -e NOMAD_ALLOC_DIR="$NOMAD_ALLOC_HOST_SHARED" \
    -e TF_VAR_service_dir="$SERVICE_DATA_CLUSTER_SHARED" \
    -e TF_VAR_data_dir="$DATA_GLOBAL_SHARED" \
    $MOUNT_VAR_FILE \
    -e ADVERTISE_IP="$ADVERTISE_IP" \
    -e RECURSORS="$RECURSORS" \
    -e TF_VAR_asapo_user="$ASAPO_USER" \
    -e IB_ADDRESS="$IB_ADDRESS" \
    -e SERVER_ADRESSES="$SERVER_ADRESSES" \
    -e N_SERVERS="$N_SERVERS" \
    --name asapo-client --net=host -d yakser/asapo-client
# Give the container a moment to come up before the caller proceeds.
sleep 5
\ No newline at end of file
#!/usr/bin/env bash
# Launch the asapo deployment container on a Maxwell (SLURM) node.
# Derives nomad server count and addresses from the SLURM allocation,
# prepares the shared directories, then runs yakser/asapo-client in the
# foreground (launched once per node via the accompanying sbatch script).

#folders
NOMAD_ALLOC_HOST_SHARED=/var/tmp/asapo_client/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=/home/yakubov/asapo/asapo_client_cluster_shared/service_data
DATA_GLOBAL_SHARED=/gpfs/petra3/scratch/yakubov/asapo_client_shared
MONGO_DIR=/scratch/mongodb # due to performance reasons mongodb can benefit from writing to local filesystem (HA to be worked on)

#service distribution
MAX_NOMAD_SERVERS=3 # rest are clients
N_ASAPO_LIGHTWEIGHT_SERVICE_NODES=1 # where to put influx, elk, ... . Rest are receivers, brokers, mongodb

# DESY DNS recursors, passed through to consul inside the container.
RECURSORS=["\"131.169.40.200\"",\""131.169.194.200\""]

ASAPO_USER=$(id -u):$(id -g)
ASAPO_VAR_FILE=$(pwd)/asapo_overwrite_vars.tfvars

#docker stuff
DOCKER_ENDPOINT="127.0.0.1:2376" #comment to use unix sockets
DOCKER_TLS_CA=/data/netapp/docker/certs/ca.pem
DOCKER_TLS_KEY=/data/netapp/docker/certs/$USER/key.pem
DOCKER_TLS_CERT=/data/netapp/docker/certs/$USER/cert.pem

# addresses to use — prefer the InfiniBand interface for the receiver
USE_IB_FOR_RECEIVER=true
if [ "$USE_IB_FOR_RECEIVER" == "true" ]; then
    IB_HOSTNAME=$(hostname --short)-ib
    IB_ADDRESS=$(getent hosts "$IB_HOSTNAME" | awk '{ print $1 }')
fi
#ADVERTISE_IP= #set if differs from default

# prepare env variables based on the above input:
# number of nomad servers is capped at MAX_NOMAD_SERVERS
N_SERVERS=$(( SLURM_JOB_NUM_NODES > MAX_NOMAD_SERVERS ? MAX_NOMAD_SERVERS : SLURM_JOB_NUM_NODES ))
# NOTE: "ADRESSES" spelling is kept on purpose — the container expects this
# exact env-var name. Both awk pipelines build a JSON-ish ["host1","host2"] list.
SERVER_ADRESSES=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -$N_SERVERS | awk 'BEGIN{printf "["} {printf "%s\"%s\"",sep,$0; sep=","} END{print "]"}')
ASAPO_LIGHTWEIGHT_SERVICE_NODES=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -$N_ASAPO_LIGHTWEIGHT_SERVICE_NODES | awk 'BEGIN{printf "["} {printf "%s\"%s\"",sep,$0; sep=","} END{print "]"}')

# make folders if not exist
mkdir -p "$NOMAD_ALLOC_HOST_SHARED" "$SERVICE_DATA_CLUSTER_SHARED" "$DATA_GLOBAL_SHARED" "$MONGO_DIR"
chmod 777 "$NOMAD_ALLOC_HOST_SHARED" "$SERVICE_DATA_CLUSTER_SHARED" "$DATA_GLOBAL_SHARED" "$MONGO_DIR"
# Fix: abort if cd fails so mkdir/chmod below cannot hit the wrong directory.
cd "$SERVICE_DATA_CLUSTER_SHARED" || exit 1
mkdir -p esdatadir fluentd grafana influxdb mongodb   # -p: idempotent on re-runs
chmod 777 *

#todo: elastic search check
mmc=$(cat /proc/sys/vm/max_map_count)
if (( mmc < 262144 )); then
    echo consider increasing max_map_count - needed for elasticsearch
    # exit 1
fi

docker rm -f asapo
docker pull yakser/asapo-client
if [ -f "$ASAPO_VAR_FILE" ]; then
    MOUNT_VAR_FILE="-v $ASAPO_VAR_FILE:/var/run/asapo/user_vars.tfvars"
fi

# Fix: original read "dockerrun --rm" (missing space) — not a command.
# $MOUNT_VAR_FILE stays unquoted on purpose: it must split into "-v <spec>"
# or expand to nothing when no var-file exists.
docker run --rm \
    -u "$ASAPO_USER" \
    -v /scratch/docker/100000.100000:/scratch/docker/100000.100000 \
    -v "$NOMAD_ALLOC_HOST_SHARED":"$NOMAD_ALLOC_HOST_SHARED" \
    -v "$SERVICE_DATA_CLUSTER_SHARED":"$SERVICE_DATA_CLUSTER_SHARED" \
    -v "$DOCKER_TLS_CA":/etc/nomad/ca.pem \
    -v "$DOCKER_TLS_KEY":/etc/nomad/key.pem \
    -v "$DOCKER_TLS_CERT":/etc/nomad/cert.pem \
    -v "$DATA_GLOBAL_SHARED":"$DATA_GLOBAL_SHARED" \
    $MOUNT_VAR_FILE \
    -e NOMAD_ALLOC_DIR="$NOMAD_ALLOC_HOST_SHARED" \
    -e TF_VAR_service_dir="$SERVICE_DATA_CLUSTER_SHARED" \
    -e TF_VAR_data_dir="$DATA_GLOBAL_SHARED" \
    -e TF_VAR_mongo_dir="$MONGO_DIR" \
    -e ADVERTISE_IP="$ADVERTISE_IP" \
    -e RECURSORS="$RECURSORS" \
    -e TF_VAR_asapo_user="$ASAPO_USER" \
    -e IB_ADDRESS="$IB_ADDRESS" \
    -e SERVER_ADRESSES="$SERVER_ADRESSES" \
    -e ASAPO_LIGHTWEIGHT_SERVICE_NODES="$ASAPO_LIGHTWEIGHT_SERVICE_NODES" \
    -e DOCKER_ENDPOINT="$DOCKER_ENDPOINT" \
    -e N_SERVERS="$N_SERVERS" \
    --name asapo yakser/asapo-client
# Default terraform variable values for the grafana deployment; can be
# overridden via user_vars.tfvars mounted into the container.
grafana_version = "latest"
job_scripts_dir = "/var/run/asapo"
grafana_total_memory_size = "256"
grafana_port = 3000
# Nomad job template for the grafana dashboard service.
# "${...}" placeholders are substituted by terraform's template_file data
# source; "$${...}" is a template escape that survives rendering so nomad
# evaluates it at scheduling time.
job "grafana" {
datacenters = ["dc1"]
# Prefer nodes tagged as asapo service nodes, without requiring them.
affinity {
attribute = "$${meta.asapo_service}"
value = "true"
weight = 100
}
group "grafana" {
count = 1
# Restart policy: two attempts per 3-minute window, then back off and retry.
restart {
attempts = 2
interval = "3m"
delay = "15s"
mode = "delay"
}
task "grafana" {
driver = "docker"
# Run as the configured uid:gid rather than the image default.
user = "${asapo_user}"
config {
network_mode = "host"
security_opt = ["no-new-privileges"]
userns_mode = "host"
image = "grafana/grafana:${grafana_version}"
# Persist grafana state on the shared service directory.
volumes = ["/${service_dir}/grafana:/var/lib/grafana"]
}
resources {
memory = "${grafana_total_memory_size}"
network {
port "grafana" {
static = "${grafana_port}"
}
}
}
# Consul service registration with an HTTP health check against
# grafana's own health endpoint.
service {
port = "grafana"
name = "grafana"
check {
name = "alive"
type = "http"
path = "/api/health"
interval = "10s"
timeout = "1s"
}
# Restart the task after two consecutive failed checks (90s startup grace).
check_restart {
limit = 2
grace = "90s"
ignore_warnings = false
}
}
} #grafana
}
}
# Terraform talks to the nomad agent on the local node.
provider "nomad" {
address = "http://localhost:4646"
# ACL token support, currently disabled.
# secret_id = "${chomp(file("/var/nomad/token"))}"
}
# Submit the rendered grafana job template to nomad.
resource "nomad_job" "grafana" {
jobspec = "${data.template_file.grafana_template.rendered}"
}
# Render grafana.nmd.tpl, substituting the deployment parameters below.
data "template_file" "grafana_template" {
template = "${file("${var.job_scripts_dir}/grafana.nmd.tpl")}"
vars = {
service_dir = "${var.service_dir}"
grafana_version = "${var.grafana_version}"
grafana_total_memory_size = "${var.grafana_total_memory_size}"
grafana_port = "${var.grafana_port}"
asapo_user = "${var.asapo_user}"
}
}
# Input variables for the asapo deployment; values come from tfvars files
# (and optionally a user-supplied user_vars.tfvars override).
variable "asapo_user" {
  description = "uid:gid the service containers run as"
}
variable "grafana_version" {
  description = "Tag of the grafana/grafana docker image"
}
variable "job_scripts_dir" {
  description = "Directory containing the nomad job templates (*.nmd.tpl)"
}
variable "service_dir" {
  description = "Shared directory for persistent service data"
}
variable "data_dir" {
  description = "Shared directory for detector/user data"
}
variable "grafana_total_memory_size" {
  description = "Memory limit for the grafana task, in MB"
}
variable "grafana_port" {
  description = "Static host port grafana listens on"
}
# asapo client deployment image: deployment scripts plus the
# asapo-wait-service helper; terraform is initialized at build time.
FROM yakser/asapo-nomad-cluster
# MAINTAINER is deprecated; use a label instead.
LABEL maintainer="DESY IT"
COPY scripts/ /var/run/asapo/
COPY asapo-wait-service /usr/bin/
# Fix: original read "cd /var/run/asapo asapo" — a stray second token
# ("cd: too many arguments" in bash), clearly a typo.
RUN cd /var/run/asapo && terraform init
File moved
......@@ -4,8 +4,6 @@ if [ "$2" == "false" ]; then
# Waiting disabled by the caller ($2 == "false") — nothing to do.
exit 0
fi
# Poll consul's DNS interface (port 8600) until service "$1" resolves;
# grep . succeeds only when dig returned a non-empty answer.
until dig +short @127.0.0.1 -p 8600 $1.service.asapo | grep . ; do
sleep 1
done
File moved
#!/usr/bin/env bash
# SLURM batch wrapper: start run_maxwell.sh once on every allocated node.
#SBATCH --nodes=1
#SBATCH -t 00:40:00
srun --ntasks=$SLURM_JOB_NUM_NODES --ntasks-per-node=1 ./run_maxwell.sh
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment