Skip to content
Snippets Groups Projects
Commit 71665b26 authored by Sergey Yakubov's avatar Sergey Yakubov
Browse files

Merge pull request #83 in ASAPO/asapo from feature_ASAPO-107-file-transfer-service to develop

* commit '4f50581f':
  update deployment scripts, update for macos
parents e04dbc6b 4f50581f
No related branches found
No related tags found
No related merge requests found
Showing
with 186 additions and 21 deletions
......@@ -110,6 +110,11 @@ SystemIO::~SystemIO() {
}
}
void SystemIO::SetThreadName(std::thread* threadHandle, const std::string& name) const {
// Sets the OS-visible name of the given thread (shown by top/htop/gdb).
// On Linux (glibc), pthread_setname_np does NOT silently ignore excess
// characters: it fails with ERANGE when the name exceeds 15 characters
// (16 including the terminating null). Truncate so that long names still
// produce a (shortened) label instead of no label at all. Best effort —
// the return value is deliberately ignored.
    pthread_setname_np(threadHandle->native_handle(), name.substr(0, 15).c_str());
}
void asapo::SystemIO::CloseSocket(SocketDescriptor fd, Error* err) const {
if (err) {
*err = nullptr;
......
......@@ -201,11 +201,6 @@ void SystemIO::CollectFileInformationRecursively(const std::string& path,
closedir(dir);
}
void SystemIO::SetThreadName(std::thread* threadHandle, const std::string& name) const {
// Sets the OS-visible name of the given thread (shown by top/htop/gdb).
// NOTE(review): contrary to the previous comment here, glibc's
// pthread_setname_np does not truncate — it fails with ERANGE for names
// longer than 15 characters (16 including the null byte); callers should
// keep names short. Best effort: the return value is ignored.
pthread_setname_np(threadHandle->native_handle(), name.c_str());
}
void SystemIO::ApplyNetworkOptions(SocketDescriptor socket_fd, Error* err) const {
//TODO: Need to change network layer code, so everything can be NonBlocking
int flag = 1;
......
......@@ -76,6 +76,10 @@ ListSocketDescriptors SystemIO::WaitSocketsActivity(SocketDescriptor master_sock
return active_sockets;
}
void SystemIO::SetThreadName(std::thread* threadHandle, const std::string& name) const {
// Intentionally a no-op on macOS: its pthread_setname_np takes no thread
// argument and can only name the *calling* thread, so an arbitrary
// threadHandle cannot be named from here.
}
void asapo::SystemIO::CloseSocket(SocketDescriptor fd, Error* err) const {
if (err) {
*err = nullptr;
......
......@@ -9,8 +9,6 @@ variable "nginx_version" {}
variable "job_scripts_dir" {}
variable "service_dir" {}
variable "data_dir" {}
variable "grafana_total_memory_size" {}
variable "telegraf_total_memory_size" {}
......
elk_logs = true
service_dir="/gpfs/asapo/shared/service_dir"
data_dir="/beamline"
online_dir="/beamline"
offline_dir="/asap3"
mongo_dir="/gpfs/asapo/shared/service_dir/mongodb"
asapo_user="35841:1000"
job_scripts_dir="/gpfs/asapo/shared/terraform"
......@@ -22,3 +23,6 @@ discovery_total_memory_size = 512
n_receivers = 1
n_brokers = 1
n_fts = 1
......@@ -3,6 +3,8 @@
NOMAD_ALLOC_HOST_SHARED=/var/tmp/asapo/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=/var/tmp/asapo/asapo_cluster_shared/service_data
DATA_GLOBAL_SHARED=/var/tmp/asapo/global_shared/data
DATA_GLOBAL_SHARED_ONLINE=/var/tmp/asapo/global_shared/online_data
MONGO_DIR=$SERVICE_DATA_CLUSTER_SHARED/mongodb
ASAPO_USER=`id -u`:`id -g`
......@@ -27,7 +29,7 @@ mmc=`cat /proc/sys/vm/max_map_count`
if (( mmc < 262144 )); then
echo increase max_map_count - needed for elasticsearch
exit 1
exit 1
fi
if [ -f $ASAPO_VAR_FILE ]; then
......@@ -45,7 +47,8 @@ docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
-v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
-e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
-e TF_VAR_data_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
-e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_mongo_dir=$MONGO_DIR \
$MOUNT_VAR_FILE \
-e ADVERTISE_IP=$ADVERTISE_IP \
......
......@@ -4,6 +4,7 @@
NOMAD_ALLOC_HOST_SHARED=/var/tmp/asapo/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=/home/yakubov/asapo/asapo_cluster_shared/service_data
DATA_GLOBAL_SHARED=/gpfs/petra3/scratch/yakubov/asapo_shared
DATA_GLOBAL_SHARED_ONLINE=/tmp
MONGO_DIR=/scratch/mongodb # due to performance reasons mongodb can benefit from writing to local filesystem (HA to be worked on)
#service distribution
MAX_NOMAD_SERVERS=3 # rest are clients
......@@ -72,7 +73,8 @@ dockerrun --rm \
$MOUNT_VAR_FILE \
-e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
-e TF_VAR_data_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
-e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_mongo_dir=$MONGO_DIR \
-e ADVERTISE_IP=$ADVERTISE_IP \
-e RECURSORS=$RECURSORS \
......
# Nomad job for the ASAPO file transfer service (FTS); rendered as a
# Terraform template — ${...} / %{...} are template interpolations,
# $${...} escapes a literal Nomad ${...}.
job "asapo-fts" {
datacenters = ["dc1"]
# Prefer (weight 100) nodes tagged with node_group "utl", without requiring them.
affinity {
attribute = "$${meta.node_group}"
value = "utl"
weight = 100
}
# Rolling update, one allocation at a time; no automatic rollback on failure.
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "3m"
auto_revert = false
}
group "fts" {
# Number of FTS instances, supplied by the template variable n_fts.
count = ${n_fts}
restart {
attempts = 2
interval = "3m"
delay = "15s"
mode = "fail"
}
task "fts" {
driver = "docker"
user = "${asapo_user}"
config {
network_mode = "host"
security_opt = ["no-new-privileges"]
userns_mode = "host"
image = "yakser/asapo-file-transfer${image_suffix}"
force_pull = true
# Mount the rendered config plus the offline/online data trees at the
# same paths inside the container as on the host.
volumes = ["local/config.json:/var/lib/file_transfer/config.json",
"${offline_dir}:${offline_dir}",
"${online_dir}:${online_dir}"
]
# Ship container logs to fluentd unless plain Nomad logging is requested.
%{ if ! nomad_logs }
logging {
type = "fluentd"
config {
fluentd-address = "localhost:9881"
fluentd-async-connect = true
tag = "asapo.docker"
}
}
%{endif}
}
resources {
network {
# Dynamically allocated port, referenced below as "fts".
port "fts" {}
}
}
# Consul service registration with an HTTP health check; the task is
# restarted after two consecutive check failures (90s startup grace).
service {
port = "fts"
name = "asapo-fts"
check {
name = "asapo-fts-alive"
type = "http"
path = "/health-check"
interval = "10s"
timeout = "2s"
}
check_restart {
limit = 2
grace = "90s"
ignore_warnings = false
}
}
# Render the FTS config and copy the shared auth secret; changes restart the task.
template {
source = "${scripts_dir}/fts.json.tpl"
destination = "local/config.json"
change_mode = "restart"
}
template {
source = "${scripts_dir}/auth_secret.key"
destination = "local/secret.key"
change_mode = "restart"
}
} #task fts
}
}
......@@ -33,7 +33,8 @@ job "asapo-receivers" {
image = "yakser/asapo-receiver${image_suffix}"
force_pull = true
volumes = ["local/config.json:/var/lib/receiver/config.json",
"${data_dir}:/var/lib/receiver/data"]
"${offline_dir}:${offline_dir}",
"${online_dir}:${online_dir}"]
%{ if ! nomad_logs }
logging {
type = "fluentd"
......
......@@ -20,7 +20,10 @@ job "asapo-services" {
userns_mode = "host"
image = "yakser/asapo-authorizer${image_suffix}"
force_pull = true
volumes = ["local/config.json:/var/lib/authorizer/config.json"]
volumes = ["local/config.json:/var/lib/authorizer/config.json",
"${offline_dir}:${offline_dir}",
"${online_dir}:${online_dir}"]
%{ if ! nomad_logs }
logging {
type = "fluentd"
......@@ -42,6 +45,11 @@ job "asapo-services" {
}
}
meta {
offline_dir = "${offline_dir}"
online_dir = "${online_dir}"
}
service {
name = "asapo-authorizer"
port = "authorizer"
......
......@@ -43,4 +43,5 @@ authorizer_port = 5007
consul_dns_port = 8600
n_receivers = 1
n_brokers = 1
\ No newline at end of file
n_brokers = 1
n_fts = 1
\ No newline at end of file
{
"Port": {{ env "NOMAD_PORT_authorizer" }},
"LogLevel":"debug",
"AlwaysAllowedBeamtimes":[{"beamtimeId":"asapo_test","beamline":"test","core-path":"/var/lib/receiver/data/test_facility/gpfs/test/2019/data/asapo_test"},
{"beamtimeId":"asapo_test1","beamline":"test1","core-path":"/var/lib/receiver/data/test_facility/gpfs/test1/2019/data/asapo_test1"},
{"beamtimeId":"asapo_test2","beamline":"test2","core-path":"/var/lib/receiver/data/test_facility/gpfs/test2/2019/data/asapo_test2"}],
"SecretFile":"/local/secret.key"
"AlwaysAllowedBeamtimes":[{"beamtimeId":"asapo_test","beamline":"test","core-path":"{{ env "NOMAD_META_offline_dir" }}/test_facility/gpfs/test/2019/data/asapo_test"},
{"beamtimeId":"asapo_test1","beamline":"test1","core-path":"{{ env "NOMAD_META_offline_dir" }}/test_facility/gpfs/test1/2019/data/asapo_test1"},
{"beamtimeId":"asapo_test2","beamline":"test2","core-path":"{{ env "NOMAD_META_offline_dir" }}/test_facility/gpfs/test2/2019/data/asapo_test2"}],
"RootBeamtimesFolder":"{{ env "NOMAD_META_offline_dir" }}",
"CurrentBeamlinesFolder":"{{ env "NOMAD_META_online_dir" }}",
"SecretFile":"/local/secret.key",
"TokenDurationMin":600
}
{
"Port": {{ env "NOMAD_PORT_fts" }},
"LogLevel":"debug",
"SecretFile":"/local/secret.key"
}
......@@ -29,3 +29,9 @@ resource "nomad_job" "asapo-brokers" {
jobspec = "${data.template_file.asapo_brokers.rendered}"
depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
}
# Submits the rendered file-transfer-service job to Nomad, after the core
# services (authorizer, discovery) are deployed and reachable.
resource "nomad_job" "asapo-fts" {
jobspec = "${data.template_file.asapo_fts.rendered}"
depends_on = [nomad_job.asapo-services,null_resource.asapo-authorizer,null_resource.asapo-discovery]
}
......@@ -51,6 +51,13 @@ resource "null_resource" "asapo-broker" {
depends_on = [nomad_job.asapo-brokers]
}
# Blocks until the asapo-fts service is up (the asapo-wait-service helper
# polls for it — presumably via service discovery; verify against the script),
# so later resources can depend on FTS being reachable.
resource "null_resource" "asapo-fts" {
provisioner "local-exec" {
command = "asapo-wait-service asapo-fts"
}
depends_on = [nomad_job.asapo-fts]
}
resource "null_resource" "asapo-receiver" {
provisioner "local-exec" {
command = "asapo-wait-service asapo-receiver"
......
......@@ -20,6 +20,8 @@ data "template_file" "asapo_services" {
template = "${file("${var.job_scripts_dir}/asapo-services.nmd.tpl")}"
vars = {
scripts_dir = "${var.job_scripts_dir}"
online_dir = "${var.online_dir}"
offline_dir = "${var.offline_dir}"
image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
nomad_logs = "${var.nomad_logs}"
authorizer_total_memory_size = "${var.authorizer_total_memory_size}"
......@@ -34,7 +36,8 @@ data "template_file" "asapo_receivers" {
template = "${file("${var.job_scripts_dir}/asapo-receivers.nmd.tpl")}"
vars = {
scripts_dir = "${var.job_scripts_dir}"
data_dir = "${var.data_dir}"
online_dir = "${var.online_dir}"
offline_dir = "${var.offline_dir}"
image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
nomad_logs = "${var.nomad_logs}"
receiver_total_memory_size = "${var.receiver_total_memory_size}"
......@@ -58,6 +61,19 @@ data "template_file" "asapo_brokers" {
}
# Renders the FTS Nomad job template, wiring the script directory, the
# online/offline data directories, image tag, logging mode, run-as user and
# instance count into asapo-fts.nmd.tpl.
data "template_file" "asapo_fts" {
template = "${file("${var.job_scripts_dir}/asapo-fts.nmd.tpl")}"
vars = {
scripts_dir = "${var.job_scripts_dir}"
online_dir = "${var.online_dir}"
offline_dir = "${var.offline_dir}"
image_suffix = "${var.asapo_imagename_suffix}:${var.asapo_image_tag}"
nomad_logs = "${var.nomad_logs}"
asapo_user = "${var.asapo_user}"
n_fts = "${var.n_fts}"
}
}
data "template_file" "asapo_perfmetrics" {
template = "${file("${var.job_scripts_dir}/asapo-perfmetrics.nmd.tpl")}"
vars = {
......
......@@ -24,7 +24,8 @@ variable "job_scripts_dir" {}
variable "service_dir" {}
variable "data_dir" {}
variable "online_dir" {}
variable "offline_dir" {}
variable "mongo_dir" {}
......@@ -74,4 +75,6 @@ variable "consul_dns_port" {}
variable "n_receivers" {}
variable "n_brokers" {}
\ No newline at end of file
variable "n_brokers" {}
variable "n_fts" {}
\ No newline at end of file
from __future__ import print_function
import asapo_consumer
import sys
source, beamtime,path, token = sys.argv[1:]
broker = asapo_consumer.create_server_broker(source,path,False, beamtime,"",token,1000)
group_id = broker.generate_group_id()
data, meta = broker.get_by_id(1, group_id, meta_only=False)
print (meta)
print (len(data))
sys.exit(0)
\ No newline at end of file
python3 consumer_api.py asapo-services.desy.de:8400 asapo_test /shared_data/test_facility/gpfs/test/2019/data/asapo_test KmUDdacgBzaOD3NIJvN1NmKGqWKtx0DK-NyPjdpeWkc=
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment