Commit ed9f21bc authored by Sergey Yakubov

Merge pull request #184 in ASAPO/asapo from docs to develop

* commit 'cc97c70f':
  fix cmake
  freeze 21.12.0
parents 20fd84a8 cc97c70f
import asapo_consumer
#create snippet_start
endpoint = "localhost:8400"
beamtime = "asapo_test"
# test token. In production it is created during the start of the beamtime
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
# set it according to your configuration.
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
consumer = asapo_consumer \
    .create_consumer(endpoint,
                     path_to_files,
                     True,           # True if path_to_files is accessible locally, False otherwise
                     beamtime,       # Same as for the producer
                     "test_source",  # Same as for the producer
                     token,          # Access token
                     5000)           # Timeout. How long to wait on a non-finished stream for a message.
#create snippet_end
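# --- illustrative sketch, not part of the original example ---
# the token above is only a test token; in production it is created when the beamtime
# starts, so you would normally not hard-code it. A hypothetical helper that takes the
# token from an environment variable (the variable name is an assumption) might look like:
import os

def create_consumer_from_env(endpoint, path_to_files, beamtime, data_source, timeout_ms = 5000):
    env_token = os.environ["ASAPO_TOKEN"]  # hypothetical variable name
    return asapo_consumer.create_consumer(endpoint, path_to_files, True,
                                          beamtime, data_source, env_token, timeout_ms)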
#list snippet_start
for stream in consumer.get_stream_list():
    print("Stream name: ", stream['name'], "\n",
          "LastId: ", stream['lastId'], "\n",
          "Stream finished: ", stream['finished'], "\n",
          "Next stream: ", stream['nextStream'])
#list snippet_end
#consume snippet_start
group_id = consumer.generate_group_id() # Several consumers can use the same group_id to process messages in parallel
try:
    # get_next is the main function to get messages from streams. You would normally call it in a loop.
    # you can either manually compare meta['_id'] to the stream's lastId (see the sketch after this snippet),
    # or wait for the exception to happen
    while True:
        data, meta = consumer.get_next(group_id, meta_only = False)
        print(data.tobytes().decode("utf-8"), meta)
except asapo_consumer.AsapoStreamFinishedError:
    print('stream finished')  # all the messages in the stream were processed
except asapo_consumer.AsapoEndOfStreamError:
    print('stream ended')     # not-finished stream timeout, or wrong or empty stream
#consume snippet_end
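# --- illustrative sketch, not part of the original example ---
# as mentioned in the comment above, instead of waiting for the exception you can
# compare meta['_id'] with the stream's lastId yourself. This hypothetical helper
# only reuses calls already shown above; error handling is intentionally minimal.
def consume_until_last_known_id(consumer, group_id, stream_name = 'default'):
    stream_info = next(s for s in consumer.get_stream_list() if s['name'] == stream_name)
    last_id = stream_info['lastId']
    while last_id > 0:
        data, meta = consumer.get_next(group_id, meta_only = False, stream = stream_name)
        print(data.tobytes().decode("utf-8"), meta)
        if meta['_id'] >= last_id:
            break  # reached the last message known at the time of the stream-list call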
#delete snippet_start
consumer.delete_stream(error_on_not_exist = True) # you can delete the stream after consuming
#delete snippet_end
import asapo_consumer
endpoint = "localhost:8400"
beamtime = "asapo_test"
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
group_id = consumer.generate_group_id()
# dataset snippet_start
try:
    # get_next_dataset behaves similarly to the regular get_next
    while True:
        dataset = consumer.get_next_dataset(group_id, stream = 'default')
        print('Dataset Id:', dataset['id'])
        # the initial response only contains the metadata
        # the actual content should be retrieved separately
        for metadata in dataset['content']:
            data = consumer.retrieve_data(metadata)
            print('Part ' + str(metadata['dataset_substream']) + ' out of ' + str(dataset['expected_size']))
            print(data.tobytes().decode("utf-8"), metadata)
except asapo_consumer.AsapoStreamFinishedError:
    print('stream finished')
except asapo_consumer.AsapoEndOfStreamError:
    print('stream ended')
# dataset snippet_end
import asapo_consumer
import asapo_producer
import json
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        print("could not send: ", payload, err)
    elif err is not None:
        print("sent with warning: ", payload, err)
    else:
        print("successfully sent: ", payload)
endpoint = "localhost:8400"
beamtime = "asapo_test"
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
producer.set_log_level('error')
# beamtime_set snippet_start
# sample beamtime metadata. You can add any data you want, with any level of complexity
# in this example we use strings and ints, and one nested structure
beamtime_metadata = {
    'name': 'beamtime name',
    'condition': 'beamtime condition',
    'intvalue1': 5,
    'intvalue2': 10,
    'structure': {
        'structint1': 20,
        'structint2': 30
    }
}
# send the metadata
# by default the new metadata will completely replace the one that's already there
producer.send_beamtime_meta(json.dumps(beamtime_metadata), callback = callback)
# beamtime_set snippet_end
# beamtime_update snippet_start
# we can update the existing metadata if we want, by modifying the existing fields, or adding new ones
beamtime_metadata_update = {
    'condition': 'updated beamtime condition',
    'newintvalue': 15
}
# send the metadata in the 'update' mode
producer.send_beamtime_meta(json.dumps(beamtime_metadata_update), mode = 'update', callback = callback)
# beamtime_update snippet_end
# stream_set snippet_start
# sample stream metadata
stream_metadata = {
    'name': 'stream name',
    'condition': 'stream condition',
    'intvalue': 44
}
# works the same way: by default we replace the stream metadata, but update is also possible
# update works exactly the same as for beamtime, but here we will only do 'replace'
producer.send_stream_meta(json.dumps(stream_metadata), callback = callback)
# stream_set snippet_end
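# --- illustrative sketch, not part of the original example ---
# as the comment above says, stream metadata can also be updated instead of replaced;
# this assumes send_stream_meta accepts the same 'update' mode as send_beamtime_meta:
stream_metadata_update = {
    'condition': 'updated stream condition'
}
producer.send_stream_meta(json.dumps(stream_metadata_update), mode = 'update', callback = callback)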
# message_set snippet_start
# sample message metadata
message_metadata = {
    'name': 'message name',
    'condition': 'message condition',
    'somevalue': 55
}
# the message metadata is sent together with the message itself
# in case of datasets each part has its own metadata
producer.send(1, "processed/test_file", b'hello', user_meta = json.dumps(message_metadata), stream = "default", callback = callback)
# message_set snippet_end
producer.wait_requests_finished(2000)
consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
# beamtime_get snippet_start
# read the beamtime metadata
beamtime_metadata_read = consumer.get_beamtime_meta()
# the structure is the same as the one that was sent, and the updated values are already there
print('Name:', beamtime_metadata_read['name'])
print('Condition:', beamtime_metadata_read['condition'])
print('Updated value exists:', 'newintvalue' in beamtime_metadata_read)
print('Sum of int values:', beamtime_metadata_read['intvalue1'] + beamtime_metadata_read['intvalue2'])
print('Nested structure value', beamtime_metadata_read['structure']['structint1'])
# beamtime_get snippet_end
# stream_get snippet_start
# read the stream metadata
stream_metadata_read = consumer.get_stream_meta(stream = 'default')
# access various fields from it
print('Stream Name:', stream_metadata_read['name'])
print('Stream Condition:', stream_metadata_read['condition'])
print('Stream int value:', stream_metadata_read['intvalue'])
# stream_get snippet_end
group_id = consumer.generate_group_id()
try:
    while True:
        # message_get snippet_start
        # right now we are only interested in metadata
        data, meta = consumer.get_next(group_id, meta_only = True)
        print('Message #', meta['_id'])
        # our custom metadata is stored inside the message metadata
        message_metadata_read = meta['meta']
        print('Message Name:', message_metadata_read['name'])
        print('Message Condition:', message_metadata_read['condition'])
        print('Message int value:', message_metadata_read['somevalue'])
        # message_get snippet_end
except asapo_consumer.AsapoStreamFinishedError:
    print('stream finished')
except asapo_consumer.AsapoEndOfStreamError:
    print('stream ended')
import asapo_consumer
import asapo_producer
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        print("could not send: ", payload, err)
    elif err is not None:
        print("sent with warning: ", payload, err)
    else:
        print("successfully sent: ", payload)
endpoint = "localhost:8400"
beamtime = "asapo_test"
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
producer.set_log_level('error')
# let's start with producing a sample of 10 simple messages
for i in range(1, 11):
    producer.send(i, "processed/test_file_" + str(i), ('content of the message #' + str(i)).encode(), stream = 'default', callback = callback)
# next_stream_set snippet_start
# finish the stream and set the next stream to be called 'next'
producer.send_stream_finished_flag('default', i, next_stream = 'next', callback = callback)
# next_stream_set snippet_end
# populate the 'next' stream as well
for i in range(1, 6):
    producer.send(i, "processed/test_file_next_" + str(i), ('content of the message #' + str(i)).encode(), stream = 'next', callback = callback)
# we leave the 'next' stream unfinished, but the chain of streams can be of any length
producer.wait_requests_finished(2000)
consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
group_id = consumer.generate_group_id()
# read_stream snippet_start
# we start with the 'default' stream (the first one)
stream_name = 'default'
while True:
    try:
        data, meta = consumer.get_next(group_id, meta_only = False, stream = stream_name)
        text_data = data.tobytes().decode("utf-8")
        message_id = meta['_id']
        print('Message #', message_id, ':', text_data)
    except asapo_consumer.AsapoStreamFinishedError:
        # when the stream finishes, we look for the info on the next stream
        # first, we find the stream with our name in the list of streams
        stream = next(s for s in consumer.get_stream_list() if s['name'] == stream_name)
        # then we check whether the field 'nextStream' is set and not empty
        if 'nextStream' in stream and stream['nextStream']:
            # if it is, we continue with the next stream
            stream_name = stream['nextStream']
            print('Changing stream to the next one:', stream_name)
            continue
        # otherwise we stop
        print('stream finished')
        break
    except asapo_consumer.AsapoEndOfStreamError:
        print('stream ended')
        break
# read_stream snippet_end
import asapo_consumer
import asapo_producer
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        print("could not send: ", payload, err)
    elif err is not None:
        print("sent with warning: ", payload, err)
    else:
        print("successfully sent: ", payload)
endpoint = "localhost:8400"
beamtime = "asapo_test"
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
group_id = consumer.generate_group_id()
# pipeline snippet_start
# put the processed message into the new stream
pipelined_stream_name = 'pipelined'
try:
    while True:
        # we expect the message to be in the 'default' stream already
        data, meta = consumer.get_next(group_id, meta_only = False)
        message_id = meta['_id']
        # work on our data
        text_data = data.tobytes().decode("utf-8")
        pipelined_message = (text_data + ' processed').encode()
        # you may reuse the same filename if you want to overwrite the source file. This results in a warning, but it is a valid use case
        producer.send(message_id, "processed/test_file_" + str(message_id), pipelined_message, stream = pipelined_stream_name, callback = callback)
except asapo_consumer.AsapoStreamFinishedError:
    print('stream finished')
except asapo_consumer.AsapoEndOfStreamError:
    print('stream ended')
# pipeline snippet_end
producer.wait_requests_finished(2000)
# finish snippet_start
# the meta from the last iteration corresponds to the last message
last_id = meta['_id']
producer.send_stream_finished_flag("pipelined", last_id)
# finish snippet_end
# you can remove the source stream if you do not need it anymore
consumer.delete_stream(stream = 'default', error_on_not_exist = True)
import asapo_producer
# callback snippet_start
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        # the data was not sent. Something is terribly wrong.
        print("could not send: ", payload, err)
    elif err is not None:
        # the data was sent, but there was some unexpected problem, e.g. the file was overwritten
        print("sent with warning: ", payload, err)
    else:
        # all fine
        print("successfully sent: ", payload)
# callback snippet_end
# create snippet_start
endpoint = "localhost:8400"
beamtime = "asapo_test"
producer = asapo_producer \
    .create_producer(endpoint,
                     'processed',    # should be 'processed' or 'raw'; 'processed' writes to the core FS
                     beamtime,       # the folder should exist
                     'auto',         # can be 'auto' if the beamtime_id is given
                     'test_source',  # data source
                     '',             # authorization token
                     1,              # number of threads. Increase if the sending speed seems slow
                     60000)          # timeout. Do not change.
producer.set_log_level("error")  # other values are "warning", "info" or "debug".
# create snippet_end
# send snippet_start
# we are sending a message with index 1 to the default stream. The filename must start with processed/
producer.send(1,                       # message number. Should be unique and ordered.
              "processed/test_file",   # name of the file. Should be unique, or it will be overwritten
              b"hello",                # binary data
              callback = callback)     # callback
# send snippet_end
# send data in a loop, for example:
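# --- illustrative sketch, not part of the original example ---
# the same send call can be repeated with increasing ids and unique filenames,
# mirroring the loop pattern used in the other examples of this commit.
# if you do send more messages, pass the id of the last one to send_stream_finished_flag below.
for i in range(2, 11):
    producer.send(i, "processed/test_file_" + str(i),
                  ('message #' + str(i)).encode(), callback = callback)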
# add the following at the end of the script
# finish snippet_start
producer.wait_requests_finished(2000) # will synchronously wait for all the data to be sent.
# Use it when no more data is expected.
# you may want to mark the stream as finished
producer.send_stream_finished_flag("default", # name of the stream. If you didn't specify the stream in 'send', it would be 'default'
1) # the number of the last message in the stream
# finish snippet_end
import asapo_producer
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        print("could not send: ", payload, err)
    elif err is not None:
        print("sent with warning: ", payload, err)
    else:
        print("successfully sent: ", payload)
endpoint = "localhost:8400"
beamtime = "asapo_test"
producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
# dataset snippet_start
# assuming we have three different producers for a single dataset
# add the additional 'dataset' parameter, which should be (<part_number>, <total_parts_in_dataset>)
producer.send(1, "processed/test_file_dataset_1", b"hello dataset 1", dataset = (1,3), callback = callback)
# this can be done from different producers in any order
producer.send(1, "processed/test_file_dataset_2", b"hello dataset 2", dataset = (2,3), callback = callback)
producer.send(1, "processed/test_file_dataset_3", b"hello dataset 3", dataset = (3,3), callback = callback)
# dataset snippet_end
producer.wait_requests_finished(2000)
# the dataset parts are not counted towards the number of messages in the stream
# the last message id in this example is still 1
producer.send_stream_finished_flag("default", 1)
import asapo_consumer
import asapo_producer
import json
from datetime import datetime, timedelta
def callback(payload, err):
    if err is not None and not isinstance(err, asapo_producer.AsapoServerWarning):
        print("could not send: ", payload, err)
    elif err is not None:
        print("sent with warning: ", payload, err)
    else:
        print("successfully sent: ", payload)
endpoint = "localhost:8400"
beamtime = "asapo_test"
token = str("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e"
"yJleHAiOjk1NzE3MTAyMTYsImp0aSI6ImMzaXFhbGpmNDNhbGZ"
"wOHJua20wIiwic3ViIjoiYnRfYXNhcG9fdGVzdCIsIkV4dHJhQ"
"2xhaW1zIjp7IkFjY2Vzc1R5cGVzIjpbIndyaXRlIiwicmVhZCJ"
"dfX0.dkWupPO-ysI4t-jtWiaElAzDyJF6T7hu_Wz_Au54mYU")
path_to_files = "/var/tmp/asapo/global_shared/data/test_facility/gpfs/test/2019/data/asapo_test"
producer = asapo_producer.create_producer(endpoint, 'processed', beamtime, 'auto', 'test_source', '', 1, 60000)
producer.set_log_level('error')
# let's start with producing some messages with metadata
for i in range(1, 11):
    metadata = {
        'condition': 'condition #' + str(i),
        'somevalue': i * 10
    }
    producer.send(i, "processed/test_file_" + str(i), ('message #' + str(i)).encode(), user_meta = json.dumps(metadata), stream = "default", callback = callback)
producer.wait_requests_finished(2000)
consumer = asapo_consumer.create_consumer(endpoint, path_to_files, True, beamtime, "test_source", token, 5000)
# helper function to print messages
def print_messages(metadatas):
    # the query returns a list of metadata entries
    for meta in metadatas:
        # for each metadata entry we need to obtain the actual message first
        data = consumer.retrieve_data(meta)
        print('Message #', meta['_id'], ', content:', data.tobytes().decode("utf-8"), ', usermetadata:', meta['meta'])
# by_id snippet_start
# simple query, same as get_by_id
metadatas = consumer.query_messages('_id = 1')
# by_id snippet_end
print('Message with ID = 1')
print_messages(metadatas)
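# --- illustrative sketch, not part of the original example ---
# the comment above notes that this query is equivalent to get_by_id; the direct call
# could look roughly like this (exact signature is assumed, not taken from this commit):
data, meta = consumer.get_by_id(1, meta_only = False)
print('Message #', meta['_id'], ':', data.tobytes().decode("utf-8"))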
# by_ids snippet_start
# the query that requests the range of IDs
metadatas = consumer.query_messages('_id >= 8')
# by_ids snippet_end
print('Messages with ID >= 8')
print_messages(metadatas)
# string_equal snippet_start
# the query that has some specific requirement for message metadata
metadatas = consumer.query_messages('meta.condition = "condition #7"')
# string_equal snippet_end
print('Message with condition = "condition #7"')
print_messages(metadatas)
# int_compare snippet_start
# the query that has several requirements for user metadata
metadatas = consumer.query_messages('meta.somevalue > 30 AND meta.somevalue < 60')
# int_compare snippet_end
print('Message with 30 < somevalue < 60')
print_messages(metadatas)
# timestamp snippet_start
# the query that is based on the message's timestamp
now = datetime.now()
fifteen_minutes_ago = now - timedelta(minutes = 15)
# Python timestamps are in seconds, while ASAP::O uses nanoseconds, so we multiply by a billion (10**9)
metadatas = consumer.query_messages('timestamp < {} AND timestamp > {}'.format(now.timestamp() * 10**9, fifteen_minutes_ago.timestamp() * 10**9))
# timestamp snippet_end
print('Messages in the last 15 minutes')
print_messages(metadatas)
#!/usr/bin/env bash
set -e
ASAPO_HOST_DIR=/var/tmp/asapo # you can change this if needed, make sure there is enough space ( >3GB on disk)
NOMAD_ALLOC_HOST_SHARED=$ASAPO_HOST_DIR/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=$ASAPO_HOST_DIR/asapo_cluster_shared/service_data
DATA_GLOBAL_SHARED=$ASAPO_HOST_DIR/global_shared/data
DATA_GLOBAL_SHARED_ONLINE=$ASAPO_HOST_DIR/global_shared/online_data
MONGO_DIR=$SERVICE_DATA_CLUSTER_SHARED/mongodb
ASAPO_USER=`id -u`:`id -g`
mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
cd $SERVICE_DATA_CLUSTER_SHARED
mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
chmod 777 *
docker run --privileged --rm -v /var/run/docker.sock:/var/run/docker.sock \
-u $ASAPO_USER \
--group-add `getent group docker | cut -d: -f3` \
-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
-v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
-v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
-e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
-e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
-e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_mongo_dir=$MONGO_DIR \
-e TF_VAR_asapo_user=$ASAPO_USER \
-e ACL_ENABLED=true \
--name asapo --net=host -d yakser/asapo-cluster:21.12.0
sleep 15
docker exec asapo jobs-start -var elk_logs=false -var influxdb_version=1.8.4
#!/usr/bin/env bash
set -e
ASAPO_HOST_DIR=/var/tmp/asapo # you can change this if needed, make sure there is enough space ( >3GB on disk)
# change this according to your Docker configuration
DOCKER_ENDPOINT="127.0.0.1:2376"
DOCKER_TLS_CA=/usr/local/docker/certs/$USER/ca.pem
DOCKER_TLS_KEY=/usr/local/docker/certs/$USER/key.pem
DOCKER_TLS_CERT=/usr/local/docker/certs/$USER/cert.pem
NOMAD_ALLOC_HOST_SHARED=$ASAPO_HOST_DIR/container_host_shared/nomad_alloc
SERVICE_DATA_CLUSTER_SHARED=$ASAPO_HOST_DIR/asapo_cluster_shared/service_data
DATA_GLOBAL_SHARED=$ASAPO_HOST_DIR/global_shared/data
DATA_GLOBAL_SHARED_ONLINE=$ASAPO_HOST_DIR/global_shared/online_data
MONGO_DIR=$SERVICE_DATA_CLUSTER_SHARED/mongodb
ASAPO_USER=`id -u`:`id -g`
mkdir -p $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
chmod 777 $NOMAD_ALLOC_HOST_SHARED $SERVICE_DATA_CLUSTER_SHARED $DATA_GLOBAL_SHARED $DATA_GLOBAL_SHARED_ONLINE
cd $SERVICE_DATA_CLUSTER_SHARED
mkdir -p fluentd grafana influxdb influxdb2 mongodb prometheus alertmanager
chmod 777 *
docker run --privileged --userns=host --security-opt no-new-privileges --rm \
-u $ASAPO_USER \
-v $NOMAD_ALLOC_HOST_SHARED:$NOMAD_ALLOC_HOST_SHARED \
-v $SERVICE_DATA_CLUSTER_SHARED:$SERVICE_DATA_CLUSTER_SHARED \
-v $DATA_GLOBAL_SHARED:$DATA_GLOBAL_SHARED \
-e NOMAD_ALLOC_DIR=$NOMAD_ALLOC_HOST_SHARED \
-e TF_VAR_service_dir=$SERVICE_DATA_CLUSTER_SHARED \
-e TF_VAR_online_dir=$DATA_GLOBAL_SHARED_ONLINE \
-e TF_VAR_offline_dir=$DATA_GLOBAL_SHARED \
-e TF_VAR_mongo_dir=$MONGO_DIR \
-e TF_VAR_asapo_user=$ASAPO_USER \
-e ACL_ENABLED=true \
-v $DOCKER_TLS_CA:/etc/nomad/ca.pem \
-v $DOCKER_TLS_KEY:/etc/nomad/key.pem \
-v $DOCKER_TLS_CERT:/etc/nomad/cert.pem \
-e DOCKER_ENDPOINT=$DOCKER_ENDPOINT \
--name asapo --net=host -d yakser/asapo-cluster:21.12.0
sleep 15
docker exec asapo jobs-start -var elk_logs=false
{
  "version-21.12.0/docs": [
    {
      "type": "doc",
      "id": "version-21.12.0/getting-started"
    },
    {
      "type": "doc",
      "id": "version-21.12.0/overview"
    },
    {
      "type": "doc",
      "id": "version-21.12.0/compare-to-others"
    },
    {
      "collapsed": true,
      "type": "category",
      "label": "Concepts And Architecture",
      "items": [
        {
          "type": "doc",
          "id": "version-21.12.0/data-in-asapo"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/producer-clients"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/consumer-clients"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/core-architecture"
        }
      ]
    },
    {
      "collapsed": true,
      "type": "category",
      "label": "Use Cases",
      "items": [
        {
          "type": "doc",
          "id": "version-21.12.0/p02.1"
        }
      ]
    },
    {
      "collapsed": true,
      "type": "category",
      "label": "Code Examples",
      "items": [
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/overview"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/simple-producer"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/simple-consumer"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/simple-pipeline"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/datasets"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/acknowledgements"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/metadata"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/next_stream"
        },
        {
          "type": "doc",
          "id": "version-21.12.0/cookbook/query"
        }
      ]
    }
  ]
}
[
"21.12.0",
"21.09.0",
"21.06.0"
]