diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
index 69676f1d7395c32d93005b462f181d8b5d8ec2fe..1d55b247bbeef7e7b6b6c2ed4fb43353bbd73459 100644
--- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp
+++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp
@@ -85,7 +85,7 @@ std::unique_ptr<asapo::Producer> CreateProducer(const Args& args) {
     }
 
     producer->EnableLocalLog(true);
-    producer->SetLogLevel(asapo::LogLevel::Debug);
+    producer->SetLogLevel(asapo::LogLevel::Info);
 
     return producer;
 }
diff --git a/examples/worker/getnext_broker/getnext_broker.cpp b/examples/worker/getnext_broker/getnext_broker.cpp
index b8bcd17ed7b9f77704aab9d2fc61526d751df011..8655fdbb454a2f3ed04dbd871bbe5e3baffd5f94 100644
--- a/examples/worker/getnext_broker/getnext_broker.cpp
+++ b/examples/worker/getnext_broker/getnext_broker.cpp
@@ -85,6 +85,6 @@ int main(int argc, char* argv[]) {
     std::cout << "Processed " << nfiles << " file(s)" << std::endl;
     std::cout << "Elapsed : " << duration_ms << "ms" << std::endl;
-    std::cout << "Rate : " << 1000.0f * nfiles / duration_ms << std::endl;
+    std::cout << "Rate : " << 1000.0f * nfiles / (duration_ms - 10000) << std::endl;
 
     return 0;
 }
diff --git a/receiver/src/statistics.cpp b/receiver/src/statistics.cpp
index c303c1bff3f54e3180265e5d1640ef57273aae01..fa628baa59bba7fa7284ac7a96ff35ea5d1f4916 100644
--- a/receiver/src/statistics.cpp
+++ b/receiver/src/statistics.cpp
@@ -62,8 +62,8 @@ void Statistics::IncreaseRequestCounter() noexcept {
 
 Statistics::Statistics(unsigned int write_frequency) :
     write_interval_{write_frequency} {
-//    statistics_sender_list__.emplace_back(new StatisticsSenderInfluxDb);
-    statistics_sender_list__.emplace_back(new StatisticsSenderFluentd);
+    statistics_sender_list__.emplace_back(new StatisticsSenderInfluxDb);
+//    statistics_sender_list__.emplace_back(new StatisticsSenderFluentd);
     ResetStatistics();
 }
 
diff --git a/receiver/src/statistics_sender_fluentd.cpp b/receiver/src/statistics_sender_fluentd.cpp
index 2b90cd37c67c763870b9509bc2e6f84434185afe..4a8ed67838ac31541739630a2f52ef0776b00d81 100644
--- a/receiver/src/statistics_sender_fluentd.cpp
+++ b/receiver/src/statistics_sender_fluentd.cpp
@@ -5,8 +5,7 @@ namespace asapo {
 
 StatisticsSenderFluentd::StatisticsSenderFluentd() : statistics_log__{asapo::CreateDefaultLoggerApi("receiver_stat", "localhost:8400/logs/")} {
     statistics_log__->SetLogLevel(LogLevel::Info);
-    statistics_log__->EnableLocalLog(true);
-
+//    statistics_log__->EnableLocalLog(true);
 }
 
 void StatisticsSenderFluentd::SendStatistics(const asapo::StatisticsToSend& statistic) const noexcept {
diff --git a/tests/manual/performance_broker/settings.json b/tests/manual/performance_broker/settings.json
index c45d16f2f7b59b7966ad9d2d406ef530da720a2b..a2c1a4a5ab7238e14c26667e5bfc7335e935d96d 100644
--- a/tests/manual/performance_broker/settings.json
+++ b/tests/manual/performance_broker/settings.json
@@ -2,5 +2,6 @@
   "BrokerDbAddress":"localhost:27017",
   "MonitorDbAddress": "localhost:8086",
   "MonitorDbName": "db_test",
-  "port":5005
+  "port":5005,
+  "LogLevel":"info"
 }
\ No newline at end of file
diff --git a/tests/manual/performance_broker/test.sh b/tests/manual/performance_broker/test.sh
index e0b083c3d7e973cd6b1782ecd107b20eea0abb4e..dbb1d4d200b6d03a9c057b1c66cc2a28eb816bd0 100755
--- a/tests/manual/performance_broker/test.sh
+++ b/tests/manual/performance_broker/test.sh
@@ -4,7 +4,7 @@
 # reads fileset into database
 # calls getnext_broker example from $worker_node
 
-nthreads=16
+nthreads=1
 # a directory with many files in it
 dir=/gpfs/petra3/scratch/yakubov/test
 run_name=test
@@ -34,8 +34,8 @@ ssh ${monitor_node} influx -execute \"create database db_test\"
 ssh ${service_node} docker run -d -p 27017:27017 --name mongo mongo
 #ssh ${service_node} docker run -d -p 8086 -p 8086 --name influxdb influxdb
 
-ssh ${service_node} mkdir ${service_dir}
-ssh ${worker_node} mkdir ${worker_dir}
+ssh ${service_node} mkdir -p ${service_dir}
+ssh ${worker_node} mkdir -p ${worker_dir}
 
 scp settings_tmp.json ${service_node}:${service_dir}/settings.json
 
diff --git a/tests/manual/performance_full_chain_simple/test.sh b/tests/manual/performance_full_chain_simple/test.sh
index e4f8fed91d354606c8c58066bb515b1defb561d0..424451e26fa35147f9c7eb30abc98fcf6f55afa7 100755
--- a/tests/manual/performance_full_chain_simple/test.sh
+++ b/tests/manual/performance_full_chain_simple/test.sh
@@ -25,7 +25,7 @@ log_dir=~/fullchain_tests/logs
 # starts receiver on $receiver_node
 # runs producer with various file sizes from $producer_node and measures performance
 
-file_size=100
+file_size=1000
 file_num=$((100000000 / $file_size))
 echo filesize: ${file_size}K, filenum: $file_num
 
diff --git a/tests/manual/performance_producer_receiver/receiver.json b/tests/manual/performance_producer_receiver/receiver.json
index 7cf0d85c122d91a081050a4c7fe648618e84f841..adf89f8ee2e32fef2d2ea21ca5e3d271a6071d65 100644
--- a/tests/manual/performance_producer_receiver/receiver.json
+++ b/tests/manual/performance_producer_receiver/receiver.json
@@ -7,5 +7,6 @@
   "WriteToDisk":true,
   "WriteToDb":true,
   "LogLevel":"info",
-  "Tag": "test_receiver"
+  "Tag": "test_receiver",
+  "RootFolder" : "/gpfs/petra3/scratch/yakubov/receiver_tests/files"
 }
diff --git a/tests/manual/performance_producer_receiver/test.sh b/tests/manual/performance_producer_receiver/test.sh
index 83a52730d310e50cd533cb3fe85475cef4a8a391..4498cee1cc2c8c4843807fb3e746cbac2dff6cce 100755
--- a/tests/manual/performance_producer_receiver/test.sh
+++ b/tests/manual/performance_producer_receiver/test.sh
@@ -4,6 +4,14 @@
 set -e
 
 trap Cleanup EXIT
 
+Cleanup() {
+set +e
+ssh ${service_node} rm -f ${service_dir}/files/*
+ssh ${service_node} killall receiver
+ssh ${service_node} killall asapo-discovery
+ssh ${service_node} docker rm -f -v mongo
+}
+
 # starts receiver on $service_node
 # runs producer with various file sizes from $worker_node and measures performance
@@ -71,7 +79,10 @@ do
 ssh ${service_node} docker run -d -p 27017:27017 --name mongo mongo
 echo ===================================================================
 ssh ${worker_node} ${worker_dir}/dummy-data-producer ${service_ip}:${discovery_port} ${size} 1000 8 0
-ssh ${service_node} rm -f ${service_dir}/files/*
+if [ "$1" == "true" ]
+then
+  ssh ${service_node} rm -f ${service_dir}/files/*
+fi
 ssh ${service_node} docker rm -f -v mongo
 done
 ssh ${service_node} killall receiver
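
A note on the Cleanup handler added in tests/manual/performance_producer_receiver/test.sh: the script registers "trap Cleanup EXIT" before the Cleanup function is defined. This works because bash evaluates the trap argument only when the EXIT trap fires, by which point the function exists; together with "set -e", the cleanup therefore runs both on normal termination and after any failed command. The "set +e" inside the function keeps one failing cleanup step (for example, removing a container that was never started) from aborting the remaining ones. A minimal standalone sketch of the same idiom, with a placeholder command standing in for the test's ssh/killall/docker calls:

#!/usr/bin/env bash
set -e                # abort the script on the first failing command
trap Cleanup EXIT     # run Cleanup on any exit, normal or error

Cleanup() {
    set +e            # a failing cleanup step must not abort the remaining ones
    echo "removing test artifacts"   # placeholder for the ssh/killall/docker calls
}

echo "doing work"     # even if a command here fails, Cleanup still runs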