diff --git a/CMakeLists.txt b/CMakeLists.txt index c73fe127acce07665d480f089fedbe08b9d3833a..9890271b149df0c35d908aef5305a7b4fedf8a21 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,7 +146,6 @@ ELSE(DEFINED PackageOnly) add_subdirectory(rdbms) add_subdirectory(scheduler) add_subdirectory(tapeserver) - add_subdirectory(tests) #Generate version information configure_file(${PROJECT_SOURCE_DIR}/version.hpp.in @@ -177,28 +176,39 @@ endif (${COMPILE_PACKAGING} STREQUAL "1") configure_file(tests/valgrind.suppr tests/valgrind.suppr COPYONLY) configure_file(tests/helgrind.suppr tests/helgrind.suppr COPYONLY) +set(VALGRIND valgrind) +set(VALGRIND_OPTS "--track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1") +set(VALGRIND_OPTS_W_SUPPR ${VALGRIND_OPTS} --suppressions=tests/valgrind.suppr) + +set(HELGRIND_OPTS "-v --demangle=yes --gen-suppressions=all --num-callers=25 --conflict-cache-size=30000000 --error-exitcode=1 --sim-hints=no-nptl-pthread-stackcache") +set(HELGRIND_OPTS_W_SUPPR ${HELGRIND_OPTS} --suppressions=tests/helgrind.suppr) + +IF(NOT DEFINED PackageOnly) + add_subdirectory(tests) +ENDIF(NOT DEFINED PackageOnly) + add_custom_target(fullunittests tests/cta-unitTests COMMAND tests/cta-unitTests-multiProcess - COMMAND valgrind --track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --suppressions=tests/valgrind.suppr tests/cta-unitTests - COMMAND valgrind --tool=helgrind -v --demangle=yes --gen-suppressions=all --num-callers=25 --conflict-cache-size=30000000 --error-exitcode=1 --suppressions=tests/helgrind.suppr tests/cta-unitTests + COMMAND ${VALGRIND} ${VALGRIND_OPTS} tests/cta-unitTests + COMMAND ${VALGRIND} --tool=helgrind ${HELGRIND_OPTS} tests/cta-unitTests COMMAND tests/cta-unitTests-multiProcess - COMMAND valgrind --track-fds=yes --child-silent-after-fork=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --suppressions=tests/valgrind.suppr tests/cta-unitTests-multiProcess - COMMAND valgrind --tool=helgrind -v --demangle=yes --gen-suppressions=all --num-callers=25 --conflict-cache-size=30000000 --error-exitcode=1 --suppressions=tests/helgrind.suppr tests/cta-unitTests-multiProcess + COMMAND ${VALGRIND} ${VALGRIND_OPTS} --child-silent-after-fork=yes tests/cta-unitTests-multiProcess + COMMAND ${VALGRIND} --tool=helgrind ${HELGRIND_OPTS} tests/cta-unitTests-multiProcess DEPENDS tests/cta-unitTests tests/cta-unitTests-multiProcess tests/valgrind.suppr tests/helgrind.suppr COMMENT "Running unit tests with memory leak and race conditions detection" VERBATIM) add_custom_target(valgrind - valgrind --track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --suppressions=tests/valgrind.suppr tests/cta-unitTests - COMMAND valgrind --track-fds=yes --child-silent-after-fork=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --suppressions=tests/valgrind.suppr tests/cta-unitTests-multiProcess + ${VALGRIND} ${VALGRIND_OPTS} tests/cta-unitTests + COMMAND ${VALGRIND} ${VALGRIND_OPTS} --child-silent-after-fork=yes tests/cta-unitTests-multiProcess DEPENDS tests/cta-unitTests tests/cta-unitTests-multiProcess tests/valgrind.suppr COMMENT "Running unit tests with memory leak detection" VERBATIM) add_custom_target(helgrind - valgrind --tool=helgrind -v --demangle=yes --gen-suppressions=all --num-callers=25 --conflict-cache-size=30000000 
--error-exitcode=1 --suppressions=tests/helgrind.suppr tests/cta-unitTests - COMMAND valgrind --tool=helgrind -v --child-silent-after-fork=yes --demangle=yes --gen-suppressions=all --num-callers=25 --conflict-cache-size=30000000 --error-exitcode=1 --suppressions=tests/helgrind.suppr tests/cta-unitTests-multiProcess + ${VALGRIND} --tool=helgrind ${HELGRIND_OPTS} tests/cta-unitTests + COMMAND ${VALGRIND} --tool=helgrind ${HELGRIND_OPTS} tests/cta-unitTests-multiProcess DEPENDS tests/cta-unitTests tests/cta-unitTests-multiProcess tests/helgrind.suppr COMMENT "Running unit tests with race conditions detection" VERBATIM) diff --git a/catalogue/CMakeLists.txt b/catalogue/CMakeLists.txt index 3cde8545579f9834b2d4527943609d1e5b760664..4b077a5654d72b159dc0ec08c21b55be5a1480d0 100644 --- a/catalogue/CMakeLists.txt +++ b/catalogue/CMakeLists.txt @@ -15,9 +15,7 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. cmake_minimum_required (VERSION 2.6) -if (OCCI_SUPPORT) - include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS}) -endif (OCCI_SUPPORT) +include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS}) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow") @@ -27,8 +25,10 @@ set (CATALOGUE_LIB_SRC_FILES ArchiveFileItor.cpp ArchiveFileItorImpl.cpp Catalogue.cpp + CatalogueFactory.cpp CmdLineTool.cpp InMemoryCatalogue.cpp + OracleCatalogue.cpp SqliteCatalogueSchema.cpp TapeFileWritten.cpp RdbmsArchiveFileItorImpl.cpp @@ -37,25 +37,12 @@ set (CATALOGUE_LIB_SRC_FILES SqliteCatalogue.cpp TapeForWriting.cpp) -if (OCCI_SUPPORT) - set (CATALOGUE_LIB_SRC_FILES - ${CATALOGUE_LIB_SRC_FILES} - CatalogueFactory.cpp - OracleCatalogue.cpp) -else (OCCI_SUPPORT) - set (CATALOGUE_LIB_SRC_FILES - ${CATALOGUE_LIB_SRC_FILES} - CatalogueFactory_OCCI_SUPPORT_OFF.cpp) -endif (OCCI_SUPPORT) - add_library (ctacatalogue SHARED ${CATALOGUE_LIB_SRC_FILES}) set_property(TARGET ctacatalogue PROPERTY SOVERSION "${CTA_SOVERSION}") set_property(TARGET ctacatalogue PROPERTY VERSION "${CTA_LIBVERSION}") -if (OCCI_SUPPORT) - set_property (TARGET ctacatalogue APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET ctacatalogue APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS ctacatalogue DESTINATION usr/${CMAKE_INSTALL_LIBDIR}) @@ -133,9 +120,7 @@ add_executable(cta-catalogue-schema-create target_link_libraries (cta-catalogue-schema-create ctacatalogue) -if (OCCI_SUPPORT) - set_property (TARGET cta-catalogue-schema-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET cta-catalogue-schema-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS cta-catalogue-schema-create DESTINATION /usr/bin) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-schema-create.1cta DESTINATION /usr/share/man/man1) @@ -148,9 +133,7 @@ add_executable(cta-catalogue-schema-drop target_link_libraries (cta-catalogue-schema-drop ctacatalogue) -if (OCCI_SUPPORT) - set_property (TARGET cta-catalogue-schema-drop APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET cta-catalogue-schema-drop APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS cta-catalogue-schema-drop DESTINATION /usr/bin) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-schema-drop.1cta DESTINATION /usr/share/man/man1) @@ -163,9 +146,7 @@ add_executable(cta-database-poll target_link_libraries (cta-database-poll ctacatalogue) 
-if (OCCI_SUPPORT) - set_property (TARGET cta-database-poll APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET cta-database-poll APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS cta-database-poll DESTINATION /usr/bin) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-database-poll.1cta DESTINATION /usr/share/man/man1) @@ -178,9 +159,7 @@ add_executable(cta-catalogue-admin-user-create target_link_libraries (cta-catalogue-admin-user-create ctacatalogue) -if (OCCI_SUPPORT) - set_property (TARGET cta-catalogue-admin-user-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET cta-catalogue-admin-user-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS cta-catalogue-admin-user-create DESTINATION /usr/bin) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-admin-user-create.1cta DESTINATION /usr/share/man/man1) @@ -193,9 +172,7 @@ add_executable(cta-catalogue-admin-host-create target_link_libraries (cta-catalogue-admin-host-create ctacatalogue) -if (OCCI_SUPPORT) - set_property (TARGET cta-catalogue-admin-host-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) -endif (OCCI_SUPPORT) +set_property (TARGET cta-catalogue-admin-host-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH}) install (TARGETS cta-catalogue-admin-host-create DESTINATION /usr/bin) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-admin-host-create.1cta DESTINATION /usr/share/man/man1) diff --git a/catalogue/Catalogue.hpp b/catalogue/Catalogue.hpp index bdddd9746c18c8be659fda82ebe079d2f6f1cc5a..790551652df0e6d616e1265162a1318643316989 100644 --- a/catalogue/Catalogue.hpp +++ b/catalogue/Catalogue.hpp @@ -128,6 +128,7 @@ public: * disabled, not full and are in the specified logical library. * * @param logicalLibraryName The name of the logical library. + * @return The list of tapes for writing. 
*/ virtual std::list<TapeForWriting> getTapesForWriting(const std::string &logicalLibraryName) const = 0; diff --git a/catalogue/CatalogueFactory.cpp b/catalogue/CatalogueFactory.cpp index 7f16fb1228a589c0d126a1d51bb1cc190eb77fbd..5cceb8b39eb412ede3b2429784ecac86303b03f2 100644 --- a/catalogue/CatalogueFactory.cpp +++ b/catalogue/CatalogueFactory.cpp @@ -33,16 +33,18 @@ std::unique_ptr<Catalogue> CatalogueFactory::create( log::Logger &log, const rdbms::Login &login, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns) { + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect) { try { switch(login.dbType) { case rdbms::Login::DBTYPE_IN_MEMORY: - return cta::make_unique<InMemoryCatalogue>(log, nbConns, nbArchiveFileListingConns); + return cta::make_unique<InMemoryCatalogue>(log, nbConns, nbArchiveFileListingConns, maxTriesToConnect); case rdbms::Login::DBTYPE_ORACLE: return cta::make_unique<OracleCatalogue>(log, login.username, login.password, login.database, nbConns, - nbArchiveFileListingConns); + nbArchiveFileListingConns, maxTriesToConnect); case rdbms::Login::DBTYPE_SQLITE: - return cta::make_unique<SqliteCatalogue>(log, login.database, nbConns, nbArchiveFileListingConns); + return cta::make_unique<SqliteCatalogue>(log, login.database, nbConns, nbArchiveFileListingConns, + maxTriesToConnect); case rdbms::Login::DBTYPE_NONE: throw exception::Exception("Cannot create a catalogue without a database type"); default: diff --git a/catalogue/CatalogueFactory.hpp b/catalogue/CatalogueFactory.hpp index d76d19461b68cc181f8ba81d70c9590cf9aa47e7..7c0cae56b3d15e3852be295900318deb5f8dc7de 100644 --- a/catalogue/CatalogueFactory.hpp +++ b/catalogue/CatalogueFactory.hpp @@ -51,12 +51,16 @@ public: * listing archive files. * @return The newly created CTA catalogue object. Please note that it is the * responsibility of the caller to delete the returned CTA catalogue object. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ static std::unique_ptr<Catalogue> create( log::Logger &log, const rdbms::Login &login, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect = 3); }; // class CatalogueFactory diff --git a/catalogue/CatalogueFactory_OCCI_SUPPORT_OFF.cpp b/catalogue/CatalogueFactory_OCCI_SUPPORT_OFF.cpp deleted file mode 100644 index 6c0c2895752343e9898f4b960da24ea44730930e..0000000000000000000000000000000000000000 --- a/catalogue/CatalogueFactory_OCCI_SUPPORT_OFF.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * The CERN Tape Archive (CTA) project - * Copyright (C) 2015 CERN - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */ - -#include "catalogue/CatalogueFactory.hpp" -#include "catalogue/InMemoryCatalogue.hpp" -#include "catalogue/SqliteCatalogue.hpp" -#include "common/exception/Exception.hpp" -#include "common/make_unique.hpp" - -namespace cta { -namespace catalogue { - -//------------------------------------------------------------------------------ -// create -//------------------------------------------------------------------------------ -std::unique_ptr<Catalogue> CatalogueFactory::create(const rdbms::Login &login, const uint64_t nbConns) { - try { - switch(login.dbType) { - case rdbms::Login::DBTYPE_IN_MEMORY: - return cta::make_unique<InMemoryCatalogue>(nbConns); - case rdbms::Login::DBTYPE_ORACLE: - throw exception::Exception("OCCI support disabled at compile time"); - case rdbms::Login::DBTYPE_SQLITE: - return cta::make_unique<SqliteCatalogue>(login.database, nbConns); - case rdbms::Login::DBTYPE_NONE: - throw exception::Exception("Cannot create a catalogue without a database type"); - default: - { - exception::Exception ex; - ex.getMessage() << "Unknown database type: value=" << login.dbType; - throw ex; - } - } - } catch(exception::Exception &ex) { - throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); - } -} - -} // namespace catalogue -} // namespace cta diff --git a/catalogue/CatalogueTest.cpp b/catalogue/CatalogueTest.cpp index e49dc259aad161cd34ad9d2d079bfff8dca22b59..3a01724ce89f560b98d3d9c35e4de1e5d924717f 100644 --- a/catalogue/CatalogueTest.cpp +++ b/catalogue/CatalogueTest.cpp @@ -21,7 +21,7 @@ #include "catalogue/CatalogueTest.hpp" #include "common/exception/Exception.hpp" #include "common/exception/UserError.hpp" -#include "rdbms/ConnFactoryFactory.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" #include <algorithm> #include <gtest/gtest.h> @@ -53,7 +53,7 @@ void cta_catalogue_CatalogueTest::SetUp() { try { const rdbms::Login &login = GetParam()->create(); - auto connFactory = rdbms::ConnFactoryFactory::create(login); + auto connFactory = rdbms::wrapper::ConnFactoryFactory::create(login); const uint64_t nbConns = 2; const uint64_t nbArchiveFileListingConns = 2; @@ -237,6 +237,31 @@ std::map<std::string, cta::common::dataStructures::AdminHost> cta_catalogue_Cata } } +//------------------------------------------------------------------------------ +// tapePoolListToMap +//------------------------------------------------------------------------------ +std::map<std::string, cta::catalogue::TapePool> cta_catalogue_CatalogueTest::tapePoolListToMap( + const std::list<cta::catalogue::TapePool> &listOfTapePools) { + using namespace cta; + + try { + std::map<std::string, cta::catalogue::TapePool> m; + + for(auto &tapePool: listOfTapePools) { + if(m.end() != m.find(tapePool.name)) { + exception::Exception ex; + ex.getMessage() << "Tape pool " << tapePool.name << " is a duplicate"; + throw ex; + } + m[tapePool.name] = tapePool; + } + + return m; + } catch(exception::Exception &ex) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); + } +} + TEST_P(cta_catalogue_CatalogueTest, createAdminUser) { using namespace cta; @@ -1789,7 +1814,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_9_exabytes_capacity) { const std::string logicalLibraryName = "logical_library_name"; const std::string tapePoolName = "tape_pool_name"; // The maximum size of an SQLite integer is a signed 64-bit integer - const uint64_t capacityInBytes = std::numeric_limits<int64_t>::max(); + const uint64_t capacityInBytes = 9L * 1000 * 
1000 * 1000 * 1000 * 1000 * 1000; const bool disabledValue = true; const bool fullValue = false; const std::string comment = "Create tape"; @@ -4503,6 +4528,64 @@ TEST_P(cta_catalogue_CatalogueTest, deleteRequesterGroupMountRule_non_existant) exception::UserError); } +TEST_P(cta_catalogue_CatalogueTest, prepareForNewFile_no_archive_routes) { + using namespace cta; + + ASSERT_TRUE(m_catalogue->getRequesterMountRules().empty()); + + const std::string mountPolicyName = "mount_policy"; + const uint64_t archivePriority = 1; + const uint64_t minArchiveRequestAge = 2; + const uint64_t retrievePriority = 3; + const uint64_t minRetrieveRequestAge = 4; + const uint64_t maxDrivesAllowed = 5; + + m_catalogue->createMountPolicy( + m_admin, + mountPolicyName, + archivePriority, + minArchiveRequestAge, + retrievePriority, + minRetrieveRequestAge, + maxDrivesAllowed, + "Create mount policy"); + + const std::string comment = "Create mount rule for requester"; + const std::string diskInstanceName = "disk_instance_name"; + const std::string requesterName = "requester_name"; + m_catalogue->createRequesterMountRule(m_admin, mountPolicyName, diskInstanceName, requesterName, comment); + + const std::list<common::dataStructures::RequesterMountRule> rules = m_catalogue->getRequesterMountRules(); + ASSERT_EQ(1, rules.size()); + + const common::dataStructures::RequesterMountRule rule = rules.front(); + + ASSERT_EQ(diskInstanceName, rule.diskInstance); + ASSERT_EQ(requesterName, rule.name); + ASSERT_EQ(mountPolicyName, rule.mountPolicy); + ASSERT_EQ(comment, rule.comment); + ASSERT_EQ(m_admin.username, rule.creationLog.username); + ASSERT_EQ(m_admin.host, rule.creationLog.host); + ASSERT_EQ(rule.creationLog, rule.lastModificationLog); + + // Do not create any archive routes + ASSERT_TRUE(m_catalogue->getArchiveRoutes().empty()); + + common::dataStructures::StorageClass storageClass; + storageClass.diskInstance = diskInstanceName; + storageClass.name = "storage_class"; + storageClass.nbCopies = 2; + storageClass.comment = "Create storage class"; + m_catalogue->createStorageClass(m_admin, storageClass); + + common::dataStructures::UserIdentity userIdentity; + userIdentity.name = requesterName; + userIdentity.group = "group"; + + ASSERT_THROW(m_catalogue->prepareForNewFile(storageClass.diskInstance, storageClass.name, userIdentity), + exception::UserError); +} + TEST_P(cta_catalogue_CatalogueTest, prepareForNewFile_requester_mount_rule) { using namespace cta; @@ -5497,7 +5580,8 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { const std::string vid1 = "VID123"; const std::string vid2 = "VID456"; const std::string logicalLibraryName = "logical_library_name"; - const std::string tapePoolName = "tape_pool_name"; + const std::string tapePoolName1 = "tape_pool_name_1"; + const std::string tapePoolName2 = "tape_pool_name_2"; const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000; const bool disabledValue = true; const bool fullValue = false; @@ -5505,44 +5589,69 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, "Create logical library"); - m_catalogue->createTapePool(m_admin, tapePoolName, 2, true, "Create tape pool"); + m_catalogue->createTapePool(m_admin, tapePoolName1, 1, true, "Create tape pool"); { const auto pools = m_catalogue->getTapePools(); ASSERT_EQ(1, pools.size()); - const auto &pool = pools.front(); - ASSERT_EQ(tapePoolName, pool.name); + auto tapePoolMap = 
tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName1); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName1, pool.name); ASSERT_EQ(0, pool.nbTapes); ASSERT_EQ(0, pool.capacityGigabytes); ASSERT_EQ(0, pool.dataGigabytes); } - uint64_t totalCapacityInBytes = 0; - m_catalogue->createTape(m_admin, vid1, logicalLibraryName, tapePoolName, capacityInBytes, + m_catalogue->createTapePool(m_admin, tapePoolName2, 1, true, "Create tape pool"); + { + const auto pools = m_catalogue->getTapePools(); + ASSERT_EQ(2, pools.size()); + + auto tapePoolMap = tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName2); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName2, pool.name); + ASSERT_EQ(0, pool.nbTapes); + ASSERT_EQ(0, pool.capacityGigabytes); + ASSERT_EQ(0, pool.dataGigabytes); + } + + m_catalogue->createTape(m_admin, vid1, logicalLibraryName, tapePoolName1, capacityInBytes, disabledValue, fullValue, comment); - totalCapacityInBytes += capacityInBytes; { const auto pools = m_catalogue->getTapePools(); - ASSERT_EQ(1, pools.size()); + ASSERT_EQ(2, pools.size()); - const auto &pool = pools.front(); - ASSERT_EQ(tapePoolName, pool.name); + auto tapePoolMap = tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName1); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName1, pool.name); ASSERT_EQ(1, pool.nbTapes); - ASSERT_EQ(totalCapacityInBytes/1000000000, pool.capacityGigabytes); + ASSERT_EQ(capacityInBytes/1000000000, pool.capacityGigabytes); ASSERT_EQ(0, pool.dataGigabytes); } - m_catalogue->createTape(m_admin, vid2, logicalLibraryName, tapePoolName, capacityInBytes, + m_catalogue->createTape(m_admin, vid2, logicalLibraryName, tapePoolName2, capacityInBytes, disabledValue, fullValue, comment); - totalCapacityInBytes += capacityInBytes; { const auto pools = m_catalogue->getTapePools(); - ASSERT_EQ(1, pools.size()); + ASSERT_EQ(2, pools.size()); - const auto &pool = pools.front(); - ASSERT_EQ(tapePoolName, pool.name); - ASSERT_EQ(2, pool.nbTapes); - ASSERT_EQ(totalCapacityInBytes/1000000000, pool.capacityGigabytes); + auto tapePoolMap = tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName2); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName2, pool.name); + ASSERT_EQ(1, pool.nbTapes); + ASSERT_EQ(capacityInBytes/1000000000, pool.capacityGigabytes); ASSERT_EQ(0, pool.dataGigabytes); } @@ -5558,7 +5667,7 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { const common::dataStructures::Tape &tape = it->second; ASSERT_EQ(vid1, tape.vid); ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName); - ASSERT_EQ(tapePoolName, tape.tapePoolName); + ASSERT_EQ(tapePoolName1, tape.tapePoolName); ASSERT_EQ(capacityInBytes, tape.capacityInBytes); ASSERT_TRUE(disabledValue == tape.disabled); ASSERT_TRUE(fullValue == tape.full); @@ -5582,7 +5691,7 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { const auto &tape = it->second; ASSERT_EQ(vid2, tape.vid); ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName); - ASSERT_EQ(tapePoolName, tape.tapePoolName); + ASSERT_EQ(tapePoolName2, tape.tapePoolName); ASSERT_EQ(capacityInBytes, tape.capacityInBytes); ASSERT_TRUE(disabledValue == tape.disabled); 
ASSERT_TRUE(fullValue == tape.full); @@ -5617,7 +5726,6 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000; const uint64_t compressedFileSize = archiveFileSize; - uint64_t totalCompressedFileSize = 0; std::set<catalogue::TapeFileWritten> tapeFilesWrittenCopy1; for(uint64_t i = 1; i <= nbArchiveFiles; i++) { std::ostringstream diskFileId; @@ -5645,19 +5753,21 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { fileWritten.copyNb = 1; fileWritten.tapeDrive = tapeDrive; tapeFilesWrittenCopy1.emplace(fileWritten); - - totalCompressedFileSize += compressedFileSize; } m_catalogue->filesWrittenToTape(tapeFilesWrittenCopy1); { const auto pools = m_catalogue->getTapePools(); - ASSERT_EQ(1, pools.size()); + ASSERT_EQ(2, pools.size()); - const auto &pool = pools.front(); - ASSERT_EQ(tapePoolName, pool.name); - ASSERT_EQ(2, pool.nbTapes); - ASSERT_EQ(totalCapacityInBytes/1000000000, pool.capacityGigabytes); - ASSERT_EQ(totalCompressedFileSize/1000000000, pool.dataGigabytes); + const auto tapePoolMap = tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName1); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName1, pool.name); + ASSERT_EQ(1, pool.nbTapes); + ASSERT_EQ(capacityInBytes/1000000000, pool.capacityGigabytes); + ASSERT_EQ(nbArchiveFiles * compressedFileSize/1000000000, pool.dataGigabytes); } { @@ -5705,19 +5815,21 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { fileWritten.copyNb = 2; fileWritten.tapeDrive = tapeDrive; tapeFilesWrittenCopy2.emplace(fileWritten); - - totalCompressedFileSize += compressedFileSize; } m_catalogue->filesWrittenToTape(tapeFilesWrittenCopy2); { const auto pools = m_catalogue->getTapePools(); - ASSERT_EQ(1, pools.size()); + ASSERT_EQ(2, pools.size()); - const auto &pool = pools.front(); - ASSERT_EQ(tapePoolName, pool.name); - ASSERT_EQ(2, pool.nbTapes); - ASSERT_EQ(totalCapacityInBytes/1000000000, pool.capacityGigabytes); - ASSERT_EQ(totalCompressedFileSize/1000000000, pool.dataGigabytes); + const auto tapePoolMap = tapePoolListToMap(pools); + auto tapePoolMapItor = tapePoolMap.find(tapePoolName2); + ASSERT_NE(tapePoolMapItor, tapePoolMap.end()); + const auto &pool = tapePoolMapItor->second; + + ASSERT_EQ(tapePoolName2, pool.name); + ASSERT_EQ(1, pool.nbTapes); + ASSERT_EQ(capacityInBytes/1000000000, pool.capacityGigabytes); + ASSERT_EQ(nbArchiveFiles * compressedFileSize/1000000000, pool.dataGigabytes); } { @@ -5749,7 +5861,7 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { searchCriteria.storageClass = storageClass.name; searchCriteria.vid = vid1; searchCriteria.tapeFileCopyNb = 1; - searchCriteria.tapePool = tapePoolName; + searchCriteria.tapePool = tapePoolName1; auto archiveFileItor = m_catalogue->getArchiveFiles(searchCriteria); std::map<uint64_t, common::dataStructures::ArchiveFile> m = archiveFileItorToMap(archiveFileItor); @@ -5988,15 +6100,28 @@ TEST_P(cta_catalogue_CatalogueTest, fileWrittenToTape_many_archive_files) { { catalogue::TapeFileSearchCriteria searchCriteria; - searchCriteria.tapePool = "tape_pool_name"; + searchCriteria.tapePool = tapePoolName1; auto archiveFileItor = m_catalogue->getArchiveFiles(searchCriteria); const auto m = archiveFileItorToMap(archiveFileItor); ASSERT_EQ(nbArchiveFiles, m.size()); const common::dataStructures::ArchiveFileSummary summary = 
m_catalogue->getTapeFileSummary(searchCriteria); - ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes); - ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes); - ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles); + ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes); + ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes); + ASSERT_EQ(nbArchiveFiles, summary.totalFiles); + } + + { + catalogue::TapeFileSearchCriteria searchCriteria; + searchCriteria.tapePool = tapePoolName2; + auto archiveFileItor = m_catalogue->getArchiveFiles(searchCriteria); + const auto m = archiveFileItorToMap(archiveFileItor); + ASSERT_EQ(nbArchiveFiles, m.size()); + + const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria); + ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes); + ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes); + ASSERT_EQ(nbArchiveFiles, summary.totalFiles); } { diff --git a/catalogue/CatalogueTest.hpp b/catalogue/CatalogueTest.hpp index 2b9fcf6e2f2abbcf67e028d694ac41be617b3ba5..b190b8712082431a119f3938a0947dcc48ca6506 100644 --- a/catalogue/CatalogueTest.hpp +++ b/catalogue/CatalogueTest.hpp @@ -22,7 +22,7 @@ #include "catalogue/CatalogueFactory.hpp" #include "common/exception/Exception.hpp" #include "common/log/DummyLogger.hpp" -#include "rdbms/Conn.hpp" +#include "rdbms/wrapper/Conn.hpp" #include "rdbms/LoginFactory.hpp" #include <gtest/gtest.h> @@ -48,7 +48,7 @@ protected: * A general purpose database connection outside of the m_catalogue object to * be used to run tests directly on the underlying "raw" catalogue database. */ - std::unique_ptr<cta::rdbms::Conn> m_conn; + std::unique_ptr<cta::rdbms::wrapper::Conn> m_conn; virtual void SetUp(); @@ -92,6 +92,16 @@ protected: */ std::map<std::string, cta::common::dataStructures::AdminHost> adminHostListToMap( const std::list<cta::common::dataStructures::AdminHost> &listOfAdminHosts); + + /** + * Creates a map from tape pool name to tape pool from the specified list of + * tape pools. + * + * @param listOfTapePools The list of tape pools. + * @return Map from tape pool name to tape pool. 
+ */ + std::map<std::string, cta::catalogue::TapePool> tapePoolListToMap( + const std::list<cta::catalogue::TapePool> &listOfTapePools); }; // cta_catalogue_CatalogueTest } // namespace unitTests diff --git a/catalogue/CreateAdminHostCmd.cpp b/catalogue/CreateAdminHostCmd.cpp index aad0551c81ce48b221577d4598ddebd673bfe19d..e7540c591ded5adba15c028d155953e15d81acb0 100644 --- a/catalogue/CreateAdminHostCmd.cpp +++ b/catalogue/CreateAdminHostCmd.cpp @@ -21,7 +21,7 @@ #include "catalogue/CreateAdminHostCmdLineArgs.hpp" #include "common/exception/Exception.hpp" #include "common/log/DummyLogger.hpp" -#include "rdbms/ConnFactoryFactory.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" namespace cta { namespace catalogue { diff --git a/catalogue/CreateAdminUserCmd.cpp b/catalogue/CreateAdminUserCmd.cpp index 8bc5d1c98920520207b21956a48d9644b5baceb1..a0af942d14f2f0a0d2005f758c7e3f900dcaa505 100644 --- a/catalogue/CreateAdminUserCmd.cpp +++ b/catalogue/CreateAdminUserCmd.cpp @@ -21,7 +21,7 @@ #include "catalogue/CreateAdminUserCmdLineArgs.hpp" #include "common/exception/Exception.hpp" #include "common/log/DummyLogger.hpp" -#include "rdbms/ConnFactoryFactory.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" namespace cta { namespace catalogue { diff --git a/catalogue/CreateSchemaCmd.cpp b/catalogue/CreateSchemaCmd.cpp index 2e03dab2afdf4a38094eec3d569303c9a7b6adc5..cdd9e046160ba71f79b6b72524568f27e9899361 100644 --- a/catalogue/CreateSchemaCmd.cpp +++ b/catalogue/CreateSchemaCmd.cpp @@ -16,14 +16,13 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "rdbms/ConnFactoryFactory.hpp" #include "catalogue/CreateSchemaCmd.hpp" #include "catalogue/CreateSchemaCmdLineArgs.hpp" #include "catalogue/OracleCatalogueSchema.hpp" #include "catalogue/SqliteCatalogueSchema.hpp" #include "common/exception/Exception.hpp" - -#include <iostream> +#include "rdbms/ConnPool.hpp" +#include "rdbms/Login.hpp" namespace cta { namespace catalogue { @@ -53,10 +52,11 @@ int CreateSchemaCmd::exceptionThrowingMain(const int argc, char *const *const ar } const auto login = rdbms::Login::parseFile(cmdLineArgs.dbConfigPath); - auto factory = rdbms::ConnFactoryFactory::create(login); - auto conn = factory->create(); + const uint64_t maxNbConns = 1; + rdbms::ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn();; - const bool ctaCatalogueTableExists = tableExists("CTA_CATALOGUE", *conn); + const bool ctaCatalogueTableExists = tableExists("CTA_CATALOGUE", conn); if(ctaCatalogueTableExists) { std::cerr << "Cannot create the database schema because the CTA_CATALOGUE table already exists" << std::endl; @@ -68,13 +68,13 @@ int CreateSchemaCmd::exceptionThrowingMain(const int argc, char *const *const ar case rdbms::Login::DBTYPE_SQLITE: { SqliteCatalogueSchema schema; - conn->executeNonQueries(schema.sql); + conn.executeNonQueries(schema.sql); } break; case rdbms::Login::DBTYPE_ORACLE: { OracleCatalogueSchema schema; - conn->executeNonQueries(schema.sql); + conn.executeNonQueries(schema.sql); } break; case rdbms::Login::DBTYPE_NONE: diff --git a/catalogue/DropSchemaCmd.cpp b/catalogue/DropSchemaCmd.cpp index 1380c7b816e453f82cdc5c3dcfe1962d96833b7d..c698c8bbe8ed028dc580a2fa4f6ae2bb6ad8f78c 100644 --- a/catalogue/DropSchemaCmd.cpp +++ b/catalogue/DropSchemaCmd.cpp @@ -22,11 +22,9 @@ #include "catalogue/DropSchemaCmdLineArgs.hpp" #include "catalogue/DropSqliteCatalogueSchema.hpp" #include "common/exception/Exception.hpp" -#include "rdbms/ConnFactoryFactory.hpp" -#include 
"rdbms/OcciConn.hpp" +#include "rdbms/ConnPool.hpp" #include <algorithm> -#include <rdbms/OcciConn.hpp> namespace cta { namespace catalogue { @@ -59,45 +57,20 @@ int DropSchemaCmd::exceptionThrowingMain(const int argc, char *const *const argv } const rdbms::Login dbLogin = rdbms::Login::parseFile(cmdLineArgs.dbConfigPath); - auto factory = rdbms::ConnFactoryFactory::create(dbLogin); - auto conn = factory->create(); + const uint64_t maxNbConns = 1; + rdbms::ConnPool connPool(dbLogin, maxNbConns); + auto conn = connPool.getConn(); // Abort if the schema is already dropped - switch(dbLogin.dbType) { - case rdbms::Login::DBTYPE_IN_MEMORY: - case rdbms::Login::DBTYPE_SQLITE: - if (conn->getTableNames().empty()) { - m_out << "Database contains no tables." << std::endl << - "Assuming the schema has already been dropped." << std::endl; - return 0; - } - break; - case rdbms::Login::DBTYPE_ORACLE: - { - rdbms::OcciConn *const occiConn = dynamic_cast<rdbms::OcciConn *>(conn.get()); - if(nullptr == occiConn) { - throw exception::Exception("Failed to down cast rdbms::conn to rdbms::OcciConn"); - } - if(occiConn->getTableNames().empty() && occiConn->getSequenceNames().empty()) { - m_out << "Database contains no tables and no sequences." << std::endl << - "Assuming the schema has already been dropped." << std::endl; - return 0; - } - } - break; - case rdbms::Login::DBTYPE_NONE: - throw exception::Exception("Cannot delete the schema of catalogue database without a database type"); - default: - { - exception::Exception ex; - ex.getMessage() << "Unknown database type: value=" << dbLogin.dbType; - throw ex; - } + if(conn.getTableNames().empty() && conn.getSequenceNames().empty()) { + m_out << "Database contains no tables and no sequences." << std::endl << + "Assuming the schema has already been dropped." 
<< std::endl; + return 0; } if(userConfirmsDropOfSchema(dbLogin)) { m_out << "DROPPING the schema of the CTA calalogue database" << std::endl; - dropCatalogueSchema(dbLogin.dbType, *conn); + dropCatalogueSchema(dbLogin.dbType, conn); } else { m_out << "Aborting" << std::endl; } @@ -186,7 +159,7 @@ void DropSchemaCmd::dropDatabaseTables(rdbms::Conn &conn, const std::list<std::s for(auto tableToDrop : tablesToDrop) { const bool tableToDropIsInDb = tablesInDb.end() != std::find(tablesInDb.begin(), tablesInDb.end(), tableToDrop); if(tableToDropIsInDb) { - conn.executeNonQuery(std::string("DROP TABLE ") + tableToDrop, rdbms::Stmt::AutocommitMode::ON); + conn.executeNonQuery(std::string("DROP TABLE ") + tableToDrop, rdbms::AutocommitMode::ON); m_out << "Dropped table " << tableToDrop << std::endl; } } @@ -220,8 +193,7 @@ void DropSchemaCmd::dropOracleCatalogueSchema(rdbms::Conn &conn) { dropDatabaseTables(conn, tablesToDrop); std::list<std::string> sequencesToDrop = {"ARCHIVE_FILE_ID_SEQ"}; - rdbms::OcciConn &occiConn = dynamic_cast<rdbms::OcciConn &>(conn); - dropDatabaseSequences(occiConn, sequencesToDrop); + dropDatabaseSequences(conn, sequencesToDrop); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -230,14 +202,14 @@ void DropSchemaCmd::dropOracleCatalogueSchema(rdbms::Conn &conn) { //------------------------------------------------------------------------------ // dropDatabaseSequences //------------------------------------------------------------------------------ -void DropSchemaCmd::dropDatabaseSequences(rdbms::OcciConn &conn, const std::list<std::string> &sequencesToDrop) { +void DropSchemaCmd::dropDatabaseSequences(rdbms::Conn &conn, const std::list<std::string> &sequencesToDrop) { try { std::list<std::string> sequencesInDb = conn.getSequenceNames(); for(auto sequenceToDrop : sequencesToDrop) { const bool sequenceToDropIsInDb = sequencesInDb.end() != std::find(sequencesInDb.begin(), sequencesInDb.end(), sequenceToDrop); if(sequenceToDropIsInDb) { - conn.executeNonQuery(std::string("DROP SEQUENCE ") + sequenceToDrop, rdbms::Stmt::AutocommitMode::ON); + conn.executeNonQuery(std::string("DROP SEQUENCE ") + sequenceToDrop, rdbms::AutocommitMode::ON); m_out << "Dropped sequence " << sequenceToDrop << std::endl; } } diff --git a/catalogue/DropSchemaCmd.hpp b/catalogue/DropSchemaCmd.hpp index 760aa3b3dc0fca5383eee65c9b70bef056e22d15..a2e1c669279ce6729f5d4d676659cca792eefb72 100644 --- a/catalogue/DropSchemaCmd.hpp +++ b/catalogue/DropSchemaCmd.hpp @@ -22,7 +22,6 @@ #include "catalogue/CmdLineTool.hpp" #include "rdbms/Conn.hpp" #include "rdbms/Login.hpp" -#include "rdbms/OcciConn.hpp" namespace cta { namespace catalogue { @@ -109,9 +108,10 @@ private: /** * Drops the database sequences with the specified names. * + * @param conn The database connection. * @param seqeuncesToDrop The names of the database sequences to be dropped. 
*/ - void dropDatabaseSequences(rdbms::OcciConn &conn, const std::list<std::string> &sequencesToDrop); + void dropDatabaseSequences(rdbms::Conn &conn, const std::list<std::string> &sequencesToDrop); }; // class DropSchemaCmd diff --git a/catalogue/InMemoryCatalogue.cpp b/catalogue/InMemoryCatalogue.cpp index a88201c16d9fd366da2037168523da84126072df..03af6576982ec2b04ea022c2308ba3cb6340659d 100644 --- a/catalogue/InMemoryCatalogue.cpp +++ b/catalogue/InMemoryCatalogue.cpp @@ -17,9 +17,6 @@ */ #include "catalogue/InMemoryCatalogue.hpp" -#include "catalogue/SqliteCatalogueSchema.hpp" -#include "rdbms/SqliteConn.hpp" -#include "rdbms/SqliteConnFactory.hpp" namespace cta { namespace catalogue { @@ -30,8 +27,10 @@ namespace catalogue { InMemoryCatalogue::InMemoryCatalogue( log::Logger &log, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns): - SchemaCreatingSqliteCatalogue(log, "file::memory:?cache=shared", nbConns, nbArchiveFileListingConns) { + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect): + SchemaCreatingSqliteCatalogue(log, "file::memory:?cache=shared", nbConns, nbArchiveFileListingConns, + maxTriesToConnect) { } //------------------------------------------------------------------------------ diff --git a/catalogue/InMemoryCatalogue.hpp b/catalogue/InMemoryCatalogue.hpp index 6605647dcbc0cde497d75e1ff22c2a29f4a04bdc..06376bd981f3843afb3cd46ca91eebbd4533017a 100644 --- a/catalogue/InMemoryCatalogue.hpp +++ b/catalogue/InMemoryCatalogue.hpp @@ -41,11 +41,15 @@ public: * @param nbArchiveFileListingConns The maximum number of concurrent * connections to the underlying relational database for the sole purpose of * listing archive files. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ InMemoryCatalogue( log::Logger &log, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect); /** * Destructor.
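[Illustration, not part of the patch] The new maxTriesToConnect argument documented above is threaded from CatalogueFactory::create() down to the concrete catalogue classes. A minimal caller-side sketch of the extended factory call follows; the logger, configuration-file path and connection counts are placeholders, not values taken from this change.

    // Sketch only: a possible call of the extended CatalogueFactory::create().
    // The config path below is hypothetical; the connection counts are arbitrary examples.
    #include "catalogue/CatalogueFactory.hpp"
    #include "common/log/DummyLogger.hpp"
    #include "rdbms/Login.hpp"
    #include <cstdint>

    void exampleCreateCatalogue() {
      cta::log::DummyLogger dummyLog("dummy");  // stand-in logger, as used in the unit tests
      const auto login = cta::rdbms::Login::parseFile("/etc/cta/cta-catalogue.conf");  // hypothetical path
      const uint64_t nbConns = 2;
      const uint64_t nbArchiveFileListingConns = 2;
      const uint32_t maxTriesToConnect = 3;  // retried on LostDatabaseConnection; defaults to 3 if omitted
      auto catalogue = cta::catalogue::CatalogueFactory::create(dummyLog, login, nbConns,
        nbArchiveFileListingConns, maxTriesToConnect);
      // ... use the catalogue within this scope ...
    }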
diff --git a/catalogue/InMemoryCatalogueTest.cpp b/catalogue/InMemoryCatalogueTest.cpp index 51372829814ce52e91bce7ec0f61af8a39b3dca5..24f58e9e0ab47df8a676c100a85c64ba7d2a4156 100644 --- a/catalogue/InMemoryCatalogueTest.cpp +++ b/catalogue/InMemoryCatalogueTest.cpp @@ -39,15 +39,16 @@ TEST_F(cta_catalogue_InMemoryCatalogue, createSameSchemaInTwoSeparateInMemoryDat log::DummyLogger dummyLog("dummy"); const uint64_t nbConns = 1; const uint64_t nbArchiveFileListingConns = 0; + const uint32_t maxTriesToConnect = 1; // First in-memory database { - catalogue::InMemoryCatalogue inMemoryCatalogue(dummyLog, nbConns, nbArchiveFileListingConns); + catalogue::InMemoryCatalogue inMemoryCatalogue(dummyLog, nbConns, nbArchiveFileListingConns, maxTriesToConnect); } // Second in-memory database { - catalogue::InMemoryCatalogue inMemoryCatalogue(dummyLog, nbConns, nbArchiveFileListingConns); + catalogue::InMemoryCatalogue inMemoryCatalogue(dummyLog, nbConns, nbArchiveFileListingConns, maxTriesToConnect); } } diff --git a/catalogue/OracleCatalogue.cpp b/catalogue/OracleCatalogue.cpp index 8f6627776b27caca7f1ca8b5e1dc12379bd0d273..d8b6d840bfebf4b61bec5d89eb3968a075a0c2b8 100644 --- a/catalogue/OracleCatalogue.cpp +++ b/catalogue/OracleCatalogue.cpp @@ -18,21 +18,99 @@ #include "catalogue/ArchiveFileRow.hpp" #include "catalogue/OracleCatalogue.hpp" -#include "common/exception/UserError.hpp" +#include "catalogue/retryOnLostConnection.hpp" #include "common/exception/Exception.hpp" +#include "common/exception/LostDatabaseConnection.hpp" +#include "common/exception/UserError.hpp" #include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" #include "common/Timer.hpp" #include "common/utils/utils.hpp" #include "rdbms/AutoRollback.hpp" -#include "rdbms/ConnFactoryFactory.hpp" -#include "rdbms/OcciStmt.hpp" +#include "rdbms/wrapper/OcciColumn.hpp" +#include "rdbms/wrapper/OcciStmt.hpp" #include <string.h> namespace cta { namespace catalogue { +namespace { + /** + * Structure used to assemble a batch of rows to insert into the TAPE_FILE + * table. + */ + struct TapeFileBatch { + size_t nbRows; + rdbms::wrapper::OcciColumn vid; + rdbms::wrapper::OcciColumn fSeq; + rdbms::wrapper::OcciColumn blockId; + rdbms::wrapper::OcciColumn compressedSize; + rdbms::wrapper::OcciColumn copyNb; + rdbms::wrapper::OcciColumn creationTime; + rdbms::wrapper::OcciColumn archiveFileId; + + /** + * Constructor. + * + * @param nbRowsValue The Number of rows to be inserted. + */ + TapeFileBatch(const size_t nbRowsValue): + nbRows(nbRowsValue), + vid("VID", nbRows), + fSeq("FSEQ", nbRows), + blockId("BLOCK_ID", nbRows), + compressedSize("COMPRESSED_SIZE_IN_BYTES", nbRows), + copyNb("COPY_NB", nbRows), + creationTime("CREATION_TIME", nbRows), + archiveFileId("ARCHIVE_FILE_ID", nbRows) { + } + }; // struct TapeFileBatch + + /** + * Structure used to assemble a batch of rows to insert into the ARCHIVE_FILE + * table. 
+ */ + struct ArchiveFileBatch { + size_t nbRows; + rdbms::wrapper::OcciColumn archiveFileId; + rdbms::wrapper::OcciColumn diskInstance; + rdbms::wrapper::OcciColumn diskFileId; + rdbms::wrapper::OcciColumn diskFilePath; + rdbms::wrapper::OcciColumn diskFileUser; + rdbms::wrapper::OcciColumn diskFileGroup; + rdbms::wrapper::OcciColumn diskFileRecoveryBlob; + rdbms::wrapper::OcciColumn size; + rdbms::wrapper::OcciColumn checksumType; + rdbms::wrapper::OcciColumn checksumValue; + rdbms::wrapper::OcciColumn storageClassName; + rdbms::wrapper::OcciColumn creationTime; + rdbms::wrapper::OcciColumn reconciliationTime; + + /** + * Constructor. + * + * @param nbRowsValue The Number of rows to be inserted. + */ + ArchiveFileBatch(const size_t nbRowsValue): + nbRows(nbRowsValue), + archiveFileId("ARCHIVE_FILE_ID", nbRows), + diskInstance("DISK_INSTANCE_NAME", nbRows), + diskFileId("DISK_FILE_ID", nbRows), + diskFilePath("DISK_FILE_PATH", nbRows), + diskFileUser("DISK_FILE_USER", nbRows), + diskFileGroup("DISK_FILE_GROUP", nbRows), + diskFileRecoveryBlob("DISK_FILE_RECOVERY_BLOB", nbRows), + size("SIZE_IN_BYTES", nbRows), + checksumType("CHECKSUM_TYPE", nbRows), + checksumValue("CHECKSUM_VALUE", nbRows), + storageClassName("STORAGE_CLASS_NAME", nbRows), + creationTime("CREATION_TIME", nbRows), + reconciliationTime("RECONCILIATION_TIME", nbRows) { + } + }; // struct ArchiveFileBatch +} // anonymous namespace + //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ @@ -42,13 +120,14 @@ OracleCatalogue::OracleCatalogue( const std::string &password, const std::string &database, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns): + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect): RdbmsCatalogue( log, - rdbms::ConnFactoryFactory::create(rdbms::Login(rdbms::Login::DBTYPE_ORACLE, username, password, database)), + rdbms::Login(rdbms::Login::DBTYPE_ORACLE, username, password, database), nbConns, - nbArchiveFileListingConns) { - + nbArchiveFileListingConns, + maxTriesToConnect) { } //------------------------------------------------------------------------------ @@ -61,6 +140,15 @@ OracleCatalogue::~OracleCatalogue() { // deleteArchiveFile //------------------------------------------------------------------------------ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, const uint64_t archiveFileId, + log::LogContext &lc) { + return retryOnLostConnection(m_log, [&]{return deleteArchiveFileInternal(diskInstanceName, archiveFileId, lc);}, + m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// deleteArchiveFileInternal +//------------------------------------------------------------------------------ +void OracleCatalogue::deleteArchiveFileInternal(const std::string &diskInstanceName, const uint64_t archiveFileId, log::LogContext &lc) { try { const char *selectSql = @@ -93,12 +181,13 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con "FOR UPDATE"; utils::Timer t; auto conn = m_connPool.getConn(); + rdbms::AutoRollback autoRollback(conn); const auto getConnTime = t.secs(utils::Timer::resetCounter); - auto selectStmt = conn.createStmt(selectSql, rdbms::Stmt::AutocommitMode::OFF); + auto selectStmt = conn.createStmt(selectSql, rdbms::AutocommitMode::OFF); const auto createStmtTime = t.secs(); - selectStmt->bindUint64(":ARCHIVE_FILE_ID", 
archiveFileId); + selectStmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); t.reset(); - rdbms::Rset selectRset = selectStmt->executeQuery(); + rdbms::Rset selectRset = selectStmt.executeQuery(); const auto selectFromArchiveFileTime = t.secs(); std::unique_ptr<common::dataStructures::ArchiveFile> archiveFile; while(selectRset.next()) { @@ -189,17 +278,17 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con t.reset(); { const char *const sql = "DELETE FROM TAPE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + stmt.executeNonQuery(); } const auto deleteFromTapeFileTime = t.secs(utils::Timer::resetCounter); { const char *const sql = "DELETE FROM ARCHIVE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + stmt.executeNonQuery(); } const auto deleteFromArchiveFileTime = t.secs(utils::Timer::resetCounter); @@ -239,6 +328,8 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con spc.add("TAPE FILE", tapeCopyLogStream.str()); } lc.log(log::INFO, "Archive file deleted from CTA catalogue"); + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -251,8 +342,17 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con //------------------------------------------------------------------------------ void OracleCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInstanceName, const std::string &diskFileId, log::LogContext &lc) { + return retryOnLostConnection(m_log, [&]{return deleteArchiveFileByDiskFileIdInternal(diskInstanceName, diskFileId, + lc);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// deleteArchiveFileByDiskFileIdInternal +//------------------------------------------------------------------------------ +void OracleCatalogue::deleteArchiveFileByDiskFileIdInternal(const std::string &diskInstanceName, + const std::string &diskFileId, log::LogContext &lc) { try { - const char *selectSql = + const char *const selectSql = "SELECT " "ARCHIVE_FILE.ARCHIVE_FILE_ID AS ARCHIVE_FILE_ID," "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME," @@ -284,12 +384,12 @@ void OracleCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta utils::Timer t; auto conn = m_connPool.getConn(); const auto getConnTime = t.secs(utils::Timer::resetCounter); - auto selectStmt = conn.createStmt(selectSql, rdbms::Stmt::AutocommitMode::OFF); + auto selectStmt = conn.createStmt(selectSql, rdbms::AutocommitMode::OFF); const auto createStmtTime = t.secs(); - selectStmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - selectStmt->bindString(":DISK_FILE_ID", diskFileId); + selectStmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + selectStmt.bindString(":DISK_FILE_ID", diskFileId); t.reset(); - rdbms::Rset selectRset = 
selectStmt->executeQuery(); + rdbms::Rset selectRset = selectStmt.executeQuery(); const auto selectFromArchiveFileTime = t.secs(); std::unique_ptr<common::dataStructures::ArchiveFile> archiveFile; while(selectRset.next()) { @@ -339,17 +439,17 @@ void OracleCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta t.reset(); { const char *const sql = "DELETE FROM TAPE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); + stmt.executeNonQuery(); } const auto deleteFromTapeFileTime = t.secs(utils::Timer::resetCounter); { const char *const sql = "DELETE FROM ARCHIVE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); + stmt.executeNonQuery(); } const auto deleteFromArchiveFileTime = t.secs(utils::Timer::resetCounter); @@ -389,6 +489,8 @@ void OracleCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta spc.add("TAPE FILE", tapeCopyLogStream.str()); } lc.log(log::INFO, "Archive file deleted from CTA catalogue"); + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -399,15 +501,15 @@ void OracleCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta //------------------------------------------------------------------------------ // getNextArchiveFileId //------------------------------------------------------------------------------ -uint64_t OracleCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { +uint64_t OracleCatalogue::getNextArchiveFileId(rdbms::Conn &conn) { try { const char *const sql = "SELECT " "ARCHIVE_FILE_ID_SEQ.NEXTVAL AS ARCHIVE_FILE_ID " "FROM " "DUAL"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); if (!rset.next()) { throw exception::Exception(std::string("Result set is unexpectedly empty")); } @@ -421,7 +523,7 @@ uint64_t OracleCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { //------------------------------------------------------------------------------ // selectTapeForUpdate //------------------------------------------------------------------------------ -common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::PooledConn &conn, const std::string &vid) { +common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::Conn &conn, const std::string &vid) { try { const char *const sql = "SELECT " @@ -459,9 +561,9 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::PooledC "WHERE " "VID = :VID " "FOR UPDATE"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":VID", vid); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":VID", vid); + auto rset = 
stmt.executeQuery(); if (!rset.next()) { throw exception::Exception(std::string("The tape with VID " + vid + " does not exist")); } @@ -515,6 +617,13 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::PooledC // filesWrittenToTape //------------------------------------------------------------------------------ void OracleCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events) { + return retryOnLostConnection(m_log, [&]{return filesWrittenToTapeInternal(events);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// filesWrittenToTapeInternal +//------------------------------------------------------------------------------ +void OracleCatalogue::filesWrittenToTapeInternal(const std::set<TapeFileWritten> &events) { try { if (events.empty()) { return; @@ -568,10 +677,10 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events auto lastEventItor = events.cend(); lastEventItor--; const TapeFileWritten &lastEvent = *lastEventItor; - updateTape(conn, rdbms::Stmt::AutocommitMode::OFF, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, + updateTape(conn, rdbms::AutocommitMode::OFF, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, lastEvent.tapeDrive); - idempotentBatchInsertArchiveFiles(conn, rdbms::Stmt::AutocommitMode::OFF, events); + idempotentBatchInsertArchiveFiles(conn, rdbms::AutocommitMode::OFF, events); // Store the value of each field i = 0; @@ -603,8 +712,8 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events ":COPY_NB," ":CREATION_TIME," ":ARCHIVE_FILE_ID)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - rdbms::OcciStmt &occiStmt = dynamic_cast<rdbms::OcciStmt &>(*stmt); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + rdbms::wrapper::OcciStmt &occiStmt = dynamic_cast<rdbms::wrapper::OcciStmt &>(stmt.getStmt()); occiStmt.setColumn(tapeFileBatch.vid); occiStmt.setColumn(tapeFileBatch.fSeq); occiStmt.setColumn(tapeFileBatch.blockId); @@ -616,6 +725,8 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events conn.commit(); + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } catch(std::exception &se) { @@ -626,8 +737,8 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events //------------------------------------------------------------------------------ // idempotentBatchInsertArchiveFiles //------------------------------------------------------------------------------ -void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::PooledConn &conn, - const rdbms::Stmt::AutocommitMode autocommitMode, const std::set<TapeFileWritten> &events) { +void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, + const rdbms::AutocommitMode autocommitMode, const std::set<TapeFileWritten> &events) { try { ArchiveFileBatch archiveFileBatch(events.size()); const time_t now = time(nullptr); @@ -701,7 +812,7 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::PooledConn &conn, ":CREATION_TIME," ":RECONCILIATION_TIME)"; auto stmt = conn.createStmt(sql, autocommitMode); - rdbms::OcciStmt &occiStmt = dynamic_cast<rdbms::OcciStmt &>(*stmt); + rdbms::wrapper::OcciStmt &occiStmt 
= dynamic_cast<rdbms::wrapper::OcciStmt &>(stmt.getStmt()); occiStmt->setBatchErrorMode(true); occiStmt.setColumn(archiveFileBatch.archiveFileId); diff --git a/catalogue/OracleCatalogue.hpp b/catalogue/OracleCatalogue.hpp index b9587cd54b8b5dc436047483f8065ee373985190..3823ce0b423bf999ee8bd6a6751545128f81b763 100644 --- a/catalogue/OracleCatalogue.hpp +++ b/catalogue/OracleCatalogue.hpp @@ -19,17 +19,11 @@ #pragma once #include "catalogue/RdbmsCatalogue.hpp" -#include "rdbms/OcciColumn.hpp" -#include "rdbms/PooledConn.hpp" - -#include <occi.h> -#include <string.h> +#include "rdbms/Conn.hpp" namespace cta { namespace catalogue { -class CatalogueFactory; - /** * An Oracle based implementation of the CTA catalogue. */ @@ -49,6 +43,9 @@ public: * @param nbArchiveFileListingConns The maximum number of concurrent * connections to the underlying relational database for the sole purpose of * listing archive files. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ OracleCatalogue( log::Logger &log, @@ -56,7 +53,8 @@ public: const std::string &password, const std::string &database, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect = 3); /** * Destructor. @@ -117,7 +115,7 @@ public: * @return A unique archive ID that can be used by a new archive file within * the catalogue. */ - uint64_t getNextArchiveFileId(rdbms::PooledConn &conn) override; + uint64_t getNextArchiveFileId(rdbms::Conn &conn) override; /** * Notifies the catalogue that the specified files have been written to tape. @@ -129,86 +127,70 @@ private: /** - * Selects the specified tape within the Tape table for update. + * Deletes the specified archive file and its associated tape copies from the + * catalogue. * - * @param conn The database connection. - * @param vid The volume identifier of the tape. + * Please note that the name of the disk instance is specified in order to + * prevent a disk instance deleting an archive file that belongs to another + * disk instance. + * + * Please note that this method is idempotent. If the file to be deleted does + * not exist in the CTA catalogue then this method returns without error. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param diskInstanceName The name of the instance from where the deletion request + * originated + * @param archiveFileId The unique identifier of the archive file. + * @param lc The log context. + * @return The metadata of the deleted archive file including the metadata of + * the associated and also deleted tape copies. */ - common::dataStructures::Tape selectTapeForUpdate(rdbms::PooledConn &conn, const std::string &vid); + void deleteArchiveFileInternal(const std::string &diskInstanceName, const uint64_t archiveFileId, + log::LogContext &lc); /** - * Structure used to assemble a batch of rows to insert into the TAPE_FILE - * table. + * Deletes the specified archive file and its associated tape copies from the + * catalogue. + * + * Please note that this method is idempotent. If the file to be deleted does + * not exist in the CTA catalogue then this method returns without error. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception.
+ * + * @param diskInstanceName The name of the instance from where the deletion + * request originated + * @param diskFileId The identifier of the source disk file which is unique + * within its host disk system. Two files from different disk systems may + * have the same identifier. The combination of diskInstanceName and + * diskFileId must be globally unique, in other words unique within the CTA + * catalogue. + * @param lc The log context. + * @return The metadata of the deleted archive file including the metadata of + * the associated and also deleted tape copies. + */ + void deleteArchiveFileByDiskFileIdInternal(const std::string &diskInstanceName, const std::string &diskFileId, + log::LogContext &lc); + + /** + * Notifies the catalogue that the specified files have been written to tape. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param events The tape file written events. */ - struct TapeFileBatch { - size_t nbRows; - rdbms::OcciColumn vid; - rdbms::OcciColumn fSeq; - rdbms::OcciColumn blockId; - rdbms::OcciColumn compressedSize; - rdbms::OcciColumn copyNb; - rdbms::OcciColumn creationTime; - rdbms::OcciColumn archiveFileId; - - /** - * Constructor. - * - * @param nbRowsValue The Number of rows to be inserted. - */ - TapeFileBatch(const size_t nbRowsValue): - nbRows(nbRowsValue), - vid("VID", nbRows), - fSeq("FSEQ", nbRows), - blockId("BLOCK_ID", nbRows), - compressedSize("COMPRESSED_SIZE_IN_BYTES", nbRows), - copyNb("COPY_NB", nbRows), - creationTime("CREATION_TIME", nbRows), - archiveFileId("ARCHIVE_FILE_ID", nbRows) { - } - }; // struct TapeFileBatch + void filesWrittenToTapeInternal(const std::set<TapeFileWritten> &events); /** - * Structure used to assemble a batch of rows to insert into the ARCHIVE_FILE - * table. + * Selects the specified tape within the Tape table for update. + * + * @param conn The database connection. + * @param vid The volume identifier of the tape. */ - struct ArchiveFileBatch { - size_t nbRows; - rdbms::OcciColumn archiveFileId; - rdbms::OcciColumn diskInstance; - rdbms::OcciColumn diskFileId; - rdbms::OcciColumn diskFilePath; - rdbms::OcciColumn diskFileUser; - rdbms::OcciColumn diskFileGroup; - rdbms::OcciColumn diskFileRecoveryBlob; - rdbms::OcciColumn size; - rdbms::OcciColumn checksumType; - rdbms::OcciColumn checksumValue; - rdbms::OcciColumn storageClassName; - rdbms::OcciColumn creationTime; - rdbms::OcciColumn reconciliationTime; - - /** - * Constructor. - * - * @param nbRowsValue The Number of rows to be inserted. - */ - ArchiveFileBatch(const size_t nbRowsValue): - nbRows(nbRowsValue), - archiveFileId("ARCHIVE_FILE_ID", nbRows), - diskInstance("DISK_INSTANCE_NAME", nbRows), - diskFileId("DISK_FILE_ID", nbRows), - diskFilePath("DISK_FILE_PATH", nbRows), - diskFileUser("DISK_FILE_USER", nbRows), - diskFileGroup("DISK_FILE_GROUP", nbRows), - diskFileRecoveryBlob("DISK_FILE_RECOVERY_BLOB", nbRows), - size("SIZE_IN_BYTES", nbRows), - checksumType("CHECKSUM_TYPE", nbRows), - checksumValue("CHECKSUM_VALUE", nbRows), - storageClassName("STORAGE_CLASS_NAME", nbRows), - creationTime("CREATION_TIME", nbRows), - reconciliationTime("RECONCILIATION_TIME", nbRows) { - } - }; // struct ArchiveFileBatch + common::dataStructures::Tape selectTapeForUpdate(rdbms::Conn &conn, const std::string &vid); /** * Batch inserts rows into the ARCHIVE_FILE table that correspond to the @@ -227,7 +209,7 @@ private: * @param autocommitMode The autocommit mode of the SQL insert statement.
* @param events The tape file written events. */ - void idempotentBatchInsertArchiveFiles(rdbms::PooledConn &conn, const rdbms::Stmt::AutocommitMode autocommitMode, + void idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const rdbms::AutocommitMode autocommitMode, const std::set<TapeFileWritten> &events); }; // class OracleCatalogue diff --git a/catalogue/PollDatabaseCmd.cpp b/catalogue/PollDatabaseCmd.cpp index bd680a770cfe6dbed7ee3950c004c5ec183ce573..5f796a6f404fdfd54adeb542b09058116c4f34f8 100644 --- a/catalogue/PollDatabaseCmd.cpp +++ b/catalogue/PollDatabaseCmd.cpp @@ -19,7 +19,6 @@ #include "catalogue/CatalogueFactory.hpp" #include "catalogue/PollDatabaseCmd.hpp" #include "catalogue/PollDatabaseCmdLineArgs.hpp" -#include "rdbms/ConnFactoryFactory.hpp" #include "rdbms/ConnPool.hpp" #include <unistd.h> @@ -52,9 +51,8 @@ int PollDatabaseCmd::exceptionThrowingMain(const int argc, char *const *const ar } const auto dbLogin = rdbms::Login::parseFile(cmdLineArgs.dbConfigPath); - auto factory = rdbms::ConnFactoryFactory::create(dbLogin); const uint64_t nbConns = 2; - rdbms::ConnPool connPool(*factory, nbConns); + rdbms::ConnPool connPool(dbLogin, nbConns); uint32_t elapsedSeconds = 0; for(uint32_t i = 0; i < cmdLineArgs.numberOfSecondsToKeepPolling; i++) { diff --git a/catalogue/RdbmsArchiveFileItorImpl.cpp b/catalogue/RdbmsArchiveFileItorImpl.cpp index 61cec15416ffe9cb4c46b2c16d903da6b48bcb8f..03516f2c0ba97fed59fb0a2b4a8d45ea6e0462e9 100644 --- a/catalogue/RdbmsArchiveFileItorImpl.cpp +++ b/catalogue/RdbmsArchiveFileItorImpl.cpp @@ -198,38 +198,38 @@ RdbmsArchiveFileItorImpl::RdbmsArchiveFileItorImpl( } auto conn = connPool.getConn(); - m_stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + m_stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); if(searchCriteria.archiveFileId) { - m_stmt->bindUint64(":ARCHIVE_FILE_ID", searchCriteria.archiveFileId.value()); + m_stmt.bindUint64(":ARCHIVE_FILE_ID", searchCriteria.archiveFileId.value()); } if(searchCriteria.diskInstance) { - m_stmt->bindString(":DISK_INSTANCE_NAME", searchCriteria.diskInstance.value()); + m_stmt.bindString(":DISK_INSTANCE_NAME", searchCriteria.diskInstance.value()); } if(searchCriteria.diskFileId) { - m_stmt->bindString(":DISK_FILE_ID", searchCriteria.diskFileId.value()); + m_stmt.bindString(":DISK_FILE_ID", searchCriteria.diskFileId.value()); } if(searchCriteria.diskFilePath) { - m_stmt->bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value()); + m_stmt.bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value()); } if(searchCriteria.diskFileUser) { - m_stmt->bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value()); + m_stmt.bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value()); } if(searchCriteria.diskFileGroup) { - m_stmt->bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value()); + m_stmt.bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value()); } if(searchCriteria.storageClass) { - m_stmt->bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value()); + m_stmt.bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value()); } if(searchCriteria.vid) { - m_stmt->bindString(":VID", searchCriteria.vid.value()); + m_stmt.bindString(":VID", searchCriteria.vid.value()); } if(searchCriteria.tapeFileCopyNb) { - m_stmt->bindUint64(":TAPE_FILE_COPY_NB", searchCriteria.tapeFileCopyNb.value()); + m_stmt.bindUint64(":TAPE_FILE_COPY_NB", searchCriteria.tapeFileCopyNb.value()); } if(searchCriteria.tapePool) { - 
m_stmt->bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); + m_stmt.bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); } - m_rset = m_stmt->executeQuery(); + m_rset = m_stmt.executeQuery(); m_rsetIsEmpty = !m_rset.next(); } catch(exception::Exception &ex) { diff --git a/catalogue/RdbmsArchiveFileItorImpl.hpp b/catalogue/RdbmsArchiveFileItorImpl.hpp index 059dbc038fc26fd80ed5d146e6ed99f4d53e85e4..decca68918438ec6c40ad8ca3184bc50e7f3af9f 100644 --- a/catalogue/RdbmsArchiveFileItorImpl.hpp +++ b/catalogue/RdbmsArchiveFileItorImpl.hpp @@ -23,8 +23,8 @@ #include "catalogue/TapeFileSearchCriteria.hpp" #include "common/log/Logger.hpp" #include "rdbms/ConnPool.hpp" -#include "rdbms/Rset.hpp" #include "rdbms/Stmt.hpp" +#include "rdbms/Rset.hpp" namespace cta { namespace catalogue { @@ -97,7 +97,7 @@ private: /** * The database statement. */ - std::unique_ptr<rdbms::Stmt> m_stmt; + rdbms::Stmt m_stmt; /** * The result set of archive files that is to be iterated over. diff --git a/catalogue/RdbmsCatalogue.cpp b/catalogue/RdbmsCatalogue.cpp index 216194ba2c7f4f447e5d3f48f78a595117a590f7..2e4f3b613123d36ed2fbd792d7dc8dddcb84143c 100644 --- a/catalogue/RdbmsCatalogue.cpp +++ b/catalogue/RdbmsCatalogue.cpp @@ -19,6 +19,7 @@ #include "catalogue/ArchiveFileRow.hpp" #include "catalogue/RdbmsArchiveFileItorImpl.hpp" #include "catalogue/RdbmsCatalogue.hpp" +#include "catalogue/retryOnLostConnection.hpp" #include "catalogue/SqliteCatalogueSchema.hpp" #include "common/dataStructures/TapeFile.hpp" #include "common/exception/Exception.hpp" @@ -32,6 +33,7 @@ #include <ctype.h> #include <memory> #include <time.h> +#include <common/exception/LostDatabaseConnection.hpp> namespace cta { namespace catalogue { @@ -41,13 +43,14 @@ namespace catalogue { //------------------------------------------------------------------------------ RdbmsCatalogue::RdbmsCatalogue( log::Logger &log, - std::unique_ptr<rdbms::ConnFactory> connFactory, + const rdbms::Login &login, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns): + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect): Catalogue(log), - m_connFactory(std::move(connFactory)), - m_connPool(*m_connFactory, nbConns), - m_archiveFileListingConnPool(*m_connFactory, nbArchiveFileListingConns) { + m_connPool(login, nbConns), + m_archiveFileListingConnPool(login, nbArchiveFileListingConns), + m_maxTriesToConnect(maxTriesToConnect) { } //------------------------------------------------------------------------------ @@ -95,21 +98,21 @@ void RdbmsCatalogue::createAdminUser( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":ADMIN_USER_NAME", username); + stmt.bindString(":ADMIN_USER_NAME", username); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + 
stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -120,7 +123,7 @@ void RdbmsCatalogue::createAdminUser( //------------------------------------------------------------------------------ // adminUserExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::adminUserExists(rdbms::PooledConn &conn, const std::string adminUsername) const { +bool RdbmsCatalogue::adminUserExists(rdbms::Conn &conn, const std::string adminUsername) const { try { const char *const sql = "SELECT " @@ -129,9 +132,9 @@ bool RdbmsCatalogue::adminUserExists(rdbms::PooledConn &conn, const std::string "ADMIN_USER " "WHERE " "ADMIN_USER_NAME = :ADMIN_USER_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":ADMIN_USER_NAME", adminUsername); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":ADMIN_USER_NAME", adminUsername); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -145,11 +148,11 @@ void RdbmsCatalogue::deleteAdminUser(const std::string &username) { try { const char *const sql = "DELETE FROM ADMIN_USER WHERE ADMIN_USER_NAME = :ADMIN_USER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":ADMIN_USER_NAME", username); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":ADMIN_USER_NAME", username); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete admin-user ") + username + " because they do not exist"); } } catch(exception::UserError &) { @@ -183,8 +186,8 @@ std::list<common::dataStructures::AdminUser> RdbmsCatalogue::getAdminUsers() con "ORDER BY " "ADMIN_USER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::AdminUser admin; @@ -222,15 +225,15 @@ void RdbmsCatalogue::modifyAdminUserComment(const common::dataStructures::Securi "WHERE " "ADMIN_USER_NAME = :ADMIN_USER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":ADMIN_USER_NAME", username); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":ADMIN_USER_NAME", username); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify admin user ") + username + " 
because they do not exist"); } } catch(exception::UserError &) { @@ -279,21 +282,21 @@ void RdbmsCatalogue::createAdminHost( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":ADMIN_HOST_NAME", hostName); + stmt.bindString(":ADMIN_HOST_NAME", hostName); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -304,7 +307,7 @@ void RdbmsCatalogue::createAdminHost( //------------------------------------------------------------------------------ // adminHostExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::adminHostExists(rdbms::PooledConn &conn, const std::string adminHost) const { +bool RdbmsCatalogue::adminHostExists(rdbms::Conn &conn, const std::string adminHost) const { try { const char *const sql = "SELECT " @@ -313,9 +316,9 @@ bool RdbmsCatalogue::adminHostExists(rdbms::PooledConn &conn, const std::string "ADMIN_HOST " "WHERE " "ADMIN_HOST_NAME = :ADMIN_HOST_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":ADMIN_HOST_NAME", adminHost); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":ADMIN_HOST_NAME", adminHost); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -329,11 +332,11 @@ void RdbmsCatalogue::deleteAdminHost(const std::string &hostName) { try { const char *const sql = "DELETE FROM ADMIN_HOST WHERE ADMIN_HOST_NAME = :ADMIN_HOST_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":ADMIN_HOST_NAME", hostName); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":ADMIN_HOST_NAME", hostName); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete admin-host ") + hostName + " because it does not exist"); } } catch(exception::UserError &) { @@ -367,8 +370,8 @@ std::list<common::dataStructures::AdminHost> RdbmsCatalogue::getAdminHosts() con "ORDER BY " "ADMIN_HOST_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { 
common::dataStructures::AdminHost host; @@ -406,15 +409,15 @@ void RdbmsCatalogue::modifyAdminHostComment(const common::dataStructures::Securi "WHERE " "ADMIN_HOST_NAME = :ADMIN_HOST_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":ADMIN_HOST_NAME", hostName); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":ADMIN_HOST_NAME", hostName); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify admin host ") + hostName + " because it does not exist"); } } catch(exception::UserError &) { @@ -466,23 +469,23 @@ void RdbmsCatalogue::createStorageClass( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", storageClass.diskInstance); - stmt->bindString(":STORAGE_CLASS_NAME", storageClass.name); - stmt->bindUint64(":NB_COPIES", storageClass.nbCopies); + stmt.bindString(":DISK_INSTANCE_NAME", storageClass.diskInstance); + stmt.bindString(":STORAGE_CLASS_NAME", storageClass.name); + stmt.bindUint64(":NB_COPIES", storageClass.nbCopies); - stmt->bindString(":USER_COMMENT", storageClass.comment); + stmt.bindString(":USER_COMMENT", storageClass.comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -493,7 +496,7 @@ void RdbmsCatalogue::createStorageClass( //------------------------------------------------------------------------------ // storageClassExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::storageClassExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::storageClassExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName) const { try { const char *const sql = @@ -505,10 +508,10 @@ bool RdbmsCatalogue::storageClassExists(rdbms::PooledConn &conn, const std::stri "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", 
diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -527,13 +530,13 @@ void RdbmsCatalogue::deleteStorageClass(const std::string &diskInstanceName, con "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql,rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql,rdbms::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + stmt.executeNonQuery(); + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete storage-class ") + diskInstanceName + ":" + storageClassName + " because it does not exist"); } @@ -571,8 +574,8 @@ std::list<common::dataStructures::StorageClass> "ORDER BY " "DISK_INSTANCE_NAME, STORAGE_CLASS_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::StorageClass storageClass; @@ -613,16 +616,16 @@ void RdbmsCatalogue::modifyStorageClassNbCopies(const common::dataStructures::Se "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":NB_COPIES", nbCopies); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":STORAGE_CLASS_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":NB_COPIES", nbCopies); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":STORAGE_CLASS_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify storage class ") + instanceName + ":" + name + " because it does not exist"); } @@ -650,16 +653,16 @@ void RdbmsCatalogue::modifyStorageClassComment(const common::dataStructures::Sec "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - 
stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":STORAGE_CLASS_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":STORAGE_CLASS_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify storage class ") + instanceName + ":" + name + " because it does not exist"); } @@ -716,23 +719,23 @@ void RdbmsCatalogue::createTapePool( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":TAPE_POOL_NAME", name); - stmt->bindUint64(":NB_PARTIAL_TAPES", nbPartialTapes); - stmt->bindBool(":IS_ENCRYPTED", encryptionValue); + stmt.bindString(":TAPE_POOL_NAME", name); + stmt.bindUint64(":NB_PARTIAL_TAPES", nbPartialTapes); + stmt.bindBool(":IS_ENCRYPTED", encryptionValue); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -755,7 +758,7 @@ bool RdbmsCatalogue::tapePoolExists(const std::string &tapePoolName) const { //------------------------------------------------------------------------------ // tapePoolExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::tapePoolExists(rdbms::PooledConn &conn, const std::string &tapePoolName) const { +bool RdbmsCatalogue::tapePoolExists(rdbms::Conn &conn, const std::string &tapePoolName) const { try { const char *const sql = "SELECT " @@ -764,9 +767,9 @@ bool RdbmsCatalogue::tapePoolExists(rdbms::PooledConn &conn, const std::string & "TAPE_POOL " "WHERE " "TAPE_POOL_NAME = :TAPE_POOL_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":TAPE_POOL_NAME", tapePoolName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":TAPE_POOL_NAME", tapePoolName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -776,7 +779,7 @@ bool RdbmsCatalogue::tapePoolExists(rdbms::PooledConn &conn, const std::string & 
//------------------------------------------------------------------------------ // archiveFileExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::archiveFileIdExists(rdbms::PooledConn &conn, const uint64_t archiveFileId) const { +bool RdbmsCatalogue::archiveFileIdExists(rdbms::Conn &conn, const uint64_t archiveFileId) const { try { const char *const sql = "SELECT " @@ -785,9 +788,9 @@ bool RdbmsCatalogue::archiveFileIdExists(rdbms::PooledConn &conn, const uint64_t "ARCHIVE_FILE " "WHERE " "ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -797,7 +800,7 @@ bool RdbmsCatalogue::archiveFileIdExists(rdbms::PooledConn &conn, const uint64_t //------------------------------------------------------------------------------ // diskFileIdExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::diskFileIdExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::diskFileIdExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileId) const { try { const char *const sql = @@ -809,10 +812,10 @@ bool RdbmsCatalogue::diskFileIdExists(rdbms::PooledConn &conn, const std::string "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "DISK_FILE_ID = :DISK_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":DISK_FILE_ID", diskFileId); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":DISK_FILE_ID", diskFileId); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -822,7 +825,7 @@ bool RdbmsCatalogue::diskFileIdExists(rdbms::PooledConn &conn, const std::string //------------------------------------------------------------------------------ // diskFilePathExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::diskFilePathExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::diskFilePathExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFilePath) const { try { const char *const sql = @@ -834,10 +837,10 @@ bool RdbmsCatalogue::diskFilePathExists(rdbms::PooledConn &conn, const std::stri "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "DISK_FILE_PATH = :DISK_FILE_PATH"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":DISK_FILE_PATH", diskFilePath); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":DISK_FILE_PATH", diskFilePath); + auto rset = stmt.executeQuery(); 
return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -847,7 +850,7 @@ bool RdbmsCatalogue::diskFilePathExists(rdbms::PooledConn &conn, const std::stri //------------------------------------------------------------------------------ // diskFileUserExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::diskFileUserExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::diskFileUserExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileUser) const { try { const char *const sql = @@ -859,10 +862,10 @@ bool RdbmsCatalogue::diskFileUserExists(rdbms::PooledConn &conn, const std::stri "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "DISK_FILE_USER = :DISK_FILE_USER"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":DISK_FILE_USER", diskFileUser); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":DISK_FILE_USER", diskFileUser); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -872,7 +875,7 @@ bool RdbmsCatalogue::diskFileUserExists(rdbms::PooledConn &conn, const std::stri //------------------------------------------------------------------------------ // diskFileGroupExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::diskFileGroupExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::diskFileGroupExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileGroup) const { try { const char *const sql = @@ -884,10 +887,10 @@ bool RdbmsCatalogue::diskFileGroupExists(rdbms::PooledConn &conn, const std::str "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "DISK_FILE_GROUP = :DISK_FILE_GROUP"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":DISK_FILE_GROUP", diskFileGroup); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":DISK_FILE_GROUP", diskFileGroup); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -897,7 +900,7 @@ bool RdbmsCatalogue::diskFileGroupExists(rdbms::PooledConn &conn, const std::str //------------------------------------------------------------------------------ // archiveRouteExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::archiveRouteExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::archiveRouteExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName, const uint64_t copyNb) const { try { const char *const sql = @@ -911,11 +914,11 @@ bool RdbmsCatalogue::archiveRouteExists(rdbms::PooledConn &conn, const std::stri "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME 
AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME AND " "COPY_NB = :COPY_NB"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->bindUint64(":COPY_NB", copyNb); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindUint64(":COPY_NB", copyNb); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -929,11 +932,11 @@ void RdbmsCatalogue::deleteTapePool(const std::string &name) { try { const char *const sql = "DELETE FROM TAPE_POOL WHERE TAPE_POOL_NAME = :TAPE_POOL_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":TAPE_POOL_NAME", name); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":TAPE_POOL_NAME", name); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete tape-pool ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -987,8 +990,8 @@ std::list<TapePool> RdbmsCatalogue::getTapePools() const { "TAPE_POOL_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { TapePool pool; @@ -1031,15 +1034,15 @@ void RdbmsCatalogue::modifyTapePoolNbPartialTapes(const common::dataStructures:: "WHERE " "TAPE_POOL_NAME = :TAPE_POOL_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":NB_PARTIAL_TAPES", nbPartialTapes); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":TAPE_POOL_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":NB_PARTIAL_TAPES", nbPartialTapes); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":TAPE_POOL_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape pool ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -1064,15 +1067,15 @@ void RdbmsCatalogue::modifyTapePoolComment(const common::dataStructures::Securit "WHERE " "TAPE_POOL_NAME = :TAPE_POOL_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":TAPE_POOL_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = 
conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":TAPE_POOL_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape pool ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -1098,15 +1101,15 @@ void RdbmsCatalogue::setTapePoolEncryption(const common::dataStructures::Securit "WHERE " "TAPE_POOL_NAME = :TAPE_POOL_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindBool(":IS_ENCRYPTED", encryptionValue); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":TAPE_POOL_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindBool(":IS_ENCRYPTED", encryptionValue); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":TAPE_POOL_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape pool ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -1180,24 +1183,24 @@ void RdbmsCatalogue::createArchiveRoute( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->bindUint64(":COPY_NB", copyNb); - stmt->bindString(":TAPE_POOL_NAME", tapePoolName); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindUint64(":COPY_NB", copyNb); + stmt.bindString(":TAPE_POOL_NAME", tapePoolName); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -1219,13 +1222,13 @@ void RdbmsCatalogue::deleteArchiveRoute(const std::string &diskInstanceName, con "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME AND " "COPY_NB = :COPY_NB"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - 
stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->bindUint64(":COPY_NB", copyNb); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindUint64(":COPY_NB", copyNb); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { exception::UserError ue; ue.getMessage() << "Cannot delete archive route for storage-class " << diskInstanceName + ":" + storageClassName + " and copy number " << copyNb << " because it does not exist"; @@ -1265,8 +1268,8 @@ std::list<common::dataStructures::ArchiveRoute> RdbmsCatalogue::getArchiveRoutes "ORDER BY " "DISK_INSTANCE_NAME, STORAGE_CLASS_NAME, COPY_NB"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::ArchiveRoute route; @@ -1310,17 +1313,17 @@ void RdbmsCatalogue::modifyArchiveRouteTapePoolName(const common::dataStructures "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME AND " "COPY_NB = :COPY_NB"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":TAPE_POOL_NAME", tapePoolName); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->bindUint64(":COPY_NB", copyNb); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":TAPE_POOL_NAME", tapePoolName); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindUint64(":COPY_NB", copyNb); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { exception::UserError ue; ue.getMessage() << "Cannot modify archive route for storage-class " << instanceName + ":" + storageClassName + " and copy number " << copyNb << " because it does not exist"; @@ -1352,17 +1355,17 @@ void RdbmsCatalogue::modifyArchiveRouteComment(const common::dataStructures::Sec "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME AND " "COPY_NB = :COPY_NB"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - stmt->bindUint64(":COPY_NB", copyNb); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + 
stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + stmt.bindUint64(":COPY_NB", copyNb); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { exception::UserError ue; ue.getMessage() << "Cannot modify archive route for storage-class " << instanceName + ":" + storageClassName + " and copy number " << copyNb << " because it does not exist"; @@ -1414,21 +1417,21 @@ void RdbmsCatalogue::createLogicalLibrary( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":LOGICAL_LIBRARY_NAME", name); + stmt.bindString(":LOGICAL_LIBRARY_NAME", name); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(std::exception &ex) { @@ -1439,7 +1442,7 @@ void RdbmsCatalogue::createLogicalLibrary( //------------------------------------------------------------------------------ // logicalLibraryExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::logicalLibraryExists(rdbms::PooledConn &conn, const std::string &logicalLibraryName) const { +bool RdbmsCatalogue::logicalLibraryExists(rdbms::Conn &conn, const std::string &logicalLibraryName) const { try { const char *const sql = "SELECT " @@ -1448,9 +1451,9 @@ bool RdbmsCatalogue::logicalLibraryExists(rdbms::PooledConn &conn, const std::st "LOGICAL_LIBRARY " "WHERE " "LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -1464,11 +1467,11 @@ void RdbmsCatalogue::deleteLogicalLibrary(const std::string &name) { try { const char *const sql = "DELETE FROM LOGICAL_LIBRARY WHERE LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LOGICAL_LIBRARY_NAME", name); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LOGICAL_LIBRARY_NAME", name); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete logical-library ") + name + " because it does not 
exist"); } } catch(exception::UserError &) { @@ -1503,8 +1506,8 @@ std::list<common::dataStructures::LogicalLibrary> "ORDER BY " "LOGICAL_LIBRARY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::LogicalLibrary lib; @@ -1542,15 +1545,15 @@ void RdbmsCatalogue::modifyLogicalLibraryComment(const common::dataStructures::S "WHERE " "LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":LOGICAL_LIBRARY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LOGICAL_LIBRARY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify logical library ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -1626,28 +1629,28 @@ void RdbmsCatalogue::createTape( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":VID", vid); - stmt->bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); - stmt->bindString(":TAPE_POOL_NAME", tapePoolName); - stmt->bindUint64(":CAPACITY_IN_BYTES", capacityInBytes); - stmt->bindUint64(":DATA_IN_BYTES", 0); - stmt->bindUint64(":LAST_FSEQ", 0); - stmt->bindBool(":IS_DISABLED", disabled); - stmt->bindBool(":IS_FULL", full); + stmt.bindString(":VID", vid); + stmt.bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); + stmt.bindString(":TAPE_POOL_NAME", tapePoolName); + stmt.bindUint64(":CAPACITY_IN_BYTES", capacityInBytes); + stmt.bindUint64(":DATA_IN_BYTES", 0); + stmt.bindUint64(":LAST_FSEQ", 0); + stmt.bindBool(":IS_DISABLED", disabled); + stmt.bindBool(":IS_FULL", full); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -1670,7 +1673,7 @@ bool RdbmsCatalogue::tapeExists(const std::string &vid) const { 
//------------------------------------------------------------------------------ // tapeExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::tapeExists(rdbms::PooledConn &conn, const std::string &vid) const { +bool RdbmsCatalogue::tapeExists(rdbms::Conn &conn, const std::string &vid) const { try { const char *const sql = "SELECT " @@ -1679,9 +1682,9 @@ bool RdbmsCatalogue::tapeExists(rdbms::PooledConn &conn, const std::string &vid) "TAPE " "WHERE " "VID = :VID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":VID", vid); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":VID", vid); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -1695,11 +1698,11 @@ void RdbmsCatalogue::deleteTape(const std::string &vid) { try { const char *const sql = "DELETE FROM TAPE WHERE VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -1724,7 +1727,7 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(const TapeSearc //------------------------------------------------------------------------------ // getTapes //------------------------------------------------------------------------------ -std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::PooledConn &conn, +std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &conn, const TapeSearchCriteria &searchCriteria) const { try { std::list<common::dataStructures::Tape> tapes; @@ -1810,17 +1813,17 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::PooledCo sql += " ORDER BY VID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); - if(searchCriteria.vid) stmt->bindString(":VID", searchCriteria.vid.value()); - if(searchCriteria.logicalLibrary) stmt->bindString(":LOGICAL_LIBRARY_NAME", searchCriteria.logicalLibrary.value()); - if(searchCriteria.tapePool) stmt->bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); - if(searchCriteria.capacityInBytes) stmt->bindUint64(":CAPACITY_IN_BYTES", searchCriteria.capacityInBytes.value()); - if(searchCriteria.disabled) stmt->bindBool(":IS_DISABLED", searchCriteria.disabled.value()); - if(searchCriteria.full) stmt->bindBool(":IS_FULL", searchCriteria.full.value()); - if(searchCriteria.lbp) stmt->bindBool(":LBP_IS_ON", searchCriteria.lbp.value()); + if(searchCriteria.vid) stmt.bindString(":VID", searchCriteria.vid.value()); + if(searchCriteria.logicalLibrary) stmt.bindString(":LOGICAL_LIBRARY_NAME", searchCriteria.logicalLibrary.value()); + if(searchCriteria.tapePool) stmt.bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); + if(searchCriteria.capacityInBytes) stmt.bindUint64(":CAPACITY_IN_BYTES", searchCriteria.capacityInBytes.value()); + if(searchCriteria.disabled) stmt.bindBool(":IS_DISABLED", 
searchCriteria.disabled.value()); + if(searchCriteria.full) stmt.bindBool(":IS_FULL", searchCriteria.full.value()); + if(searchCriteria.lbp) stmt.bindBool(":LBP_IS_ON", searchCriteria.lbp.value()); - auto rset = stmt->executeQuery(); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::Tape tape; @@ -1910,17 +1913,17 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getTapesByVid(const std::se } auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); { uint64_t vidNb = 1; for(auto &vid : vids) { - stmt->bindString(":VID" + std::to_string(vidNb), vid); + stmt.bindString(":VID" + std::to_string(vidNb), vid); vidNb++; } } - auto rset = stmt->executeQuery(); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::Tape tape; @@ -1979,16 +1982,16 @@ void RdbmsCatalogue::reclaimTape(const common::dataStructures::SecurityIdentity "IS_FULL != 0 AND " "NOT EXISTS (SELECT VID FROM TAPE_FILE WHERE VID = :SELECT_VID)"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":UPDATE_VID", vid); - stmt->bindString(":SELECT_VID", vid); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":UPDATE_VID", vid); + stmt.bindString(":SELECT_VID", vid); + stmt.executeNonQuery(); // If the update failed due to a user error - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { // Try to determine the user error // // Please note that this is a best effort diagnosis because there is no @@ -2067,15 +2070,15 @@ void RdbmsCatalogue::modifyTapeLogicalLibraryName(const common::dataStructures:: "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2101,15 +2104,15 @@ void RdbmsCatalogue::modifyTapeTapePoolName(const common::dataStructures::Securi "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":TAPE_POOL_NAME", tapePoolName); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - 
stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":TAPE_POOL_NAME", tapePoolName); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2135,15 +2138,15 @@ void RdbmsCatalogue::modifyTapeCapacityInBytes(const common::dataStructures::Sec "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":CAPACITY_IN_BYTES", capacityInBytes); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":CAPACITY_IN_BYTES", capacityInBytes); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2169,15 +2172,15 @@ void RdbmsCatalogue::modifyTapeEncryptionKey(const common::dataStructures::Secur "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":ENCRYPTION_KEY", encryptionKey); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":ENCRYPTION_KEY", encryptionKey); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2191,6 +2194,13 @@ void RdbmsCatalogue::modifyTapeEncryptionKey(const common::dataStructures::Secur // tapeMountedForArchive //------------------------------------------------------------------------------ void RdbmsCatalogue::tapeMountedForArchive(const std::string &vid, const std::string &drive) { + return retryOnLostConnection(m_log, [&]{return tapeMountedForArchiveInternal(vid, drive);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// tapeMountedForArchiveInternal +//------------------------------------------------------------------------------ +void RdbmsCatalogue::tapeMountedForArchiveInternal(const std::string &vid, const std::string &drive) { try { const time_t now = 
time(nullptr); const char *const sql = @@ -2200,15 +2210,17 @@ void RdbmsCatalogue::tapeMountedForArchive(const std::string &vid, const std::st "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LAST_WRITE_DRIVE", drive); - stmt->bindUint64(":LAST_WRITE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LAST_WRITE_DRIVE", drive); + stmt.bindUint64(":LAST_WRITE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if (0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -2220,6 +2232,13 @@ void RdbmsCatalogue::tapeMountedForArchive(const std::string &vid, const std::st // tapeMountedForRetrieve //------------------------------------------------------------------------------ void RdbmsCatalogue::tapeMountedForRetrieve(const std::string &vid, const std::string &drive) { + return retryOnLostConnection(m_log, [&]{return tapeMountedForRetrieveInternal(vid, drive);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// tapeMountedForRetrieveInternal +//------------------------------------------------------------------------------ +void RdbmsCatalogue::tapeMountedForRetrieveInternal(const std::string &vid, const std::string &drive) { try { const time_t now = time(nullptr); const char *const sql = @@ -2229,15 +2248,17 @@ void RdbmsCatalogue::tapeMountedForRetrieve(const std::string &vid, const std::s "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LAST_READ_DRIVE", drive); - stmt->bindUint64(":LAST_READ_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LAST_READ_DRIVE", drive); + stmt.bindUint64(":LAST_READ_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -2261,15 +2282,15 @@ void RdbmsCatalogue::setTapeFull(const common::dataStructures::SecurityIdentity "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindBool(":IS_FULL", fullValue); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindBool(":IS_FULL", fullValue); + stmt.bindString(":LAST_UPDATE_USER_NAME",
admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2283,6 +2304,13 @@ void RdbmsCatalogue::setTapeFull(const common::dataStructures::SecurityIdentity // noSpaceLeftOnTape //------------------------------------------------------------------------------ void RdbmsCatalogue::noSpaceLeftOnTape(const std::string &vid) { + return retryOnLostConnection(m_log, [&]{return noSpaceLeftOnTapeInternal(vid);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// noSpaceLeftOnTapeInternal +//------------------------------------------------------------------------------ +void RdbmsCatalogue::noSpaceLeftOnTapeInternal(const std::string &vid) { try { const char *const sql = "UPDATE TAPE SET " @@ -2290,13 +2318,15 @@ void RdbmsCatalogue::noSpaceLeftOnTape(const std::string &vid) { "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if (0 == stmt.getNbAffectedRows()) { throw exception::Exception(std::string("Tape ") + vid + " does not exist"); } + } catch (exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -2318,15 +2348,15 @@ void RdbmsCatalogue::setTapeDisabled(const common::dataStructures::SecurityIdent "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindBool(":IS_DISABLED", disabledValue); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindBool(":IS_DISABLED", disabledValue); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2352,15 +2382,15 @@ void RdbmsCatalogue::modifyTapeComment(const common::dataStructures::SecurityIde "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, 
rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } } catch(exception::UserError &) { @@ -2387,16 +2417,16 @@ void RdbmsCatalogue::modifyRequesterMountRulePolicy(const common::dataStructures "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_NAME = :REQUESTER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":MOUNT_POLICY_NAME", mountPolicy); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":MOUNT_POLICY_NAME", mountPolicy); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify requester mount rule ") + instanceName + ":" + requesterName + " because it does not exist"); } @@ -2424,16 +2454,16 @@ void RdbmsCatalogue::modifyRequesteMountRuleComment(const common::dataStructures "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_NAME = :REQUESTER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify requester mount rule ") + instanceName + ":" + requesterName + " because it does not exist"); } @@ -2461,16 +2491,16 @@ void RdbmsCatalogue::modifyRequesterGroupMountRulePolicy(const common::dataStruc "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":MOUNT_POLICY_NAME", mountPolicy); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - 
stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":MOUNT_POLICY_NAME", mountPolicy); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify requester group mount rule ") + instanceName + ":" + requesterGroupName + " because it does not exist"); } @@ -2498,16 +2528,16 @@ void RdbmsCatalogue::modifyRequesterGroupMountRuleComment(const common::dataStru "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":DISK_INSTANCE_NAME", instanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":DISK_INSTANCE_NAME", instanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify requester group mount rule ") + instanceName + ":" + requesterGroupName + " because it does not exist"); } @@ -2578,29 +2608,29 @@ void RdbmsCatalogue::createMountPolicy( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":MOUNT_POLICY_NAME", name); + stmt.bindString(":MOUNT_POLICY_NAME", name); - stmt->bindUint64(":ARCHIVE_PRIORITY", archivePriority); - stmt->bindUint64(":ARCHIVE_MIN_REQUEST_AGE", minArchiveRequestAge); + stmt.bindUint64(":ARCHIVE_PRIORITY", archivePriority); + stmt.bindUint64(":ARCHIVE_MIN_REQUEST_AGE", minArchiveRequestAge); - stmt->bindUint64(":RETRIEVE_PRIORITY", retrievePriority); - stmt->bindUint64(":RETRIEVE_MIN_REQUEST_AGE", minRetrieveRequestAge); + stmt.bindUint64(":RETRIEVE_PRIORITY", retrievePriority); + stmt.bindUint64(":RETRIEVE_MIN_REQUEST_AGE", minRetrieveRequestAge); - stmt->bindUint64(":MAX_DRIVES_ALLOWED", maxDrivesAllowed); + stmt.bindUint64(":MAX_DRIVES_ALLOWED", maxDrivesAllowed); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", 
admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -2661,23 +2691,23 @@ void RdbmsCatalogue::createRequesterMountRule( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - stmt->bindString(":MOUNT_POLICY_NAME", mountPolicyName); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + stmt.bindString(":MOUNT_POLICY_NAME", mountPolicyName); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -2711,8 +2741,8 @@ std::list<common::dataStructures::RequesterMountRule> RdbmsCatalogue::getRequest "ORDER BY " "DISK_INSTANCE_NAME, REQUESTER_NAME, MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while(rset.next()) { common::dataStructures::RequesterMountRule rule; @@ -2748,12 +2778,12 @@ void RdbmsCatalogue::deleteRequesterMountRule(const std::string &diskInstanceNam "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_NAME = :REQUESTER_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete mount rule for requester ") + diskInstanceName + ":" + requesterName + " because the rule does not exist"); } @@ -2816,23 +2846,23 @@ void RdbmsCatalogue::createRequesterGroupMountRule( ":LAST_UPDATE_USER_NAME," ":LAST_UPDATE_HOST_NAME," ":LAST_UPDATE_TIME)"; - auto stmt = 
conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - stmt->bindString(":MOUNT_POLICY_NAME", mountPolicyName); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + stmt.bindString(":MOUNT_POLICY_NAME", mountPolicyName); - stmt->bindString(":USER_COMMENT", comment); + stmt.bindString(":USER_COMMENT", comment); - stmt->bindString(":CREATION_LOG_USER_NAME", admin.username); - stmt->bindString(":CREATION_LOG_HOST_NAME", admin.host); - stmt->bindUint64(":CREATION_LOG_TIME", now); + stmt.bindString(":CREATION_LOG_USER_NAME", admin.username); + stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host); + stmt.bindUint64(":CREATION_LOG_TIME", now); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -2844,7 +2874,7 @@ void RdbmsCatalogue::createRequesterGroupMountRule( // getRequesterGroupMountPolicy //------------------------------------------------------------------------------ common::dataStructures::MountPolicy *RdbmsCatalogue::getRequesterGroupMountPolicy( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterGroupName) const { try { @@ -2878,10 +2908,10 @@ common::dataStructures::MountPolicy *RdbmsCatalogue::getRequesterGroupMountPolic "WHERE " "REQUESTER_GROUP_MOUNT_RULE.DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_MOUNT_RULE.REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + auto rset = stmt.executeQuery(); if(rset.next()) { auto policy = cta::make_unique<common::dataStructures::MountPolicy>(); @@ -2938,8 +2968,8 @@ std::list<common::dataStructures::RequesterGroupMountRule> RdbmsCatalogue::getRe "ORDER BY " "DISK_INSTANCE_NAME, REQUESTER_GROUP_NAME, MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while(rset.next()) { common::dataStructures::RequesterGroupMountRule rule; @@ -2977,12 +3007,12 @@ void RdbmsCatalogue::deleteRequesterGroupMountRule(const std::string &diskInstan "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - stmt->executeNonQuery(); + auto stmt = 
conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete the mount rule for requester group ") + diskInstanceName + ":" + requesterGroupName + " because it does not exist"); } @@ -2996,7 +3026,7 @@ void RdbmsCatalogue::deleteRequesterGroupMountRule(const std::string &diskInstan //------------------------------------------------------------------------------ // mountPolicyExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::mountPolicyExists(rdbms::PooledConn &conn, const std::string &mountPolicyName) const { +bool RdbmsCatalogue::mountPolicyExists(rdbms::Conn &conn, const std::string &mountPolicyName) const { try { const char *const sql = "SELECT " @@ -3005,9 +3035,9 @@ bool RdbmsCatalogue::mountPolicyExists(rdbms::PooledConn &conn, const std::strin "MOUNT_POLICY " "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":MOUNT_POLICY_NAME", mountPolicyName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":MOUNT_POLICY_NAME", mountPolicyName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -3017,7 +3047,7 @@ bool RdbmsCatalogue::mountPolicyExists(rdbms::PooledConn &conn, const std::strin //------------------------------------------------------------------------------ // requesterMountRuleExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::requesterMountRuleExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::requesterMountRuleExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName) const { try { const char *const sql = @@ -3028,10 +3058,10 @@ bool RdbmsCatalogue::requesterMountRuleExists(rdbms::PooledConn &conn, const std "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_NAME = :REQUESTER_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -3042,7 +3072,7 @@ bool RdbmsCatalogue::requesterMountRuleExists(rdbms::PooledConn &conn, const std // getRequesterMountPolicy //------------------------------------------------------------------------------ common::dataStructures::MountPolicy *RdbmsCatalogue::getRequesterMountPolicy( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName) const { try { @@ -3076,10 +3106,10 @@ common::dataStructures::MountPolicy *RdbmsCatalogue::getRequesterMountPolicy( "WHERE " 
"REQUESTER_MOUNT_RULE.DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_MOUNT_RULE.REQUESTER_NAME = :REQUESTER_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + auto rset = stmt.executeQuery(); if(rset.next()) { auto policy = cta::make_unique<common::dataStructures::MountPolicy>(); @@ -3116,7 +3146,7 @@ common::dataStructures::MountPolicy *RdbmsCatalogue::getRequesterMountPolicy( //------------------------------------------------------------------------------ // requesterGroupMountRuleExists //------------------------------------------------------------------------------ -bool RdbmsCatalogue::requesterGroupMountRuleExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, +bool RdbmsCatalogue::requesterGroupMountRuleExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterGroupName) const { try { const char *const sql = @@ -3128,10 +3158,10 @@ bool RdbmsCatalogue::requesterGroupMountRuleExists(rdbms::PooledConn &conn, cons "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + auto rset = stmt.executeQuery(); return rset.next(); } catch (exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -3145,11 +3175,11 @@ void RdbmsCatalogue::deleteMountPolicy(const std::string &name) { try { const char *const sql = "DELETE FROM MOUNT_POLICY WHERE MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); - if(0 == stmt->getNbAffectedRows()) { + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot delete mount policy ") + name + " because it does not exist"); } } catch(exception::UserError &) { @@ -3191,8 +3221,8 @@ std::list<common::dataStructures::MountPolicy> RdbmsCatalogue::getMountPolicies( "ORDER BY " "MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); while (rset.next()) { common::dataStructures::MountPolicy policy; @@ -3241,15 +3271,15 @@ void RdbmsCatalogue::modifyMountPolicyArchivePriority(const common::dataStructur "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":ARCHIVE_PRIORITY", archivePriority); - 
stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":ARCHIVE_PRIORITY", archivePriority); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3275,15 +3305,15 @@ void RdbmsCatalogue::modifyMountPolicyArchiveMinRequestAge(const common::dataStr "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":ARCHIVE_MIN_REQUEST_AGE", minArchiveRequestAge); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":ARCHIVE_MIN_REQUEST_AGE", minArchiveRequestAge); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3309,15 +3339,15 @@ void RdbmsCatalogue::modifyMountPolicyRetrievePriority(const common::dataStructu "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":RETRIEVE_PRIORITY", retrievePriority); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":RETRIEVE_PRIORITY", retrievePriority); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3343,15 +3373,15 @@ void RdbmsCatalogue::modifyMountPolicyRetrieveMinRequestAge(const common::dataSt "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":RETRIEVE_MIN_REQUEST_AGE", minRetrieveRequestAge); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - 
stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":RETRIEVE_MIN_REQUEST_AGE", minRetrieveRequestAge); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3377,15 +3407,15 @@ void RdbmsCatalogue::modifyMountPolicyMaxDrivesAllowed(const common::dataStructu "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindUint64(":MAX_DRIVES_ALLOWED", maxDrivesAllowed); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindUint64(":MAX_DRIVES_ALLOWED", maxDrivesAllowed); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3411,15 +3441,15 @@ void RdbmsCatalogue::modifyMountPolicyComment(const common::dataStructures::Secu "WHERE " "MOUNT_POLICY_NAME = :MOUNT_POLICY_NAME"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":USER_COMMENT", comment); - stmt->bindString(":LAST_UPDATE_USER_NAME", admin.username); - stmt->bindString(":LAST_UPDATE_HOST_NAME", admin.host); - stmt->bindUint64(":LAST_UPDATE_TIME", now); - stmt->bindString(":MOUNT_POLICY_NAME", name); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":USER_COMMENT", comment); + stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username); + stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host); + stmt.bindUint64(":LAST_UPDATE_TIME", now); + stmt.bindString(":MOUNT_POLICY_NAME", name); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify mount policy ") + name + " because they do not exist"); } } catch(exception::UserError &) { @@ -3432,7 +3462,7 @@ void RdbmsCatalogue::modifyMountPolicyComment(const common::dataStructures::Secu //------------------------------------------------------------------------------ // insertArchiveFile //------------------------------------------------------------------------------ -void RdbmsCatalogue::insertArchiveFile(rdbms::PooledConn &conn, const rdbms::Stmt::AutocommitMode autocommitMode, +void RdbmsCatalogue::insertArchiveFile(rdbms::Conn &conn, const rdbms::AutocommitMode autocommitMode, const 
ArchiveFileRow &row) { try { if(!storageClassExists(conn, row.diskInstance, row.storageClassName)) { @@ -3472,21 +3502,21 @@ void RdbmsCatalogue::insertArchiveFile(rdbms::PooledConn &conn, const rdbms::Stm ":RECONCILIATION_TIME)"; auto stmt = conn.createStmt(sql, autocommitMode); - stmt->bindUint64(":ARCHIVE_FILE_ID", row.archiveFileId); - stmt->bindString(":DISK_INSTANCE_NAME", row.diskInstance); - stmt->bindString(":DISK_FILE_ID", row.diskFileId); - stmt->bindString(":DISK_FILE_PATH", row.diskFilePath); - stmt->bindString(":DISK_FILE_USER", row.diskFileUser); - stmt->bindString(":DISK_FILE_GROUP", row.diskFileGroup); - stmt->bindString(":DISK_FILE_RECOVERY_BLOB", row.diskFileRecoveryBlob); - stmt->bindUint64(":SIZE_IN_BYTES", row.size); - stmt->bindString(":CHECKSUM_TYPE", row.checksumType); - stmt->bindString(":CHECKSUM_VALUE", row.checksumValue); - stmt->bindString(":STORAGE_CLASS_NAME", row.storageClassName); - stmt->bindUint64(":CREATION_TIME", now); - stmt->bindUint64(":RECONCILIATION_TIME", now); - - stmt->executeNonQuery(); + stmt.bindUint64(":ARCHIVE_FILE_ID", row.archiveFileId); + stmt.bindString(":DISK_INSTANCE_NAME", row.diskInstance); + stmt.bindString(":DISK_FILE_ID", row.diskFileId); + stmt.bindString(":DISK_FILE_PATH", row.diskFilePath); + stmt.bindString(":DISK_FILE_USER", row.diskFileUser); + stmt.bindString(":DISK_FILE_GROUP", row.diskFileGroup); + stmt.bindString(":DISK_FILE_RECOVERY_BLOB", row.diskFileRecoveryBlob); + stmt.bindUint64(":SIZE_IN_BYTES", row.size); + stmt.bindString(":CHECKSUM_TYPE", row.checksumType); + stmt.bindString(":CHECKSUM_VALUE", row.checksumValue); + stmt.bindString(":STORAGE_CLASS_NAME", row.storageClassName); + stmt.bindUint64(":CREATION_TIME", now); + stmt.bindUint64(":RECONCILIATION_TIME", now); + + stmt.executeNonQuery(); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -3677,38 +3707,38 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary( } auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); if(searchCriteria.archiveFileId) { - stmt->bindUint64(":ARCHIVE_FILE_ID", searchCriteria.archiveFileId.value()); + stmt.bindUint64(":ARCHIVE_FILE_ID", searchCriteria.archiveFileId.value()); } if(searchCriteria.diskInstance) { - stmt->bindString(":DISK_INSTANCE_NAME", searchCriteria.diskInstance.value()); + stmt.bindString(":DISK_INSTANCE_NAME", searchCriteria.diskInstance.value()); } if(searchCriteria.diskFileId) { - stmt->bindString(":DISK_FILE_ID", searchCriteria.diskFileId.value()); + stmt.bindString(":DISK_FILE_ID", searchCriteria.diskFileId.value()); } if(searchCriteria.diskFilePath) { - stmt->bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value()); + stmt.bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value()); } if(searchCriteria.diskFileUser) { - stmt->bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value()); + stmt.bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value()); } if(searchCriteria.diskFileGroup) { - stmt->bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value()); + stmt.bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value()); } if(searchCriteria.storageClass) { - stmt->bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value()); + stmt.bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value()); } if(searchCriteria.vid) 
{ - stmt->bindString(":VID", searchCriteria.vid.value()); + stmt.bindString(":VID", searchCriteria.vid.value()); } if(searchCriteria.tapeFileCopyNb) { - stmt->bindUint64(":TAPE_FILE_COPY_NB", searchCriteria.tapeFileCopyNb.value()); + stmt.bindUint64(":TAPE_FILE_COPY_NB", searchCriteria.tapeFileCopyNb.value()); } if(searchCriteria.tapePool) { - stmt->bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); + stmt.bindString(":TAPE_POOL_NAME", searchCriteria.tapePool.value()); } - auto rset = stmt->executeQuery(); + auto rset = stmt.executeQuery(); if(!rset.next()) { throw exception::Exception("SELECT COUNT statement did not returned a row"); } @@ -3748,6 +3778,13 @@ common::dataStructures::ArchiveFile RdbmsCatalogue::getArchiveFileById(const uin // tapeLabelled //------------------------------------------------------------------------------ void RdbmsCatalogue::tapeLabelled(const std::string &vid, const std::string &drive, const bool lbpIsOn) { + return retryOnLostConnection(m_log, [&]{return tapeLabelledInternal(vid, drive, lbpIsOn);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// tapeLabelledInternal +//------------------------------------------------------------------------------ +void RdbmsCatalogue::tapeLabelledInternal(const std::string &vid, const std::string &drive, const bool lbpIsOn) { try { const time_t now = time(nullptr); const char *const sql = @@ -3758,16 +3795,18 @@ void RdbmsCatalogue::tapeLabelled(const std::string &vid, const std::string &dri "WHERE " "VID = :VID"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":LABEL_DRIVE", drive); - stmt->bindUint64(":LABEL_TIME", now); - stmt->bindBool(":LBP_IS_ON", lbpIsOn); - stmt->bindString(":VID", vid); - stmt->executeNonQuery(); - - if(0 == stmt->getNbAffectedRows()) { + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":LABEL_DRIVE", drive); + stmt.bindUint64(":LABEL_TIME", now); + stmt.bindBool(":LBP_IS_ON", lbpIsOn); + stmt.bindString(":VID", vid); + stmt.executeNonQuery(); + + if(0 == stmt.getNbAffectedRows()) { throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist"); } + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch (exception::Exception &ex) { @@ -3780,6 +3819,16 @@ void RdbmsCatalogue::tapeLabelled(const std::string &vid, const std::string &dri //------------------------------------------------------------------------------ common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::prepareForNewFile(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::UserIdentity &user) { + return retryOnLostConnection( m_log, [&]{return prepareForNewFileInternal(diskInstanceName, storageClassName, user);}, + m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// prepareForNewFileInternal +//------------------------------------------------------------------------------ +common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::prepareForNewFileInternal( + const std::string &diskInstanceName, const std::string &storageClassName, + const common::dataStructures::UserIdentity &user) { try { auto conn = m_connPool.getConn(); const 
common::dataStructures::TapeCopyToPoolMap copyToPoolMap = getTapeCopyToPoolMap(conn, diskInstanceName, @@ -3793,11 +3842,11 @@ common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::prepareForNewFi throw ue; } if(copyToPoolMap.size() != expectedNbRoutes) { - exception::Exception ex; - ex.getMessage() << "Storage class " << diskInstanceName << ":" << storageClassName << " does not have the" + exception::UserError ue; + ue.getMessage() << "Storage class " << diskInstanceName << ":" << storageClassName << " does not have the" " expected number of archive routes routes: expected=" << expectedNbRoutes << ", actual=" << copyToPoolMap.size(); - throw ex; + throw ue; } const RequesterAndGroupMountPolicies mountPolicies = getMountPolicies(conn, diskInstanceName, user.name, @@ -3817,10 +3866,12 @@ common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::prepareForNewFi } // Now that we have both the archive routes and the mount policy it's safe to - // consume an archive file identifierarchiveFileId + // consume an archive file identifier const uint64_t archiveFileId = getNextArchiveFileId(conn); return common::dataStructures::ArchiveFileQueueCriteria(archiveFileId, copyToPoolMap, mountPolicy); + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -3831,7 +3882,7 @@ common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::prepareForNewFi //------------------------------------------------------------------------------ // getTapeCopyToPoolMap //------------------------------------------------------------------------------ -common::dataStructures::TapeCopyToPoolMap RdbmsCatalogue::getTapeCopyToPoolMap(rdbms::PooledConn &conn, +common::dataStructures::TapeCopyToPoolMap RdbmsCatalogue::getTapeCopyToPoolMap(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName) const { try { common::dataStructures::TapeCopyToPoolMap copyToPoolMap; @@ -3844,10 +3895,10 @@ common::dataStructures::TapeCopyToPoolMap RdbmsCatalogue::getTapeCopyToPoolMap(r "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + auto rset = stmt.executeQuery(); while (rset.next()) { const uint64_t copyNb = rset.columnUint64("COPY_NB"); const std::string tapePoolName = rset.columnString("TAPE_POOL_NAME"); @@ -3863,7 +3914,7 @@ common::dataStructures::TapeCopyToPoolMap RdbmsCatalogue::getTapeCopyToPoolMap(r //------------------------------------------------------------------------------ // getExpectedNbArchiveRoutes //------------------------------------------------------------------------------ -uint64_t RdbmsCatalogue::getExpectedNbArchiveRoutes(rdbms::PooledConn &conn, const std::string &diskInstanceName, +uint64_t RdbmsCatalogue::getExpectedNbArchiveRoutes(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName) const { try { const char *const sql = @@ -3874,10 +3925,10 @@ uint64_t 
RdbmsCatalogue::getExpectedNbArchiveRoutes(rdbms::PooledConn &conn, con "WHERE " "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":STORAGE_CLASS_NAME", storageClassName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":STORAGE_CLASS_NAME", storageClassName); + auto rset = stmt.executeQuery(); if(!rset.next()) { throw exception::Exception("Result set of SELECT COUNT(*) is empty"); } @@ -3891,8 +3942,8 @@ uint64_t RdbmsCatalogue::getExpectedNbArchiveRoutes(rdbms::PooledConn &conn, con // updateTape //------------------------------------------------------------------------------ void RdbmsCatalogue::updateTape( - rdbms::PooledConn &conn, - const rdbms::Stmt::AutocommitMode autocommitMode, + rdbms::Conn &conn, + const rdbms::AutocommitMode autocommitMode, const std::string &vid, const uint64_t lastFSeq, const uint64_t compressedBytesWritten, @@ -3908,12 +3959,12 @@ void RdbmsCatalogue::updateTape( "WHERE " "VID = :VID"; auto stmt = conn.createStmt(sql, autocommitMode); - stmt->bindString(":VID", vid); - stmt->bindUint64(":LAST_FSEQ", lastFSeq); - stmt->bindUint64(":DATA_IN_BYTES", compressedBytesWritten); - stmt->bindString(":LAST_WRITE_DRIVE", tapeDrive); - stmt->bindUint64(":LAST_WRITE_TIME", now); - stmt->executeNonQuery(); + stmt.bindString(":VID", vid); + stmt.bindUint64(":LAST_FSEQ", lastFSeq); + stmt.bindUint64(":DATA_IN_BYTES", compressedBytesWritten); + stmt.bindString(":LAST_WRITE_DRIVE", tapeDrive); + stmt.bindUint64(":LAST_WRITE_TIME", now); + stmt.executeNonQuery(); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -3923,6 +3974,18 @@ void RdbmsCatalogue::updateTape( // prepareToRetrieveFile //------------------------------------------------------------------------------ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetrieveFile( + const std::string &diskInstanceName, + const uint64_t archiveFileId, + const common::dataStructures::UserIdentity &user, + log::LogContext &lc) { + return retryOnLostConnection( m_log, [&]{return prepareToRetrieveFileInternal(diskInstanceName, archiveFileId, user, + lc);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// prepareToRetrieveFileInternal +//------------------------------------------------------------------------------ +common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetrieveFileInternal( const std::string &diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity &user, @@ -3976,6 +4039,8 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetri criteria.archiveFile = *archiveFile; criteria.mountPolicy = mountPolicy; return criteria; + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -3987,6 +4052,18 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetri // prepareToRetrieveFileByDiskFileId 
//------------------------------------------------------------------------------ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetrieveFileByDiskFileId( + const std::string &diskInstanceName, + const std::string &diskFileId, + const common::dataStructures::UserIdentity &user, + log::LogContext &lc) { + return retryOnLostConnection( m_log, [&]{return prepareToRetrieveFileByDiskFileIdInternal(diskInstanceName, + diskFileId, user, lc);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// prepareToRetrieveFileByDiskFileIdInternal +//------------------------------------------------------------------------------ +common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetrieveFileByDiskFileIdInternal( const std::string &diskInstanceName, const std::string &diskFileId, const common::dataStructures::UserIdentity &user, @@ -4034,6 +4111,8 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetri criteria.archiveFile = *archiveFile; criteria.mountPolicy = mountPolicy; return criteria; + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::UserError &) { throw; } catch(exception::Exception &ex) { @@ -4045,7 +4124,7 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetri // getMountPolicies //------------------------------------------------------------------------------ RequesterAndGroupMountPolicies RdbmsCatalogue::getMountPolicies( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName, const std::string &requesterGroupName) const { @@ -4105,12 +4184,12 @@ RequesterAndGroupMountPolicies RdbmsCatalogue::getMountPolicies( "REQUESTER_GROUP_MOUNT_RULE.DISK_INSTANCE_NAME = :GROUP_DISK_INSTANCE_NAME AND " "REQUESTER_GROUP_MOUNT_RULE.REQUESTER_GROUP_NAME = :REQUESTER_GROUP_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":REQUESTER_DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":GROUP_DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":REQUESTER_NAME", requesterName); - stmt->bindString(":REQUESTER_GROUP_NAME", requesterGroupName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":REQUESTER_DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":GROUP_DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":REQUESTER_NAME", requesterName); + stmt.bindString(":REQUESTER_GROUP_NAME", requesterGroupName); + auto rset = stmt.executeQuery(); RequesterAndGroupMountPolicies policies; while(rset.next()) { @@ -4147,14 +4226,29 @@ RequesterAndGroupMountPolicies RdbmsCatalogue::getMountPolicies( // isAdmin //------------------------------------------------------------------------------ bool RdbmsCatalogue::isAdmin(const common::dataStructures::SecurityIdentity &admin) const { - auto conn = m_connPool.getConn(); - return userIsAdmin(conn, admin.username) && hostIsAdmin(conn, admin.host); + return retryOnLostConnection(m_log, [&]{return isAdminInternal(admin);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// isAdminInternal +//------------------------------------------------------------------------------ +bool RdbmsCatalogue::isAdminInternal(const 
common::dataStructures::SecurityIdentity &admin) const { + try { + auto conn = m_connPool.getConn(); + return userIsAdmin(conn, admin.username) && hostIsAdmin(conn, admin.host); + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); + } catch(exception::UserError &) { + throw; + } catch(exception::Exception &ex) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); + } } //------------------------------------------------------------------------------ // userIsAdmin //------------------------------------------------------------------------------ -bool RdbmsCatalogue::userIsAdmin(rdbms::PooledConn &conn, const std::string &userName) const { +bool RdbmsCatalogue::userIsAdmin(rdbms::Conn &conn, const std::string &userName) const { const char *const sql = "SELECT " "ADMIN_USER_NAME AS ADMIN_USER_NAME " @@ -4162,16 +4256,16 @@ bool RdbmsCatalogue::userIsAdmin(rdbms::PooledConn &conn, const std::string &use "ADMIN_USER " "WHERE " "ADMIN_USER_NAME = :ADMIN_USER_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":ADMIN_USER_NAME", userName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":ADMIN_USER_NAME", userName); + auto rset = stmt.executeQuery(); return rset.next(); } //------------------------------------------------------------------------------ // hostIsAdmin //------------------------------------------------------------------------------ -bool RdbmsCatalogue::hostIsAdmin(rdbms::PooledConn &conn, const std::string &hostName) const { +bool RdbmsCatalogue::hostIsAdmin(rdbms::Conn &conn, const std::string &hostName) const { const char *const sql = "SELECT " "ADMIN_HOST_NAME AS ADMIN_HOST_NAME " @@ -4179,9 +4273,9 @@ bool RdbmsCatalogue::hostIsAdmin(rdbms::PooledConn &conn, const std::string &hos "ADMIN_HOST " "WHERE " "ADMIN_HOST_NAME = :ADMIN_HOST_NAME"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":ADMIN_HOST_NAME", hostName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":ADMIN_HOST_NAME", hostName); + auto rset = stmt.executeQuery(); return rset.next(); } @@ -4189,6 +4283,13 @@ bool RdbmsCatalogue::hostIsAdmin(rdbms::PooledConn &conn, const std::string &hos // getTapesForWriting //------------------------------------------------------------------------------ std::list<TapeForWriting> RdbmsCatalogue::getTapesForWriting(const std::string &logicalLibraryName) const { + return retryOnLostConnection(m_log, [&]{return getTapesForWritingInternal(logicalLibraryName);}, m_maxTriesToConnect); +} + +//------------------------------------------------------------------------------ +// getTapesForWritingInternal +//------------------------------------------------------------------------------ +std::list<TapeForWriting> RdbmsCatalogue::getTapesForWritingInternal(const std::string &logicalLibraryName) const { try { std::list<TapeForWriting> tapes; const char *const sql = @@ -4198,19 +4299,20 @@ std::list<TapeForWriting> RdbmsCatalogue::getTapesForWriting(const std::string & "CAPACITY_IN_BYTES AS CAPACITY_IN_BYTES," "DATA_IN_BYTES AS DATA_IN_BYTES," "LAST_FSEQ AS LAST_FSEQ " - "FROM " + "FROM " "TAPE " - "WHERE " + "WHERE " // "LBP_IS_ON IS NOT NULL AND " // Set when the tape has been labelled // "LABEL_DRIVE IS NOT NULL AND " // 
Set when the tape has been labelled // "LABEL_TIME IS NOT NULL AND " // Set when the tape has been labelled "IS_DISABLED = 0 AND " "IS_FULL = 0 AND " "LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME"; + auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":LOGICAL_LIBRARY_NAME", logicalLibraryName); + auto rset = stmt.executeQuery(); while (rset.next()) { TapeForWriting tape; tape.vid = rset.columnString("VID"); @@ -4221,8 +4323,9 @@ std::list<TapeForWriting> RdbmsCatalogue::getTapesForWriting(const std::string & tapes.push_back(tape); } - return tapes; + } catch(exception::LostDatabaseConnection &le) { + throw exception::LostDatabaseConnection(std::string(__FUNCTION__) + " failed: " + le.getMessage().str()); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -4232,8 +4335,8 @@ std::list<TapeForWriting> RdbmsCatalogue::getTapesForWriting(const std::string & // insertTapeFile //------------------------------------------------------------------------------ void RdbmsCatalogue::insertTapeFile( - rdbms::PooledConn &conn, - const rdbms::Stmt::AutocommitMode autocommitMode, + rdbms::Conn &conn, + const rdbms::AutocommitMode autocommitMode, const common::dataStructures::TapeFile &tapeFile, const uint64_t archiveFileId) { try { @@ -4257,15 +4360,15 @@ void RdbmsCatalogue::insertTapeFile( ":ARCHIVE_FILE_ID)"; auto stmt = conn.createStmt(sql, autocommitMode); - stmt->bindString(":VID", tapeFile.vid); - stmt->bindUint64(":FSEQ", tapeFile.fSeq); - stmt->bindUint64(":BLOCK_ID", tapeFile.blockId); - stmt->bindUint64(":COMPRESSED_SIZE_IN_BYTES", tapeFile.compressedSize); - stmt->bindUint64(":COPY_NB", tapeFile.copyNb); - stmt->bindUint64(":CREATION_TIME", now); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + stmt.bindString(":VID", tapeFile.vid); + stmt.bindUint64(":FSEQ", tapeFile.fSeq); + stmt.bindUint64(":BLOCK_ID", tapeFile.blockId); + stmt.bindUint64(":COMPRESSED_SIZE_IN_BYTES", tapeFile.compressedSize); + stmt.bindUint64(":COPY_NB", tapeFile.copyNb); + stmt.bindUint64(":CREATION_TIME", now); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -4274,7 +4377,7 @@ void RdbmsCatalogue::insertTapeFile( //------------------------------------------------------------------------------ // setTapeLastFseq //------------------------------------------------------------------------------ -void RdbmsCatalogue::setTapeLastFSeq(rdbms::PooledConn &conn, const std::string &vid, const uint64_t lastFSeq) { +void RdbmsCatalogue::setTapeLastFSeq(rdbms::Conn &conn, const std::string &vid, const uint64_t lastFSeq) { try { threading::MutexLocker locker(m_mutex); @@ -4290,10 +4393,10 @@ void RdbmsCatalogue::setTapeLastFSeq(rdbms::PooledConn &conn, const std::string "LAST_FSEQ = :LAST_FSEQ " "WHERE " "VID=:VID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON); - stmt->bindString(":VID", vid); - stmt->bindUint64(":LAST_FSEQ", lastFSeq); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); + stmt.bindString(":VID", vid); + stmt.bindUint64(":LAST_FSEQ", lastFSeq); + 
stmt.executeNonQuery(); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } @@ -4302,7 +4405,7 @@ void RdbmsCatalogue::setTapeLastFSeq(rdbms::PooledConn &conn, const std::string //------------------------------------------------------------------------------ // getTapeLastFSeq //------------------------------------------------------------------------------ -uint64_t RdbmsCatalogue::getTapeLastFSeq(rdbms::PooledConn &conn, const std::string &vid) const { +uint64_t RdbmsCatalogue::getTapeLastFSeq(rdbms::Conn &conn, const std::string &vid) const { try { const char *const sql = "SELECT " @@ -4311,9 +4414,9 @@ uint64_t RdbmsCatalogue::getTapeLastFSeq(rdbms::PooledConn &conn, const std::str "TAPE " "WHERE " "VID = :VID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":VID", vid); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":VID", vid); + auto rset = stmt.executeQuery(); if(rset.next()) { return rset.columnUint64("LAST_FSEQ"); } else { @@ -4328,7 +4431,7 @@ uint64_t RdbmsCatalogue::getTapeLastFSeq(rdbms::PooledConn &conn, const std::str // getArchiveFileByArchiveId //------------------------------------------------------------------------------ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveFileByArchiveFileId( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const uint64_t archiveFileId) const { try { const char *const sql = @@ -4358,9 +4461,9 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF "ARCHIVE_FILE.ARCHIVE_FILE_ID = TAPE_FILE.ARCHIVE_FILE_ID " "WHERE " "ARCHIVE_FILE.ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + auto rset = stmt.executeQuery(); std::unique_ptr<common::dataStructures::ArchiveFile> archiveFile; while (rset.next()) { if(nullptr == archiveFile.get()) { @@ -4408,7 +4511,7 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF // getArchiveFileByDiskFileId //------------------------------------------------------------------------------ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveFileByDiskFileId( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileId) const { try { @@ -4440,10 +4543,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF "WHERE " "ARCHIVE_FILE.DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND " "ARCHIVE_FILE.DISK_FILE_ID = :DISK_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindString(":DISK_INSTANCE_NAME", diskInstanceName); - stmt->bindString(":DISK_FILE_ID", diskFileId); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName); + stmt.bindString(":DISK_FILE_ID", diskFileId); + auto rset = stmt.executeQuery(); std::unique_ptr<common::dataStructures::ArchiveFile> archiveFile; while (rset.next()) { if(nullptr == archiveFile.get()) { @@ -4493,8 +4596,8 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF void 
RdbmsCatalogue::ping() { const char *const sql = "SELECT COUNT(*) FROM CTA_CATALOGUE"; auto conn = m_connPool.getConn(); - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); } //------------------------------------------------------------------------------ diff --git a/catalogue/RdbmsCatalogue.hpp b/catalogue/RdbmsCatalogue.hpp index c2318545d96d3c20d090006cec5525241161f905..2b57b7fb858d8f484c43539218401e8d965cf07d 100644 --- a/catalogue/RdbmsCatalogue.hpp +++ b/catalogue/RdbmsCatalogue.hpp @@ -21,8 +21,8 @@ #include "catalogue/Catalogue.hpp" #include "catalogue/RequesterAndGroupMountPolicies.hpp" #include "common/threading/Mutex.hpp" -#include "rdbms/Conn.hpp" #include "rdbms/ConnPool.hpp" +#include "rdbms/Login.hpp" #include <memory> @@ -62,19 +62,24 @@ protected: * Protected constructor only to be called by sub-classes. * * @param log Object representing the API to the CTA logging system. - * @param connFactory The factory for creating new database connections. + * @param login The database login details to be used to create new + * connections. * @param nbConns The maximum number of concurrent connections to the * underlying relational database for all operations accept listing archive * files which can be relatively long operations. * @param nbArchiveFileListingConns The maximum number of concurrent * connections to the underlying relational database for the sole purpose of * listing archive files. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ RdbmsCatalogue( log::Logger &log, - std::unique_ptr<rdbms::ConnFactory> connFactory, + const rdbms::Login &login, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect); public: @@ -122,6 +127,7 @@ public: * disabled, not full and are in the specified logical library. * * @param logicalLibraryName The name of the logical library. + * @return The list of tapes for writing. */ std::list<TapeForWriting> getTapesForWriting(const std::string &logicalLibraryName) const override; @@ -533,11 +539,6 @@ protected: */ threading::Mutex m_mutex; - /** - * The database connection factory. - */ - std::unique_ptr<rdbms::ConnFactory> m_connFactory; - /** * The pool of connections to the underlying relational database to be used * for all operations accept listing archive files which can be relatively @@ -551,6 +552,12 @@ */ mutable rdbms::ConnPool m_archiveFileListingConnPool; + /** + * The maximum number of times a single method should try to connect to the + * database in the event of LostDatabaseConnection exceptions being thrown. + */ + uint32_t m_maxTriesToConnect; + /** * Returns true if the specified admin user exists. * * @param conn The database connection. * @param adminUsername The name of the admin user. * @return True if the admin user exists. */ - bool adminUserExists(rdbms::PooledConn &conn, const std::string adminUsername) const; + bool adminUserExists(rdbms::Conn &conn, const std::string adminUsername) const; /** * Returns true if the specified admin host exists. @@ -567,7 +574,7 @@ * @param adminHost The name of the admin host. * @return True if the admin host exists.
*/ - bool adminHostExists(rdbms::PooledConn &conn, const std::string adminHost) const; + bool adminHostExists(rdbms::Conn &conn, const std::string adminHost) const; /** * Returns true if the specified storage class exists. @@ -578,7 +585,7 @@ protected: * @param storageClassName The name of the storage class. * @return True if the storage class exists. */ - bool storageClassExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &storageClassName) + bool storageClassExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName) const; /** @@ -596,7 +603,7 @@ protected: * @param tapePoolName The name of the tape pool. * @return True if the tape pool exists. */ - bool tapePoolExists(rdbms::PooledConn &conn, const std::string &tapePoolName) const; + bool tapePoolExists(rdbms::Conn &conn, const std::string &tapePoolName) const; /** * Returns true if the specified archive file identifier exists. @@ -605,7 +612,7 @@ protected: * @param archiveFileId The archive file identifier. * @return True if the archive file identifier exists. */ - bool archiveFileIdExists(rdbms::PooledConn &conn, const uint64_t archiveFileId) const; + bool archiveFileIdExists(rdbms::Conn &conn, const uint64_t archiveFileId) const; /** * Returns true if the specified disk file identifier exists. @@ -616,7 +623,7 @@ protected: * @param diskFileId The disk file identifier. * @return True if the disk file identifier exists. */ - bool diskFileIdExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &diskFileId) const; + bool diskFileIdExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileId) const; /** * Returns true if the specified disk file path exists. @@ -627,7 +634,7 @@ protected: * @param diskFilePath The disk file path. * @return True if the disk file path exists. */ - bool diskFilePathExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &diskFilePath) + bool diskFilePathExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFilePath) const; /** @@ -639,7 +646,7 @@ protected: * @param diskFileUSer The name of the disk file user. * @return True if the disk file user exists. */ - bool diskFileUserExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &diskFileUser) + bool diskFileUserExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileUser) const; /** @@ -651,7 +658,7 @@ protected: * @param diskFileGroup The name of the disk file group. * @return True if the disk file group exists. */ - bool diskFileGroupExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &diskFileGroup) + bool diskFileGroupExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileGroup) const; /** @@ -665,7 +672,7 @@ protected: * @param copyNb The copy number of the tape file. * @return True if the archive route exists. */ - bool archiveRouteExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, const std::string &storageClassName, + bool archiveRouteExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName, const uint64_t copyNb) const; /** @@ -683,7 +690,7 @@ protected: * @param vid The volume identifier of the tape. * @return True if the tape exists. 
*/ - bool tapeExists(rdbms::PooledConn &conn, const std::string &vid) const; + bool tapeExists(rdbms::Conn &conn, const std::string &vid) const; /** * Returns the list of tapes that meet the specified search criteria. @@ -692,7 +699,7 @@ protected: * @param searchCriteria The search criteria. * @return The list of tapes. */ - std::list<common::dataStructures::Tape> getTapes(rdbms::PooledConn &conn, const TapeSearchCriteria &searchCriteria) const; + std::list<common::dataStructures::Tape> getTapes(rdbms::Conn &conn, const TapeSearchCriteria &searchCriteria) const; /** * Returns true if the specified logical library exists. @@ -701,7 +708,7 @@ protected: * @param logicalLibraryName The name of the logical library. * @return True if the logical library exists. */ - bool logicalLibraryExists(rdbms::PooledConn &conn, const std::string &logicalLibraryName) const; + bool logicalLibraryExists(rdbms::Conn &conn, const std::string &logicalLibraryName) const; /** * Returns true if the specified mount policy exists. @@ -710,7 +717,7 @@ protected: * @param mountPolicyName The name of the mount policy * @return True if the mount policy exists. */ - bool mountPolicyExists(rdbms::PooledConn &conn, const std::string &mountPolicyName) const; + bool mountPolicyExists(rdbms::Conn &conn, const std::string &mountPolicyName) const; /** * Returns true if the specified requester mount-rule exists. @@ -721,7 +728,7 @@ protected: * to be unique within its disk instance. * @return True if the requester mount-rule exists. */ - bool requesterMountRuleExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, + bool requesterMountRuleExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName) const; /** @@ -735,7 +742,7 @@ protected: * @return The mount policy or nullptr if one does not exists. */ common::dataStructures::MountPolicy *getRequesterMountPolicy( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName) const; @@ -749,7 +756,7 @@ protected: * guaranteed to be unique within its disk instance. * @return True if the requester-group mount-rule exists. */ - bool requesterGroupMountRuleExists(rdbms::PooledConn &conn, const std::string &diskInstanceName, + bool requesterGroupMountRuleExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterGroupName) const; /** @@ -763,7 +770,7 @@ protected: * guaranteed to be unique within its disk instance. * @return The mount policy or nullptr if one does not exists. */ - common::dataStructures::MountPolicy *getRequesterGroupMountPolicy(rdbms::PooledConn &conn, + common::dataStructures::MountPolicy *getRequesterGroupMountPolicy(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterGroupName) const; /** @@ -787,7 +794,7 @@ protected: * @param autocommitMode The autocommit mode of the SQL insert statement. * @param row The row to be inserted. */ - void insertArchiveFile(rdbms::PooledConn &conn, const rdbms::Stmt::AutocommitMode autocommitMode, + void insertArchiveFile(rdbms::Conn &conn, const rdbms::AutocommitMode autocommitMode, const ArchiveFileRow &row); /** @@ -803,7 +810,7 @@ protected: * table. * @return true if the specified user name is listed in the ADMIN_USER table. */ - bool userIsAdmin(rdbms::PooledConn &conn, const std::string &userName) const; + bool userIsAdmin(rdbms::Conn &conn, const std::string &userName) const; /** * Returns true if the specified host name is listed in the ADMIN_HOST table. 
@@ -813,7 +820,7 @@ protected: * table. * @return true if the specified host name is listed in the ADMIN_HOST table. */ - bool hostIsAdmin(rdbms::PooledConn &conn, const std::string &userName) const; + bool hostIsAdmin(rdbms::Conn &conn, const std::string &userName) const; /** * Returns the expected number of archive routes for the specified storage @@ -828,7 +835,7 @@ protected: * guaranteed to be unique within its disk instance. * @return The expected number of archive routes. */ - uint64_t getExpectedNbArchiveRoutes(rdbms::PooledConn &conn, const std::string &diskInstanceName, + uint64_t getExpectedNbArchiveRoutes(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassNAme) const; /** @@ -841,8 +848,8 @@ protected: * file is a copy. */ void insertTapeFile( - rdbms::PooledConn &conn, - const rdbms::Stmt::AutocommitMode autocommitMode, + rdbms::Conn &conn, + const rdbms::AutocommitMode autocommitMode, const common::dataStructures::TapeFile &tapeFile, const uint64_t archiveFileId); @@ -853,7 +860,7 @@ protected: * @param vid The volume identifier of the tape. * @param lastFseq The new value of the last FSeq. */ - void setTapeLastFSeq(rdbms::PooledConn &conn, const std::string &vid, const uint64_t lastFSeq); + void setTapeLastFSeq(rdbms::Conn &conn, const std::string &vid, const uint64_t lastFSeq); /** * Returns the last FSeq of the specified tape. @@ -862,7 +869,7 @@ protected: * @param vid The volume identifier of the tape. * @return The last FSeq. */ - uint64_t getTapeLastFSeq(rdbms::PooledConn &conn, const std::string &vid) const; + uint64_t getTapeLastFSeq(rdbms::Conn &conn, const std::string &vid) const; /** * Updates the specified tape with the specified information. @@ -877,8 +884,8 @@ protected: * @param tapeDrive The name of the tape drive that last wrote to the tape. */ void updateTape( - rdbms::PooledConn &conn, - const rdbms::Stmt::AutocommitMode autocommitMode, + rdbms::Conn &conn, + const rdbms::AutocommitMode autocommitMode, const std::string &vid, const uint64_t lastFSeq, const uint64_t compressedBytesWritten, @@ -894,7 +901,7 @@ protected: * an empty list. */ std::unique_ptr<common::dataStructures::ArchiveFile> getArchiveFileByArchiveFileId( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const uint64_t archiveFileId) const; /** @@ -912,7 +919,7 @@ protected: * an empty list. */ std::unique_ptr<common::dataStructures::ArchiveFile> getArchiveFileByDiskFileId( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstance, const std::string &diskFileId) const; @@ -929,7 +936,7 @@ protected: * @return The mount policies. */ RequesterAndGroupMountPolicies getMountPolicies( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &requesterName, const std::string &requesterGroupName) const; @@ -946,7 +953,7 @@ protected: * @return A unique archive ID that can be used by a new archive file within * the catalogue. */ - virtual uint64_t getNextArchiveFileId(rdbms::PooledConn &conn) = 0; + virtual uint64_t getNextArchiveFileId(rdbms::Conn &conn) = 0; /** * Returns the mapping from tape copy to tape pool for the specified storage @@ -961,7 +968,7 @@ protected: * class. 
*/ common::dataStructures::TapeCopyToPoolMap getTapeCopyToPoolMap( - rdbms::PooledConn &conn, + rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &storageClassName) const; @@ -973,6 +980,155 @@ protected: */ void checkTapeFileWrittenFieldsAreSet(const TapeFileWritten &event); + /** + * Returns true if the specified user running the CTA command-line tool on + * the specified host has administrator privileges. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param admin The administrator. + * @return True if the specified user running the CTA command-line tool on + * the specified host has administrator privileges. + */ + bool isAdminInternal(const common::dataStructures::SecurityIdentity &admin) const; + + /** + * Notifies the catalogue that the specified tape was labelled. + * + * @param vid The volume identifier of the tape. + * @param drive The name of tape drive that was used to label the tape. + * @param lbpIsOn Set to true if Logical Block Protection (LBP) was enabled. + */ + void tapeLabelledInternal(const std::string &vid, const std::string &drive, const bool lbpIsOn); + + /** + * Returns the list of tapes that can be written to by a tape drive in the + * specified logical library, in other words tapes that are labelled, not + * disabled, not full and are in the specified logical library. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param logicalLibraryName The name of the logical library. + * @return The list of tapes for writing. + */ + std::list<TapeForWriting> getTapesForWritingInternal(const std::string &logicalLibraryName) const; + + /** + * Notifies the CTA catalogue that the specified tape has been mounted in + * order to archive files. + * + * The purpose of this method is to keep track of which drive mounted a given + * tape for archiving files last. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param vid The volume identifier of the tape. + * @param drive The name of the drive where the tape was mounted. + */ + void tapeMountedForArchiveInternal(const std::string &vid, const std::string &drive); + + /** + * Prepares for a file retrieval by returning the information required to + * queue the associated retrieve request(s). + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param diskInstanceName The name of the instance from where the retrieval + * request originated + * @param archiveFileId The unique identifier of the archived file that is + * to be retrieved. + * @param user The user for whom the file is to be retrieved. This will be + * used by the Catalogue to determine the mount policy to be used when + * retrieving the file. + * @param lc The log context. + * + * @return The information required to queue the associated retrieve request(s). + */ + common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFileInternal( + const std::string &diskInstanceName, + const uint64_t archiveFileId, + const common::dataStructures::UserIdentity &user, + log::LogContext &lc); + + /** + * Prepares for a file retrieval by returning the information required to + * queue the associated retrieve request(s). + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. 
+ * + * @param diskInstanceName The name of the instance from where the retrieval + * request originated + * @param diskFileId The identifier of the source disk file which is unique + * within its host disk system. Two files from different disk systems may + * have the same identifier. The combination of diskInstanceName and + * diskFileId must be globally unique, in other words unique within the CTA + * catalogue. + * @param archiveFileId The unique identifier of the archived file that is + * to be retrieved. + * @param user The user for whom the file is to be retrieved. This will be + * used by the Catalogue to determine the mount policy to be used when + * retrieving the file. + * @param lc The log context. + * + * @return The information required to queue the associated retrieve request(s). + */ + common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFileByDiskFileIdInternal( + const std::string &diskInstanceName, + const std::string &diskFileId, + const common::dataStructures::UserIdentity &user, + log::LogContext &lc); + + /** + * Notifies the CTA catalogue that the specified tape has been mounted in + * order to retrieve files. + * + * The purpose of this method is to keep track of which drive mounted a given + * tape for retrieving files last. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param vid The volume identifier of the tape. + * @param drive The name of the drive where the tape was mounted. + */ + void tapeMountedForRetrieveInternal(const std::string &vid, const std::string &drive); + + /** + * This method notifies the CTA catalogue that there is no more free space on + * the specified tape. + * + * @param vid The volume identifier of the tape. + */ + void noSpaceLeftOnTapeInternal(const std::string &vid); + + /** + * Prepares the catalogue for a new archive file and returns the information + * required to queue the associated archive request. + * + * This internal method can be re-tried if it throws a LostDatabaseConnection + * exception. + * + * @param diskInstanceName The name of the disk instance to which the + * storage class belongs. + * @param storageClassName The name of the storage class of the file to be + * archived. The storage class name is only guaranteed to be unique within + * its disk instance. The storage class name will be used by the Catalogue + * to determine the destination tape pool for each tape copy. + * @param user The user for whom the file is to be archived. This will be + * used by the Catalogue to determine the mount policy to be used when + * archiving the file. + * @return The information required to queue the associated archive request.
+ */ + common::dataStructures::ArchiveFileQueueCriteria prepareForNewFileInternal( + const std::string &diskInstanceName, + const std::string &storageClassName, + const common::dataStructures::UserIdentity &user); + }; // class RdbmsCatalogue } // namespace catalogue } // namespace cta diff --git a/catalogue/SchemaCreatingSqliteCatalogue.cpp b/catalogue/SchemaCreatingSqliteCatalogue.cpp index 1f3d9ef0aa9334a46f682050b2766c0bbe4ba90e..dd236e42d373c2a62657a325dd3cbe84dc2a291d 100644 --- a/catalogue/SchemaCreatingSqliteCatalogue.cpp +++ b/catalogue/SchemaCreatingSqliteCatalogue.cpp @@ -18,8 +18,6 @@ #include "catalogue/SqliteCatalogueSchema.hpp" #include "catalogue/SchemaCreatingSqliteCatalogue.hpp" -#include "rdbms/SqliteConn.hpp" -#include "rdbms/SqliteConnFactory.hpp" namespace cta { namespace catalogue { @@ -31,8 +29,9 @@ SchemaCreatingSqliteCatalogue::SchemaCreatingSqliteCatalogue( log::Logger &log, const std::string &filename, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns): - SqliteCatalogue(log, filename, nbConns, nbArchiveFileListingConns) { + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect): + SqliteCatalogue(log, filename, nbConns, nbArchiveFileListingConns, maxTriesToConnect) { try { createCatalogueSchema(); } catch(exception::Exception &ex) { diff --git a/catalogue/SchemaCreatingSqliteCatalogue.hpp b/catalogue/SchemaCreatingSqliteCatalogue.hpp index fe265c3932deb6fb38d00a91ba58f33e23c9b368..790f1bafd3ae0766c4b6319b84f2022af6927f8b 100644 --- a/catalogue/SchemaCreatingSqliteCatalogue.hpp +++ b/catalogue/SchemaCreatingSqliteCatalogue.hpp @@ -42,12 +42,16 @@ public: * @param nbArchiveFileListingConns The maximum number of concurrent * connections to the underlying relational database for the sole purpose of * listing archive files. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ SchemaCreatingSqliteCatalogue( log::Logger &log, const std::string &filename, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect); /** * Destructor.
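The constructor changes above thread the new maxTriesToConnect value from the concrete catalogue classes down into RdbmsCatalogue, whose public methods wrap their *Internal counterparts with the retryOnLostConnection() helper added later in this patch. The following sketch is illustrative only and is not part of the patch; listTapes()/listTapesInternal() are hypothetical stand-ins for the real method pairs such as isAdmin()/isAdminInternal():

// Illustrative sketch only (not part of the patch). listTapesInternal() is a
// hypothetical retryable internal method; m_log, m_maxTriesToConnect and
// retryOnLostConnection() are the members/helper this patch actually uses.
std::list<common::dataStructures::Tape> RdbmsCatalogue::listTapes() const {
  // Re-drive the internal implementation on LostDatabaseConnection,
  // up to m_maxTriesToConnect attempts.
  return retryOnLostConnection(m_log, [&]{ return listTapesInternal(); }, m_maxTriesToConnect);
}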
diff --git a/catalogue/SqliteCatalogue.cpp b/catalogue/SqliteCatalogue.cpp index e8b1241c2c09e978f1aeffe23958d25879000d9b..6ed110590118180bcbb7efbefccb6ff867891f3c 100644 --- a/catalogue/SqliteCatalogue.cpp +++ b/catalogue/SqliteCatalogue.cpp @@ -26,7 +26,6 @@ #include "common/Timer.hpp" #include "common/utils/utils.hpp" #include "rdbms/AutoRollback.hpp" -#include "rdbms/ConnFactoryFactory.hpp" namespace cta { namespace catalogue { @@ -38,12 +37,14 @@ SqliteCatalogue::SqliteCatalogue( log::Logger &log, const std::string &filename, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns): + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect): RdbmsCatalogue( log, - rdbms::ConnFactoryFactory::create(rdbms::Login(rdbms::Login::DBTYPE_SQLITE, "", "", filename)), + rdbms::Login(rdbms::Login::DBTYPE_SQLITE, "", "", filename), nbConns, - nbArchiveFileListingConns) { + nbArchiveFileListingConns, + maxTriesToConnect) { } //------------------------------------------------------------------------------ @@ -117,17 +118,17 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con t.reset(); { const char *const sql = "DELETE FROM TAPE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID;"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + stmt.executeNonQuery(); } const auto deleteFromTapeFileTime = t.secs(utils::Timer::resetCounter); { const char *const sql = "DELETE FROM ARCHIVE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID;"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFileId); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId); + stmt.executeNonQuery(); } const auto deleteFromArchiveFileTime = t.secs(utils::Timer::resetCounter); @@ -198,17 +199,17 @@ void SqliteCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta t.reset(); { const char *const sql = "DELETE FROM TAPE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID;"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); + stmt.executeNonQuery(); } const auto deleteFromTapeFileTime = t.secs(utils::Timer::resetCounter); { const char *const sql = "DELETE FROM ARCHIVE_FILE WHERE ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID;"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - stmt->bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); - stmt->executeNonQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFile->archiveFileID); + stmt.executeNonQuery(); } const auto deleteFromArchiveFileTime = t.secs(utils::Timer::resetCounter); @@ -257,7 +258,7 @@ void SqliteCatalogue::deleteArchiveFileByDiskFileId(const std::string &diskInsta //------------------------------------------------------------------------------ // getNextArchiveFileId //------------------------------------------------------------------------------ -uint64_t SqliteCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { 
+uint64_t SqliteCatalogue::getNextArchiveFileId(rdbms::Conn &conn) { try { // The SQLite implemenation of getNextArchiveFileId() serializes access to // the SQLite database in order to avoid busy errors @@ -265,7 +266,7 @@ uint64_t SqliteCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { rdbms::AutoRollback autoRollback(conn); - conn.executeNonQuery("UPDATE ARCHIVE_FILE_ID SET ID = ID + 1", rdbms::Stmt::AutocommitMode::OFF); + conn.executeNonQuery("UPDATE ARCHIVE_FILE_ID SET ID = ID + 1", rdbms::AutocommitMode::OFF); uint64_t archiveFileId = 0; { const char *const sql = @@ -273,8 +274,8 @@ uint64_t SqliteCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { "ID AS ID " "FROM " "ARCHIVE_FILE_ID"; - auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); - auto rset = stmt->executeQuery(); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::OFF); + auto rset = stmt.executeQuery(); if(!rset.next()) { throw exception::Exception("ARCHIVE_FILE_ID table is empty"); } @@ -294,8 +295,8 @@ uint64_t SqliteCatalogue::getNextArchiveFileId(rdbms::PooledConn &conn) { //------------------------------------------------------------------------------ // selectTapeForUpdate //------------------------------------------------------------------------------ -common::dataStructures::Tape SqliteCatalogue::selectTape(const rdbms::Stmt::AutocommitMode autocommitMode, - rdbms::PooledConn &conn, const std::string &vid) { +common::dataStructures::Tape SqliteCatalogue::selectTape(const rdbms::AutocommitMode autocommitMode, + rdbms::Conn &conn, const std::string &vid) { try { const char *const sql = "SELECT " @@ -334,8 +335,8 @@ common::dataStructures::Tape SqliteCatalogue::selectTape(const rdbms::Stmt::Auto "VID = :VID;"; auto stmt = conn.createStmt(sql, autocommitMode); - stmt->bindString(":VID", vid); - auto rset = stmt->executeQuery(); + stmt.bindString(":VID", vid); + auto rset = stmt.executeQuery(); if (!rset.next()) { throw exception::Exception(std::string("The tape with VID " + vid + " does not exist")); } @@ -407,7 +408,7 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events threading::MutexLocker locker(m_mutex); auto conn = m_connPool.getConn(); - const auto tape = selectTape(rdbms::Stmt::AutocommitMode::ON, conn, firstEvent.vid); + const auto tape = selectTape(rdbms::AutocommitMode::ON, conn, firstEvent.vid); uint64_t expectedFSeq = tape.lastFSeq + 1; uint64_t totalCompressedBytesWritten = 0; @@ -432,11 +433,11 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events auto lastEventItor = events.cend(); lastEventItor--; const TapeFileWritten &lastEvent = *lastEventItor; - updateTape(conn, rdbms::Stmt::AutocommitMode::ON, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, + updateTape(conn, rdbms::AutocommitMode::ON, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, lastEvent.tapeDrive); for(const auto &event : events) { - fileWrittenToTape(rdbms::Stmt::AutocommitMode::ON, conn, event); + fileWrittenToTape(rdbms::AutocommitMode::ON, conn, event); } } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -446,7 +447,7 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeFileWritten> &events //------------------------------------------------------------------------------ // fileWrittenToTape //------------------------------------------------------------------------------ -void SqliteCatalogue::fileWrittenToTape(const 
rdbms::Stmt::AutocommitMode autocommitMode, rdbms::PooledConn &conn, +void SqliteCatalogue::fileWrittenToTape(const rdbms::AutocommitMode autocommitMode, rdbms::Conn &conn, const TapeFileWritten &event) { try { checkTapeFileWrittenFieldsAreSet(event); diff --git a/catalogue/SqliteCatalogue.hpp b/catalogue/SqliteCatalogue.hpp index 86a6f49c970c7507a62dd6dc42a9d601109f6e70..db78c06be4dba1f88b040968caa942be48556548 100644 --- a/catalogue/SqliteCatalogue.hpp +++ b/catalogue/SqliteCatalogue.hpp @@ -42,12 +42,16 @@ public: * @param nbArchiveFileListingConns The maximum number of concurrent * connections to the underlying relational database for the sole purpose of * listing archive files. + * @param maxTriesToConnect The maximum number of times a single method should + * try to connect to the database in the event of LostDatabaseConnection + * exceptions being thrown. */ SqliteCatalogue( log::Logger &log, const std::string &filename, const uint64_t nbConns, - const uint64_t nbArchiveFileListingConns); + const uint64_t nbArchiveFileListingConns, + const uint32_t maxTriesToConnect); public: @@ -116,7 +120,7 @@ protected: * @return A unique archive ID that can be used by a new archive file within * the catalogue. */ - uint64_t getNextArchiveFileId(rdbms::PooledConn &conn) override; + uint64_t getNextArchiveFileId(rdbms::Conn &conn) override; /** * Notifies the catalogue that the specified files have been written to tape. @@ -134,7 +138,7 @@ private: * @param conn The database connection. * @param event The tape file written event. */ - void fileWrittenToTape(const rdbms::Stmt::AutocommitMode autocommitMode, rdbms::PooledConn &conn, + void fileWrittenToTape(const rdbms::AutocommitMode autocommitMode, rdbms::Conn &conn, const TapeFileWritten &event); /** @@ -144,7 +148,7 @@ private: * @param conn The database connection. * @param vid The volume identifier of the tape. */ - common::dataStructures::Tape selectTape(const rdbms::Stmt::AutocommitMode autocommitMode, rdbms::PooledConn &conn, + common::dataStructures::Tape selectTape(const rdbms::AutocommitMode autocommitMode, rdbms::Conn &conn, const std::string &vid); }; // class SqliteCatalogue diff --git a/catalogue/retryOnLostConnection.hpp b/catalogue/retryOnLostConnection.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d3be72926099eed8bb98fd3319df33398691caa2 --- /dev/null +++ b/catalogue/retryOnLostConnection.hpp @@ -0,0 +1,74 @@ +/* + * The CERN Tape Archive(CTA) project + * Copyright(C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + *(at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include "common/exception/Exception.hpp" +#include "common/exception/LostDatabaseConnection.hpp" +#include "common/log/Logger.hpp" + +#include <list> +#include <stdint.h> +#include <type_traits> + +namespace cta { +namespace catalogue { + +/** + * Retries calling the specified callable if it throws a LostDatabaseConnection + * exception.
+ * + * @tparam T The type of the callable. + * @param log Object representing the API to the CTA logging system. + * @param callable The callable. + * @param maxTriesToConnect The maximum number of times the callable should be called in the event of a + * LostDatabaseConnection exception. + * @return The result of calling the callable. + */ +template<typename T> +typename std::result_of<T()>::type retryOnLostConnection(log::Logger &log, const T &callable, + const uint32_t maxTriesToConnect) { + try { + for (uint32_t tryNb = 1; tryNb <= maxTriesToConnect; tryNb++) { + try { + return callable(); + } catch (exception::LostDatabaseConnection &le) { + // Log lost connection + std::list<log::Param> params = { + {"maxTriesToConnect", maxTriesToConnect}, + {"tryNb", tryNb}, + {"msg", le.getMessage()} + }; + log(cta::log::WARNING, "Lost database connection", params); + } + } + + exception::Exception ex; + ex.getMessage() << "Lost the database connection after trying " << maxTriesToConnect << " times"; + throw ex; + } catch (exception::UserError &) { + throw; + } catch (exception::Exception &) { + throw; + } catch (std::exception &se) { + throw exception::Exception(se.what()); + } +} + +} // namespace catalogue +} // namespace cta diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 73f7306c0342996a58fb068fb23baa8d74beaa46..3aa0c85c7c16e0ea7eb2925c0d80f8287ebe8a72 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -83,6 +83,7 @@ set (COMMON_LIB_SRC_FILES exception/Exception.cpp exception/InvalidArgument.cpp exception/InvalidConfigEntry.cpp + exception/LostDatabaseConnection.cpp exception/Mismatch.cpp exception/MissingOperand.cpp exception/MountFailed.cpp diff --git a/common/exception/LostDatabaseConnection.cpp b/common/exception/LostDatabaseConnection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f464441cb708617605dffbf4d9057eaf8c6004cd --- /dev/null +++ b/common/exception/LostDatabaseConnection.cpp @@ -0,0 +1,33 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "common/exception/LostDatabaseConnection.hpp" + + +//------------------------------------------------------------------------------ +// constructor +//------------------------------------------------------------------------------ +cta::exception::LostDatabaseConnection::LostDatabaseConnection(const std::string &context, const bool embedBacktrace): + Exception(context, embedBacktrace) { +} + +//------------------------------------------------------------------------------ +// destructor +//------------------------------------------------------------------------------ +cta::exception::LostDatabaseConnection::~LostDatabaseConnection() noexcept { +} diff --git a/common/exception/LostDatabaseConnection.hpp b/common/exception/LostDatabaseConnection.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9acc170dd4b02609928eb21936b94378731d0e75 --- /dev/null +++ b/common/exception/LostDatabaseConnection.hpp @@ -0,0 +1,55 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include "common/exception/Exception.hpp" + +#include <string> + + +namespace cta { +namespace exception { + +/** + * The database connection has been lost. + */ +class LostDatabaseConnection : public cta::exception::Exception { +public: + + /** + * Constructor. + * + * @param context optional context string added to the message + * at initialisation time. 
+ * @param embedBacktrace whether to embed a backtrace of where the + * exception was thrown in the message + */ + LostDatabaseConnection(const std::string &context = "", const bool embedBacktrace = true); + + /** + * Empty Destructor, explicitly non-throwing (needed for std::exception + * inheritance) + */ + ~LostDatabaseConnection() noexcept override; + +}; // class LostDatabaseConnection + +} // namespace exception +} // namespace cta + diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh index 35c5e98f7177950f4d0e3ad430d59e6c97c3d0c6..ae052099815a701a57c3c758024a52b5d5b8c55f 100755 --- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh +++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh @@ -34,7 +34,7 @@ if [ "$KEEP_OBJECTSTORE" == "0" ]; then else if [[ $(rados -p $OBJECTSTOREPOOL --id $OBJECTSTOREID --namespace $OBJECTSTORENAMESPACE ls | wc -l) -gt 0 ]]; then echo "Rados objectstore ${OBJECTSTOREURL} is not empty: deleting content" - rados -p $OBJECTSTOREPOOL --id $OBJECTSTOREID --namespace $OBJECTSTORENAMESPACE ls | xargs -itoto -P 100 rados -p $OBJECTSTOREPOOL --id $OBJECTSTOREID --namespace $OBJECTSTORENAMESPACE rm toto + rados -p $OBJECTSTOREPOOL --id $OBJECTSTOREID --namespace $OBJECTSTORENAMESPACE ls | xargs -L 100 -P 100 rados -p $OBJECTSTOREPOOL --id $OBJECTSTOREID --namespace $OBJECTSTORENAMESPACE rm fi cta-objectstore-initialize $OBJECTSTOREURL echo "Rados objectstore ${OBJECTSTOREURL} content:" diff --git a/cta.spec.in b/cta.spec.in index 980132934869badf897fa52ac0ae875f127ae405..dcead60bc982dd3ae9d026cf7e60d14f173f94e9 100644 --- a/cta.spec.in +++ b/cta.spec.in @@ -199,6 +199,7 @@ The shared libraries %{_libdir}/libctamessages.so* %{_libdir}/libctamessagesutils.so* %{_libdir}/libctardbms.so* +%{_libdir}/libctardbmswrapper.so* %attr(0644,root,root) %config(noreplace) %{_sysconfdir}/cta/cta-catalogue.conf.example #CTA-lib installs libraries so we need ldconfig.
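To make the new LostDatabaseConnection exception and the retryOnLostConnection() helper above concrete, here is a minimal usage sketch. It is not part of the patch, and it assumes a concrete cta::log::Logger implementation named logger has already been constructed:

// Minimal usage sketch (not part of the patch). "logger" is assumed to be a
// concrete cta::log::Logger implementation constructed elsewhere.
uint32_t attempt = 0;
const uint64_t result = cta::catalogue::retryOnLostConnection(logger,
  [&attempt]() -> uint64_t {
    // Simulate two dropped connections before the call finally succeeds.
    if (++attempt < 3) {
      throw cta::exception::LostDatabaseConnection("simulated lost connection");
    }
    return 42;
  },
  5); // maxTriesToConnect: give up after 5 failed attempts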
@@ -229,6 +230,7 @@ Unit tests and system tests with virtual tape drives %{_libdir}/libctainmemorycatalogueunittests.so* %{_libdir}/libctaobjectstoreunittests.so* %{_libdir}/libctardbmsunittests.so* +%{_libdir}/libctardbmswrapperunittests.so* %{_libdir}/libctaschedulerunittests.so* %{_libdir}/libctatapeserverdaemonunittests.so* %{_libdir}/libctatapeserverdriveunittests.so* diff --git a/objectstore/ArchiveQueue.cpp b/objectstore/ArchiveQueue.cpp index 57fef41d6e5444d15df99c9ca9290990ef1c8dfe..15c60917e880eb577aba19830a407a0bddb90c6c 100644 --- a/objectstore/ArchiveQueue.cpp +++ b/objectstore/ArchiveQueue.cpp @@ -22,6 +22,8 @@ #include "EntryLogSerDeser.hpp" #include "RootEntry.hpp" #include "ValueCountMap.hpp" +#include "ArchiveQueueShard.hpp" +#include "AgentReference.hpp" #include <google/protobuf/util/json_util.h> namespace cta { namespace objectstore { @@ -57,6 +59,7 @@ void ArchiveQueue::initialize(const std::string& name) { m_payload.set_tapepool(name); // set the archive jobs counter to zero m_payload.set_archivejobstotalsize(0); + m_payload.set_archivejobscount(0); m_payload.set_oldestjobcreationtime(0); // set the initial summary map rebuild count to zero m_payload.set_mapsrebuildcount(0); @@ -65,34 +68,138 @@ void ArchiveQueue::initialize(const std::string& name) { } void ArchiveQueue::commit() { - // Before calling ObjectOps::commit, check that we have coherent queue summaries + if (!checkMapsAndShardsCoherency()) { + rebuild(); + m_payload.set_mapsrebuildcount(m_payload.mapsrebuildcount()+1); + } + ObjectOps<serializers::ArchiveQueue, serializers::ArchiveQueue_t>::commit(); +} + +bool ArchiveQueue::checkMapsAndShardsCoherency() { + checkPayloadReadable(); + uint64_t bytesFromShardPointers = 0; + uint64_t jobsExpectedFromShardsPointers = 0; + // Add up shard summaries + for (auto & aqs: m_payload.archivequeuesshards()) { + bytesFromShardPointers += aqs.shardbytescount(); + jobsExpectedFromShardsPointers += aqs.shardjobscount(); + } + uint64_t totalBytes = m_payload.archivejobstotalsize(); + uint64_t totalJobs = m_payload.archivejobscount(); + // The sum of shards should be equal to the summary + if (totalBytes != bytesFromShardPointers || + totalJobs != jobsExpectedFromShardsPointers) + return false; + // Check that we have coherent queue summaries ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); ValueCountMap priorityMap(m_payload.mutable_prioritymap()); ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); - if (maxDriveAllowedMap.total() != (uint64_t)m_payload.pendingarchivejobs_size() || - priorityMap.total() != (uint64_t)m_payload.pendingarchivejobs_size() || - minArchiveRequestAgeMap.total() != (uint64_t)m_payload.pendingarchivejobs_size()) { - // The maps counts are off: recompute them. - maxDriveAllowedMap.clear(); - priorityMap.clear(); - minArchiveRequestAgeMap.clear(); - for (size_t i=0; i<(size_t)m_payload.pendingarchivejobs_size(); i++) { - maxDriveAllowedMap.incCount(m_payload.pendingarchivejobs(i).maxdrivesallowed()); - priorityMap.incCount(m_payload.pendingarchivejobs(i).priority()); - minArchiveRequestAgeMap.incCount(m_payload.pendingarchivejobs(i).priority()); + if (maxDriveAllowedMap.total() != m_payload.archivejobscount() || + priorityMap.total() != m_payload.archivejobscount() || + minArchiveRequestAgeMap.total() != m_payload.archivejobscount()) + return false; + return true; +} + +void ArchiveQueue::rebuild() { + checkPayloadWritable(); + // Something is off with the queue. We will hence rebuild it. 
The rebuild of the + // queue will consist of: + // 1) Attempting to read all shards in parallel. Absent shards are possible, and will + // mean we have dangling pointers. + // 2) Rebuild the summaries from the shards. + // As a side note, we do not go as far as validating the pointers to jobs within the + // shards, as this is already handled as access goes. + std::list<ArchiveQueueShard> shards; + std::list<std::unique_ptr<ArchiveQueueShard::AsyncLockfreeFetcher>> shardsFetchers; + + // Get the summaries structures ready + ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); + maxDriveAllowedMap.clear(); + ValueCountMap priorityMap(m_payload.mutable_prioritymap()); + priorityMap.clear(); + ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); + minArchiveRequestAgeMap.clear(); + for (auto & sa: m_payload.archivequeuesshards()) { + shards.emplace_back(ArchiveQueueShard(sa.address(), m_objectStore)); + shardsFetchers.emplace_back(shards.back().asyncLockfreeFetch()); + } + auto s = shards.begin(); + auto sf = shardsFetchers.begin(); + uint64_t totalJobs=0; + uint64_t totalBytes=0; + time_t oldestJobCreationTime=std::numeric_limits<time_t>::max(); + while (s != shards.end()) { + // Each shard could be gone + try { + (*sf)->wait(); + } catch (Backend::NoSuchObject & ex) { + // Remove the shard from the list + auto aqs = m_payload.mutable_archivequeuesshards()->begin(); + while (aqs != m_payload.mutable_archivequeuesshards()->end()) { + if (aqs->address() == s->getAddressIfSet()) { + aqs = m_payload.mutable_archivequeuesshards()->erase(aqs); + } else { + aqs++; + } + } + goto nextShard; } - m_payload.set_mapsrebuildcount(m_payload.mapsrebuildcount()+1); + { + // The shard is still around, let's compute its summaries. + uint64_t jobs = 0; + uint64_t size = 0; + for (auto & j: s->dumpJobs()) { + jobs++; + size += j.size; + priorityMap.incCount(j.priority); + minArchiveRequestAgeMap.incCount(j.minArchiveRequestAge); + maxDriveAllowedMap.incCount(j.maxDrivesAllowed); + if (j.startTime < oldestJobCreationTime) oldestJobCreationTime = j.startTime; + } + // Add the summary to total. + totalJobs+=jobs; + totalBytes+=size; + // And store the value in the shard pointers. + auto maqs = m_payload.mutable_archivequeuesshards(); + for (auto & aqs: *maqs) { + if (aqs.address() == s->getAddressIfSet()) { + aqs.set_shardjobscount(jobs); + aqs.set_shardbytescount(size); + goto shardUpdated; + } + } + { + // We had to update a shard and did not find it. This is an error. + throw exception::Exception(std::string ("In ArchiveQueue::rebuild(): failed to record summary for shard " + s->getAddressIfSet())); + } + shardUpdated:; + // We still need to check if the shard itself is coherent (we have an opportunity to + // match its summary with the jobs total we just recomputed). + if (size != s->getJobsSummary().bytes) { + ArchiveQueueShard aqs(s->getAddressIfSet(), m_objectStore); + m_exclusiveLock->includeSubObject(aqs); + aqs.fetch(); + aqs.rebuild(); + aqs.commit(); + } + } + nextShard:; + s++; + sf++; } - ObjectOps<serializers::ArchiveQueue, serializers::ArchiveQueue_t>::commit(); + m_payload.set_archivejobscount(totalJobs); + m_payload.set_archivejobstotalsize(totalBytes); + m_payload.set_oldestjobcreationtime(oldestJobCreationTime); + // We went through all the shards, re-updated the summaries, removed references to + // gone shards. Done.
} bool ArchiveQueue::isEmpty() { checkPayloadReadable(); // Check we have no archive jobs pending - if (m_payload.pendingarchivejobs_size() - || m_payload.orphanedarchivejobsnscreation_size() - || m_payload.orphanedarchivejobsnsdeletion_size()) + if (m_payload.archivequeuesshards_size()) return false; // If we made it to here, it seems the pool is indeed empty. return true; @@ -144,43 +251,127 @@ std::string ArchiveQueue::getTapePool() { return m_payload.tapepool(); } -void ArchiveQueue::addJob(const ArchiveRequest::JobDump& job, - const std::string & archiveRequestAddress, uint64_t archiveFileId, - uint64_t fileSize, const cta::common::dataStructures::MountPolicy & policy, - time_t startTime) { +void ArchiveQueue::addJobsAndCommit(std::list<JobToAdd> & jobsToAdd, AgentReference & agentReference, log::LogContext & lc) { checkPayloadWritable(); - // Keep track of the mounting criteria - ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.incCount(policy.maxDrivesAllowed); - ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.incCount(policy.archivePriority); - ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); - minArchiveRequestAgeMap.incCount(policy.archiveMinRequestAge); - if (m_payload.pendingarchivejobs_size()) { - if ((uint64_t)startTime < m_payload.oldestjobcreationtime()) - m_payload.set_oldestjobcreationtime(startTime); - m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() + fileSize); - } else { - m_payload.set_archivejobstotalsize(fileSize); - m_payload.set_oldestjobcreationtime(startTime); - } - auto * j = m_payload.add_pendingarchivejobs(); - j->set_address(archiveRequestAddress); - j->set_size(fileSize); - j->set_fileid(archiveFileId); - j->set_copynb(job.copyNb); - j->set_maxdrivesallowed(policy.maxDrivesAllowed); - j->set_priority(policy.archivePriority); - j->set_minarchiverequestage(policy.archiveMinRequestAge); + // Before adding the jobs, we have to decide how to lay them out in the shards. + // We are here in FIFO mode, so the algorithm is just 1) complete the current last + // shard, if it did not reach the maximum size + // 2) create new shard(s) as needed. + // + // First implementation is shard by shard. A better, parallel one could be implemented, + // but the performance gain should be marginal as most of the time we will be dealing + // with a single shard. + + auto nextJob = jobsToAdd.begin(); + while (nextJob != jobsToAdd.end()) { + // If we're here, there is at least one job to add. + // Let's find a shard for it/them. It can be either the last (incomplete) shard or + // a new shard to create. In all cases, we will max out the shard, jobs list permitting. + // If we do fill up the shard, we'll go through another round here. + // Is there a last shard, and is it not full? + ArchiveQueueShard aqs(m_objectStore); + serializers::ArchiveQueueShardPointer * aqsp = nullptr; + bool newShard=false; + uint64_t shardCount = m_payload.archivequeuesshards_size(); + if (shardCount && m_payload.archivequeuesshards(shardCount - 1).shardjobscount() < c_maxShardSize) { + auto & shardPointer=m_payload.archivequeuesshards(shardCount - 1); + aqs.setAddress(shardPointer.address()); + // include-locking does not check existence of the object in the object store. + // we will find out on fetch. If we fail, we have to rebuild.
+ m_exclusiveLock->includeSubObject(aqs); + try { + aqs.fetch(); + } catch (Backend::NoSuchObject & ex) { + log::ScopedParamContainer params (lc); + params.add("archiveQueueObject", getAddressIfSet()) + .add("shardNumber", shardCount - 1) + .add("shardObject", shardPointer.address()); + lc.log(log::ERR, "In ArchiveQueue::addJobsAndCommit(): shard not present. Rebuilding queue."); + rebuild(); + commit(); + continue; + } + // Validate that the shard is as expected from the pointer. If not we need to + // rebuild the queue and restart the shard selection. + auto shardSummary = aqs.getJobsSummary(); + if (shardPointer.shardbytescount() != shardSummary.bytes || + shardPointer.shardjobscount() != shardSummary.jobs) { + log::ScopedParamContainer params(lc); + params.add("archiveQueueObject", getAddressIfSet()) + .add("shardNumber", shardCount - 1) + .add("shardObject", shardPointer.address()) + .add("shardReportedBytes", shardSummary.bytes) + .add("shardReportedJobs", shardSummary.jobs) + .add("expectedBytes", shardPointer.shardbytescount()) + .add("expectedJobs", shardPointer.shardjobscount()); + lc.log(log::ERR, "In ArchiveQueue::addJobsAndCommit(): mismatch found. Rebuilding the queue."); + rebuild(); + commit(); + continue; + } + // The shard looks good. We will now proceed with the addition of individual jobs. + aqsp = m_payload.mutable_archivequeuesshards(shardCount - 1); + } else { + // We need a new shard. Just add it (in memory). + newShard = true; + aqsp = m_payload.mutable_archivequeuesshards()->Add(); + // Create the shard in memory. + std::stringstream shardName; + shardName << "ArchiveQueueShard-" << m_payload.tapepool(); + aqs.setAddress(agentReference.nextId(shardName.str())); + aqs.initialize(getAddressIfSet()); + // Reference the shard in the pointer, and initialized counters. + aqsp->set_address(aqs.getAddressIfSet()); + aqsp->set_shardbytescount(0); + aqsp->set_shardjobscount(0); + } + // We can now add the individual jobs, commit the main queue and then insert or commit the shard. + { + // As the queue could be rebuilt on each shard round, we get access to the + // value maps here + ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); + ValueCountMap priorityMap(m_payload.mutable_prioritymap()); + ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); + while (nextJob != jobsToAdd.end() && aqsp->shardjobscount() < c_maxShardSize) { + // Update stats and global counters. + maxDriveAllowedMap.incCount(nextJob->policy.maxDrivesAllowed); + priorityMap.incCount(nextJob->policy.archivePriority); + minArchiveRequestAgeMap.incCount(nextJob->policy.archiveMinRequestAge); + if (m_payload.archivejobscount()) { + if ((uint64_t)nextJob->startTime < m_payload.oldestjobcreationtime()) + m_payload.set_oldestjobcreationtime(nextJob->startTime); + m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() + nextJob->fileSize); + } else { + m_payload.set_archivejobstotalsize(nextJob->fileSize); + m_payload.set_oldestjobcreationtime(nextJob->startTime); + } + m_payload.set_archivejobscount(m_payload.archivejobscount()+1); + // Add the job to shard, update pointer counts and queue summary. + aqsp->set_shardjobscount(aqs.addJob(*nextJob)); + aqsp->set_shardbytescount(aqsp->shardbytescount() + nextJob->fileSize); + // And move to the next job + nextJob++; + } + } + // We will new commit this shard (and the queue) before moving to the next. 
+ // Commit in the right order: + // 1) commit the queue so the shard is referenced in all cases (creation). + commit(); + // Now get the shard on storage. Could be either insert or commit. + if (newShard) + aqs.insert(); + else + aqs.commit(); + } // end of loop over all objects. } auto ArchiveQueue::getJobsSummary() -> JobsSummary { checkPayloadReadable(); JobsSummary ret; - ret.files = m_payload.pendingarchivejobs_size(); + ret.jobs = m_payload.archivejobscount(); ret.bytes = m_payload.archivejobstotalsize(); ret.oldestJobStartTime = m_payload.oldestjobcreationtime(); - if (ret.files) { + if (ret.jobs) { ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); ret.maxDrivesAllowed = maxDriveAllowedMap.maxValue(); ValueCountMap priorityMap(m_payload.mutable_prioritymap()); @@ -195,124 +386,179 @@ auto ArchiveQueue::getJobsSummary() -> JobsSummary { return ret; } -bool ArchiveQueue::addJobIfNecessary( - const ArchiveRequest::JobDump& job, - const std::string & archiveRequestAddress, uint64_t archiveFileId, - uint64_t fileSize, const cta::common::dataStructures::MountPolicy & policy, - time_t startTime) { +ArchiveQueue::AdditionSummary ArchiveQueue::addJobsIfNecessaryAndCommit(std::list<JobToAdd>& jobsToAdd, + AgentReference & agentReference, log::LogContext & lc) { checkPayloadWritable(); - auto & jl=m_payload.pendingarchivejobs(); - for (auto j=jl.begin(); j!= jl.end(); j++) { - if (j->address() == archiveRequestAddress) - return false; + // First get all the shards of the queue to understand which jobs to add. + std::list<ArchiveQueueShard> shards; + std::list<std::unique_ptr<ArchiveQueueShard::AsyncLockfreeFetcher>> shardsFetchers; + + for (auto & sa: m_payload.archivequeuesshards()) { + shards.emplace_back(ArchiveQueueShard(sa.address(), m_objectStore)); + shardsFetchers.emplace_back(shards.back().asyncLockfreeFetch()); } - // Keep track of the mounting criteria - ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.incCount(policy.maxDrivesAllowed); - ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.incCount(policy.archivePriority); - ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); - minArchiveRequestAgeMap.incCount(policy.archiveMinRequestAge); - if (m_payload.pendingarchivejobs_size()) { - if ((uint64_t)startTime < m_payload.oldestjobcreationtime()) - m_payload.set_oldestjobcreationtime(startTime); - m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() + fileSize); - } else { - m_payload.set_archivejobstotalsize(fileSize); - m_payload.set_oldestjobcreationtime(startTime); + std::list<std::list<JobDump>> shardsDumps; + auto s = shards.begin(); + auto sf = shardsFetchers.begin(); + + while (s!= shards.end()) { + try { + (*sf)->wait(); + } catch (Backend::NoSuchObject & ex) { + goto nextShard; + } + shardsDumps.emplace_back(std::list<JobDump>()); + for (auto & j: s->dumpJobs()) { + shardsDumps.back().emplace_back(JobDump({j.size, j.address, j.copyNb})); + } + nextShard: + s++; + sf++; } - auto * j = m_payload.add_pendingarchivejobs(); - j->set_address(archiveRequestAddress); - j->set_size(fileSize); - j->set_fileid(archiveFileId); - j->set_copynb(job.copyNb); - j->set_maxdrivesallowed(policy.maxDrivesAllowed); - j->set_priority(policy.archivePriority); - j->set_minarchiverequestage(policy.archiveMinRequestAge); - return true; + + // Now filter the jobs to add + AdditionSummary ret; + std::list<JobToAdd> jobsToReallyAdd; + for (auto & jta: jobsToAdd) 
{ + for (auto & sd: shardsDumps) { + for (auto & sjd: sd) { + if (sjd.address == jta.archiveRequestAddress) + goto found; + } + } + jobsToReallyAdd.emplace_back(jta); + ret.bytes += jta.fileSize; + ret.files++; + found:; + } + + // We can now proceed with the standard addition. + addJobsAndCommit(jobsToReallyAdd, agentReference, lc); + return ret; } -void ArchiveQueue::removeJob(const std::string& archiveToFileAddress) { +void ArchiveQueue::removeJobsAndCommit(const std::list<std::string>& jobsToRemove) { checkPayloadWritable(); - auto * jl=m_payload.mutable_pendingarchivejobs(); - bool found = false; - do { - found = false; - // Push the found entry all the way to the end. - for (size_t i=0; i<(size_t)jl->size(); i++) { - if (jl->Get(i).address() == archiveToFileAddress) { - found = true; - ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.decCount(jl->Get(i).maxdrivesallowed()); - ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.decCount(jl->Get(i).priority()); - ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); - minArchiveRequestAgeMap.decCount(jl->Get(i).minarchiverequestage()); - m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() - jl->Get(i).size()); - while (i+1 < (size_t)jl->size()) { - jl->SwapElements(i, i+1); - i++; - } - break; + ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); + ValueCountMap priorityMap(m_payload.mutable_prioritymap()); + ValueCountMap minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); + // Make a working copy of the jobs to remove. We will progressively trim this local list. + auto localJobsToRemove = jobsToRemove; + // The jobs are expected to be removed from the front shards first. + // Remove jobs until there are no more jobs or no more shards. + ssize_t shardIndex=0; + auto * mutableArchiveQueueShards= m_payload.mutable_archivequeuesshards(); + while (localJobsToRemove.size() && shardIndex < mutableArchiveQueueShards->size()) { + auto * shardPointer = mutableArchiveQueueShards->Mutable(shardIndex); + // Get hold of the shard + ArchiveQueueShard aqs(shardPointer->address(), m_objectStore); + m_exclusiveLock->includeSubObject(aqs); + aqs.fetch(); + // Remove jobs from shard + auto removalResult = aqs.removeJobs(localJobsToRemove); + // If the shard is drained, remove, otherwise commit. We update the pointer afterwards. + if (removalResult.jobsAfter) { + aqs.commit(); + } else { + aqs.remove(); + } + // We still need to update the tracking queue side. + // Update stats and remove the jobs from the todo list. + for (auto & j: removalResult.removedJobs) { + maxDriveAllowedMap.decCount(j.maxDrivesAllowed); + priorityMap.decCount(j.priority); + minArchiveRequestAgeMap.decCount(j.minArchiveRequestAge); + } + // In all cases, we should update the global statistics. + m_payload.set_archivejobscount(m_payload.archivejobscount() - removalResult.jobsRemoved); + m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() - removalResult.bytesRemoved); + // If the shard is still around, we shall update its pointer's stats too. + if (removalResult.jobsAfter) { + // Also update the shard pointers's stats. In case of mismatch, we will trigger a rebuild. 
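The goto-based duplicate filter in addJobsIfNecessaryAndCommit() above is essentially a set-membership test against the union of all shard dumps. A self-contained sketch of that filtering step, with minimal stand-in types rather than the CTA classes:

#include <cstdint>
#include <iostream>
#include <list>
#include <string>
#include <unordered_set>

struct JobToAdd { std::string archiveRequestAddress; uint64_t fileSize; };

int main() {
  // Addresses already present in the queue's shards (as returned by dumpJobs()).
  std::unordered_set<std::string> alreadyQueued{"req-A", "req-C"};
  std::list<JobToAdd> jobsToAdd{{"req-A", 100}, {"req-B", 200}, {"req-C", 300}};

  // Keep only the jobs whose archive request is not referenced yet,
  // and account for what will really be added (files/bytes summary).
  std::list<JobToAdd> jobsToReallyAdd;
  uint64_t files = 0, bytes = 0;
  for (const auto& jta : jobsToAdd) {
    if (alreadyQueued.count(jta.archiveRequestAddress)) continue;  // duplicate
    jobsToReallyAdd.push_back(jta);
    files++;
    bytes += jta.fileSize;
  }
  std::cout << files << " new jobs, " << bytes << " bytes to add\n";
}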
+ shardPointer->set_shardbytescount(shardPointer->shardbytescount() - removalResult.bytesRemoved); + shardPointer->set_shardjobscount(shardPointer->shardjobscount() - removalResult.jobsRemoved); + if (shardPointer->shardbytescount() != removalResult.bytesAfter + || shardPointer->shardjobscount() != removalResult.jobsAfter) { + rebuild(); + } + // We will commit when exiting anyway... + shardIndex++; + } else { + // Shard's gone, so should the pointer. Push it to the end of the queue and + // trim it. + for (auto i=shardIndex; i<mutableArchiveQueueShards->size()-1; i++) { + mutableArchiveQueueShards->SwapElements(i, i+1); } + m_payload.mutable_archivequeuesshards()->RemoveLast(); } - // and remove it - if (found) - jl->RemoveLast(); - } while (found); + // We should also trim the removed jobs from our list. + localJobsToRemove.remove_if( + [&removalResult](const std::string & ja){ + return std::count_if(removalResult.removedJobs.begin(), removalResult.removedJobs.end(), + [&ja](ArchiveQueueShard::JobInfo & j) { + return j.address == ja; + } + ); + } + ); // end of remove_if + // And commit the queue (once per shard should not hurt performance). + commit(); + } } -auto ArchiveQueue::dumpJobs() -> std::list<ArchiveQueue::JobDump> { +auto ArchiveQueue::dumpJobs() -> std::list<JobDump> { checkPayloadReadable(); + // Go read the shards in parallel... std::list<JobDump> ret; - auto & jl=m_payload.pendingarchivejobs(); - for (auto j=jl.begin(); j!=jl.end(); j++) { - ret.push_back(JobDump()); - JobDump & jd = ret.back(); - jd.address = j->address(); - jd.size = j->size(); - jd.copyNb = j->copynb(); + std::list<ArchiveQueueShard> shards; + std::list<std::unique_ptr<ArchiveQueueShard::AsyncLockfreeFetcher>> shardsFetchers; + for (auto & sa: m_payload.archivequeuesshards()) { + shards.emplace_back(ArchiveQueueShard(sa.address(), m_objectStore)); + shardsFetchers.emplace_back(shards.back().asyncLockfreeFetch()); } - return ret; -} - -bool ArchiveQueue::addOrphanedJobPendingNsCreation( - const ArchiveRequest::JobDump& job, - const std::string& archiveToFileAddress, - uint64_t fileid, - uint64_t size, const cta::common::dataStructures::MountPolicy & policy) { - checkPayloadWritable(); - auto & jl=m_payload.orphanedarchivejobsnscreation(); - for (auto j=jl.begin(); j!= jl.end(); j++) { - if (j->address() == archiveToFileAddress) - return false; + auto s = shards.begin(); + auto sf = shardsFetchers.begin(); + while (s != shards.end()) { + try { + (*sf)->wait(); + } catch (Backend::NoSuchObject & ex) { + // We are possibly in read only mode, so we cannot rebuild. + // Just skip this shard. 
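A compact way to picture the front-to-back removal loop above: each shard hands back what it actually removed, the pending list shrinks accordingly, and a drained shard drops out of the pointer array. A sketch with in-memory lists standing in for shards and their pointers (illustrative only; the real code bubbles the gone pointer to the end of the protobuf array instead of erasing in place):

#include <algorithm>
#include <iostream>
#include <list>
#include <string>
#include <vector>

int main() {
  // Shards as ordered lists of job addresses (front shard first).
  std::vector<std::list<std::string>> shards{
      {"j1", "j2"}, {"j3", "j4", "j5"}, {"j6"}};
  std::list<std::string> jobsToRemove{"j2", "j3", "j6"};

  std::size_t shardIndex = 0;
  while (!jobsToRemove.empty() && shardIndex < shards.size()) {
    auto& shard = shards[shardIndex];
    // Remove from this shard every address still pending removal.
    shard.remove_if([&](const std::string& a) {
      auto it = std::find(jobsToRemove.begin(), jobsToRemove.end(), a);
      if (it == jobsToRemove.end()) return false;
      jobsToRemove.erase(it);  // trim the pending list as we go
      return true;
    });
    if (shard.empty())
      shards.erase(shards.begin() + shardIndex);  // drained shard: drop the pointer
    else
      ++shardIndex;                               // keep it, move on
  }
  std::cout << shards.size() << " shards left, "
            << jobsToRemove.size() << " jobs not found\n";
}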
+ goto nextShard; + } + for (auto & j: s->dumpJobs()) { + ret.emplace_back(JobDump{j.size, j.address, j.copyNb}); + } + nextShard: + s++; sf++; } - auto * j = m_payload.add_orphanedarchivejobsnscreation(); - j->set_address(archiveToFileAddress); - j->set_size(size); - j->set_fileid(fileid); - j->set_copynb(job.copyNb); - j->set_maxdrivesallowed(policy.maxDrivesAllowed); - j->set_priority(policy.archivePriority); - j->set_minarchiverequestage(policy.archiveMinRequestAge); - return true; + return ret; } -bool ArchiveQueue::addOrphanedJobPendingNsDeletion( - const ArchiveRequest::JobDump& job, - const std::string& archiveToFileAddress, - uint64_t fileid, uint64_t size) { - checkPayloadWritable(); - auto & jl=m_payload.orphanedarchivejobsnsdeletion(); - for (auto j=jl.begin(); j!= jl.end(); j++) { - if (j->address() == archiveToFileAddress) - return false; +auto ArchiveQueue::getCandidateList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> archiveRequestsToSkip) -> CandidateJobList { + checkPayloadReadable(); + CandidateJobList ret; + for (auto & aqsp: m_payload.archivequeuesshards()) { + // We need to go through all shard poiters unconditionnaly to count what is left (see else part) + if (ret.candidateBytes < maxBytes && ret.candidateFiles < maxFiles) { + // Fetch the shard + ArchiveQueueShard aqs(aqsp.address(), m_objectStore); + aqs.fetchNoLock(); + auto shardCandidates = aqs.getCandidateJobList(maxBytes - ret.candidateBytes, maxFiles - ret.candidateFiles, archiveRequestsToSkip); + ret.candidateBytes += shardCandidates.candidateBytes; + ret.candidateFiles += shardCandidates.candidateFiles; + // We overwrite the remaining values each time as the previous + // shards have exhaustied their candidate lists. + ret.remainingBytesAfterCandidates = shardCandidates.remainingBytesAfterCandidates; + ret.remainingFilesAfterCandidates = shardCandidates.remainingFilesAfterCandidates; + ret.candidates.splice(ret.candidates.end(), shardCandidates.candidates); + } else { + // We are done with finding candidates. We just need to count what is left in the non-visited shards. + ret.remainingBytesAfterCandidates += aqsp.shardbytescount(); + ret.remainingFilesAfterCandidates += aqsp.shardjobscount(); + } } - auto * j = m_payload.add_orphanedarchivejobsnsdeletion(); - j->set_address(archiveToFileAddress); - j->set_size(size); - j->set_fileid(fileid); - return true; + return ret; } }} // namespace cta::objectstore diff --git a/objectstore/ArchiveQueue.hpp b/objectstore/ArchiveQueue.hpp index 943add52387a1962b0dcad72b1714f93688048e4..188aaa47cbcfb04b1a3379f7b3aa8a913680cccd 100644 --- a/objectstore/ArchiveQueue.hpp +++ b/objectstore/ArchiveQueue.hpp @@ -51,33 +51,46 @@ public: // Commit with sanity checks (override from ObjectOps void commit(); +private: + // Validates all summaries are in accordance with each other. + bool checkMapsAndShardsCoherency(); + // Rebuild from shards if something goes wrong. + void rebuild(); + +public: // Set/get tape pool void setTapePool(const std::string & name); std::string getTapePool(); - // Archive jobs management =================================================== - void addJob(const ArchiveRequest::JobDump & job, - const std::string & archiveRequestAddress, uint64_t archiveFileId, - uint64_t fileSize, const cta::common::dataStructures::MountPolicy & policy, time_t startTime); - /// This version will check for existence of the job in the queue before - // returns true if a new job was actually inserted. 
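The candidate selection above only fetches shards while the byte/file budget lasts and, for everything it does not visit, falls back to the per-shard counters kept in the pointers. A self-contained sketch of that accounting, with made-up sizes and stand-in types:

#include <cstdint>
#include <iostream>
#include <vector>

struct ShardStats { uint64_t jobs; uint64_t bytes; };   // as kept in the shard pointer
using Shard = std::vector<uint64_t>;                    // job sizes, stand-in for a shard

int main() {
  std::vector<Shard> shards{{500, 500}, {500, 500}, {500, 500}};
  std::vector<ShardStats> pointers{{2, 1000}, {2, 1000}, {2, 1000}};
  const uint64_t maxFiles = 3, maxBytes = 10000;

  uint64_t candFiles = 0, candBytes = 0, remFiles = 0, remBytes = 0;
  for (std::size_t i = 0; i < shards.size(); ++i) {
    if (candBytes < maxBytes && candFiles < maxFiles) {
      // Visited shard: walk its jobs until the global budget is hit, then record
      // what this shard still holds (overwriting the remainder, since earlier
      // visited shards were fully consumed).
      remFiles = pointers[i].jobs;
      remBytes = pointers[i].bytes;
      for (uint64_t size : shards[i]) {
        if (candBytes >= maxBytes || candFiles >= maxFiles) break;
        candFiles++; candBytes += size;
        remFiles--;  remBytes -= size;
      }
    } else {
      // Unvisited shard: only its pointer statistics are added to the remainder.
      remFiles += pointers[i].jobs;
      remBytes += pointers[i].bytes;
    }
  }
  std::cout << candFiles << " files / " << candBytes << " bytes selected, "
            << remFiles << " files / " << remBytes << " bytes left behind\n";
}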
- bool addJobIfNecessary(const ArchiveRequest::JobDump & job, - const std::string & archiveRequestAddress, uint64_t archiveFileId, - uint64_t fileSize, const cta::common::dataStructures::MountPolicy & policy, time_t startTime); - /// This version will check for existence of the job in the queue before - // returns true if a new job was actually inserted. - bool addOrphanedJobPendingNsCreation(const ArchiveRequest::JobDump& job, - const std::string& archiveToFileAddress, uint64_t fileid, - uint64_t size, const cta::common::dataStructures::MountPolicy & policy); + // Archive jobs management =================================================== + struct JobToAdd { + ArchiveRequest::JobDump job; + const std::string archiveRequestAddress; + uint64_t archiveFileId; + uint64_t fileSize; + const cta::common::dataStructures::MountPolicy policy; + time_t startTime; + }; + /** Add the jobs to the queue. + * The lock will be used to mark the shards as locked (the lock is the same for + * the main object and the shard, the is no shared access. + * As we potentially have to create new shard(s), we need access to the agent + * reference (to generate a non-colliding object name). + * We will also log the shard creation (hence the context) + */ + void addJobsAndCommit(std::list<JobToAdd> & jobsToAdd, AgentReference & agentReference, log::LogContext & lc); /// This version will check for existence of the job in the queue before - // returns true if a new job was actually inserted. - bool addOrphanedJobPendingNsDeletion(const ArchiveRequest::JobDump& job, - const std::string& archiveToFileAddress, - uint64_t fileid, uint64_t size); + // returns the count and sizes of actually added jobs (if any). + struct AdditionSummary { + uint64_t files = 0; + uint64_t bytes = 0; + }; + AdditionSummary addJobsIfNecessaryAndCommit(std::list<JobToAdd> & jobsToAdd, + AgentReference & agentReference, log::LogContext & lc); struct JobsSummary { - uint64_t files; + uint64_t jobs; uint64_t bytes; time_t oldestJobStartTime; uint64_t priority; @@ -86,15 +99,24 @@ public: }; JobsSummary getJobsSummary(); - void removeJob(const std::string &archiveToFileAddress); - class JobDump { - public: + void removeJobsAndCommit(const std::list<std::string> & jobsToRemove); + struct JobDump { uint64_t size; std::string address; uint16_t copyNb; }; std::list<JobDump> dumpJobs(); - + struct CandidateJobList { + uint64_t remainingFilesAfterCandidates = 0; + uint64_t remainingBytesAfterCandidates = 0; + uint64_t candidateFiles = 0; + uint64_t candidateBytes = 0; + std::list<JobDump> candidates; + }; + // The set of archive requests to skip are requests previously identified by the caller as bad, + // which still should be removed from the queue. They will be disregarded from listing. + CandidateJobList getCandidateList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> archiveRequestsToSkip); + // Check that the tape pool is empty (of both tapes and jobs) bool isEmpty(); @@ -104,6 +126,13 @@ public: cta::catalogue::Catalogue & catalogue) override; std::string dump(); + + // The shard size. From experience, 100k is where we start to see performance difference, + // but nothing prevents us from using a smaller size. + // The performance will be roughly flat until the queue size reaches the square of this limit + // (meaning the queue object updates start to take too much time). + // with this current value of 25k, the performance should be roughly flat until 25k^2=625M. 
+ static const uint64_t c_maxShardSize = 25000; }; }} diff --git a/objectstore/ArchiveQueueShard.cpp b/objectstore/ArchiveQueueShard.cpp new file mode 100644 index 0000000000000000000000000000000000000000..345e4ea9f7570648acd4e9cede8d04108bdb4c7b --- /dev/null +++ b/objectstore/ArchiveQueueShard.cpp @@ -0,0 +1,170 @@ + +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "ArchiveQueueShard.hpp" +#include "GenericObject.hpp" +#include <google/protobuf/util/json_util.h> + + + +namespace cta { namespace objectstore { + +ArchiveQueueShard::ArchiveQueueShard(Backend& os): + ObjectOps<serializers::ArchiveQueueShard, serializers::ArchiveQueueShard_t>(os) { } + +ArchiveQueueShard::ArchiveQueueShard(const std::string& address, Backend& os): + ObjectOps<serializers::ArchiveQueueShard, serializers::ArchiveQueueShard_t>(os, address) { } + +ArchiveQueueShard::ArchiveQueueShard(GenericObject& go): + ObjectOps<serializers::ArchiveQueueShard, serializers::ArchiveQueueShard_t>(go.objectStore()) { + // Here we transplant the generic object into the new object + go.transplantHeader(*this); + // And interpret the header. 
+ getPayloadFromHeader(); +} + +void ArchiveQueueShard::rebuild() { + checkPayloadWritable(); + uint64_t totalSize=0; + for (auto j: m_payload.archivejobs()) { + totalSize += j.size(); + } + m_payload.set_archivejobstotalsize(totalSize); +} + +std::string ArchiveQueueShard::dump() { + checkPayloadReadable(); + google::protobuf::util::JsonPrintOptions options; + options.add_whitespace = true; + options.always_print_primitive_fields = true; + std::string headerDump; + google::protobuf::util::MessageToJsonString(m_payload, &headerDump, options); + return headerDump; +} + +void ArchiveQueueShard::garbageCollect(const std::string& presumedOwner, AgentReference& agentReference, log::LogContext& lc, cta::catalogue::Catalogue& catalogue) { + throw exception::Exception("In ArchiveQueueShard::garbageCollect(): garbage collection should not be necessary for this type of object."); +} + +ArchiveQueue::CandidateJobList ArchiveQueueShard::getCandidateJobList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> archiveRequestsToSkip) { + checkPayloadReadable(); + ArchiveQueue::CandidateJobList ret; + ret.remainingBytesAfterCandidates = m_payload.archivejobstotalsize(); + ret.remainingFilesAfterCandidates = m_payload.archivejobs_size(); + for (auto & j: m_payload.archivejobs()) { + if (!archiveRequestsToSkip.count(j.address())) { + ret.candidates.push_back({j.size(), j.address(), (uint16_t)j.copynb()}); + ret.candidateBytes += j.size(); + ret.candidateFiles ++; + } + ret.remainingBytesAfterCandidates -= j.size(); + ret.remainingFilesAfterCandidates--; + if (ret.candidateBytes >= maxBytes || ret.candidateFiles >= maxFiles) break; + } + return ret; +} + +auto ArchiveQueueShard::removeJobs(const std::list<std::string>& jobsToRemove) -> RemovalResult { + checkPayloadWritable(); + RemovalResult ret; + uint64_t totalSize = m_payload.archivejobstotalsize(); + auto * jl=m_payload.mutable_archivejobs(); + for (auto &rrt: jobsToRemove) { + bool found = false; + do { + found = false; + // Push the found entry all the way to the end. 
+ for (size_t i=0; i<(size_t)jl->size(); i++) { + if (jl->Get(i).address() == rrt) { + found = true; + const auto & j = jl->Get(i); + ret.removedJobs.emplace_back(JobInfo()); + ret.removedJobs.back().address = j.address(); + ret.removedJobs.back().copyNb = j.copynb(); + ret.removedJobs.back().maxDrivesAllowed = j.maxdrivesallowed(); + ret.removedJobs.back().minArchiveRequestAge = j.minarchiverequestage(); + ret.removedJobs.back().priority = j.priority(); + ret.removedJobs.back().size = j.size(); + ret.removedJobs.back().startTime = j.starttime(); + ret.bytesRemoved += j.size(); + totalSize -= j.size(); + ret.jobsRemoved++; + m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize() - j.size()); + while (i+1 < (size_t)jl->size()) { + jl->SwapElements(i, i+1); + i++; + } + break; + } + } + // and remove it + if (found) + jl->RemoveLast(); + } while (found); + } + ret.bytesAfter = totalSize; + ret.jobsAfter = m_payload.archivejobs_size(); + return ret; +} + +void ArchiveQueueShard::initialize(const std::string& owner) { + ObjectOps<serializers::ArchiveQueueShard, serializers::ArchiveQueueShard_t>::initialize(); + setOwner(owner); + setBackupOwner(owner); + m_payload.set_archivejobstotalsize(0); + m_payloadInterpreted=true; +} + +auto ArchiveQueueShard::dumpJobs() -> std::list<JobInfo> { + checkPayloadReadable(); + std::list<JobInfo> ret; + for (auto &j: m_payload.archivejobs()) { + ret.emplace_back(JobInfo{j.size(), j.address(), (uint16_t)j.copynb(), j.priority(), + j.minarchiverequestage(), j.maxdrivesallowed(), (time_t)j.starttime()}); + } + return ret; +} + +auto ArchiveQueueShard::getJobsSummary() -> JobsSummary { + checkPayloadReadable(); + JobsSummary ret; + ret.bytes = m_payload.archivejobstotalsize(); + ret.jobs = m_payload.archivejobs_size(); + return ret; +} + +uint64_t ArchiveQueueShard::addJob(ArchiveQueue::JobToAdd& jobToAdd) { + checkPayloadWritable(); + auto * j = m_payload.mutable_archivejobs()->Add(); + j->set_address(jobToAdd.archiveRequestAddress); + j->set_size(jobToAdd.fileSize); + j->set_fileid(jobToAdd.archiveFileId); + j->set_copynb(jobToAdd.job.copyNb); + j->set_maxdrivesallowed(jobToAdd.policy.maxDrivesAllowed); + j->set_priority(jobToAdd.policy.archivePriority); + j->set_minarchiverequestage(jobToAdd.policy.archiveMinRequestAge); + j->set_starttime(jobToAdd.startTime); + m_payload.set_archivejobstotalsize(m_payload.archivejobstotalsize()+jobToAdd.fileSize); + return m_payload.archivejobs_size(); +} + + + + +}} \ No newline at end of file diff --git a/objectstore/ArchiveQueueShard.hpp b/objectstore/ArchiveQueueShard.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1a2074305da88d48db600bc8e0a110ecfb66b280 --- /dev/null +++ b/objectstore/ArchiveQueueShard.hpp @@ -0,0 +1,89 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#pragma once + +#include "ArchiveQueue.hpp" + +namespace cta { namespace objectstore { + +class ArchiveQueueShard: public ObjectOps<serializers::ArchiveQueueShard, serializers::ArchiveQueueShard_t> { +public: + // Constructor with undefined address + ArchiveQueueShard(Backend & os); + + // Constructor + ArchiveQueueShard(const std::string & address, Backend & os); + + // Upgrader form generic object + ArchiveQueueShard(GenericObject & go); + + // Forbid/hide base initializer + void initialize() = delete; + + // Initializer + void initialize(const std::string & owner); + + // dumper + std::string dump(); + + void garbageCollect(const std::string& presumedOwner, AgentReference& agentReference, log::LogContext& lc, cta::catalogue::Catalogue& catalogue) override; + + struct JobInfo { + uint64_t size; + std::string address; + uint16_t copyNb; + uint64_t priority; + uint64_t minArchiveRequestAge; + uint64_t maxDrivesAllowed; + time_t startTime; + }; + std::list<JobInfo> dumpJobs(); + + struct JobsSummary { + uint64_t jobs; + uint64_t bytes; + }; + JobsSummary getJobsSummary(); + + /** + * adds job, returns new size + */ + uint64_t addJob(ArchiveQueue::JobToAdd & jobToAdd); + + + struct RemovalResult { + uint64_t jobsRemoved = 0; + uint64_t jobsAfter = 0; + uint64_t bytesRemoved = 0; + uint64_t bytesAfter = 0; + std::list<JobInfo> removedJobs; + }; + /** + * Removes jobs from shard (and from the to remove list). Returns list of removed jobs. + */ + RemovalResult removeJobs(const std::list<std::string> & jobsToRemove); + + ArchiveQueue::CandidateJobList getCandidateJobList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> archiveRequestsToSkip); + + /** Re compute summaries in case they do not match the array content. */ + void rebuild(); + +}; + +}} // namespace cta::objectstore \ No newline at end of file diff --git a/objectstore/ArchiveRequest.cpp b/objectstore/ArchiveRequest.cpp index f3197ca10c8c47191f9418ac8dfe6f35631df60b..0462e33ac7dd0335981f512c413bc533ecae0228 100644 --- a/objectstore/ArchiveRequest.cpp +++ b/objectstore/ArchiveRequest.cpp @@ -59,8 +59,6 @@ void cta::objectstore::ArchiveRequest::addJob(uint16_t copyNumber, j->set_status(serializers::ArchiveJobStatus::AJS_LinkingToArchiveQueue); j->set_tapepool(tapepool); j->set_owner(archivequeueaddress); - // XXX This field (archivequeueaddress) is a leftover from a past layout when tape pools were static - // in the object store, and should be eventually removed. 
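The removal loop in ArchiveQueueShard::removeJobs() bubbles each matched entry to the last position before dropping it, because protobuf repeated fields expose SwapElements()/RemoveLast() but no cheap erase in the middle. The same idiom on a std::vector, as a self-contained illustration:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Bubble the matched element to the end with swaps, then drop the last element.
// Repeating until nothing matches also handles duplicate entries, as in the shard code.
void removeByValue(std::vector<std::string>& jobs, const std::string& target) {
  bool found = true;
  while (found) {
    found = false;
    for (std::size_t i = 0; i < jobs.size(); i++) {
      if (jobs[i] == target) {
        found = true;
        while (i + 1 < jobs.size()) { std::swap(jobs[i], jobs[i + 1]); i++; }
        break;
      }
    }
    if (found) jobs.pop_back();   // the target is now the last element
  }
}

int main() {
  std::vector<std::string> jobs{"a", "b", "c", "b"};
  removeByValue(jobs, "b");
  for (const auto& j : jobs) std::cout << j << " ";   // prints: a c
  std::cout << "\n";
}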
j->set_archivequeueaddress(""); j->set_totalretries(0); j->set_retrieswithinmount(0); @@ -315,9 +313,10 @@ void ArchiveRequest::garbageCollect(const std::string &presumedOwner, AgentRefer jd.tapePool = j->tapepool(); jd.owner = j->owner(); jd.status = j->status(); - if (aq.addJobIfNecessary(jd, getAddressIfSet(), getArchiveFile().archiveFileID, - getArchiveFile().fileSize, getMountPolicy(), getEntryLog().time)) - aq.commit(); + std::list<ArchiveQueue::JobToAdd> jta; + jta.push_back({jd, getAddressIfSet(), getArchiveFile().archiveFileID, + getArchiveFile().fileSize, getMountPolicy(), getEntryLog().time}); + aq.addJobsIfNecessaryAndCommit(jta, agentReference, lc); auto queueUpdateTime = t.secs(utils::Timer::resetCounter); j->set_owner(aq.getAddressIfSet()); j->set_status(serializers::AJS_PendingMount); diff --git a/objectstore/BackendVFS.cpp b/objectstore/BackendVFS.cpp index 108d6ea4ccea6dd9cfe0143af55f396b81cdb40c..2f3daad10c122c84851829f0f2e43633f9406177 100644 --- a/objectstore/BackendVFS.cpp +++ b/objectstore/BackendVFS.cpp @@ -23,6 +23,7 @@ #include "common/Timer.hpp" #include "tests/TestsCompileTimeSwitches.hpp" #include "common/exception/Exception.hpp" +#include "common/threading/MutexLocker.hpp" #include <fstream> #include <stdlib.h> @@ -441,24 +442,29 @@ void BackendVFS::AsyncDeleter::wait() { } BackendVFS::AsyncLockfreeFetcher::AsyncLockfreeFetcher(BackendVFS& be, const std::string& name): - m_backend(be), m_name(name), - m_job(std::async(std::launch::async, - [&](){ - auto ret = m_backend.read(name); - ANNOTATE_HAPPENS_BEFORE(&m_job); - return ret; - })) -{ } + m_backend(be), m_name(name) { + cta::threading::Thread::start(); +} + +void BackendVFS::AsyncLockfreeFetcher::run() { + threading::MutexLocker ml(m_mutex); + try { + m_value = m_backend.read(m_name); + } catch (...) { + m_exception = std::current_exception(); + } +} Backend::AsyncLockfreeFetcher* BackendVFS::asyncLockfreeFetch(const std::string& name) { return new AsyncLockfreeFetcher(*this, name); } std::string BackendVFS::AsyncLockfreeFetcher::wait() { - auto ret = m_job.get(); - ANNOTATE_HAPPENS_AFTER(&m_job); - ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&m_job); - return ret; + cta::threading::Thread::wait(); + threading::MutexLocker ml(m_mutex); + if (m_exception) + std::rethrow_exception(m_exception); + return m_value; } std::string BackendVFS::Parameters::toStr() { diff --git a/objectstore/BackendVFS.hpp b/objectstore/BackendVFS.hpp index 39952d955344501300a8666244ca4b41576db256..6bd2cd03c7d1ee78a5625428fe7121d83bd2e86b 100644 --- a/objectstore/BackendVFS.hpp +++ b/objectstore/BackendVFS.hpp @@ -19,6 +19,7 @@ #pragma once #include "Backend.hpp" +#include "common/threading/Thread.hpp" #include <future> #include <functional> @@ -127,7 +128,7 @@ public: /** * A class mimicking AIO using C++ async tasks */ - class AsyncLockfreeFetcher: public Backend::AsyncLockfreeFetcher { + class AsyncLockfreeFetcher: public Backend::AsyncLockfreeFetcher, public cta::threading::Thread { public: AsyncLockfreeFetcher(BackendVFS & be, const std::string & name); std::string wait() override; @@ -136,8 +137,14 @@ public: BackendVFS &m_backend; /** The object name */ const std::string m_name; - /** The future that will both do the job and allow synchronization with the caller. 
*/ - std::future<std::string> m_job; + /** The fetched value */ + std::string m_value; + /** The exception we might receive */ + std::exception_ptr m_exception = nullptr; + /** A mutex to make helgrind happy */ + cta::threading::Mutex m_mutex; + /** The thread that will both do the job and allow synchronization with the caller. */ + void run() override; }; Backend::AsyncUpdater* asyncUpdate(const std::string & name, std::function <std::string(const std::string &)> & update) override; diff --git a/objectstore/CMakeLists.txt b/objectstore/CMakeLists.txt index 734424ff3606e9555ec7f090249fc12898c31d1e..aaed1b155875e5677670dee1cfd6b7846f4b140e 100644 --- a/objectstore/CMakeLists.txt +++ b/objectstore/CMakeLists.txt @@ -51,6 +51,7 @@ SET_SOURCE_FILES_PROPERTIES(${CTAProtoDependants} include_directories (${PROTOBUF3_INCLUDE_DIRS}) add_library (ctaobjectstore SHARED ${CTAProtoSources} + ObjectOps.cpp RootEntry.cpp Agent.cpp AgentHeartbeatThread.cpp @@ -58,6 +59,7 @@ add_library (ctaobjectstore SHARED AgentRegister.cpp AgentWatchdog.cpp ArchiveQueue.cpp + ArchiveQueueShard.cpp RetrieveQueue.cpp ArchiveRequest.cpp RetrieveRequest.cpp diff --git a/objectstore/GarbageCollector.cpp b/objectstore/GarbageCollector.cpp index de01b4dbfbe85f3dc248513d742cf2fb054fa85c..55ef6cd19f82b9d548a6c24e5d0f93bef06ee359 100644 --- a/objectstore/GarbageCollector.cpp +++ b/objectstore/GarbageCollector.cpp @@ -363,8 +363,7 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon // 1) Get the archive requests done. for (auto & tapepool: ownedObjectSorter.archiveQueuesAndRequests) { double queueLockFetchTime=0; - double queuePreparationTime=0; - double queueCommitTime=0; + double queueProcessAndCommitTime=0; double requestsUpdatePreparationTime=0; double requestsUpdatingTime=0; double queueRecommitTime=0; @@ -376,38 +375,32 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon uint64_t bytesBefore=0; utils::Timer t; // Get the archive queue and add references to the jobs in it. - bool didAddToQueue=false; ArchiveQueue aq(m_objectStore); ScopedExclusiveLock aql; Helpers::getLockedAndFetchedQueue<ArchiveQueue>(aq, aql, m_ourAgentReference, tapepool.first, lc); queueLockFetchTime = t.secs(utils::Timer::resetCounter); auto jobsSummary=aq.getJobsSummary(); - filesBefore=jobsSummary.files; + filesBefore=jobsSummary.jobs; bytesBefore=jobsSummary.bytes; // We have the queue. We will loop on the requests, add them to the queue. We will launch their updates // after committing the queue. + std::list<ArchiveQueue::JobToAdd> jtal; for (auto & ar: tapepool.second) { // Determine the copy number and feed the queue with it. for (auto &j: ar->dumpJobs()) { if (j.tapePool == tapepool.first) { - if (aq.addJobIfNecessary(j, ar->getAddressIfSet(), ar->getArchiveFile().archiveFileID, - ar->getArchiveFile().fileSize, ar->getMountPolicy(), ar->getEntryLog().time)) { - didAddToQueue = true; - filesQueued++; - bytesQueued += ar->getArchiveFile().fileSize; - } + jtal.push_back({j, ar->getAddressIfSet(), ar->getArchiveFile().archiveFileID, + ar->getArchiveFile().fileSize, ar->getMountPolicy(), ar->getEntryLog().time}); } } } - queuePreparationTime = t.secs(utils::Timer::resetCounter); + auto addedJobs = aq.addJobsIfNecessaryAndCommit(jtal, m_ourAgentReference, lc); + queueProcessAndCommitTime = t.secs(utils::Timer::resetCounter); // If we have an unexpected failure, we will re-run the individual garbage collection. Before that, // we will NOT remove the object from agent's ownership. 
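The BackendVFS::AsyncLockfreeFetcher rework above trades std::async for an explicit thread plus a mutex and an exception_ptr, so the caller's wait() can join, then either rethrow or return the value. A stand-alone sketch of that shape, with std::thread and std::mutex in place of the cta::threading wrappers (an illustration, not the CTA class):

#include <exception>
#include <functional>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>

class AsyncFetcher {
public:
  // The worker stores either a value or an exception, under the mutex.
  explicit AsyncFetcher(std::function<std::string()> read)
    : m_thread([this, read]() {
        std::lock_guard<std::mutex> ml(m_mutex);
        try { m_value = read(); }
        catch (...) { m_exception = std::current_exception(); }
      }) {}

  // Join, then rethrow or return (single-use, like the original wait()).
  std::string wait() {
    m_thread.join();
    std::lock_guard<std::mutex> ml(m_mutex);
    if (m_exception) std::rethrow_exception(m_exception);
    return m_value;
  }

private:
  std::string m_value;
  std::exception_ptr m_exception = nullptr;
  std::mutex m_mutex;      // keeps tools like helgrind happy, as in the original
  std::thread m_thread;    // declared last so the other members exist when it starts
};

int main() {
  AsyncFetcher ok([] { return std::string("payload"); });
  std::cout << ok.wait() << "\n";

  AsyncFetcher broken([]() -> std::string { throw std::runtime_error("no such object"); });
  try { broken.wait(); } catch (std::exception& ex) { std::cout << ex.what() << "\n"; }
}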
This variable is declared a bit ahead so // the goto will not cross its initialization. std::set<std::string> jobsIndividuallyGCed; - if (didAddToQueue) { - aq.commit(); - queueCommitTime = t.secs(utils::Timer::resetCounter); - } else { + if (!addedJobs.files) { goto agentCleanupForArchive; } // We will keep individual references for each job update we launch so that we make @@ -433,7 +426,7 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon } requestsUpdatePreparationTime = t.secs(utils::Timer::resetCounter); // Now collect the results. - bool aqUpdated=false; + std::list<std::string> requestsToDequeue; for (auto & arup: arUpdatersParams) { try { arup.updater->wait(); @@ -474,16 +467,15 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon // In all cases, the object did NOT make it to the queue. filesDequeued ++; bytesDequeued += arup.archiveRequest->getArchiveFile().fileSize; - aq.removeJob(arup.archiveRequest->getAddressIfSet()); - aqUpdated=true; + requestsToDequeue.push_back(arup.archiveRequest->getAddressIfSet()); } } requestsUpdatingTime = t.secs(utils::Timer::resetCounter); - if (aqUpdated) { - aq.commit(); + if (requestsToDequeue.size()) { + aq.removeJobsAndCommit(requestsToDequeue); log::ScopedParamContainer params(lc); params.add("archiveQueueObject", aq.getAddressIfSet()); - lc.log(log::INFO, "In GarbageCollector::cleanupDeadAgent(): RE-committed archive queue after error handling."); + lc.log(log::INFO, "In GarbageCollector::cleanupDeadAgent(): Cleaned up and re-committed archive queue after error handling."); queueRecommitTime = t.secs(utils::Timer::resetCounter); } } @@ -500,11 +492,10 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon .add("bytesDequeuedAfterErrors", bytesDequeued) .add("filesBefore", filesBefore) .add("bytesBefore", bytesBefore) - .add("filesAfter", jobsSummary.files) + .add("filesAfter", jobsSummary.jobs) .add("bytesAfter", jobsSummary.bytes) .add("queueLockFetchTime", queueLockFetchTime) - .add("queuePreparationTime", queuePreparationTime) - .add("queueCommitTime", queueCommitTime) + .add("queueProcessAndCommitTime", queueProcessAndCommitTime) .add("requestsUpdatePreparationTime", requestsUpdatePreparationTime) .add("requestsUpdatingTime", requestsUpdatingTime) .add("queueRecommitTime", queueRecommitTime); @@ -532,8 +523,7 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon // Then should hence not have changes since we pre-fetched them. for (auto & tape: ownedObjectSorter.retrieveQueuesAndRequests) { double queueLockFetchTime=0; - double queuePreparationTime=0; - double queueCommitTime=0; + double queueProcessAndCommitTime=0; double requestsUpdatePreparationTime=0; double requestsUpdatingTime=0; double queueRecommitTime=0; @@ -545,7 +535,6 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon uint64_t bytesBefore=0; utils::Timer t; // Get the retrieve queue and add references to the jobs to it. - bool didAddToQueue=false; RetrieveQueue rq(m_objectStore); ScopedExclusiveLock rql; Helpers::getLockedAndFetchedQueue<RetrieveQueue>(rq,rql, m_ourAgentReference, tape.first, lc); @@ -553,30 +542,26 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon auto jobsSummary=rq.getJobsSummary(); filesBefore=jobsSummary.files; bytesBefore=jobsSummary.bytes; - // We have the queue. We will loop on the requests, add them to the queue. 
We will launch their updates + // Prepare the list of requests to add to the queue (if needed). + std::list<RetrieveQueue::JobToAdd> jta; + // We have the queue. We will loop on the requests, add them to the list. We will launch their updates // after committing the queue. for (auto & rr: tape.second) { // Determine the copy number and feed the queue with it. for (auto &tf: rr->getArchiveFile().tapeFiles) { if (tf.second.vid == tape.first) { - if (rq.addJobIfNecessary(tf.second.copyNb, tf.second.fSeq, rr->getAddressIfSet(), rr->getArchiveFile().fileSize, - rr->getRetrieveFileQueueCriteria().mountPolicy, rr->getEntryLog().time)) { - didAddToQueue = true; - filesQueued++; - bytesQueued += rr->getArchiveFile().fileSize; - } + jta.push_back({tf.second.copyNb, tf.second.fSeq, rr->getAddressIfSet(), rr->getArchiveFile().fileSize, + rr->getRetrieveFileQueueCriteria().mountPolicy, rr->getEntryLog().time}); } } } - queuePreparationTime = t.secs(utils::Timer::resetCounter); + auto addedJobs = rq.addJobsIfNecessaryAndCommit(jta); + queueProcessAndCommitTime = t.secs(utils::Timer::resetCounter); // If we have an unexpected failure, we will re-run the individual garbage collection. Before that, // we will NOT remove the object from agent's ownership. This variable is declared a bit ahead so // the goto will not cross its initialization. std::set<std::string> jobsIndividuallyGCed; - if (didAddToQueue) { - rq.commit(); - queueCommitTime = t.secs(utils::Timer::resetCounter); - } else { + if (!addedJobs.files) { goto agentCleanupForRetrieve; } // We will keep individual references for each job update we launch so that we make @@ -601,7 +586,7 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon } requestsUpdatePreparationTime = t.secs(utils::Timer::resetCounter); // Now collect the results. - bool rqUpdated=false; + std::list<std::string> requestsToDequeue; for (auto & rrup: rrUpdatersParams) { try { rrup.updater->wait(); @@ -644,16 +629,15 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon // In all cases, the object did NOT make it to the queue. 
filesDequeued ++; bytesDequeued += rrup.retrieveRequest->getArchiveFile().fileSize; - rq.removeJob(rrup.retrieveRequest->getAddressIfSet()); - rqUpdated=true; + requestsToDequeue.push_back(rrup.retrieveRequest->getAddressIfSet()); } } requestsUpdatingTime = t.secs(utils::Timer::resetCounter); - if (rqUpdated) { - rq.commit(); + if (requestsToDequeue.size()) { + rq.removeJobsAndCommit(requestsToDequeue); log::ScopedParamContainer params(lc); params.add("retreveQueueObject", rq.getAddressIfSet()); - lc.log(log::INFO, "In GarbageCollector::cleanupDeadAgent(): RE-committed retrieve queue after error handling."); + lc.log(log::INFO, "In GarbageCollector::cleanupDeadAgent(): Cleaned up and re-committed retrieve queue after error handling."); queueRecommitTime = t.secs(utils::Timer::resetCounter); } } @@ -673,8 +657,7 @@ void GarbageCollector::cleanupDeadAgent(const std::string & address, log::LogCon .add("filesAfter", jobsSummary.files) .add("bytesAfter", jobsSummary.bytes) .add("queueLockFetchTime", queueLockFetchTime) - .add("queuePreparationTime", queuePreparationTime) - .add("queueCommitTime", queueCommitTime) + .add("queuePreparationTime", queueProcessAndCommitTime) .add("requestsUpdatePreparationTime", requestsUpdatePreparationTime) .add("requestsUpdatingTime", requestsUpdatingTime) .add("queueRecommitTime", queueRecommitTime); diff --git a/objectstore/GarbageCollectorTest.cpp b/objectstore/GarbageCollectorTest.cpp index 5b3e6edbc98705fe92973a44b7a15ffe09bf7d9c..092df08f8e6fb77b76d9fafef87e696631e57162 100644 --- a/objectstore/GarbageCollectorTest.cpp +++ b/objectstore/GarbageCollectorTest.cpp @@ -329,7 +329,7 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) { cta::objectstore::ArchiveQueue aq(tpAddr[i], be); } // Create the various ATFR's, stopping one step further each time. - int pass=0; + unsigned int pass=0; while (true) { // -just referenced @@ -385,8 +385,9 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) { policy.archiveMinRequestAge = 0; policy.archivePriority = 1; policy.maxDrivesAllowed = 1; - aq.addJob(jd, ar.getAddressIfSet(), ar.getArchiveFile().archiveFileID, 1000+pass, policy, time(NULL)); - aq.commit(); + std::list <cta::objectstore::ArchiveQueue::JobToAdd> jta; + jta.push_back({jd, ar.getAddressIfSet(), ar.getArchiveFile().archiveFileID, 1000U+pass, policy, time(NULL)}); + aq.addJobsAndCommit(jta, agentRef, lc); } if (pass < 4) { pass++; continue; } // TODO: partially migrated or selected @@ -403,8 +404,9 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) { policy.archiveMinRequestAge = 0; policy.archivePriority = 1; policy.maxDrivesAllowed = 1; - aq.addJob(jd, ar.getAddressIfSet(), ar.getArchiveFile().archiveFileID, 1000+pass, policy, time(NULL)); - aq.commit(); + std::list <cta::objectstore::ArchiveQueue::JobToAdd> jta; + jta.push_back({jd, ar.getAddressIfSet(), ar.getArchiveFile().archiveFileID, 1000+pass, policy, time(NULL)}); + aq.addJobsAndCommit(jta, agentRef, lc); } if (pass < 5) { pass++; continue; } // - Still marked a not owned but referenced in the agent @@ -437,8 +439,8 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) { auto d1=aq1.dumpJobs(); // We expect all jobs with sizes 1002-1005 inclusive to be connected to // their respective tape pools. 
- ASSERT_EQ(5, aq0.getJobsSummary().files); - ASSERT_EQ(5, aq1.getJobsSummary().files); + ASSERT_EQ(5, aq0.getJobsSummary().jobs); + ASSERT_EQ(5, aq1.getJobsSummary().jobs); } // Unregister gc's agent cta::objectstore::ScopedExclusiveLock gcal(gcAgent); @@ -454,10 +456,11 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) { cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tp), be); cta::objectstore::ScopedExclusiveLock aql(aq); aq.fetch(); + std::list<std::string> ajtr; for (auto &j: aq.dumpJobs()) { - aq.removeJob(j.address); + ajtr.push_back(j.address); } - aq.commit(); + aq.removeJobsAndCommit(ajtr); aql.release(); // Remove queues from root re.removeArchiveQueueAndCommit(tp, lc); @@ -544,7 +547,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) { rqc.archiveFile.reconciliationTime = 0; rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo(); rqc.archiveFile.diskInstance = "eoseos"; - rqc.archiveFile.fileSize = 667; + rqc.archiveFile.fileSize = 1000 + pass; rqc.archiveFile.storageClass = "sc"; rqc.archiveFile.tapeFiles[1].blockId=0; rqc.archiveFile.tapeFiles[1].compressedSize=1; @@ -584,11 +587,12 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) { cta::objectstore::RetrieveQueue rq(tAddr[0], be); cta::objectstore::ScopedExclusiveLock rql(rq); rq.fetch(); - rq.addJob(1,rqc.archiveFile.tapeFiles[1].fSeq, rr.getAddressIfSet(), rqc.archiveFile.fileSize, rqc.mountPolicy, sReq.creationLog.time); - rq.commit(); + std::list <cta::objectstore::RetrieveQueue::JobToAdd> jta; + jta.push_back({1,rqc.archiveFile.tapeFiles[1].fSeq, rr.getAddressIfSet(), rqc.archiveFile.fileSize, rqc.mountPolicy, sReq.creationLog.time}); + rq.addJobsAndCommit(jta); } if (pass < 5) { pass++; continue; } - // - Still marked a not owned but referenced in the agent + // - Still marked as not owned but referenced in the agent { rr.setOwner(tAddr[0]); rr.setActiveCopyNumber(1); @@ -635,10 +639,11 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) { cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress(vid), be); cta::objectstore::ScopedExclusiveLock rql(rq); rq.fetch(); + std::list<std::string> jtrl; for (auto &j: rq.dumpJobs()) { - rq.removeJob(j.address); + jtrl.push_back(j.address); } - rq.commit(); + rq.removeJobsAndCommit(jtrl); rql.release(); // Remove queues from root re.removeRetrieveQueueAndCommit(vid, lc); diff --git a/objectstore/GenericObject.cpp b/objectstore/GenericObject.cpp index 3edfca13d6ea5d6039117c95352b6d35721bd895..d980678369bac1987caa442665750ca6d65a910f 100644 --- a/objectstore/GenericObject.cpp +++ b/objectstore/GenericObject.cpp @@ -25,6 +25,7 @@ #include "RootEntry.hpp" #include "SchedulerGlobalLock.hpp" #include "ArchiveQueue.hpp" +#include "ArchiveQueueShard.hpp" #include "RetrieveQueue.hpp" #include "DriveRegister.hpp" #include <stdexcept> @@ -148,18 +149,17 @@ namespace { using cta::objectstore::GenericObject; using cta::objectstore::ScopedExclusiveLock; template <class C> - std::string dumpWithType(GenericObject * gop, ScopedSharedLock& lock) { + std::string dumpWithType(GenericObject * gop) { C typedObject(*gop); - lock.transfer(typedObject); + ScopedLock::transfer(*gop, typedObject); std::string ret = typedObject.dump(); // Release the lock now as if we let the caller do, it will point // to the then-removed typedObject. 
- lock.release(); return ret; } } -std::string GenericObject::dump(ScopedSharedLock& lock) { +std::string GenericObject::dump() { checkHeaderReadable(); google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; @@ -169,31 +169,34 @@ std::string GenericObject::dump(ScopedSharedLock& lock) { google::protobuf::util::MessageToJsonString(m_header, &headerDump, options); switch(m_header.type()) { case serializers::RootEntry_t: - bodyDump = dumpWithType<RootEntry>(this, lock); + bodyDump = dumpWithType<RootEntry>(this); break; case serializers::AgentRegister_t: - bodyDump = dumpWithType<AgentRegister>(this, lock); + bodyDump = dumpWithType<AgentRegister>(this); break; case serializers::Agent_t: - bodyDump = dumpWithType<Agent>(this, lock); + bodyDump = dumpWithType<Agent>(this); break; case serializers::DriveRegister_t: - bodyDump = dumpWithType<DriveRegister>(this, lock); + bodyDump = dumpWithType<DriveRegister>(this); break; case serializers::ArchiveQueue_t: - bodyDump = dumpWithType<cta::objectstore::ArchiveQueue>(this, lock); + bodyDump = dumpWithType<cta::objectstore::ArchiveQueue>(this); + break; + case serializers::ArchiveQueueShard_t: + bodyDump = dumpWithType<cta::objectstore::ArchiveQueueShard>(this); break; case serializers::RetrieveQueue_t: - bodyDump = dumpWithType<cta::objectstore::RetrieveQueue>(this, lock); + bodyDump = dumpWithType<cta::objectstore::RetrieveQueue>(this); break; case serializers::ArchiveRequest_t: - bodyDump = dumpWithType<ArchiveRequest>(this, lock); + bodyDump = dumpWithType<ArchiveRequest>(this); break; case serializers::RetrieveRequest_t: - bodyDump = dumpWithType<RetrieveRequest>(this, lock); + bodyDump = dumpWithType<RetrieveRequest>(this); break; case serializers::SchedulerGlobalLock_t: - bodyDump = dumpWithType<SchedulerGlobalLock>(this, lock); + bodyDump = dumpWithType<SchedulerGlobalLock>(this); break; default: std::stringstream err; diff --git a/objectstore/GenericObject.hpp b/objectstore/GenericObject.hpp index 45e6320ce1edbbc5bf6fe1f0c6e68f836c4e8fa6..40bdc95fbce2a7c9d28bbfba735364c93d9e9b17 100644 --- a/objectstore/GenericObject.hpp +++ b/objectstore/GenericObject.hpp @@ -78,7 +78,7 @@ public: * * @param lock reference to the generic object's lock */ - std::string dump(ScopedSharedLock & lock); + std::string dump(); CTA_GENERATE_EXCEPTION_CLASS(UnsupportedType); diff --git a/objectstore/ObjectOps.cpp b/objectstore/ObjectOps.cpp new file mode 100644 index 0000000000000000000000000000000000000000..33ebad952f99721b90ac4cb0cd924833d7d9eb84 --- /dev/null +++ b/objectstore/ObjectOps.cpp @@ -0,0 +1,27 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "ObjectOps.hpp" + +namespace cta { namespace objectstore { + +ObjectOpsBase::~ObjectOpsBase() { + if (m_lockForSubObject) m_lockForSubObject->dereferenceSubObject(*this); +} + +}} \ No newline at end of file diff --git a/objectstore/ObjectOps.hpp b/objectstore/ObjectOps.hpp index a88ea5f7a6724603b418efb90d3b760c35aab457..dd0bbaf3adcd537a97009781bc56047a65b8f817 100644 --- a/objectstore/ObjectOps.hpp +++ b/objectstore/ObjectOps.hpp @@ -30,6 +30,8 @@ namespace cta { namespace objectstore { class AgentReference; +class ScopedLock; +class ScopedExclusiveLock; class ObjectOpsBase { friend class ScopedLock; @@ -42,6 +44,8 @@ protected: m_headerInterpreted(false), m_payloadInterpreted(false), m_existingObject(false), m_locksCount(0), m_locksForWriteCount(0) {} + + virtual ~ObjectOpsBase(); public: CTA_GENERATE_EXCEPTION_CLASS(AddressNotSet); CTA_GENERATE_EXCEPTION_CLASS(NotLocked); @@ -82,6 +86,8 @@ protected: void checkWritable() { if (m_existingObject && !m_locksForWriteCount) throw NotLocked("In ObjectOps::checkWritable: object not locked for write"); + if (m_existingObject && !m_exclusiveLock && !m_lockForSubObject) + throw exception::Exception("In ObjectOps::checkWritable: missing reference to exclusive lock"); } void checkReadable() { @@ -163,6 +169,12 @@ protected: int m_locksCount; int m_locksForWriteCount; bool m_noLock = false; + // When locked exclusively, we will keep a reference to the lock, + // so we can propagate it to sub objects with minimal passing through. + ScopedExclusiveLock * m_exclusiveLock = nullptr; + // When being locked as a sub object, we will keep a reference to the lock + // we are provided with. Likewise, the lock will update ourselves when released. + ScopedLock * m_lockForSubObject = nullptr; }; class ScopedLock { @@ -176,6 +188,39 @@ public: return m_locked; } + /** + * Virtual function (implemented differently in shared and exclusive locks), + * marking the object as locked. + * @param objectOps pointer to the ObjectOpsBase. + */ + virtual void setObjectLocked(ObjectOpsBase * objectOps) = 0; + + /** + * Virtual function (implemented differently in shared and exclusive locks), + * marking the object as unlocked. + * @param objectOps pointer to the ObjectOpsBase. + */ + virtual void setObjectUnlocked(ObjectOpsBase * objectOps) = 0; + + /** + * Expand the scope of the current lock to a sub object, which will also be covered + * by this lock. This will allow the sub object to benefit from the same protection + * from lack of proper locking. This feature is to be used with sharded objects. + */ + void includeSubObject(ObjectOpsBase & subObject) { + // To propagate a lock, we should have one to begin with. + checkLocked(); + ObjectOpsBase * oob = & subObject; + // Validate the sub object is defined. + checkObjectAndAddressSet(oob); + // Propagate the lock to the sub object (this is lock type dependant). + setObjectLocked(oob); + // Reference ourselves to the sub object so it can declare it destruction to us. + oob->m_lockForSubObject = this; + // Add a reference to the object. + m_subObjectsOps.push_back(oob); + } + /** Move the locked object reference to a new one. This is done when the locked * object is a GenericObject and the caller instantiated a derived object from * it. The lock follows the move. @@ -183,20 +228,38 @@ public: * use case). 
* New object's locks are moved from the old one (referenced in the lock) */ - void transfer(ObjectOpsBase & newObject) { - decltype(m_objectOps) oldObj(m_objectOps); - m_objectOps = & newObject; + static void transfer(ObjectOpsBase & oldObject, ObjectOpsBase & newObject) { // Transfer the locks from old to new object - m_objectOps->m_locksCount = oldObj->m_locksCount; - m_objectOps->m_locksForWriteCount = oldObj->m_locksForWriteCount; + newObject.m_locksCount = oldObject.m_locksCount; + newObject.m_locksForWriteCount = oldObject.m_locksForWriteCount; + newObject.m_exclusiveLock = oldObject.m_exclusiveLock; + newObject.m_lockForSubObject = oldObject.m_lockForSubObject; + newObject.m_noLock = oldObject.m_noLock; // The old object is not considered locked anymore and should be // discarded. A previous call the the new object's constructor should - oldObj->m_locksCount = 0; - oldObj->m_locksForWriteCount = 0; + oldObject.m_locksCount = 0; + oldObject.m_locksForWriteCount = 0; + oldObject.m_exclusiveLock = nullptr; + oldObject.m_lockForSubObject = nullptr; + oldObject.m_noLock=false; + } + + /** + * + * @param subObject + */ + + /** + * Dereference a sub object at destruction time + * @param subObject + */ + void dereferenceSubObject(ObjectOpsBase & subObject) { + m_subObjectsOps.remove(&subObject); } virtual ~ScopedLock() { - releaseIfNeeded(); + // Each child class will have to call releaseIfNeeded() in their own destructor + // as it relies on pure virtual members of this base class. } CTA_GENERATE_EXCEPTION_CLASS(AlreadyLocked); @@ -207,6 +270,7 @@ protected: ScopedLock(): m_objectOps(NULL), m_locked(false) {} std::unique_ptr<Backend::ScopedLock> m_lock; ObjectOpsBase * m_objectOps; + std::list <ObjectOpsBase *> m_subObjectsOps; bool m_locked; void checkNotLocked() { if (m_locked) @@ -216,20 +280,27 @@ protected: if (!m_locked) throw NotLocked("In ScopedLock::checkLocked: trying to unlock an unlocked lock"); } - void checkObjectAndAddressSet() { - if (!m_objectOps) { + void checkObjectAndAddressSet(ObjectOpsBase * oob = nullptr) { + // By default we deal with the main object. + if (!oob) oob = m_objectOps; + if (!oob) { throw MissingAddress("In ScopedLock::checkAddressSet: trying to lock a NULL object"); - } else if (!m_objectOps->m_nameSet || m_objectOps->m_name.empty()) { + } else if (!oob->m_nameSet || oob->m_name.empty()) { throw MissingAddress("In ScopedLock::checkAddressSet: trying to lock an object without address"); } } virtual void releaseIfNeeded() { if(!m_locked) return; m_lock.reset(NULL); - m_objectOps->m_locksCount--; m_locked = false; + setObjectUnlocked(m_objectOps); // Releasing a lock voids the object content in memory as stored object can now change. 
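The sub-object locking introduced above can be reduced to a small model: the exclusive lock remembers every sub-object it was extended to, and releasing it clears the locked-for-write state and the cached payload of parent and sub-objects alike. A stripped-down sketch (it leaves out the dereference-on-destruction bookkeeping, and the names are illustrative, not the ObjectOps hierarchy):

#include <iostream>
#include <list>

struct Object {
  int locksForWrite = 0;
  bool payloadValid = false;
};

class ExclusiveLock {
public:
  explicit ExclusiveLock(Object& o) : m_object(&o) { m_object->locksForWrite++; }
  // Extend the lock's coverage to a sub-object (e.g. a queue shard).
  void includeSubObject(Object& sub) {
    sub.locksForWrite++;
    m_subObjects.push_back(&sub);
  }
  ~ExclusiveLock() {
    m_object->locksForWrite--;
    m_object->payloadValid = false;            // cached payload may be stale now
    for (Object* sub : m_subObjects) {
      sub->locksForWrite--;
      sub->payloadValid = false;
    }
  }
private:
  Object* m_object;
  std::list<Object*> m_subObjects;
};

int main() {
  Object queue, shard;
  {
    ExclusiveLock lock(queue);
    lock.includeSubObject(shard);
    queue.payloadValid = shard.payloadValid = true;   // "fetched" under the lock
    std::cout << "locked: " << queue.locksForWrite << " " << shard.locksForWrite << "\n";
  }
  std::cout << "after release: " << queue.locksForWrite << " " << shard.locksForWrite
            << " payloadValid=" << queue.payloadValid << "\n";
}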
- m_objectOps->m_payloadInterpreted=false; + m_objectOps->m_payloadInterpreted = false; + // Apply the same to sub objects + for (auto & oob: m_subObjectsOps) { + setObjectUnlocked(oob); + oob->m_payloadInterpreted = false; + } } }; @@ -239,14 +310,28 @@ public: ScopedSharedLock(ObjectOpsBase & oo) { lock(oo); } + + void setObjectLocked(ObjectOpsBase* objectOps) override { + objectOps->m_locksCount++; + } + + void setObjectUnlocked(ObjectOpsBase* objectOps) override { + objectOps->m_locksCount--; + } + void lock(ObjectOpsBase & oo) { checkNotLocked(); m_objectOps = & oo; checkObjectAndAddressSet(); m_lock.reset(m_objectOps->m_objectStore.lockShared(m_objectOps->getAddressIfSet())); - m_objectOps->m_locksCount++; + setObjectLocked(m_objectOps); m_locked = true; } + + virtual ~ScopedSharedLock() { + releaseIfNeeded(); + } + }; class ScopedExclusiveLock: public ScopedLock { @@ -255,21 +340,52 @@ public: ScopedExclusiveLock(ObjectOpsBase & oo, uint64_t timeout_us = 0) { lock(oo, timeout_us); } + + void setObjectLocked(ObjectOpsBase* objectOps) override { + objectOps->m_locksCount++; + objectOps->m_locksForWriteCount++; + } + + void setObjectUnlocked(ObjectOpsBase* objectOps) override { + objectOps->m_locksCount--; + objectOps->m_locksForWriteCount--; + } + void lock(ObjectOpsBase & oo, uint64_t timeout_us = 0) { checkNotLocked(); m_objectOps = &oo; checkObjectAndAddressSet(); m_lock.reset(m_objectOps->m_objectStore.lockExclusive(m_objectOps->getAddressIfSet(), timeout_us)); - m_objectOps->m_locksCount++; - m_objectOps->m_locksForWriteCount++; + setObjectLocked(m_objectOps); + m_objectOps->m_exclusiveLock = this; m_locked = true; } -protected: - void releaseIfNeeded() { - if (!m_locked) return; - ScopedLock::releaseIfNeeded(); - m_objectOps->m_locksForWriteCount--; + + /** Move the locked object reference to a new one. This is done when the locked + * object is a GenericObject and the caller instantiated a derived object from + * it. The lock follows the move. + * We check we move the lock from a Generic object (this is the only allowed + * use case). + * New object's locks are moved from the old one (referenced in the lock) + */ + void transfer(ObjectOpsBase & newObject) { + // Sanity checks: we should be the lock for this object. 
+ if ((m_objectOps->m_exclusiveLock && m_objectOps->m_exclusiveLock != this) || + (m_objectOps->m_lockForSubObject && m_objectOps->m_lockForSubObject != this)) { + std::stringstream err; + err << "In ScopedExclusiveLock::transfer(): we should be this object's lock (and are not): " + << std::hex << std::showbase << " exclusiveLock=" << m_objectOps->m_exclusiveLock + << " lockForSubObject=" << m_objectOps->m_lockForSubObject + << " this=" << this; + throw exception::Exception (err.str()); + } + ScopedLock::transfer(*m_objectOps, newObject); + } + + virtual ~ScopedExclusiveLock() { + releaseIfNeeded(); } + }; template <class PayloadType, serializers::ObjectType PayloadTypeId> diff --git a/objectstore/RetrieveQueue.cpp b/objectstore/RetrieveQueue.cpp index 9caeb93a6d901a36cffe53d6ea25aefd133771e1..aee2b9eecce1daf6f590da936bb5ce8b043ac06b 100644 --- a/objectstore/RetrieveQueue.cpp +++ b/objectstore/RetrieveQueue.cpp @@ -99,84 +99,94 @@ std::string cta::objectstore::RetrieveQueue::dump() { return headerDump; } -void cta::objectstore::RetrieveQueue::addJob(uint64_t copyNb, uint64_t fSeq, - const std::string & retrieveRequestAddress, uint64_t size, - const cta::common::dataStructures::MountPolicy & policy, time_t startTime) { +void cta::objectstore::RetrieveQueue::addJobsAndCommit(std::list<cta::objectstore::RetrieveQueue::JobToAdd> & jobsToAdd) { checkPayloadWritable(); // Keep track of the mounting criteria ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.incCount(policy.maxDrivesAllowed); ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.incCount(policy.retrievePriority); ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); - minRetrieveRequestAgeMap.incCount(policy.retrieveMinRequestAge); - if (m_payload.retrievejobs_size()) { - if (m_payload.oldestjobcreationtime() > (uint64_t)startTime) { - m_payload.set_oldestjobcreationtime(startTime); + for (auto & jta: jobsToAdd) { + maxDriveAllowedMap.incCount(jta.policy.maxDrivesAllowed); + priorityMap.incCount(jta.policy.retrievePriority); + minRetrieveRequestAgeMap.incCount(jta.policy.retrieveMinRequestAge); + if (m_payload.retrievejobs_size()) { + if (m_payload.oldestjobcreationtime() > (uint64_t)jta.startTime) { + m_payload.set_oldestjobcreationtime(jta.startTime); + } + m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize() + jta.size); + } else { + m_payload.set_oldestjobcreationtime(jta.startTime); + m_payload.set_retrievejobstotalsize(jta.size); + } + auto * j = m_payload.add_retrievejobs(); + j->set_address(jta.retrieveRequestAddress); + j->set_size(jta.size); + j->set_copynb(jta.copyNb); + j->set_fseq(jta.fSeq); + j->set_priority(jta.policy.retrievePriority); + j->set_minretrieverequestage(jta.policy.retrieveMinRequestAge); + j->set_maxdrivesallowed(jta.policy.maxDrivesAllowed); + // move the the new job in the right spot on the queue. + // i points to the newly added job all the time. 
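// ---- Illustrative sketch (not part of the patch) ----------------------------
// The use case ScopedExclusiveLock::transfer() exists for: a caller fetches a
// GenericObject, instantiates the concrete type from it, and moves the lock
// bookkeeping across. The GenericObject-based RetrieveQueue constructor is
// assumed from the wider code base and is not shown in this patch.
void retypeUnderExclusiveLock(cta::objectstore::Backend & backend, const std::string & address) {
  using namespace cta::objectstore;
  GenericObject ge(address, backend);
  ScopedExclusiveLock lock(ge);
  ge.fetch();
  RetrieveQueue rq(ge);   // Concrete object built from the generic one (assumed ctor).
  lock.transfer(rq);      // Lock counters, m_exclusiveLock and m_noLock move to rq.
  // ... operate on rq; ge must no longer be used ...
}
// ------------------------------------------------------------------------------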
+ size_t i=m_payload.retrievejobs_size() - 1; + while (i > 0 && m_payload.retrievejobs(i).fseq() < m_payload.retrievejobs(i - 1).fseq()) { + m_payload.mutable_retrievejobs()->SwapElements(i-1, i); + i--; } - m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize() + size); - } else { - m_payload.set_oldestjobcreationtime(startTime); - m_payload.set_retrievejobstotalsize(size); - } - auto * j = m_payload.add_retrievejobs(); - j->set_address(retrieveRequestAddress); - j->set_size(size); - j->set_copynb(copyNb); - j->set_fseq(fSeq); - j->set_priority(policy.retrievePriority); - j->set_minretrieverequestage(policy.retrieveMinRequestAge); - j->set_maxdrivesallowed(policy.maxDrivesAllowed); - // move the the new job in the right spot on the queue. - // i points to the newly added job all the time. - size_t i=m_payload.retrievejobs_size() - 1; - while (i > 0 && m_payload.retrievejobs(i).fseq() < m_payload.retrievejobs(i - 1).fseq()) { - m_payload.mutable_retrievejobs()->SwapElements(i-1, i); - i--; } + commit(); } -bool cta::objectstore::RetrieveQueue::addJobIfNecessary(uint64_t copyNb, uint64_t fSeq, - const std::string & retrieveRequestAddress, uint64_t size, - const cta::common::dataStructures::MountPolicy & policy, time_t startTime) { +auto cta::objectstore::RetrieveQueue::addJobsIfNecessaryAndCommit(std::list<cta::objectstore::RetrieveQueue::JobToAdd> & jobsToAdd) +-> AdditionSummary { checkPayloadWritable(); - // Check if the job is present and skip insertion if so - for (auto &j: m_payload.retrievejobs()) { - if (j.address() == retrieveRequestAddress) - return false; - } - // Keep track of the mounting criteria + AdditionSummary ret; ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.incCount(policy.maxDrivesAllowed); ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.incCount(policy.retrievePriority); ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); - minRetrieveRequestAgeMap.incCount(policy.retrieveMinRequestAge); - if (m_payload.retrievejobs_size()) { - if (m_payload.oldestjobcreationtime() > (uint64_t)startTime) { - m_payload.set_oldestjobcreationtime(startTime); + for (auto & jta: jobsToAdd) { + // Check if the job is present and skip insertion if so + for (auto &j: m_payload.retrievejobs()) { + if (j.address() == jta.retrieveRequestAddress) + goto skipInsertion; } - m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize() + size); - } else { - m_payload.set_oldestjobcreationtime(startTime); - m_payload.set_retrievejobstotalsize(size); - } - auto * j = m_payload.add_retrievejobs(); - j->set_address(retrieveRequestAddress); - j->set_size(size); - j->set_copynb(copyNb); - j->set_fseq(fSeq); - j->set_priority(policy.retrievePriority); - j->set_minretrieverequestage(policy.retrieveMinRequestAge); - j->set_maxdrivesallowed(policy.maxDrivesAllowed); - // move the the new job in the right spot on the queue. - // i points to the newly added job all the time. 
- size_t i=m_payload.retrievejobs_size() - 1; - while (i > 0 && m_payload.retrievejobs(i).fseq() < m_payload.retrievejobs(i - 1).fseq()) { - m_payload.mutable_retrievejobs()->SwapElements(i-1, i); - i--; + { + // Keep track of the mounting criteria + maxDriveAllowedMap.incCount(jta.policy.maxDrivesAllowed); + priorityMap.incCount(jta.policy.retrievePriority); + minRetrieveRequestAgeMap.incCount(jta.policy.retrieveMinRequestAge); + if (m_payload.retrievejobs_size()) { + if (m_payload.oldestjobcreationtime() > (uint64_t)jta.startTime) { + m_payload.set_oldestjobcreationtime(jta.startTime); + } + m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize() + jta.size); + } else { + m_payload.set_oldestjobcreationtime(jta.startTime); + m_payload.set_retrievejobstotalsize(jta.size); + } + auto * j = m_payload.add_retrievejobs(); + j->set_address(jta.retrieveRequestAddress); + j->set_size(jta.size); + j->set_copynb(jta.copyNb); + j->set_fseq(jta.fSeq); + j->set_priority(jta.policy.retrievePriority); + j->set_minretrieverequestage(jta.policy.retrieveMinRequestAge); + j->set_maxdrivesallowed(jta.policy.maxDrivesAllowed); + // move the the new job in the right spot on the queue. + // i points to the newly added job all the time. + size_t i=m_payload.retrievejobs_size() - 1; + while (i > 0 && m_payload.retrievejobs(i).fseq() < m_payload.retrievejobs(i - 1).fseq()) { + m_payload.mutable_retrievejobs()->SwapElements(i-1, i); + i--; + } + // Keep track of this addition. + ret.files++; + ret.bytes+=jta.size; + } + skipInsertion:; } - return true; + if (ret.files) commit(); + return ret; } cta::objectstore::RetrieveQueue::JobsSummary cta::objectstore::RetrieveQueue::getJobsSummary() { @@ -234,34 +244,56 @@ auto cta::objectstore::RetrieveQueue::dumpJobs() -> std::list<JobDump> { return ret; } -void cta::objectstore::RetrieveQueue::removeJob(const std::string& retrieveToFileAddress) { +auto cta::objectstore::RetrieveQueue::getCandidateList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> retrieveRequestsToSkip) -> CandidateJobList { + CandidateJobList ret; + ret.remainingBytesAfterCandidates = m_payload.retrievejobstotalsize(); + ret.remainingFilesAfterCandidates = m_payload.retrievejobs_size(); + for (auto & j: m_payload.retrievejobs()) { + if (!retrieveRequestsToSkip.count(j.address())) { + ret.candidates.push_back({j.address(), (uint16_t)j.copynb(), j.size()}); + ret.candidateBytes += j.size(); + ret.candidateFiles ++; + } + ret.remainingBytesAfterCandidates -= j.size(); + ret.remainingFilesAfterCandidates--; + if (ret.candidateBytes >= maxBytes || ret.candidateFiles >= maxFiles) break; + } + return ret; +} + +void cta::objectstore::RetrieveQueue::removeJobsAndCommit(const std::list<std::string>& requestsToRemove) { checkPayloadWritable(); + ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); + ValueCountMap priorityMap(m_payload.mutable_prioritymap()); + ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); auto * jl = m_payload.mutable_retrievejobs(); - bool found=false; - do { - found=false; - // Push the found entry all the way to the end. 
- for (size_t i=0; i<(size_t)jl->size(); i++) { - if (jl->Get(i).address() == retrieveToFileAddress) { - found = true; - // Keep track of the mounting criteria - ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap()); - maxDriveAllowedMap.decCount(jl->Get(i).maxdrivesallowed()); - ValueCountMap priorityMap(m_payload.mutable_prioritymap()); - priorityMap.decCount(jl->Get(i).priority()); - ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); - minRetrieveRequestAgeMap.decCount(jl->Get(i).minretrieverequestage()); - while (i+1 < (size_t)jl->size()) { - jl->SwapElements(i, i+1); - i++; + bool jobRemoved=false; + for (auto &rrt: requestsToRemove) { + bool found=false; + do { + found=false; + // Push the found entry all the way to the end. + for (size_t i=0; i<(size_t)jl->size(); i++) { + if (jl->Get(i).address() == rrt) { + found = jobRemoved = true; + // Keep track of the mounting criteria + maxDriveAllowedMap.decCount(jl->Get(i).maxdrivesallowed()); + priorityMap.decCount(jl->Get(i).priority()); + minRetrieveRequestAgeMap.decCount(jl->Get(i).minretrieverequestage()); + m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize() - jl->Get(i).size()); + while (i+1 < (size_t)jl->size()) { + jl->SwapElements(i, i+1); + i++; + } + break; } - break; } - } - // and remove it - if (found) - jl->RemoveLast(); - } while (found); + // and remove it + if (found) + jl->RemoveLast(); + } while (found); + } + if (jobRemoved) commit(); } void cta::objectstore::RetrieveQueue::garbageCollect(const std::string &presumedOwner, AgentReference & agentReference, log::LogContext & lc, diff --git a/objectstore/RetrieveQueue.hpp b/objectstore/RetrieveQueue.hpp index e816303a72dbcdb18c4524a90c3456508ac41dd1..89dd673c3c219f461115b1a05f24ebdbabff77aa 100644 --- a/objectstore/RetrieveQueue.hpp +++ b/objectstore/RetrieveQueue.hpp @@ -45,14 +45,22 @@ public: std::string dump(); // Retrieve jobs management ================================================== - void addJob(uint64_t copyNb, uint64_t fSeq, - const std::string & retrieveRequestAddress, uint64_t size, - const cta::common::dataStructures::MountPolicy & policy, time_t startTime); + struct JobToAdd { + uint64_t copyNb; + uint64_t fSeq; + const std::string retrieveRequestAddress; + uint64_t size; + const cta::common::dataStructures::MountPolicy policy; + time_t startTime; + }; + void addJobsAndCommit(std::list<JobToAdd> & jobsToAdd); /// This version will check for existence of the job in the queue before - // returns true if a new job was actually inserted. - bool addJobIfNecessary(uint64_t copyNb, uint64_t fSeq, - const std::string & retrieveRequestAddress, uint64_t size, - const cta::common::dataStructures::MountPolicy & policy, time_t startTime); + // returns the count and sizes of actually added jobs (if any). + struct AdditionSummary { + uint64_t files = 0; + uint64_t bytes = 0; + }; + AdditionSummary addJobsIfNecessaryAndCommit(std::list<JobToAdd> & jobsToAdd); struct JobsSummary { uint64_t files; uint64_t bytes; @@ -69,8 +77,18 @@ public: uint64_t size; }; std::list<JobDump> dumpJobs(); + struct CandidateJobList { + uint64_t remainingFilesAfterCandidates = 0; + uint64_t remainingBytesAfterCandidates = 0; + uint64_t candidateFiles = 0; + uint64_t candidateBytes = 0; + std::list<JobDump> candidates; + }; + // The set of retrieve requests to skip are requests previously identified by the caller as bad, + // which still should be removed from the queue. They will be disregarded from listing. 
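// (The getCandidateList() declaration continues below; the following sketch shows
// the new batched calls in use.)
// ---- Illustrative sketch (not part of the patch) ----------------------------
// Driving the batched RetrieveQueue API. It assumes "rq" has already been
// exclusively locked and fetched; addresses, sizes and the mount policy are
// placeholders. Needs <ctime>, <list>, <set> and objectstore/RetrieveQueue.hpp.
void batchedQueueRoundTrip(cta::objectstore::RetrieveQueue & rq,
    const cta::common::dataStructures::MountPolicy & policy) {
  using cta::objectstore::RetrieveQueue;
  std::list<RetrieveQueue::JobToAdd> jobsToAdd;
  jobsToAdd.push_back({1, 100, "RetrieveRequest-0001", 4096, policy, time(nullptr)});
  jobsToAdd.push_back({2, 101, "RetrieveRequest-0002", 8192, policy, time(nullptr)});
  // One commit for the whole batch instead of one commit per job; duplicates are skipped.
  RetrieveQueue::AdditionSummary added = rq.addJobsIfNecessaryAndCommit(jobsToAdd);
  if (added.files == 0) return;           // Nothing was actually inserted.
  // Select up to 10 jobs or 1 MiB worth of jobs, skipping no known-bad requests.
  auto candidates = rq.getCandidateList(1024 * 1024, 10, std::set<std::string>());
  std::list<std::string> processed;
  for (auto & job : candidates.candidates) processed.push_back(job.address);
  rq.removeJobsAndCommit(processed);
}
// ------------------------------------------------------------------------------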
+ CandidateJobList getCandidateList(uint64_t maxBytes, uint64_t maxFiles, std::set<std::string> retrieveRequestsToSkip); - void removeJob(const std::string & retrieveToFileAddress); + void removeJobsAndCommit(const std::list<std::string> & requestsToRemove); // -- Generic parameters std::string getVid(); }; diff --git a/objectstore/RetrieveRequest.cpp b/objectstore/RetrieveRequest.cpp index bfca680d22a1b51379c1117e50bce1975d564841..6581a81d6af5bfe71fab22621011fc12a0d431a8 100644 --- a/objectstore/RetrieveRequest.cpp +++ b/objectstore/RetrieveRequest.cpp @@ -139,10 +139,11 @@ jobFound:; // Enqueue add the job to the queue objectstore::MountPolicySerDeser mp; mp.deserialize(m_payload.mountpolicy()); - rq.addJobIfNecessary(bestTapeFile->copynb(), bestTapeFile->fseq(), getAddressIfSet(), m_payload.archivefile().filesize(), - mp, m_payload.schedulerrequest().entrylog().time()); + std::list<RetrieveQueue::JobToAdd> jta; + jta.push_back({bestTapeFile->copynb(), bestTapeFile->fseq(), getAddressIfSet(), m_payload.archivefile().filesize(), + mp, (signed)m_payload.schedulerrequest().entrylog().time()}); + rq.addJobsIfNecessaryAndCommit(jta); auto jobsSummary=rq.getJobsSummary(); - rq.commit(); auto queueUpdateTime = t.secs(utils::Timer::resetCounter); // We can now make the transition official bestJob->set_status(serializers::RetrieveJobStatus::RJS_Pending); diff --git a/objectstore/cta-objectstore-dump-object.cpp b/objectstore/cta-objectstore-dump-object.cpp index 9cce8d42b295c6493f802213904cfd3282ec1e05..fecfc4ab1bdf7163a70b451b00530f9273c739f7 100644 --- a/objectstore/cta-objectstore-dump-object.cpp +++ b/objectstore/cta-objectstore-dump-object.cpp @@ -54,9 +54,8 @@ int main(int argc, char ** argv) { std::cout << "Object store path: " << be->getParams()->toURL() << std::endl << "Object name: " << objectName << std::endl; cta::objectstore::GenericObject ge(objectName, *be); - cta::objectstore::ScopedSharedLock gel(ge); - ge.fetch(); - std::cout << ge.dump(gel) << std::endl; + ge.fetchNoLock(); + std::cout << ge.dump() << std::endl; } catch (std::exception & e) { std::cerr << "Failed to dump object: " << std::endl << e.what() << std::endl; diff --git a/objectstore/cta.proto b/objectstore/cta.proto index 91d580f13c2875976ae65fd3ee93aea45b3b9ade..cb1029f42f4bdfce5146a5bf41be2b02ba04bbc1 100644 --- a/objectstore/cta.proto +++ b/objectstore/cta.proto @@ -28,7 +28,9 @@ enum ObjectType { ArchiveRequest_t = 7; RetrieveRequest_t = 8; ArchiveQueue_t = 9; + ArchiveQueueShard_t = 90; RetrieveQueue_t = 10; + RetrieveQueueShard_t = 100; GenericObject_t = 1000; } @@ -137,28 +139,6 @@ message AgentRegister { repeated string untrackedagents = 2101; } -// ------------- Jobs handling ------------------------------------------------- - -message ArchiveJobPointer { - required uint64 fileid = 3000; - required uint64 size = 3001; - required string address = 3002; - required uint32 copynb = 3003; - required uint64 priority = 3004; - required uint64 minarchiverequestage = 3005; - required uint64 maxdrivesallowed = 3006; -} - -message RetrieveJobPointer { - required uint64 size = 3101; - required string address = 3102; - required uint32 copynb = 3103; - required uint64 fseq = 3107; - required uint64 priority = 3104; - required uint64 minretrieverequestage = 3105; - required uint64 maxdrivesallowed = 3106; -} - // ------------- Mount criteria and quota ------------------------------------- message MountCriteria { @@ -387,19 +367,50 @@ message ValueCountPair { required uint64 count = 9302; } +message ArchiveJobPointer { + 
required uint64 fileid = 3000; + required uint64 size = 3001; + required string address = 3002; + required uint32 copynb = 3003; + required uint64 priority = 3004; + required uint64 minarchiverequestage = 3005; + required uint64 maxdrivesallowed = 3006; + required uint64 starttime = 3007; +} + +message ArchiveQueueShardPointer { + required string address = 10200; + required uint64 shardjobscount = 10201; + required uint64 shardbytescount = 10202; +} + +message ArchiveQueueShard { + repeated ArchiveJobPointer archivejobs = 10300; + required uint64 archivejobstotalsize = 10301; +} + message ArchiveQueue { required string tapepool = 10000; - repeated ArchiveJobPointer pendingarchivejobs = 10010; - repeated ArchiveJobPointer orphanedarchivejobsnscreation = 10020; - repeated ArchiveJobPointer orphanedarchivejobsnsdeletion = 10030; + repeated ArchiveQueueShardPointer archivequeuesshards = 10010; repeated ValueCountPair prioritymap = 10031; repeated ValueCountPair minarchiverequestagemap = 10032; repeated ValueCountPair maxdrivesallowedmap = 10033; required uint64 archivejobstotalsize = 10040; + required uint64 archivejobscount = 10045; required uint64 oldestjobcreationtime = 10050; required uint64 mapsrebuildcount = 10060; } +message RetrieveJobPointer { + required uint64 size = 3101; + required string address = 3102; + required uint32 copynb = 3103; + required uint64 fseq = 3107; + required uint64 priority = 3104; + required uint64 minretrieverequestage = 3105; + required uint64 maxdrivesallowed = 3106; +} + message RetrieveQueue { required string vid = 10100; repeated RetrieveJobPointer retrievejobs = 10110; diff --git a/rdbms/AutoRollback.cpp b/rdbms/AutoRollback.cpp index d205f54dd0f4238fa09ab28087782628a0b59ad3..9cfb642497c9169ae27cf176f4bdf7e8586636aa 100644 --- a/rdbms/AutoRollback.cpp +++ b/rdbms/AutoRollback.cpp @@ -18,7 +18,7 @@ #include "common/exception/Exception.hpp" #include "rdbms/AutoRollback.hpp" -#include "rdbms/PooledConn.hpp" +#include "rdbms/Conn.hpp" namespace cta { namespace rdbms { @@ -26,7 +26,7 @@ namespace rdbms { //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ -AutoRollback::AutoRollback(PooledConn &conn): +AutoRollback::AutoRollback(Conn &conn): m_cancelled(false), m_conn(conn) { } diff --git a/rdbms/AutoRollback.hpp b/rdbms/AutoRollback.hpp index 155aa4da5efa7a6f6e5347fe924a451fa831ab57..5d7af16d867e4193688143fff9a832914129adf1 100644 --- a/rdbms/AutoRollback.hpp +++ b/rdbms/AutoRollback.hpp @@ -24,7 +24,7 @@ namespace rdbms { /** * Forward declaration. */ -class PooledConn; +class Conn; /** * A class to automatically rollback a database connection when an instance of @@ -38,7 +38,7 @@ public: * * @param conn The database connection. */ - AutoRollback(PooledConn &conn); + AutoRollback(Conn &conn); /** * Prevent copying. @@ -72,7 +72,7 @@ private: /** * The database connection or nullptr if no rollback should take place. 
*/ - PooledConn &m_conn; + Conn &m_conn; }; // class Login diff --git a/rdbms/AutocommitMode.hpp b/rdbms/AutocommitMode.hpp new file mode 100644 index 0000000000000000000000000000000000000000..6d392f1a86d7d5de09e7a5ac385c8227cc61a369 --- /dev/null +++ b/rdbms/AutocommitMode.hpp @@ -0,0 +1,33 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +namespace cta { +namespace rdbms { + +/** + * A database statement can either have auto commiting mode turned on or off. + */ +enum class AutocommitMode { + ON, + OFF +}; + +} // namespace rdbms +} // namespace cta diff --git a/rdbms/CMakeLists.txt b/rdbms/CMakeLists.txt index 3a4143140df1961acd071a2c90e94bfc10c096da..70dea64e10858cfc58ea0711a2194838e10fec42 100644 --- a/rdbms/CMakeLists.txt +++ b/rdbms/CMakeLists.txt @@ -15,70 +15,33 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. cmake_minimum_required (VERSION 2.6) +add_subdirectory (wrapper) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow") find_package (sqlite REQUIRED) +find_package (oracle-instantclient REQUIRED) -if(OCCI_SUPPORT) - find_package (oracle-instantclient REQUIRED) - include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS}) -endif(OCCI_SUPPORT) +include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS}) set (RDBMS_LIB_SRC_FILES AutoRollback.cpp - ColumnNameToIdx.cpp - ColumnNameToIdxAndType.cpp Conn.cpp - ConnFactory.cpp ConnPool.cpp Login.cpp LoginFactory.cpp + NullDbValue.cpp Rset.cpp - RsetImpl.cpp Stmt.cpp - NullDbValue.cpp - ParamNameToIdx.cpp - PooledConn.cpp - Sqlite.cpp - SqliteConn.cpp - SqliteConnFactory.cpp - SqliteRsetImpl.cpp - SqliteStmt.cpp) - -if (OCCI_SUPPORT) - set (RDBMS_LIB_SRC_FILES - ${RDBMS_LIB_SRC_FILES} - ConnFactoryFactory.cpp - OcciColumn.cpp - OcciConn.cpp - OcciConnFactory.cpp - OcciEnv.cpp - OcciEnvSingleton.cpp - OcciRsetImpl.cpp - OcciStmt.cpp) -else (OCCI_SUPPORT) - set (RDBMS_LIB_SRC_FILES - ${RDBMS_LIB_SRC_FILES} - ConnFactoryFactory_OCCI_SUPPORT_OFF.cpp) -endif (OCCI_SUPPORT) + StmtPool.cpp) add_library (ctardbms SHARED ${RDBMS_LIB_SRC_FILES}) set_property(TARGET ctardbms PROPERTY SOVERSION "${CTA_SOVERSION}") set_property(TARGET ctardbms PROPERTY VERSION "${CTA_LIBVERSION}") -set (CTARDBMS_LINK_LIBRARIES - ctacommon - ${SQLITE_LIBRARIES}) - -if (OCCI_SUPPORT) - set (CTARDBMS_LINK_LIBRARIES - ${CTARDBMS_LINK_LIBRARIES} - ${ORACLE-INSTANTCLIENT_LIBRARIES}) -endif (OCCI_SUPPORT) - target_link_libraries (ctardbms - ${CTARDBMS_LINK_LIBRARIES}) + ctardbmswrapper) install (TARGETS ctardbms DESTINATION usr/${CMAKE_INSTALL_LIBDIR}) @@ -86,12 +49,7 @@ set(RDBMS_UNIT_TESTS_LIB_SRC_FILES ConnPoolTest.cpp ConnTest.cpp LoginTest.cpp - ParamNameToIdxTest.cpp - SqliteStmtTest.cpp) - -if (OCCI_SUPPORT) - list (APPEND RDBMS_UNIT_TESTS_LIB_SRC_FILES OcciColumnTest.cpp) -endif (OCCI_SUPPORT) + StmtPoolTest.cpp) add_library (ctardbmsunittests 
SHARED ${RDBMS_UNIT_TESTS_LIB_SRC_FILES}) diff --git a/rdbms/Conn.cpp b/rdbms/Conn.cpp index f70c14b855d45147253aa655e85826fc8f7cd47e..9e92793189701796c1f536c039f6c666dd2da311 100644 --- a/rdbms/Conn.cpp +++ b/rdbms/Conn.cpp @@ -17,18 +17,72 @@ */ #include "common/exception/Exception.hpp" -#include "common/utils/utils.hpp" +#include "common/utils/utils.cpp" +#include "rdbms/ConnPool.hpp" #include "rdbms/Conn.hpp" -#include <string> - namespace cta { namespace rdbms { +//------------------------------------------------------------------------------ +// constructor +//------------------------------------------------------------------------------ +Conn::Conn(std::unique_ptr<ConnAndStmts> connAndStmts, ConnPool *pool): + m_connAndStmts(std::move(connAndStmts)), + m_pool(pool) { +} + +//------------------------------------------------------------------------------ +// move constructor +//------------------------------------------------------------------------------ +Conn::Conn(Conn &&other): + m_connAndStmts(std::move(other.m_connAndStmts)), + m_pool(other.m_pool) { + other.m_pool = nullptr; +} + //------------------------------------------------------------------------------ // destructor //------------------------------------------------------------------------------ -Conn::~Conn() throw() { +Conn::~Conn() noexcept { + try { + // If this smart database connection currently points to a database connection then return it back to its pool + if(nullptr != m_pool && nullptr != m_connAndStmts) { + m_pool->returnConn(std::move(m_connAndStmts)); + } + } catch(...) { + } +} + +//------------------------------------------------------------------------------ +// operator= +//------------------------------------------------------------------------------ +Conn &Conn::operator=(Conn &&rhs) { + // If the database connection is not the one already owned + if(rhs.m_connAndStmts != m_connAndStmts) { + // If this smart database connection currently points to a database connection then return it back to its pool + if(nullptr != m_pool && nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + m_pool->returnConn(std::move(m_connAndStmts)); + } + + // Take ownership of the new database connection + m_connAndStmts = std::move(rhs.m_connAndStmts); + m_pool = rhs.m_pool; + + rhs.m_pool = nullptr; + } + return *this; +} + +//------------------------------------------------------------------------------ +// createStmt +//------------------------------------------------------------------------------ +Stmt Conn::createStmt(const std::string &sql, const AutocommitMode autocommitMode) { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + return m_connAndStmts->stmtPool->getStmt(*m_connAndStmts->conn, sql, autocommitMode); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } } //------------------------------------------------------------------------------ @@ -46,7 +100,7 @@ void Conn::executeNonQueries(const std::string &sqlStmts) { searchPos = findResult + 1; if(0 < sqlStmt.size()) { // Ignore empty statements - executeNonQuery(sqlStmt, rdbms::Stmt::AutocommitMode::ON); + executeNonQuery(sqlStmt, AutocommitMode::ON); } } @@ -58,14 +112,69 @@ void Conn::executeNonQueries(const std::string &sqlStmts) { //------------------------------------------------------------------------------ // executeNonQuery //------------------------------------------------------------------------------ -void Conn::executeNonQuery(const std::string 
&sql, const Stmt::AutocommitMode autocommitMode) { +void Conn::executeNonQuery(const std::string &sql, const AutocommitMode autocommitMode) { try { auto stmt = createStmt(sql, autocommitMode); - stmt->executeNonQuery(); + stmt.executeNonQuery(); } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.what()); } } +//------------------------------------------------------------------------------ +// commit +//------------------------------------------------------------------------------ +void Conn::commit() { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + m_connAndStmts->conn->commit(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } +} + +//------------------------------------------------------------------------------ +// commit +//------------------------------------------------------------------------------ +void Conn::rollback() { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + m_connAndStmts->conn->rollback(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } +} + +//------------------------------------------------------------------------------ +// getTableNames +//------------------------------------------------------------------------------ +std::list<std::string> Conn::getTableNames() { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + return m_connAndStmts->conn->getTableNames(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } +} + +//------------------------------------------------------------------------------ +// isOpen +//------------------------------------------------------------------------------ +bool Conn::isOpen() const { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + return m_connAndStmts->conn->isOpen(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } +} + +//------------------------------------------------------------------------------ +// getSequenceNames +//------------------------------------------------------------------------------ +std::list<std::string> Conn::getSequenceNames() { + if(nullptr != m_connAndStmts && nullptr != m_connAndStmts->conn) { + return m_connAndStmts->conn->getSequenceNames(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Conn does not contain a connection"); + } +} + } // namespace rdbms } // namespace cta diff --git a/rdbms/Conn.hpp b/rdbms/Conn.hpp index ac2740b90956e986c5efae66ac0031d32867b290..3d66a4fcc49832aec54d6c985db9adb183c89840 100644 --- a/rdbms/Conn.hpp +++ b/rdbms/Conn.hpp @@ -18,31 +18,65 @@ #pragma once -#include "Stmt.hpp" +#include "rdbms/ConnAndStmts.hpp" +#include "rdbms/Stmt.hpp" -#include <atomic> #include <list> #include <memory> -#include <string> namespace cta { namespace rdbms { +class ConnPool; + /** - * Abstract class that specifies the interface to a database connection. + * A smart database connection that will automatically return the underlying + * database connection to its parent connection pool when it goes out of scope. */ class Conn { public: + /** + * Constructor. + * + * @param connAndStmts The database connection and its pool of prepared + * statements. + * @param pool The database connection pool to which the connection + * should be returned. 
+ */ + Conn(std::unique_ptr<ConnAndStmts> connAndStmts, ConnPool *const pool); + + /** + * Deletion of the copy constructor. + */ + Conn(Conn &) = delete; + + /** + * Move constructor. + * + * @param other The other object. + */ + Conn(Conn &&other); + /** * Destructor. + * + * Returns the database connection back to its pool. */ - virtual ~Conn() throw() = 0; + ~Conn() noexcept; /** - * Idempotent close() method. The destructor calls this method. + * Deletion of the copy assignment operator. */ - virtual void close() = 0; + Conn &operator=(const Conn &) = delete; + + /** + * Move assignment operator. + * + * @param rhs The object on the right-hand side of the operator. + * @return This object. + */ + Conn &operator=(Conn &&rhs); /** * Creates a prepared statement. @@ -51,7 +85,7 @@ public: * @param autocommitMode The autocommit mode of the statement. * @return The prepared statement. */ - virtual std::unique_ptr<Stmt> createStmt(const std::string &sql, const Stmt::AutocommitMode autocommitMode) = 0; + Stmt createStmt(const std::string &sql, const AutocommitMode autocommitMode); /** * Convenience method that parses the specified string of multiple SQL @@ -73,17 +107,17 @@ public: * @param sql The SQL statement. * @param autocommitMode The autocommit mode of the statement. */ - void executeNonQuery(const std::string &sql, const Stmt::AutocommitMode autocommitMode); + void executeNonQuery(const std::string &sql, const AutocommitMode autocommitMode); /** * Commits the current transaction. */ - virtual void commit() = 0; + void commit(); /** * Rolls back the current transaction. */ - virtual void rollback() = 0; + void rollback(); /** * Returns the names of all the tables in the database schema in alphabetical @@ -92,12 +126,36 @@ public: * @return The names of all the tables in the database schema in alphabetical * order. */ - virtual std::list<std::string> getTableNames() = 0; + std::list<std::string> getTableNames(); /** * Returns true if this connection is open. */ - virtual bool isOpen() const = 0; + bool isOpen() const; + + /** + * Returns the names of all the sequences in the database schema in + * alphabetical order. + * + * If the underlying database technologies does not supported sequences then + * this method simply returns an empty list. + * + * @return The names of all the sequences in the database schema in + * alphabetical order. + */ + std::list<std::string> getSequenceNames(); + +private: + + /** + * The database connection and its pool of prepared statements. + */ + std::unique_ptr<ConnAndStmts> m_connAndStmts; + + /** + * The database connection pool to which the m_conn should be returned. + */ + ConnPool *m_pool; }; // class Conn diff --git a/rdbms/ConnAndStmts.hpp b/rdbms/ConnAndStmts.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b5aa5d20dd84043fb70e86493a0be1b38b99cbdd --- /dev/null +++ b/rdbms/ConnAndStmts.hpp @@ -0,0 +1,98 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include "rdbms/StmtPool.hpp" +#include "rdbms/wrapper/Conn.hpp" + +#include <iostream> +#include <memory> + +namespace cta { +namespace rdbms { + +/** + * Class to enforce prepared statements are destroyed before their corresponding + * database connection. + */ +struct ConnAndStmts { + + /** + * Constructor. + */ + ConnAndStmts() { + } + + /** + * Deletion of the copy constructor. + */ + ConnAndStmts(ConnAndStmts &) = delete; + + /** + * Move constructor. + * + * @param other The other object. + */ + ConnAndStmts(ConnAndStmts &&other): + conn(std::move(other.conn)), + stmtPool(std::move(other.stmtPool)) { + } + + /** + * Equality operator. + * + * @param rhs The object on the right hand side of the operator. + * @return True if equal. + */ + bool operator==(const ConnAndStmts &rhs) { + return conn.get() == rhs.conn.get(); + } + + /** + * Inequality operator. + * + * @param rhs The object on the right hand side of the operator. + * @return True if not equal. + */ + bool operator!=(const ConnAndStmts &rhs) { + return !operator==(rhs); + } + + /** + * The database connection. + * + * The database connection must be destroyed after all of its corresponding + * prepared statements. This means the conn member-variable must be declared + * before the stmtPool member-variable. + */ + std::unique_ptr<wrapper::Conn> conn; + + /** + * Pool of prepared statements. + * + * The prepared statements must be destroyed before their corresponding + * database connection. This means the stmtPool member-variable must be + * declared after the conn member-variable. + */ + std::unique_ptr<StmtPool> stmtPool; + +}; // class ConnAndStmts + +} // namespace rdbms +} // namespace cta diff --git a/rdbms/ConnPool.cpp b/rdbms/ConnPool.cpp index 0be866bc728dcbbd7a2f72c103516a07bce4bc48..f8f80d56acdbf61434bc1a25f90043bb8c28eea5 100644 --- a/rdbms/ConnPool.cpp +++ b/rdbms/ConnPool.cpp @@ -17,8 +17,10 @@ */ #include "common/exception/Exception.hpp" +#include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" #include "rdbms/ConnPool.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" #include <memory> @@ -28,15 +30,10 @@ namespace rdbms { //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ -ConnPool::ConnPool(ConnFactory &connFactory, const uint64_t maxNbConns): - m_connFactory(connFactory), +ConnPool::ConnPool(const Login &login, const uint64_t maxNbConns): + m_connFactory(wrapper::ConnFactoryFactory::create(login)), m_maxNbConns(maxNbConns), - m_nbConnsOnLoan(0) { - try { - createConns(m_maxNbConns); - } catch(exception::Exception &ex) { - throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); - } + m_nbConnsOnLoan(0){ } //------------------------------------------------------------------------------ @@ -45,7 +42,10 @@ ConnPool::ConnPool(ConnFactory &connFactory, const uint64_t maxNbConns): void ConnPool::createConns(const uint64_t nbConns) { try { for(uint64_t i = 0; i < nbConns; i++) { - m_conns.push_back(m_connFactory.create()); + auto connAndStmts = cta::make_unique<ConnAndStmts>(); + connAndStmts->conn = m_connFactory->create(); + connAndStmts->stmtPool = cta::make_unique<StmtPool>(); + m_connsAndStmts.push_back(std::move(connAndStmts)); } } catch(exception::Exception 
&ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -55,42 +55,50 @@ void ConnPool::createConns(const uint64_t nbConns) { //------------------------------------------------------------------------------ // getConn //------------------------------------------------------------------------------ -PooledConn ConnPool::getConn() { - threading::MutexLocker locker(m_connsMutex); +Conn ConnPool::getConn() { + threading::MutexLocker locker(m_connsAndStmtsMutex); - while(m_conns.size() == 0 && m_nbConnsOnLoan == m_maxNbConns) { - m_connsCv.wait(locker); + while(m_connsAndStmts.size() == 0 && m_nbConnsOnLoan == m_maxNbConns) { + m_connsAndStmtsCv.wait(locker); } - if(m_conns.size() == 0) { - m_conns.push_back(m_connFactory.create()); + if(m_connsAndStmts.size() == 0) { + auto connAndStmts = cta::make_unique<ConnAndStmts>(); + connAndStmts->conn = m_connFactory->create(); + connAndStmts->stmtPool = cta::make_unique<StmtPool>(); + m_connsAndStmts.push_back(std::move(connAndStmts)); } - std::unique_ptr<Conn> conn = std::move(m_conns.front()); - m_conns.pop_front(); + std::unique_ptr<ConnAndStmts> connAndStmts = std::move(m_connsAndStmts.front()); + m_connsAndStmts.pop_front(); m_nbConnsOnLoan++; - if(conn->isOpen()) { - return PooledConn(std::move(conn), this); + if(connAndStmts->conn->isOpen()) { + return Conn(std::move(connAndStmts), this); } else { - return PooledConn(m_connFactory.create(), this); + auto newConnAndStmts = cta::make_unique<ConnAndStmts>(); + newConnAndStmts->conn = m_connFactory->create(); + newConnAndStmts->stmtPool = cta::make_unique<StmtPool>(); + return Conn(std::move(newConnAndStmts), this); } } //------------------------------------------------------------------------------ // returnConn //------------------------------------------------------------------------------ -void ConnPool::returnConn(std::unique_ptr<Conn> conn) { +void ConnPool::returnConn(std::unique_ptr<ConnAndStmts> connAndStmts) { try { // If the connection is open - if(conn->isOpen()) { + if(connAndStmts->conn->isOpen()) { // Try to commit the connection and put it back in the pool try { - conn->commit(); + connAndStmts->conn->commit(); } catch(...) { - // If the commit failed then close the connection + // If the commit failed then destroy any prepare statements and then + // close the connection try { - conn->close(); + connAndStmts->stmtPool->clear(); + connAndStmts->conn->close(); } catch(...) { // Ignore any exceptions } @@ -99,25 +107,25 @@ void ConnPool::returnConn(std::unique_ptr<Conn> conn) { // connection, if there is one, has been lost. Delete all the connections // currently in the pool because their underlying TCP/IP connections may // also have been lost. 
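// ---- Illustrative sketch (not part of the patch) ----------------------------
// Caller-side view of the reworked classes: ConnPool::getConn() now returns a
// Conn by value, createStmt() returns a Stmt served from the per-connection
// StmtPool, and the connection goes back to the pool when "conn" leaves scope.
// The SQL and table name are placeholders.
#include "rdbms/ConnPool.hpp"

void insertOneRow(cta::rdbms::ConnPool & connPool) {
  using namespace cta::rdbms;
  auto conn = connPool.getConn();
  auto stmt = conn.createStmt("INSERT INTO POOLED_STMT_TEST(ID) VALUES(1)", AutocommitMode::OFF);
  stmt.executeNonQuery();
  conn.commit();
}  // conn (and its prepared statements) return to connPool here.
// ------------------------------------------------------------------------------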
- threading::MutexLocker locker(m_connsMutex); - while(!m_conns.empty()) { - m_conns.pop_front(); + threading::MutexLocker locker(m_connsAndStmtsMutex); + while(!m_connsAndStmts.empty()) { + m_connsAndStmts.pop_front(); } if(0 == m_nbConnsOnLoan) { throw exception::Exception("Would have reached -1 connections on loan"); } m_nbConnsOnLoan--; - m_connsCv.signal(); + m_connsAndStmtsCv.signal(); return; } - threading::MutexLocker locker(m_connsMutex); + threading::MutexLocker locker(m_connsAndStmtsMutex); if(0 == m_nbConnsOnLoan) { throw exception::Exception("Would have reached -1 connections on loan"); } m_nbConnsOnLoan--; - m_conns.push_back(std::move(conn)); - m_connsCv.signal(); + m_connsAndStmts.push_back(std::move(connAndStmts)); + m_connsAndStmtsCv.signal(); // Else the connection is closed } else { @@ -126,15 +134,15 @@ void ConnPool::returnConn(std::unique_ptr<Conn> conn) { // connection, if there is one, has been lost. Delete all the connections // currently in the pool because their underlying TCP/IP connections may // also have been lost. - threading::MutexLocker locker(m_connsMutex); - while(!m_conns.empty()) { - m_conns.pop_front(); + threading::MutexLocker locker(m_connsAndStmtsMutex); + while(!m_connsAndStmts.empty()) { + m_connsAndStmts.pop_front(); } if(0 == m_nbConnsOnLoan) { throw exception::Exception("Would have reached -1 connections on loan"); } m_nbConnsOnLoan--; - m_connsCv.signal(); + m_connsAndStmtsCv.signal(); } } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); diff --git a/rdbms/ConnPool.hpp b/rdbms/ConnPool.hpp index 98917197e54506ab671f18b06b04c6876ad879d2..76344d0238f1b5ff99e9d2f65239691010723587 100644 --- a/rdbms/ConnPool.hpp +++ b/rdbms/ConnPool.hpp @@ -20,9 +20,10 @@ #include "common/threading/CondVar.hpp" #include "common/threading/Mutex.hpp" +#include "rdbms/ConnAndStmts.hpp" #include "rdbms/Conn.hpp" -#include "rdbms/ConnFactory.hpp" -#include "rdbms/PooledConn.hpp" +#include "rdbms/wrapper/Conn.hpp" +#include "rdbms/wrapper/ConnFactory.hpp" #include <list> #include <memory> @@ -30,6 +31,8 @@ namespace cta { namespace rdbms { +class Login; + /** * A pool of database connections. */ @@ -39,11 +42,12 @@ public: /** * Constructor. * - * @param connFactory The database connection factory. + * @param login The database login details to be used to create new + * connections. * @param maxNbConns The maximum number of database connections within the * pool. */ - ConnPool(ConnFactory &connFactory, const uint64_t maxNbConns); + ConnPool(const Login &login, const uint64_t maxNbConns); /** * Takes a connection from the pool. @@ -54,11 +58,11 @@ public: * * @return A connection from the pool. */ - PooledConn getConn(); + Conn getConn(); private: - friend PooledConn; + friend Conn; /** * If the specified database connection is open, then this method calls @@ -69,14 +73,14 @@ private: * * A closed connection is reopened when it is pulled from the pool. * - * @param conn The connection to be commited and returned to the pool. + * @param connAndStmts The connection to be commited and returned to the pool. */ - void returnConn(std::unique_ptr<Conn> conn); + void returnConn(std::unique_ptr<ConnAndStmts> connAndStmts); /** * The database connection factory. */ - ConnFactory &m_connFactory; + std::unique_ptr<wrapper::ConnFactory> m_connFactory; /** * The maximum number of database connections within the pool. 
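// ---- Illustrative sketch (not part of the patch) ----------------------------
// The pool is now constructed directly from a Login (the ConnFactory indirection
// is gone) and no longer opens connections up front; they are created lazily in
// getConn(). The connection parameters below are the in-memory SQLite ones used
// by the unit tests.
#include "rdbms/ConnPool.hpp"
#include "rdbms/Login.hpp"

void createExampleTable() {
  using namespace cta::rdbms;
  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared");
  const uint64_t maxNbConns = 4;
  ConnPool pool(login, maxNbConns);   // No connection is opened yet.
  auto conn = pool.getConn();         // The first connection is created here.
  conn.executeNonQuery("CREATE TABLE POOLED_STMT_TEST(ID INTEGER)", AutocommitMode::ON);
}
// ------------------------------------------------------------------------------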
@@ -91,18 +95,18 @@ private: /** * Mutex used to serialize access to the database connections within the pool. */ - threading::Mutex m_connsMutex; + threading::Mutex m_connsAndStmtsMutex; /** * Condition variable used by threads returning connections to the pool to * notify threads waiting for connections. */ - threading::CondVar m_connsCv; + threading::CondVar m_connsAndStmtsCv; /** * The database connections within the pool. */ - std::list< std::unique_ptr<Conn> > m_conns; + std::list<std::unique_ptr<ConnAndStmts> > m_connsAndStmts; /** * Creates the specified number of database connections with the pool. diff --git a/rdbms/ConnPoolTest.cpp b/rdbms/ConnPoolTest.cpp index e3a4b9d7c0870525a4f457e76d825a2c2eb35ccd..93d2d617abb1aee6edac2c8095db7e4a447202af 100644 --- a/rdbms/ConnPoolTest.cpp +++ b/rdbms/ConnPoolTest.cpp @@ -17,11 +17,10 @@ */ #include "common/exception/Exception.hpp" -#include "rdbms/ConnFactoryFactory.hpp" #include "rdbms/ConnPool.hpp" +#include "rdbms/Login.hpp" #include <gtest/gtest.h> -#include <sstream> namespace unitTests { @@ -39,24 +38,22 @@ TEST_F(cta_rdbms_ConnPoolTest, getPooledConn) { using namespace cta::rdbms; const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); const uint64_t nbConns = 2; - ConnPool pool(*connFactory, nbConns); + ConnPool pool(login, nbConns); - PooledConn conn = pool.getConn(); + Conn conn = pool.getConn(); } TEST_F(cta_rdbms_ConnPoolTest, assignment) { using namespace cta::rdbms; const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); const uint64_t nbConns = 2; - ConnPool pool(*connFactory, nbConns); + ConnPool pool(login, nbConns); - PooledConn conn = pool.getConn(); + Conn conn = pool.getConn(); - PooledConn conn2(nullptr, nullptr); + Conn conn2(nullptr, nullptr); conn2 = pool.getConn(); } @@ -65,13 +62,12 @@ TEST_F(cta_rdbms_ConnPoolTest, moveConstructor) { using namespace cta::rdbms; const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); const uint64_t nbConns = 2; - ConnPool pool(*connFactory, nbConns); + ConnPool pool(login, nbConns); - PooledConn conn = pool.getConn(); + Conn conn = pool.getConn(); - PooledConn conn2(std::move(conn)); + Conn conn2(std::move(conn)); } } // namespace unitTests diff --git a/rdbms/ConnTest.cpp b/rdbms/ConnTest.cpp index 0d0a953b915884c09493bf3fba165df61c0d7cef..322e893ee37c7d1c8f60d8985fc7777db804fea7 100644 --- a/rdbms/ConnTest.cpp +++ b/rdbms/ConnTest.cpp @@ -17,11 +17,10 @@ */ #include "common/exception/Exception.hpp" -#include "rdbms/ConnFactoryFactory.hpp" #include "rdbms/ConnPool.hpp" +#include "rdbms/Login.hpp" #include <gtest/gtest.h> -#include <sstream> namespace unitTests { @@ -35,6 +34,45 @@ protected: } }; +TEST_F(cta_rdbms_ConnTest, createTableInMemoryDatabase_executeNonQuery) { + using namespace cta::rdbms; + + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); + + ASSERT_TRUE(conn.getTableNames().empty()); + + conn.executeNonQuery(sql, AutocommitMode::ON); + + ASSERT_EQ(1, conn.getTableNames().size()); + } +} + +TEST_F(cta_rdbms_ConnTest, createTableInMemoryDatabase_executeNonQueries) { + using namespace cta::rdbms; + + const std::string sql = 
"CREATE TABLE POOLED_STMT_TEST(ID INTEGER);"; + + // First in-memory database + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); + + ASSERT_TRUE(conn.getTableNames().empty()); + + conn.executeNonQueries(sql); + + ASSERT_EQ(1, conn.getTableNames().size()); + } +} + TEST_F(cta_rdbms_ConnTest, createSameTableInTwoSeparateInMemoryDatabases_executeNonQuery) { using namespace cta::rdbms; @@ -43,27 +81,29 @@ TEST_F(cta_rdbms_ConnTest, createSameTableInTwoSeparateInMemoryDatabases_execute // First in-memory database { const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); - auto conn = connFactory->create(); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); - ASSERT_TRUE(conn->getTableNames().empty()); + ASSERT_TRUE(conn.getTableNames().empty()); - conn->executeNonQuery(sql, Stmt::AutocommitMode::ON); + conn.executeNonQuery(sql, AutocommitMode::ON); - ASSERT_EQ(1, conn->getTableNames().size()); + ASSERT_EQ(1, conn.getTableNames().size()); } // Second in-memory database { const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); - auto conn = connFactory->create(); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); - ASSERT_TRUE(conn->getTableNames().empty()); + ASSERT_TRUE(conn.getTableNames().empty()); - conn->executeNonQuery(sql, Stmt::AutocommitMode::ON); + conn.executeNonQuery(sql, AutocommitMode::ON); - ASSERT_EQ(1, conn->getTableNames().size()); + ASSERT_EQ(1, conn.getTableNames().size()); } } @@ -75,27 +115,29 @@ TEST_F(cta_rdbms_ConnTest, createSameTableInTwoSeparateInMemoryDatabases_execute // First in-memory database { const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); - auto conn = connFactory->create(); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); - ASSERT_TRUE(conn->getTableNames().empty()); + ASSERT_TRUE(conn.getTableNames().empty()); - conn->executeNonQueries(sql); + conn.executeNonQueries(sql); - ASSERT_EQ(1, conn->getTableNames().size()); + ASSERT_EQ(1, conn.getTableNames().size()); } // Second in-memory database { const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); - auto connFactory = ConnFactoryFactory::create(login); - auto conn = connFactory->create(); + const uint64_t maxNbConns = 1; + ConnPool connPool(login, maxNbConns); + auto conn = connPool.getConn(); - ASSERT_TRUE(conn->getTableNames().empty()); + ASSERT_TRUE(conn.getTableNames().empty()); - conn->executeNonQueries(sql); + conn.executeNonQueries(sql); - ASSERT_EQ(1, conn->getTableNames().size()); + ASSERT_EQ(1, conn.getTableNames().size()); } } diff --git a/rdbms/Login.cpp b/rdbms/Login.cpp index 25faa7389a7caac4c8eb750b1b7798757b5346d7..ee024f732e95b35f9d3eac1ef8d37cc08dfd53c2 100644 --- a/rdbms/Login.cpp +++ b/rdbms/Login.cpp @@ -16,9 +16,9 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include "Login.hpp" #include "common/exception/Exception.hpp" #include "common/utils/utils.hpp" +#include "Login.hpp" #include <fstream> @@ -153,5 +153,5 @@ Login Login::parseOracleUserPassAndDb(const std::string &userPassAndDb) { return Login(DBTYPE_ORACLE, user, pass, db); } -} // namesapce catalogue +} // namespace catalogue } // namespace cta diff --git a/rdbms/LoginFactory.cpp b/rdbms/LoginFactory.cpp index d1acb5436bae67310aa4e78344ddfd5d3dda91db..b5eeb17b990a2d390736f43c766716c5c48ca7d4 100644 --- a/rdbms/LoginFactory.cpp +++ b/rdbms/LoginFactory.cpp @@ -16,7 +16,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "LoginFactory.hpp" +#include "rdbms/LoginFactory.hpp" namespace cta { namespace rdbms { diff --git a/rdbms/LoginFactory.hpp b/rdbms/LoginFactory.hpp index 1959a134907aab0fde556784c5b5dc6897b4796c..7795d803301937f89dce0487a84065418a5ac791 100644 --- a/rdbms/LoginFactory.hpp +++ b/rdbms/LoginFactory.hpp @@ -18,7 +18,7 @@ #pragma once -#include "Login.hpp" +#include "rdbms/Login.hpp" namespace cta { namespace rdbms { diff --git a/rdbms/LoginTest.cpp b/rdbms/LoginTest.cpp index a7a9a3460320dd072a9de8a260726e7b57a0ee64..e4a5430195afa74a2a1ba00ca2cb3e700147b27a 100644 --- a/rdbms/LoginTest.cpp +++ b/rdbms/LoginTest.cpp @@ -16,8 +16,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "Login.hpp" #include "common/exception/Exception.hpp" +#include "rdbms/Login.hpp" #include <gtest/gtest.h> #include <sstream> diff --git a/rdbms/PooledConn.cpp b/rdbms/PooledConn.cpp deleted file mode 100644 index dec6e9d9d2a9a5c01427dd9749ff896f0e8cc0cc..0000000000000000000000000000000000000000 --- a/rdbms/PooledConn.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* - * The CERN Tape Archive (CTA) project - * Copyright (C) 2015 CERN - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include "common/exception/Exception.hpp" -#include "rdbms/ConnPool.hpp" -#include "rdbms/PooledConn.hpp" - -namespace cta { -namespace rdbms { - -//------------------------------------------------------------------------------ -// constructor -//------------------------------------------------------------------------------ -PooledConn::PooledConn(std::unique_ptr<Conn> conn, ConnPool *pool): - m_conn(conn.release()), - m_pool(pool) { -} - -//------------------------------------------------------------------------------ -// move constructor -//------------------------------------------------------------------------------ -PooledConn::PooledConn(PooledConn &&other): - m_conn(std::move(other.m_conn)), - m_pool(other.m_pool) { - other.m_pool = nullptr; -} - -//------------------------------------------------------------------------------ -// destructor -//------------------------------------------------------------------------------ -PooledConn::~PooledConn() noexcept { - try { - // If this smart database connection currently points to a database connection then return it back to its pool - if(nullptr != m_pool && nullptr != m_conn) { - m_pool->returnConn(std::move(m_conn)); - } - } catch(...) { - } -} - -//------------------------------------------------------------------------------ -// operator= -//------------------------------------------------------------------------------ -PooledConn &PooledConn::operator=(PooledConn &&rhs) { - // If the database connection is not the one already owned - if(rhs.m_conn != m_conn) { - // If this smart database connection currently points to a database connection then return it back to its pool - if(nullptr != m_pool && nullptr != m_conn) { - m_pool->returnConn(std::move(m_conn)); - } - - // Take ownership of the new database connection - m_conn = std::move(rhs.m_conn); - m_pool = rhs.m_pool; - - rhs.m_pool = nullptr; - } - return *this; -} - -//------------------------------------------------------------------------------ -// createStmt -//------------------------------------------------------------------------------ -std::unique_ptr<Stmt> PooledConn::createStmt(const std::string &sql, - const Stmt::AutocommitMode autocommitMode) { - if(nullptr != m_conn) { - return m_conn->createStmt(sql, autocommitMode); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// executeNonQueries -//------------------------------------------------------------------------------ -void PooledConn::executeNonQueries(const std::string &sqlStmts) { - if(nullptr != m_conn) { - return m_conn->executeNonQueries(sqlStmts); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// executeNonQuery -//------------------------------------------------------------------------------ -void PooledConn::executeNonQuery(const std::string &sql, const Stmt::AutocommitMode autocommitMode) { - if(nullptr != m_conn) { - return m_conn->executeNonQuery(sql, autocommitMode); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// commit -//------------------------------------------------------------------------------ 
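// ---- Illustrative sketch (not part of the patch) ----------------------------
// What the PooledConn removal means for calling code ("m_connPool" is a
// placeholder member); only the type names and statement ownership change:
//
//   // Before: PooledConn, statements returned as std::unique_ptr<Stmt>
//   rdbms::PooledConn conn = m_connPool.getConn();
//   auto stmt = conn.createStmt(sql, rdbms::Stmt::AutocommitMode::ON);
//   stmt->executeNonQuery();
//
//   // After: Conn, pooled statements returned by value
//   rdbms::Conn conn = m_connPool.getConn();
//   auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON);
//   stmt.executeNonQuery();
// ------------------------------------------------------------------------------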
-void PooledConn::commit() { - if(nullptr != m_conn) { - m_conn->commit(); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// commit -//------------------------------------------------------------------------------ -void PooledConn::rollback() { - if(nullptr != m_conn) { - m_conn->rollback(); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// getTableNames -//------------------------------------------------------------------------------ -std::list<std::string> PooledConn::getTableNames() { - if(nullptr != m_conn) { - return m_conn->getTableNames(); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -//------------------------------------------------------------------------------ -// isOpen -//------------------------------------------------------------------------------ -bool PooledConn::isOpen() const { - if(nullptr != m_conn) { - return m_conn->isOpen(); - } else { - throw exception::Exception(std::string(__FUNCTION__) + " failed: PooledConn does not contain a connection"); - } -} - -} // namespace rdbms -} // namespace cta diff --git a/rdbms/PooledConn.hpp b/rdbms/PooledConn.hpp deleted file mode 100644 index 94c26c0d06a038fb8945e4f2be378ebd10f7bfd4..0000000000000000000000000000000000000000 --- a/rdbms/PooledConn.hpp +++ /dev/null @@ -1,151 +0,0 @@ -/* - * The CERN Tape Archive (CTA) project - * Copyright (C) 2015 CERN - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#pragma once - -#include "rdbms/Stmt.hpp" - -#include <list> -#include <memory> - -namespace cta { -namespace rdbms { - -class Conn; - -class ConnPool; - -/** - * A smart database connection that will automatically return the underlying - * database connection to its parent connection pool when it goes out of scope. - */ -class PooledConn { -public: - - /** - * Constructor. - * - * @param conn The database connection. - * @param pool The database connection pool to which the connection - * should be returned. - */ - PooledConn(std::unique_ptr<Conn> conn, ConnPool *const pool); - - /** - * Deletion of the copy constructor. - */ - PooledConn(PooledConn &) = delete; - - /** - * Move constructor. - * - * @param other The other object. - */ - PooledConn(PooledConn &&other); - - /** - * Destructor. - * - * Returns the database connection back to its pool. - */ - ~PooledConn() noexcept; - - /** - * Deletion of the copy assignment operator. - */ - PooledConn &operator=(const PooledConn &) = delete; - - /** - * Move assignment operator. - * - * @param rhs The object on the right-hand side of the operator. 
- * @return This object. - */ - PooledConn &operator=(PooledConn &&rhs); - - /** - * Creates a prepared statement. - * - * @param sql The SQL statement. - * @param autocommitMode The autocommit mode of the statement. - * @return The prepared statement. - */ - std::unique_ptr<Stmt> createStmt(const std::string &sql, const Stmt::AutocommitMode autocommitMode); - - /** - * Convenience method that parses the specified string of multiple SQL - * statements and calls executeNonQuery() for each individual statement found. - * - * Please note that each statement should be a non-query terminated by a - * semicolon and that each individual statement will be executed with - * autocommit ON. - * - * @param sqlStmts The SQL statements to be executed. - * @param autocommitMode The autocommit mode of the statement. - */ - void executeNonQueries(const std::string &sqlStmts); - - /** - * Convenience method that wraps Conn::createStmt() followed by - * Stmt::executeNonQuery(). - * - * @param sql The SQL statement. - * @param autocommitMode The autocommit mode of the statement. - */ - void executeNonQuery(const std::string &sql, const Stmt::AutocommitMode autocommitMode); - - /** - * Commits the current transaction. - */ - void commit(); - - /** - * Rolls back the current transaction. - */ - void rollback(); - - /** - * Returns the names of all the tables in the database schema in alphabetical - * order. - * - * @return The names of all the tables in the database schema in alphabetical - * order. - */ - std::list<std::string> getTableNames(); - - /** - * Returns true if this connection is open. - */ - bool isOpen() const; - -private: - - /** - * The database connection. - */ - std::unique_ptr<Conn> m_conn; - - /** - * The database connection pool to which the m_conn should be returned. - */ - ConnPool *m_pool; - -}; // class PooledConn - -} // namespace rdbms -} // namespace cta diff --git a/rdbms/Rset.cpp b/rdbms/Rset.cpp index 4b18232f1c03f788e089288eb393ec3fdd6b46f0..df390212897e5ed0f2e7aa5f4f647764bb9f76ac 100644 --- a/rdbms/Rset.cpp +++ b/rdbms/Rset.cpp @@ -16,10 +16,9 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include "Rset.hpp" -#include "RsetImpl.hpp" -#include "NullDbValue.hpp" - +#include "rdbms/NullDbValue.hpp" +#include "rdbms/Rset.hpp" +#include "rdbms/wrapper/Rset.hpp" namespace cta { namespace rdbms { @@ -34,9 +33,9 @@ Rset::Rset(): //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ -Rset::Rset(RsetImpl *const impl): - m_impl(impl) { - if(nullptr == impl) { +Rset::Rset(std::unique_ptr<wrapper::Rset> impl): + m_impl(std::move(impl)) { + if(nullptr == m_impl.get()) { throw exception::Exception(std::string(__FUNCTION__) + " failed: Pointer to implementation object is null"); } } @@ -45,23 +44,16 @@ Rset::Rset(RsetImpl *const impl): // constructor //------------------------------------------------------------------------------ Rset::Rset(Rset &&other): - m_impl(other.m_impl) { - other.m_impl = nullptr; -} - -//------------------------------------------------------------------------------ -// destructor -//------------------------------------------------------------------------------ -Rset::~Rset() { - delete m_impl; + m_impl(std::move(other.m_impl)) { } //------------------------------------------------------------------------------ // operator= //------------------------------------------------------------------------------ Rset &Rset::operator=(Rset &&rhs) { - m_impl = rhs.m_impl; - rhs.m_impl = nullptr; + if(m_impl != rhs.m_impl) { + m_impl = std::move(rhs.m_impl); + } return *this; } diff --git a/rdbms/Rset.hpp b/rdbms/Rset.hpp index caa23d5443e3eb6424da58381df32093b1daf5b0..b8c9d1c4e9b1dd14e39bd0d2e06dec8997beff35 100644 --- a/rdbms/Rset.hpp +++ b/rdbms/Rset.hpp @@ -20,16 +20,16 @@ #include "common/optional.hpp" +#include <memory> #include <stdint.h> #include <string> namespace cta { namespace rdbms { -/** - * Forward declarartion. - */ -class RsetImpl; +namespace wrapper { + class Rset; +} /** * A wrapper around an object that iterators over a result set from the @@ -50,7 +50,7 @@ public: * * @param impl The object actually implementing this result set. */ - Rset(RsetImpl *const impl); + Rset(std::unique_ptr<wrapper::Rset> impl); /** * Deletion of copy constructor. @@ -64,11 +64,6 @@ public: */ Rset(Rset &&other); - /** - * Destructor. - */ - ~Rset() throw(); - /** * Deletion of copy assignment. */ @@ -176,7 +171,7 @@ private: /** * The object actually implementing this result set. */ - RsetImpl *m_impl; + std::unique_ptr<wrapper::Rset> m_impl; }; // class Rset diff --git a/rdbms/Stmt.cpp b/rdbms/Stmt.cpp index 00a4e4f17b2a98befb262422b31fe99db4694d7e..34eac226fb333327990dc6024344896c9c576410 100644 --- a/rdbms/Stmt.cpp +++ b/rdbms/Stmt.cpp @@ -16,70 +16,203 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
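Rset now owns its implementation through std::unique_ptr<wrapper::Rset>, so result sets are move-only and nothing has to be deleted by hand. An illustrative sketch (outside the patch) of consuming and handing over a result set, assuming the SQLite in-memory login used by the unit tests further below; the table name RSET_EXAMPLE is made up.

#include "rdbms/Login.hpp"
#include "rdbms/Rset.hpp"
#include "rdbms/StmtPool.hpp"
#include "rdbms/wrapper/ConnFactoryFactory.hpp"

#include <iostream>
#include <utility>

int main() {
  using namespace cta::rdbms;

  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared");
  auto connFactory = wrapper::ConnFactoryFactory::create(login);
  auto conn = connFactory->create();
  StmtPool pool;

  // Create a table so that the catalogue query below has something to report
  pool.getStmt(*conn, "CREATE TABLE RSET_EXAMPLE(ID INTEGER)", AutocommitMode::ON).executeNonQuery();

  Stmt stmt = pool.getStmt(*conn,
    "SELECT NAME AS NAME FROM SQLITE_MASTER WHERE TYPE = 'table' ORDER BY NAME",
    AutocommitMode::ON);

  // executeQuery() returns the result set by value; Rset is move-only
  Rset rset = stmt.executeQuery();
  Rset owner(std::move(rset));  // ownership of the wrapper::Rset travels with the move

  while (owner.next()) {
    std::cout << owner.columnString("NAME") << std::endl;
  }

  return 0;
}

The moved-from Rset is left without an implementation, so only the destination object may be used afterwards.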
*/ -#include "Stmt.hpp" +#include "common/exception/Exception.hpp" +#include "rdbms/Stmt.hpp" +#include "rdbms/StmtPool.hpp" +#include "rdbms/wrapper/Stmt.hpp" namespace cta { namespace rdbms { -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- // constructor -//------------------------------------------------------------------------------ -Stmt::Stmt(const std::string &sql, const AutocommitMode autocommitMode): - m_sql(sql), - m_autoCommitMode(autocommitMode), - m_paramNameToIdx(sql) { +//----------------------------------------------------------------------------- +Stmt::Stmt(): + m_stmtPool(nullptr) { } -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- +// constructor +//----------------------------------------------------------------------------- +Stmt::Stmt(std::unique_ptr<wrapper::Stmt> stmt, StmtPool &stmtPool): + m_stmt(std::move(stmt)), + m_stmtPool(&stmtPool) { +} + +//----------------------------------------------------------------------------- +// constructor +//----------------------------------------------------------------------------- +Stmt::Stmt(Stmt &&other): + m_stmt(std::move(other.m_stmt)), + m_stmtPool(other.m_stmtPool){ +} + +//----------------------------------------------------------------------------- // destructor -//------------------------------------------------------------------------------ -Stmt::~Stmt() throw() { +//----------------------------------------------------------------------------- +Stmt::~Stmt() noexcept { + try { + // If this smart prepared statement currently points to a prepared + // statement then return it back to its pool + if(nullptr != m_stmtPool && nullptr != m_stmt) { + m_stmtPool->returnStmt(std::move(m_stmt)); + } + } catch(...) 
{ + // Ignore any exceptions + } } -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- +// operator= +//----------------------------------------------------------------------------- +Stmt &Stmt::operator=(Stmt &&rhs) { + // If the cached statement is not already owned + if(rhs.m_stmt != m_stmt) { + // If this smart cached statement already points to cached statement, then + // return it back to its pool + if(nullptr != m_stmt && nullptr != m_stmtPool) { + m_stmtPool->returnStmt(std::move(m_stmt)); + } + + // Take ownership of the new cached statement + m_stmt = std::move(rhs.m_stmt); + m_stmtPool = rhs.m_stmtPool; + + rhs.m_stmtPool = nullptr; + } + + return *this; +} + +//----------------------------------------------------------------------------- // getSql -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- const std::string &Stmt::getSql() const { - return m_sql; + if(nullptr != m_stmt) { + return m_stmt->getSql(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } } -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- // getParamIdx -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- uint32_t Stmt::getParamIdx(const std::string ¶mName) const { - return m_paramNameToIdx.getIdx(paramName); + if(nullptr != m_stmt) { + return m_stmt->getParamIdx(paramName); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } } -//------------------------------------------------------------------------------ -// getSqlForException -//------------------------------------------------------------------------------ -std::string Stmt::getSqlForException() const { - if(m_sql.length() <= c_maxSqlLenInExceptions) { - return m_sql; +//----------------------------------------------------------------------------- +// bindUint64 +//----------------------------------------------------------------------------- +void Stmt::bindUint64(const std::string ¶mName, const uint64_t paramValue) { + if(nullptr != m_stmt) { + return m_stmt->bindUint64(paramName, paramValue); } else { - if(c_maxSqlLenInExceptions >= 3) { - return m_sql.substr(0, c_maxSqlLenInExceptions - 3) + "..."; - } else { - return std::string("..."). 
substr(0, c_maxSqlLenInExceptions); - } + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// bindOptionalUint64 +//----------------------------------------------------------------------------- +void Stmt::bindOptionalUint64(const std::string ¶mName, const optional<uint64_t> ¶mValue) { + if(nullptr != m_stmt) { + return m_stmt->bindOptionalUint64(paramName, paramValue); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); } } -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- // bindBool -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- void Stmt::bindBool(const std::string ¶mName, const bool paramValue) { - bindOptionalBool(paramName, paramValue); + if(nullptr != m_stmt) { + return m_stmt->bindBool(paramName, paramValue); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } } -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- // bindOptionalBool -//------------------------------------------------------------------------------ +//----------------------------------------------------------------------------- void Stmt::bindOptionalBool(const std::string ¶mName, const optional<bool> ¶mValue) { - if(paramValue) { - bindOptionalUint64(paramName, paramValue.value() ? 
1 : 0); + if(nullptr != m_stmt) { + return m_stmt->bindOptionalBool(paramName, paramValue); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// bindString +//----------------------------------------------------------------------------- +void Stmt::bindString(const std::string ¶mName, const std::string ¶mValue) { + if(nullptr != m_stmt) { + return m_stmt->bindString(paramName, paramValue); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// bindOptionalString +//----------------------------------------------------------------------------- +void Stmt::bindOptionalString(const std::string ¶mName, const optional<std::string> ¶mValue) { + if(nullptr != m_stmt) { + return m_stmt->bindOptionalString(paramName, paramValue); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// executeQuery +//----------------------------------------------------------------------------- +Rset Stmt::executeQuery() { + if(nullptr != m_stmt) { + return Rset(m_stmt->executeQuery()); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// executeNonQuery +//----------------------------------------------------------------------------- +void Stmt::executeNonQuery() { + if(nullptr != m_stmt) { + return m_stmt->executeNonQuery(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// getNbAffectedRows +//----------------------------------------------------------------------------- +uint64_t Stmt::getNbAffectedRows() const { + if(nullptr != m_stmt) { + return m_stmt->getNbAffectedRows(); + } else { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); + } +} + +//----------------------------------------------------------------------------- +// getStmt +//----------------------------------------------------------------------------- +wrapper::Stmt &Stmt::getStmt() { + if(nullptr != m_stmt) { + return *m_stmt; } else { - bindOptionalUint64(paramName, nullopt); + throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement"); } } diff --git a/rdbms/Stmt.hpp b/rdbms/Stmt.hpp index 8aa49c5c4dea4626b31ef547a8e6a201f2b55e51..899e2f83e6b88ec0df9ca9ec1435a02f73e188c6 100644 --- a/rdbms/Stmt.hpp +++ b/rdbms/Stmt.hpp @@ -18,50 +18,42 @@ #pragma once -#include "common/optional.hpp" -#include "rdbms/ParamNameToIdx.hpp" #include "rdbms/Rset.hpp" +#include "common/optional.hpp" +#include <list> #include <memory> -#include <stdint.h> -#include <string> +#include <mutex> namespace cta { namespace rdbms { +namespace wrapper { + class Stmt; +} + +class StmtPool; + /** - * Abstract class specifying the interface to a database statement. 
+ * A smart database statement that will automatically return the underlying + * database statement to its parent database connection when it goes out of + * scope. */ class Stmt { public: - /** - * A statement can either have auto commiting mode turned on or off. - */ - enum class AutocommitMode { - ON, - OFF - }; - /** * Constructor. - * - * @param sql The SQL statement. - * @param autocommitMode The autocommit mode of the statement. */ - Stmt(const std::string &sql, const AutocommitMode autocommitMode); + Stmt(); /** - * Returns the autocommit mode of teh statement. + * Constructor. * - * @return The autocommit mode of teh statement. + * @param stmt The database statement. + * @param stmtPool The database statement pool to which the m_stmt should be returned. */ - AutocommitMode getAutoCommitMode() const noexcept; - - /** - * Destructor. - */ - virtual ~Stmt() throw() = 0; + Stmt(std::unique_ptr<wrapper::Stmt> stmt, StmtPool &stmtPool); /** * Deletion of the copy constructor. @@ -69,24 +61,31 @@ public: Stmt(Stmt &) = delete; /** - * Deletion of the move constructor. + * Move constructor. + * + * @param other The other object. */ - Stmt(Stmt &&) = delete; + Stmt(Stmt &&other); /** - * Deletion of the copy assignment operator. + * Destructor. + * + * Returns the database statement back to its connection. */ - Stmt &operator=(const Stmt &) = delete; + ~Stmt() noexcept; /** - * Deletion of the move assignment operator. + * Deletion of the copy assignment operator. */ - Stmt &operator=(Stmt &&) = delete; + Stmt &operator=(const Stmt &) = delete; /** - * Idempotent close() method. The destructor calls this method. + * Move assignment operator. + * + * @param rhs The object on the right-hand side of the operator. + * @return This object. */ - virtual void close() = 0; + Stmt &operator=(Stmt &&rhs); /** * Returns the SQL statement. @@ -109,7 +108,7 @@ public: * @param paramName The name of the parameter. * @param paramValue The value to be bound. */ - virtual void bindUint64(const std::string ¶mName, const uint64_t paramValue) = 0; + void bindUint64(const std::string ¶mName, const uint64_t paramValue); /** * Binds an SQL parameter. @@ -117,7 +116,7 @@ public: * @param paramName The name of the parameter. * @param paramValue The value to be bound. */ - virtual void bindOptionalUint64(const std::string ¶mName, const optional<uint64_t> ¶mValue) = 0; + void bindOptionalUint64(const std::string ¶mName, const optional<uint64_t> ¶mValue); /** * Binds an SQL parameter. @@ -145,7 +144,7 @@ public: * @param paramName The name of the parameter. * @param paramValue The value to be bound. */ - virtual void bindString(const std::string ¶mName, const std::string ¶mValue) = 0; + void bindString(const std::string ¶mName, const std::string ¶mValue); /** * Binds an SQL parameter of type optional-string. @@ -157,19 +156,19 @@ public: * @param paramName The name of the parameter. * @param paramValue The value to be bound. */ - virtual void bindOptionalString(const std::string ¶mName, const optional<std::string> ¶mValue) = 0; + void bindOptionalString(const std::string ¶mName, const optional<std::string> ¶mValue); /** * Executes the statement and returns the result set. * * @return The result set. */ - virtual Rset executeQuery() = 0; + Rset executeQuery(); /** * Executes the statement. */ - virtual void executeNonQuery() = 0; + void executeNonQuery(); /** * Returns the number of rows affected by the last execution of this @@ -177,43 +176,25 @@ public: * * @return The number of affected rows. 
*/ - virtual uint64_t getNbAffectedRows() const = 0; - -protected: - - /** - * The maximum length an SQL statement can have in exception error message. - */ - const uint32_t c_maxSqlLenInExceptions = 80; + uint64_t getNbAffectedRows() const; /** - * Returns the SQL string to be used in an exception message. The string - * will be clipped at a maxmum of c_maxSqlLenInExceptions characters. If the - * string is actually clipped then the three last characters will be an - * replaced by an ellipsis of three dots, in other word "...". These 3 - * characters will indicate to the reader of the exception message that the - * SQL statement has been clipped. - * - * @return The SQL string to be used in an exception message. + * Returns a reference to the underlying statement object that is not pool + * aware. */ - std::string getSqlForException() const; + wrapper::Stmt &getStmt(); private: /** - * The SQL statement. - */ - std::string m_sql; - - /** - * The autocommit mode of the statement. + * The database statement. */ - AutocommitMode m_autoCommitMode; + std::unique_ptr<wrapper::Stmt> m_stmt; /** - * Map from SQL parameter name to parameter index. + * The database statement pool to which the m_stmt should be returned. */ - ParamNameToIdx m_paramNameToIdx; + StmtPool *m_stmtPool; }; // class Stmt diff --git a/rdbms/StmtPool.cpp b/rdbms/StmtPool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..537efae8d32cadf20ec58c7f8fd24fc640306c6c --- /dev/null +++ b/rdbms/StmtPool.cpp @@ -0,0 +1,93 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
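Stmt is now a smart handle over wrapper::Stmt: a default-constructed Stmt holds nothing and every delegating call throws, while destroying or reassigning a non-empty Stmt hands the cached statement back to its StmtPool. An illustrative sketch (outside the patch) of that lifecycle, assuming the SQLite in-memory setup used by the unit tests; the table name STMT_EXAMPLE is made up.

#include "common/exception/Exception.hpp"
#include "rdbms/Login.hpp"
#include "rdbms/StmtPool.hpp"
#include "rdbms/wrapper/ConnFactoryFactory.hpp"

#include <iostream>
#include <utility>

int main() {
  using namespace cta::rdbms;

  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared");
  auto connFactory = wrapper::ConnFactoryFactory::create(login);
  auto conn = connFactory->create();
  StmtPool pool;

  Stmt stmt;  // default-constructed: no cached statement behind it
  try {
    stmt.executeNonQuery();  // delegating without a statement throws
  } catch (cta::exception::Exception &ex) {
    std::cout << ex.getMessage().str() << std::endl;
  }

  // Move assignment puts a real cached statement into the empty handle
  stmt = pool.getStmt(*conn, "CREATE TABLE STMT_EXAMPLE(ID INTEGER)", AutocommitMode::ON);
  stmt.executeNonQuery();

  return 0;  // stmt is destroyed at scope exit and its wrapper::Stmt returns to the pool
}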
+ */ + +#include "common/exception/Exception.hpp" +#include "common/threading/MutexLocker.hpp" +#include "rdbms/wrapper/Conn.hpp" +#include "rdbms/StmtPool.hpp" + +namespace cta { +namespace rdbms { + +//------------------------------------------------------------------------------ +// getStmt +//------------------------------------------------------------------------------ +Stmt StmtPool::getStmt(wrapper::Conn &conn, const std::string &sql, const AutocommitMode autocommitMode) { + const CachedStmtKey key(sql, autocommitMode); + + threading::MutexLocker locker(m_stmtsMutex); + + auto itor = m_stmts.find(key); + + // If there is no prepared statement in the cache + if(itor == m_stmts.end()) { + auto stmt = conn.createStmt(sql, autocommitMode); + return Stmt(std::move(stmt), *this); + } else { + auto &stmtList = itor->second; + if(stmtList.empty()) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: Unexpected empty list of cached statements"); + } + auto stmt = std::move(stmtList.front()); + stmtList.pop_front(); + + // If there are no more cached prepared statements then remove the empty list from the cache + if(stmtList.empty()) { + m_stmts.erase(itor); + } + + return Stmt(std::move(stmt), *this); + } +} + +//------------------------------------------------------------------------------ +// getNbStmts +//------------------------------------------------------------------------------ +uint64_t StmtPool::getNbStmts() const { + threading::MutexLocker locker(m_stmtsMutex); + + uint64_t nbStmts = 0; + for(const auto &maplet: m_stmts) { + auto &stmtList = maplet.second; + nbStmts += stmtList.size(); + } + return nbStmts; +} + +//------------------------------------------------------------------------------ +// returnStmt +//------------------------------------------------------------------------------ +void StmtPool::returnStmt(std::unique_ptr<wrapper::Stmt> stmt) { + threading::MutexLocker locker(m_stmtsMutex); + + const CachedStmtKey key(stmt->getSql(), stmt->getAutocommitMode()); + + stmt->clear(); + + m_stmts[key].push_back(std::move(stmt)); +} + +//------------------------------------------------------------------------------ +// clear +//------------------------------------------------------------------------------ +void StmtPool::clear() { + m_stmts.clear(); +} + +} // namespace rdbms +} // namespace cta diff --git a/rdbms/StmtPool.hpp b/rdbms/StmtPool.hpp new file mode 100644 index 0000000000000000000000000000000000000000..04c8ce6cd53a1226df5f8077e764dc5f96a87d7f --- /dev/null +++ b/rdbms/StmtPool.hpp @@ -0,0 +1,132 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
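StmtPool::getStmt() only prepares a statement on a cache miss, and when a Stmt goes out of scope its wrapper::Stmt is cleared and filed back under its (SQL, autocommit mode) key, ready for reuse. An illustrative sketch (outside the patch) of that round trip, again assuming the SQLite in-memory login from the unit tests; the table name POOL_EXAMPLE is made up.

#include "rdbms/Login.hpp"
#include "rdbms/StmtPool.hpp"
#include "rdbms/wrapper/ConnFactoryFactory.hpp"

#include <cassert>
#include <string>

int main() {
  using namespace cta::rdbms;

  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared");
  auto connFactory = wrapper::ConnFactoryFactory::create(login);
  auto conn = connFactory->create();

  const std::string sql = "CREATE TABLE POOL_EXAMPLE(ID INTEGER)";
  StmtPool pool;

  {
    Stmt stmt = pool.getStmt(*conn, sql, AutocommitMode::ON);  // cache miss: prepared now
    assert(0 == pool.getNbStmts());                            // the statement is out on loan
  }                                                            // scope exit: returned to the pool
  assert(1 == pool.getNbStmts());

  {
    Stmt stmt = pool.getStmt(*conn, sql, AutocommitMode::ON);  // cache hit: reused, not re-prepared
    assert(0 == pool.getNbStmts());
  }
  assert(1 == pool.getNbStmts());

  pool.clear();  // drops (and destroys) the cached statements
  assert(0 == pool.getNbStmts());

  return 0;
}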
+ */ + +#pragma once + +#include "common/threading/CondVar.hpp" +#include "common/threading/Mutex.hpp" +#include "rdbms/AutocommitMode.hpp" +#include "rdbms/Stmt.hpp" + +#include <iostream> +#include <list> +#include <map> +#include <memory> +#include <stdint.h> + +namespace cta { +namespace rdbms { + +namespace wrapper { + class Conn; + class Stmt; +} + +/** + * A pool of prepared database statements. + */ +class StmtPool { +public: + + /** + * Takes a database statement from the pool if one is present, else a new + * statement will be prepared. + * + * @param conn The database connection used to prepare a new statement if none is cached. + * @param sql The SQL statement. + * @param autocommitMode The autocommit mode of the statement. + * @return The prepared statement. + */ + Stmt getStmt(wrapper::Conn &conn, const std::string &sql, const AutocommitMode autocommitMode); + + /** + * Returns the number of cached statements currently in the pool. + * + * @return The number of cached statements currently in the pool. + */ + uint64_t getNbStmts() const; + + /** + * Clears the pool of prepared statements, which includes destroying those + * statements. + */ + void clear(); + +private: + + friend Stmt; + + /** + * Returns the specified statement to the pool. + * + * @param stmt The database statement to be returned to the pool. + */ + void returnStmt(std::unique_ptr<wrapper::Stmt> stmt); + + /** + * Mutex used to serialize access to the database statements within the pool. + */ + mutable threading::Mutex m_stmtsMutex; + + /** + * Key used to look up prepared statements within the cache. + */ + struct CachedStmtKey { + /** + * The SQL of the cached statement. + */ + std::string sql; + + /** + * The autocommit mode of the cached statement. + */ + const AutocommitMode autocommitMode; + + /** + * Constructor. + * + * @param s The SQL of the cached statement. + * @param a The autocommit mode of the cached statement. + */ + CachedStmtKey(const std::string &s, const AutocommitMode a): sql(s), autocommitMode(a) { + } + + /** + * Less than operator. + */ + bool operator<(const CachedStmtKey &rhs) const { + if(sql != rhs.sql) { + return sql < rhs.sql; + } else { + return autocommitMode < rhs.autocommitMode; + } + } + }; + + /** + * The cached database statements. + * + * Please note that for a single key there may be more than one cached + * statement, hence the map from key to list of statements. + */ + std::map<CachedStmtKey, std::list< std::unique_ptr<wrapper::Stmt> > > m_stmts; + +}; // class StmtPool + +} // namespace rdbms +} // namespace cta diff --git a/rdbms/StmtPoolTest.cpp b/rdbms/StmtPoolTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..00bbc303804729772cf44d924776e09588de4800 --- /dev/null +++ b/rdbms/StmtPoolTest.cpp @@ -0,0 +1,234 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
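CachedStmtKey compares the SQL text first and the autocommit mode second, which gives std::map the strict weak ordering it needs and keeps statements with identical SQL but different autocommit modes under separate keys. A self-contained sketch of that ordering; the enum and the string values below are stand-ins for rdbms::AutocommitMode and the cached std::unique_ptr<wrapper::Stmt> values.

#include <iostream>
#include <list>
#include <map>
#include <string>

// Stand-in for rdbms::AutocommitMode; the real enum lives in rdbms/AutocommitMode.hpp
enum class AutocommitMode { ON, OFF };

// Simplified copy of the key: SQL text first, autocommit mode as tie-breaker
struct CachedStmtKey {
  std::string sql;
  AutocommitMode autocommitMode;

  bool operator<(const CachedStmtKey &rhs) const {
    if (sql != rhs.sql) {
      return sql < rhs.sql;
    } else {
      return autocommitMode < rhs.autocommitMode;
    }
  }
};

int main() {
  // A single key can own several cached statements, hence a list per key
  // (strings stand in for the unique_ptr values held by the real pool)
  std::map<CachedStmtKey, std::list<std::string>> cache;

  cache[{"SELECT 1", AutocommitMode::ON}].push_back("stmt A");
  cache[{"SELECT 1", AutocommitMode::OFF}].push_back("stmt B");  // same SQL, separate key
  cache[{"SELECT 1", AutocommitMode::ON}].push_back("stmt C");   // shares the first key

  std::cout << "distinct keys: " << cache.size() << std::endl;   // prints: distinct keys: 2
  return 0;
}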
+ */ + +#include "common/exception/Exception.hpp" +#include "rdbms/ConnPool.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" + +#include <gtest/gtest.h> +#include <sstream> + +namespace unitTests { + +class cta_rdbms_StmtPoolTest : public ::testing::Test { +protected: + + virtual void SetUp() { + } + + virtual void TearDown() { + } +}; + +TEST_F(cta_rdbms_StmtPoolTest, getStmt) { + using namespace cta::rdbms; + + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + StmtPool pool; + ASSERT_EQ(0, pool.getNbStmts()); + { + auto stmt = pool.getStmt(*conn, sql, AutocommitMode::ON); + ASSERT_EQ(0, pool.getNbStmts()); + } + ASSERT_EQ(1, pool.getNbStmts()); +} + +TEST_F(cta_rdbms_StmtPoolTest, moveAssignment) { + using namespace cta::rdbms; + + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + StmtPool pool; + ASSERT_EQ(0, pool.getNbStmts()); + { + Stmt stmt1 = pool.getStmt(*conn, sql, AutocommitMode::ON); + Stmt stmt2; + stmt2 = std::move(stmt1); + ASSERT_EQ(0, pool.getNbStmts()); + } + ASSERT_EQ(1, pool.getNbStmts()); +} + +TEST_F(cta_rdbms_StmtPoolTest, moveConstructor) { + using namespace cta::rdbms; + + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + StmtPool pool; + ASSERT_EQ(0, pool.getNbStmts()); + { + Stmt stmt1 = pool.getStmt(*conn, sql, AutocommitMode::ON); + Stmt stmt2(std::move(stmt1)); + ASSERT_EQ(0, pool.getNbStmts()); + } + ASSERT_EQ(1, pool.getNbStmts()); +} + +TEST_F(cta_rdbms_StmtPoolTest, createSameTableInTwoSeparateInMemoryDatabases) { + using namespace cta::rdbms; + + const std::string createTableSql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + const std::string selectTableNamesSql = + "SELECT " + "NAME AS NAME " + "FROM " + "SQLITE_MASTER " + "WHERE " + "TYPE = 'table' " + "ORDER BY " + "NAME;"; + + // First in-memory database + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + StmtPool pool; + + { + Stmt stmt = pool.getStmt(*conn, selectTableNamesSql, AutocommitMode::ON); + auto rset = stmt.executeQuery(); + std::list<std::string> names; + while(rset.next()) { + names.push_back(rset.columnString("NAME")); + } + ASSERT_EQ(0, names.size()); + } + + { + Stmt stmt = pool.getStmt(*conn, createTableSql, AutocommitMode::ON); + stmt.executeNonQuery(); + } + + { + Stmt stmt = pool.getStmt(*conn, selectTableNamesSql, AutocommitMode::ON); + auto rset = stmt.executeQuery(); + std::list<std::string> names; + while(rset.next()) { + names.push_back(rset.columnString("NAME")); + } + ASSERT_EQ(1, names.size()); + ASSERT_EQ("POOLED_STMT_TEST", names.front()); + } + } + + // Second in-memory database + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + StmtPool pool; + { + Stmt stmt = pool.getStmt(*conn, 
selectTableNamesSql, AutocommitMode::ON); + auto rset = stmt.executeQuery(); + std::list<std::string> names; + while(rset.next()) { + names.push_back(rset.columnString("NAME")); + } + ASSERT_EQ(0, names.size()); + } + + { + Stmt stmt = pool.getStmt(*conn, createTableSql, AutocommitMode::ON); + stmt.executeNonQuery(); + } + + { + Stmt stmt = pool.getStmt(*conn, selectTableNamesSql, AutocommitMode::ON); + auto rset = stmt.executeQuery(); + std::list<std::string> names; + while(rset.next()) { + names.push_back(rset.columnString("NAME")); + } + ASSERT_EQ(1, names.size()); + ASSERT_EQ("POOLED_STMT_TEST", names.front()); + } + } +} + +TEST_F(cta_rdbms_StmtPoolTest, createSameTableInTwoSeparateInMemoryDatabases_getTableNames) { + using namespace cta::rdbms; + + const std::string createTableSql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + + // First in-memory database + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + StmtPool pool; + + ASSERT_TRUE(conn->getTableNames().empty()); + + { + Stmt stmt = pool.getStmt(*conn, createTableSql, AutocommitMode::ON); + stmt.executeNonQuery(); + } + + ASSERT_EQ(1, conn->getTableNames().size()); + } + + // Second in-memory database + { + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + StmtPool pool; + + ASSERT_TRUE(conn->getTableNames().empty()); + + { + Stmt stmt = pool.getStmt(*conn, createTableSql, AutocommitMode::ON); + stmt.executeNonQuery(); + } + + ASSERT_EQ(1, conn->getTableNames().size()); + } +} + +TEST_F(cta_rdbms_StmtPoolTest, sameSqlTwoCachedStmts) { + using namespace cta::rdbms; + + const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = wrapper::ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + StmtPool pool; + ASSERT_EQ(0, pool.getNbStmts()); + { + Stmt stmt1 = pool.getStmt(*conn, sql, AutocommitMode::ON); + Stmt stmt2 = pool.getStmt(*conn, sql, AutocommitMode::ON); + ASSERT_EQ(0, pool.getNbStmts()); + } + ASSERT_EQ(2, pool.getNbStmts()); +} + + + +} // namespace unitTests diff --git a/rdbms/wrapper/CMakeLists.txt b/rdbms/wrapper/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3777fa7068df4f6c9d899be92a2f5032285217ad --- /dev/null +++ b/rdbms/wrapper/CMakeLists.txt @@ -0,0 +1,76 @@ +# The CERN Tape Archive (CTA) project +# Copyright (C) 2015 CERN +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+cmake_minimum_required (VERSION 2.6) + +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow") + +find_package (sqlite REQUIRED) + +find_package (oracle-instantclient REQUIRED) +include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS}) + +set (RDBMS_WRAPPER_LIB_SRC_FILES + ColumnNameToIdx.cpp + ColumnNameToIdxAndType.cpp + Conn.cpp + ConnFactory.cpp + Rset.cpp + Stmt.cpp + ParamNameToIdx.cpp + Sqlite.cpp + SqliteConn.cpp + SqliteConnFactory.cpp + SqliteRset.cpp + SqliteStmt.cpp) + +set (RDBMS_WRAPPER_LIB_SRC_FILES + ${RDBMS_WRAPPER_LIB_SRC_FILES} + ConnFactoryFactory.cpp + OcciColumn.cpp + OcciConn.cpp + OcciConnFactory.cpp + OcciEnv.cpp + OcciEnvSingleton.cpp + OcciRset.cpp + OcciStmt.cpp) + +add_library (ctardbmswrapper SHARED + ${RDBMS_WRAPPER_LIB_SRC_FILES}) +set_property(TARGET ctardbmswrapper PROPERTY SOVERSION "${CTA_SOVERSION}") +set_property(TARGET ctardbmswrapper PROPERTY VERSION "${CTA_LIBVERSION}") + +target_link_libraries (ctardbmswrapper + ctacommon + ${SQLITE_LIBRARIES} + ${ORACLE-INSTANTCLIENT_LIBRARIES}) + +install (TARGETS ctardbmswrapper DESTINATION usr/${CMAKE_INSTALL_LIBDIR}) + +set(RDBMS_UNIT_TESTS_LIB_SRC_FILES + ConnTest.cpp + OcciColumnTest.cpp + ParamNameToIdxTest.cpp + SqliteStmtTest.cpp) + +add_library (ctardbmswrapperunittests SHARED + ${RDBMS_UNIT_TESTS_LIB_SRC_FILES}) +set_property(TARGET ctardbmswrapperunittests PROPERTY SOVERSION "${CTA_SOVERSION}") +set_property(TARGET ctardbmswrapperunittests PROPERTY VERSION "${CTA_LIBVERSION}") + +target_link_libraries (ctardbmswrapperunittests + ctacatalogue) + +install(TARGETS ctardbmswrapperunittests DESTINATION usr/${CMAKE_INSTALL_LIBDIR}) diff --git a/rdbms/ColumnNameToIdx.cpp b/rdbms/wrapper/ColumnNameToIdx.cpp similarity index 95% rename from rdbms/ColumnNameToIdx.cpp rename to rdbms/wrapper/ColumnNameToIdx.cpp index c0f7fb5c685a0307aa4ec5a1d53849cdf0edde8c..bae20d1512f4a1507d94b663930fd470ca7ca7e0 100644 --- a/rdbms/ColumnNameToIdx.cpp +++ b/rdbms/wrapper/ColumnNameToIdx.cpp @@ -16,11 +16,12 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "ColumnNameToIdx.hpp" #include "common/exception/Exception.hpp" +#include "rdbms/wrapper/ColumnNameToIdx.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // add @@ -50,5 +51,6 @@ bool ColumnNameToIdx::empty() const { return m_nameToIdx.empty(); } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ColumnNameToIdx.hpp b/rdbms/wrapper/ColumnNameToIdx.hpp similarity index 97% rename from rdbms/ColumnNameToIdx.hpp rename to rdbms/wrapper/ColumnNameToIdx.hpp index 511beb6bec235100ec3fe789ce90ecc9a53a7d42..698964a14ef7be3a40ca0ed78da421a7b22f6361 100644 --- a/rdbms/ColumnNameToIdx.hpp +++ b/rdbms/wrapper/ColumnNameToIdx.hpp @@ -23,6 +23,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * A map from column name to column index. @@ -67,5 +68,6 @@ private: }; // class ColumnNameToIdx +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ColumnNameToIdxAndType.cpp b/rdbms/wrapper/ColumnNameToIdxAndType.cpp similarity index 96% rename from rdbms/ColumnNameToIdxAndType.cpp rename to rdbms/wrapper/ColumnNameToIdxAndType.cpp index 09f4842e1dface3674df5cb6913576f49f6f9124..5f02829aeac4d8082fe225f8a5c86ecf3a832527 100644 --- a/rdbms/ColumnNameToIdxAndType.cpp +++ b/rdbms/wrapper/ColumnNameToIdxAndType.cpp @@ -16,11 +16,12 @@ * along with this program. 
If not, see <http://www.gnu.org/licenses/>. */ -#include "ColumnNameToIdxAndType.hpp" #include "common/exception/Exception.hpp" +#include "rdbms/wrapper/ColumnNameToIdxAndType.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // add @@ -57,5 +58,6 @@ void ColumnNameToIdxAndType::clear() { m_nameToIdxAndType.clear(); } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ColumnNameToIdxAndType.hpp b/rdbms/wrapper/ColumnNameToIdxAndType.hpp similarity index 98% rename from rdbms/ColumnNameToIdxAndType.hpp rename to rdbms/wrapper/ColumnNameToIdxAndType.hpp index d65f29a6c2d9a8b4ddce9a3a025ddb3ebae5c623..8c9478ef0f9f8fc1352d69c5a6b38dba6fe3af25 100644 --- a/rdbms/ColumnNameToIdxAndType.hpp +++ b/rdbms/wrapper/ColumnNameToIdxAndType.hpp @@ -21,6 +21,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * A map from column name to column index and type. @@ -96,5 +97,6 @@ private: }; // class ColumnNameToIdxAndType +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ConnFactoryFactory_OCCI_SUPPORT_OFF.cpp b/rdbms/wrapper/Conn.cpp similarity index 50% rename from rdbms/ConnFactoryFactory_OCCI_SUPPORT_OFF.cpp rename to rdbms/wrapper/Conn.cpp index 99c5c9acec0e5501dc4263278f43a8e16b39a627..f3af845a966ffbaaa0fea242a96f2216ee7e7802 100644 --- a/rdbms/ConnFactoryFactory_OCCI_SUPPORT_OFF.cpp +++ b/rdbms/wrapper/Conn.cpp @@ -17,38 +17,19 @@ */ #include "common/exception/Exception.hpp" -#include "common/make_unique.hpp" -#include "rdbms/ConnFactoryFactory.hpp" -#include "rdbms/SqliteConnFactory.hpp" +#include "common/utils/utils.hpp" +#include "rdbms/wrapper/Conn.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ -// create +// destructor //------------------------------------------------------------------------------ -std::unique_ptr<ConnFactory> ConnFactoryFactory::create(const Login &login) { - try { - switch(login.dbType) { - case rdbms::Login::DBTYPE_IN_MEMORY: - return cta::make_unique<SqliteConnFactory>(":memory:"); - case rdbms::Login::DBTYPE_ORACLE: - throw exception::Exception("OCCI support disabled at compile time"); - case rdbms::Login::DBTYPE_SQLITE: - return cta::make_unique<SqliteConnFactory>(login.database); - case rdbms::Login::DBTYPE_NONE: - throw exception::Exception("Cannot create a catalogue without a database type"); - default: - { - exception::Exception ex; - ex.getMessage() << "Unknown database type: value=" << login.dbType; - throw ex; - } - } - } catch(exception::Exception &ex) { - throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); - } +Conn::~Conn() throw() { } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/wrapper/Conn.hpp b/rdbms/wrapper/Conn.hpp new file mode 100644 index 0000000000000000000000000000000000000000..708a25e076bc89ef6688ff31909b468c224ed753 --- /dev/null +++ b/rdbms/wrapper/Conn.hpp @@ -0,0 +1,97 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include "rdbms/wrapper/Stmt.hpp" + +#include <atomic> +#include <list> +#include <memory> +#include <string> + +namespace cta { +namespace rdbms { +namespace wrapper { + +/** + * Abstract class that specifies the interface to a database connection. + */ +class Conn { +public: + + /** + * Destructor. + */ + virtual ~Conn() throw() = 0; + + /** + * Idempotent close() method. The destructor calls this method. + */ + virtual void close() = 0; + + /** + * Creates a prepared statement. + * + * @param sql The SQL statement. + * @param autocommitMode The autocommit mode of the statement. + * @return The prepared statement. + */ + virtual std::unique_ptr<Stmt> createStmt(const std::string &sql, const AutocommitMode autocommitMode) = 0; + + /** + * Commits the current transaction. + */ + virtual void commit() = 0; + + /** + * Rolls back the current transaction. + */ + virtual void rollback() = 0; + + /** + * Returns the names of all the tables in the database schema in alphabetical + * order. + * + * @return The names of all the tables in the database schema in alphabetical + * order. + */ + virtual std::list<std::string> getTableNames() = 0; + + /** + * Returns true if this connection is open. + */ + virtual bool isOpen() const = 0; + + /** + * Returns the names of all the sequences in the database schema in + * alphabetical order. + * + * If the underlying database technology does not support sequences then + * this method simply returns an empty list. + * + * @return The names of all the sequences in the database schema in + * alphabetical order. + */ + virtual std::list<std::string> getSequenceNames() = 0; + +}; // class Conn + +} // namespace wrapper +} // namespace rdbms +} // namespace cta diff --git a/rdbms/ConnFactory.cpp b/rdbms/wrapper/ConnFactory.cpp similarity index 92% rename from rdbms/ConnFactory.cpp rename to rdbms/wrapper/ConnFactory.cpp index 69e15806a3b292fff99a109425e94ea9819b41b1..45f70c30ff9e07875fd6c22bcf689e899c83f4ea 100644 --- a/rdbms/ConnFactory.cpp +++ b/rdbms/wrapper/ConnFactory.cpp @@ -16,10 +16,11 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "ConnFactory.hpp" +#include "rdbms/wrapper/ConnFactory.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // destructor @@ -27,5 +28,6 @@ namespace rdbms { ConnFactory::~ConnFactory() throw() { } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ConnFactory.hpp b/rdbms/wrapper/ConnFactory.hpp similarity index 93% rename from rdbms/ConnFactory.hpp rename to rdbms/wrapper/ConnFactory.hpp index 79efb5b587a1c3aaad7d59223e3f9ee18c8feec9..a980b7e4f2c8614d885c11c1f62f44c9688894a8 100644 --- a/rdbms/ConnFactory.hpp +++ b/rdbms/wrapper/ConnFactory.hpp @@ -18,12 +18,13 @@ #pragma once -#include "Conn.hpp" +#include "rdbms/wrapper/Conn.hpp" #include <memory> namespace cta { namespace rdbms { +namespace wrapper { /** * Abstract class that specifies the interface of a factory of Conn objects.
@@ -45,5 +46,6 @@ public: }; // class ConnFactory +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ConnFactoryFactory.cpp b/rdbms/wrapper/ConnFactoryFactory.cpp similarity index 85% rename from rdbms/ConnFactoryFactory.cpp rename to rdbms/wrapper/ConnFactoryFactory.cpp index a63d6a8e92bda2fc1c6daca04945ea8c92a67929..95e2e10047ae27b71e9b8c8023287fdf8bca3af8 100644 --- a/rdbms/ConnFactoryFactory.cpp +++ b/rdbms/wrapper/ConnFactoryFactory.cpp @@ -18,12 +18,13 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" -#include "rdbms/ConnFactoryFactory.hpp" -#include "rdbms/OcciConnFactory.hpp" -#include "rdbms/SqliteConnFactory.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" +#include "rdbms/wrapper/OcciConnFactory.hpp" +#include "rdbms/wrapper/SqliteConnFactory.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // create @@ -31,13 +32,13 @@ namespace rdbms { std::unique_ptr<ConnFactory> ConnFactoryFactory::create(const Login &login) { try { switch(login.dbType) { - case rdbms::Login::DBTYPE_IN_MEMORY: + case Login::DBTYPE_IN_MEMORY: return cta::make_unique<SqliteConnFactory>("file::memory:?cache=shared"); - case rdbms::Login::DBTYPE_ORACLE: + case Login::DBTYPE_ORACLE: return cta::make_unique<OcciConnFactory>(login.username, login.password, login.database); - case rdbms::Login::DBTYPE_SQLITE: + case Login::DBTYPE_SQLITE: return cta::make_unique<SqliteConnFactory>(login.database); - case rdbms::Login::DBTYPE_NONE: + case Login::DBTYPE_NONE: throw exception::Exception("Cannot create a catalogue without a database type"); default: { @@ -51,5 +52,6 @@ std::unique_ptr<ConnFactory> ConnFactoryFactory::create(const Login &login) { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ConnFactoryFactory.hpp b/rdbms/wrapper/ConnFactoryFactory.hpp similarity index 93% rename from rdbms/ConnFactoryFactory.hpp rename to rdbms/wrapper/ConnFactoryFactory.hpp index cdacc5dafc9ecad608f54ccafb5a4f1823aab8df..49f96acc75ee2a36ba288c1a3568dbed6e011318 100644 --- a/rdbms/ConnFactoryFactory.hpp +++ b/rdbms/wrapper/ConnFactoryFactory.hpp @@ -18,13 +18,14 @@ #pragma once -#include "rdbms/ConnFactory.hpp" +#include "rdbms/wrapper/ConnFactory.hpp" #include "rdbms/Login.hpp" #include <memory> namespace cta { namespace rdbms { +namespace wrapper { /** * Abstract class that specifies the interface to a factory of ConnFactory objects. @@ -43,5 +44,6 @@ public: }; // class ConnFactoryFactory +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/wrapper/ConnTest.cpp b/rdbms/wrapper/ConnTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b5c46c501b483459fffadea169d98bb567b32de --- /dev/null +++ b/rdbms/wrapper/ConnTest.cpp @@ -0,0 +1,72 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
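wrapper::Conn documents that getSequenceNames() simply returns an empty list when the backend has no sequence support. An illustrative sketch (outside the patch) probing both catalogue queries through the factory, assuming the SQLite in-memory login from the tests; the expectation that SQLite reports zero sequences follows from that documented contract rather than from code shown here, and the table name CONN_EXAMPLE is made up.

#include "rdbms/AutocommitMode.hpp"
#include "rdbms/Login.hpp"
#include "rdbms/wrapper/ConnFactoryFactory.hpp"

#include <iostream>

int main() {
  using namespace cta::rdbms;

  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared");
  auto connFactory = wrapper::ConnFactoryFactory::create(login);
  auto conn = connFactory->create();

  auto stmt = conn->createStmt("CREATE TABLE CONN_EXAMPLE(ID INTEGER)", AutocommitMode::ON);
  stmt->executeNonQuery();

  std::cout << "tables: " << conn->getTableNames().size() << std::endl;        // 1
  std::cout << "sequences: " << conn->getSequenceNames().size() << std::endl;  // expected 0 for SQLite
  return 0;
}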
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "common/exception/Exception.hpp" +#include "rdbms/wrapper/ConnFactoryFactory.hpp" + +#include <gtest/gtest.h> +#include <sstream> + +namespace unitTests { + +class cta_rdbms_wrapper_ConnTest : public ::testing::Test { +protected: + + virtual void SetUp() { + } + + virtual void TearDown() { + } +}; + +TEST_F(cta_rdbms_wrapper_ConnTest, createSameTableInTwoSeparateInMemoryDatabases) { + using namespace cta; + using namespace cta::rdbms::wrapper; + + const std::string sql = "CREATE TABLE POOLED_STMT_TEST(ID INTEGER)"; + + // First in-memory database + { + const rdbms::Login login(rdbms::Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + ASSERT_TRUE(conn->getTableNames().empty()); + + auto stmt = conn->createStmt(sql, rdbms::AutocommitMode::ON); + stmt->executeNonQuery(); + + ASSERT_EQ(1, conn->getTableNames().size()); + } + + // Second in-memory database + { + const rdbms::Login login(rdbms::Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared"); + auto connFactory = ConnFactoryFactory::create(login); + auto conn = connFactory->create(); + + ASSERT_TRUE(conn->getTableNames().empty()); + + auto stmt = conn->createStmt(sql, rdbms::AutocommitMode::ON); + stmt->executeNonQuery(); + + ASSERT_EQ(1, conn->getTableNames().size()); + } +} + +} // namespace unitTests diff --git a/rdbms/OcciColumn.cpp b/rdbms/wrapper/OcciColumn.cpp similarity index 98% rename from rdbms/OcciColumn.cpp rename to rdbms/wrapper/OcciColumn.cpp index 7e29e1193857497356916e7ee28d6a7dd9bbf649..86c7f11902ae8eaffb3a6e94c32707dde38c5813 100644 --- a/rdbms/OcciColumn.cpp +++ b/rdbms/wrapper/OcciColumn.cpp @@ -17,10 +17,11 @@ */ #include "common/exception/Exception.hpp" -#include "rdbms/OcciColumn.hpp" +#include "rdbms/wrapper/OcciColumn.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -149,5 +150,6 @@ void OcciColumn::copyStrIntoField(const size_t index, const std::string &str) { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciColumn.hpp b/rdbms/wrapper/OcciColumn.hpp similarity index 98% rename from rdbms/OcciColumn.hpp rename to rdbms/wrapper/OcciColumn.hpp index 4fa8f2059591172e3497c8c22b41d71ccdd7e478..af926721f4155beaf03b6d7ecd00d0d4dbb5f911 100644 --- a/rdbms/OcciColumn.hpp +++ b/rdbms/wrapper/OcciColumn.hpp @@ -18,14 +18,14 @@ #pragma once -#include "catalogue/RdbmsCatalogue.hpp" - +#include <memory> #include <occi.h> #include <string.h> #include <typeinfo> namespace cta { namespace rdbms { +namespace wrapper { /** * A class to help with preparing batch inserts and updatesi with the OCCI @@ -210,5 +210,6 @@ private: }; // OcciColumn +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciColumnTest.cpp b/rdbms/wrapper/OcciColumnTest.cpp similarity index 77% rename from rdbms/OcciColumnTest.cpp rename to rdbms/wrapper/OcciColumnTest.cpp index af834309fb1103148db4d73d3ddc8274d3a53fe2..29d5b870c15ee78a377dddce9380b3881a1c9d42 100644 --- a/rdbms/OcciColumnTest.cpp +++ b/rdbms/wrapper/OcciColumnTest.cpp @@ -16,13 +16,14 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include "OcciColumn.hpp" +#include "common/exception/Exception.hpp" +#include "rdbms/wrapper/OcciColumn.hpp" #include <gtest/gtest.h> namespace unitTests { -class cta_rdbms_OcciColumnTest : public ::testing::Test { +class cta_rdbms_wrapper_OcciColumnTest : public ::testing::Test { protected: virtual void SetUp() { @@ -32,9 +33,9 @@ protected: } }; -TEST_F(cta_rdbms_OcciColumnTest, getColName) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getColName) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -43,9 +44,9 @@ TEST_F(cta_rdbms_OcciColumnTest, getColName) { ASSERT_EQ(colName, col.getColName()); } -TEST_F(cta_rdbms_OcciColumnTest, getNbRows) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getNbRows) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -54,9 +55,9 @@ TEST_F(cta_rdbms_OcciColumnTest, getNbRows) { ASSERT_EQ(nbRows, col.getNbRows()); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldLen) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldLen) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -69,9 +70,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldLen) { ASSERT_EQ(5, col.getMaxFieldLength()); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldLenToValueLen_stringValue) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldLenToValueLen_stringValue) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -84,9 +85,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldLenToValueLen_stringValue) { ASSERT_EQ(field0Value.length() + 1, col.getMaxFieldLength()); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldLenToValueLen_uint64_tValue) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldLenToValueLen_uint64_tValue) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -99,9 +100,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldLenToValueLen_uint64_tValue) { ASSERT_EQ(5, col.getMaxFieldLength()); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldLen_tooLate) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldLen_tooLate) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 2; @@ -116,9 +117,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldLen_tooLate) { ASSERT_THROW(col.setFieldLenToValueLen(1, field1Value), exception::Exception); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldLen_invalidIndex) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldLen_invalidIndex) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -128,9 +129,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldLen_invalidIndex) { ASSERT_THROW(col.setFieldLenToValueLen(1, field1Value), exception::Exception); } -TEST_F(cta_rdbms_OcciColumnTest, getFieldLengths) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getFieldLengths) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 3; @@ -150,9 +151,9 @@ 
TEST_F(cta_rdbms_OcciColumnTest, getFieldLengths) { ASSERT_EQ(4, fieldLens[2]); } -TEST_F(cta_rdbms_OcciColumnTest, getBuffer) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getBuffer) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -165,9 +166,9 @@ TEST_F(cta_rdbms_OcciColumnTest, getBuffer) { ASSERT_NE(nullptr, buf); } -TEST_F(cta_rdbms_OcciColumnTest, getBuffer_tooEarly) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getBuffer_tooEarly) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -176,9 +177,9 @@ TEST_F(cta_rdbms_OcciColumnTest, getBuffer_tooEarly) { ASSERT_THROW(col.getBuffer(), exception::Exception); } -TEST_F(cta_rdbms_OcciColumnTest, getMaxFieldLength) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, getMaxFieldLength) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 4; @@ -196,9 +197,9 @@ TEST_F(cta_rdbms_OcciColumnTest, getMaxFieldLength) { ASSERT_EQ(4, col.getMaxFieldLength()); } -TEST_F(cta_rdbms_OcciColumnTest, copyStrIntoField_1_oneField) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, copyStrIntoField_1_oneField) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; @@ -212,9 +213,9 @@ TEST_F(cta_rdbms_OcciColumnTest, copyStrIntoField_1_oneField) { ASSERT_EQ(field0Value, std::string(buf)); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldValue_twoFields) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldValue_twoFields) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 2; @@ -234,9 +235,9 @@ TEST_F(cta_rdbms_OcciColumnTest, setFieldValue_twoFields) { ASSERT_EQ(field1Value, std::string(bufField1)); } -TEST_F(cta_rdbms_OcciColumnTest, setFieldValue_tooLong) { +TEST_F(cta_rdbms_wrapper_OcciColumnTest, setFieldValue_tooLong) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const std::string colName = "TEST_COLUMN"; const size_t nbRows = 1; diff --git a/rdbms/OcciConn.cpp b/rdbms/wrapper/OcciConn.cpp similarity index 91% rename from rdbms/OcciConn.cpp rename to rdbms/wrapper/OcciConn.cpp index 36702ed5ccedad612c8ee5f9a2423d3b91ac5150..d042314fede06d04f334ef8f8ce6201c0f4294be 100644 --- a/rdbms/OcciConn.cpp +++ b/rdbms/wrapper/OcciConn.cpp @@ -19,15 +19,16 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" -#include "rdbms/OcciConn.hpp" -#include "rdbms/OcciEnv.hpp" -#include "rdbms/OcciStmt.hpp" +#include "rdbms/wrapper/OcciConn.hpp" +#include "rdbms/wrapper/OcciEnv.hpp" +#include "rdbms/wrapper/OcciStmt.hpp" #include <stdexcept> #include <string> namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -46,7 +47,7 @@ OcciConn::OcciConn(oracle::occi::Environment *const env, oracle::occi::Connectio //------------------------------------------------------------------------------ OcciConn::~OcciConn() throw() { try { - close(); // Idempotent close() mthod + close(); // Idempotent close() method } catch(...) 
{ // Destructor should not throw any exceptions } @@ -67,7 +68,7 @@ void OcciConn::close() { //------------------------------------------------------------------------------ // createStmt //------------------------------------------------------------------------------ -std::unique_ptr<Stmt> OcciConn::createStmt(const std::string &sql, Stmt::AutocommitMode autocommitMode) { +std::unique_ptr<Stmt> OcciConn::createStmt(const std::string &sql, AutocommitMode autocommitMode) { try { threading::MutexLocker locker(m_mutex); @@ -139,10 +140,13 @@ std::list<std::string> OcciConn::getTableNames() { "USER_TABLES " "ORDER BY " "TABLE_NAME"; - auto stmt = createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + auto stmt = createStmt(sql, AutocommitMode::OFF); auto rset = stmt->executeQuery(); - while (rset.next()) { - names.push_back(rset.columnString("TABLE_NAME")); + while (rset->next()) { + auto name = rset->columnOptionalString("TABLE_NAME"); + if(name) { + names.push_back(name.value()); + } } return names; @@ -164,10 +168,11 @@ std::list<std::string> OcciConn::getSequenceNames() { "USER_SEQUENCES " "ORDER BY " "SEQUENCE_NAME"; - auto stmt = createStmt(sql, rdbms::Stmt::AutocommitMode::OFF); + auto stmt = createStmt(sql, AutocommitMode::OFF); auto rset = stmt->executeQuery(); - while (rset.next()) { - names.push_back(rset.columnString("SEQUENCE_NAME")); + while (rset->next()) { + auto name = rset->columnOptionalString("SEQUENCE_NAME"); + names.push_back(name.value()); } return names; @@ -206,5 +211,6 @@ void OcciConn::closeStmt(oracle::occi::Statement *const stmt) { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciConn.hpp b/rdbms/wrapper/OcciConn.hpp similarity index 90% rename from rdbms/OcciConn.hpp rename to rdbms/wrapper/OcciConn.hpp index 7e18d703a8224ebf1208a63e8470354f882e1e80..614a778fe2d34792f860ec56ea24834137d8c11c 100644 --- a/rdbms/OcciConn.hpp +++ b/rdbms/wrapper/OcciConn.hpp @@ -19,12 +19,13 @@ #pragma once #include "common/threading/MutexLocker.hpp" -#include "rdbms/Conn.hpp" +#include "rdbms/wrapper/Conn.hpp" #include <occi.h> namespace cta { namespace rdbms { +namespace wrapper { /** * Forward declaraion to avoid a circular dependency beween OcciConn and @@ -66,7 +67,7 @@ public: * @param autocommitMode The autocommit mode of the statement. * @return The prepared statement. */ - virtual std::unique_ptr<Stmt> createStmt(const std::string &sql, const Stmt::AutocommitMode autocommitMode) override; + virtual std::unique_ptr<Stmt> createStmt(const std::string &sql, const AutocommitMode autocommitMode) override; /** * Commits the current transaction. @@ -96,10 +97,13 @@ public: * Returns the names of all the sequences in the database schema in * alphabetical order. * + * If the underlying database technology does not support sequences then + * this method simply returns an empty list. + * + * @return The names of all the sequences in the database schema in * alphabetical order.
*/ - std::list<std::string> getSequenceNames(); + virtual std::list<std::string> getSequenceNames() override; private: @@ -129,5 +133,6 @@ private: }; // class OcciConn +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciConnFactory.cpp b/rdbms/wrapper/OcciConnFactory.cpp similarity index 93% rename from rdbms/OcciConnFactory.cpp rename to rdbms/wrapper/OcciConnFactory.cpp index f1f7695261ef0541143d3326b8f4879a6bb96dd5..16f52edfa91197e88eb17a2c03a840671a525222 100644 --- a/rdbms/OcciConnFactory.cpp +++ b/rdbms/wrapper/OcciConnFactory.cpp @@ -18,11 +18,12 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" -#include "OcciConnFactory.hpp" -#include "OcciEnvSingleton.hpp" +#include "rdbms/wrapper/OcciConnFactory.hpp" +#include "rdbms/wrapper/OcciEnvSingleton.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -53,5 +54,6 @@ std::unique_ptr<Conn> OcciConnFactory::create() { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciConnFactory.hpp b/rdbms/wrapper/OcciConnFactory.hpp similarity index 95% rename from rdbms/OcciConnFactory.hpp rename to rdbms/wrapper/OcciConnFactory.hpp index cb3e42a877e22994a0560f5a63456d3c9cc78f3a..9e7cab5a2543a9a35e35dd92d63340dbeaa8cd5d 100644 --- a/rdbms/OcciConnFactory.hpp +++ b/rdbms/wrapper/OcciConnFactory.hpp @@ -18,10 +18,11 @@ #pragma once -#include "ConnFactory.hpp" +#include "rdbms/wrapper/ConnFactory.hpp" namespace cta { namespace rdbms { +namespace wrapper { /** * A concrete factory of Conn objects. @@ -72,5 +73,6 @@ private: }; // class OcciConnFactory +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciEnv.cpp b/rdbms/wrapper/OcciEnv.cpp similarity index 95% rename from rdbms/OcciEnv.cpp rename to rdbms/wrapper/OcciEnv.cpp index e290c6f13edacb01cc16466c2c020a2100aa6617..b322079fbbcdc75ec3e9f3c3ed97b28c61ee61d3 100644 --- a/rdbms/OcciEnv.cpp +++ b/rdbms/wrapper/OcciEnv.cpp @@ -16,13 +16,14 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "OcciConn.hpp" -#include "OcciEnv.hpp" #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" +#include "rdbms/wrapper/OcciConn.hpp" +#include "rdbms/wrapper/OcciEnv.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -66,5 +67,6 @@ std::unique_ptr<Conn> OcciEnv::createConn( } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciEnv.hpp b/rdbms/wrapper/OcciEnv.hpp similarity index 95% rename from rdbms/OcciEnv.hpp rename to rdbms/wrapper/OcciEnv.hpp index 659e0862450141940ac2b68fe8a9d460876c3795..ae922ebc4f528d87ef4c4fb681cd762ede61af17 100644 --- a/rdbms/OcciEnv.hpp +++ b/rdbms/wrapper/OcciEnv.hpp @@ -18,13 +18,14 @@ #pragma once -#include "rdbms/Conn.hpp" +#include "rdbms/wrapper/Conn.hpp" #include <memory> #include <occi.h> namespace cta { namespace rdbms { +namespace wrapper { /** * A convenience wrapper around an OCCI environment. 
@@ -71,5 +72,6 @@ private: }; // class OcciEnv +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciEnvSingleton.cpp b/rdbms/wrapper/OcciEnvSingleton.cpp similarity index 96% rename from rdbms/OcciEnvSingleton.cpp rename to rdbms/wrapper/OcciEnvSingleton.cpp index 84844fff7900ae3812c1c137b51a713dea336743..551ad0dd8df9929edcfc4ebfa4e061723fde7bd3 100644 --- a/rdbms/OcciEnvSingleton.cpp +++ b/rdbms/wrapper/OcciEnvSingleton.cpp @@ -16,12 +16,13 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "rdbms/OcciEnvSingleton.hpp" #include "common/exception/Exception.hpp" #include "common/threading/MutexLocker.hpp" +#include "rdbms/wrapper/OcciEnvSingleton.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // s_mutex @@ -57,5 +58,6 @@ OcciEnvSingleton &OcciEnvSingleton::instance() { OcciEnvSingleton::OcciEnvSingleton() { } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciEnvSingleton.hpp b/rdbms/wrapper/OcciEnvSingleton.hpp similarity index 95% rename from rdbms/OcciEnvSingleton.hpp rename to rdbms/wrapper/OcciEnvSingleton.hpp index c6c36b2faa91edf2367955e3285a47b4fa16f5ab..a2afe8454a9c6c1071c838c162a43a314af097c9 100644 --- a/rdbms/OcciEnvSingleton.hpp +++ b/rdbms/wrapper/OcciEnvSingleton.hpp @@ -19,12 +19,13 @@ #pragma once #include "common/threading/Mutex.hpp" -#include "rdbms/OcciEnv.hpp" +#include "rdbms/wrapper/OcciEnv.hpp" #include <memory> namespace cta { namespace rdbms { +namespace wrapper { /** * A singleton version of OcciEnv. @@ -67,5 +68,6 @@ private: }; // class OcciEnvSingleton +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciRsetImpl.cpp b/rdbms/wrapper/OcciRset.cpp similarity index 90% rename from rdbms/OcciRsetImpl.cpp rename to rdbms/wrapper/OcciRset.cpp index 12b11d90573976df1cd05e37cc602ae2814dfa17..788d1f5d0ec31dd5d7a337a87118fe128dbf33f8 100644 --- a/rdbms/OcciRsetImpl.cpp +++ b/rdbms/wrapper/OcciRset.cpp @@ -16,11 +16,11 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include "NullDbValue.hpp" -#include "OcciRsetImpl.hpp" -#include "OcciStmt.hpp" #include "common/exception/Exception.hpp" #include "common/utils/utils.hpp" +#include "rdbms/NullDbValue.hpp" +#include "rdbms/wrapper/OcciRset.hpp" +#include "rdbms/wrapper/OcciStmt.hpp" #include <cstring> #include <map> @@ -28,11 +28,12 @@ namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ -OcciRsetImpl::OcciRsetImpl(OcciStmt &stmt, oracle::occi::ResultSet *const rset): +OcciRset::OcciRset(OcciStmt &stmt, oracle::occi::ResultSet *const rset): m_stmt(stmt), m_rset(rset) { try { @@ -49,7 +50,7 @@ OcciRsetImpl::OcciRsetImpl(OcciStmt &stmt, oracle::occi::ResultSet *const rset): //------------------------------------------------------------------------------ // populateColNameToIdx //------------------------------------------------------------------------------ -void OcciRsetImpl::populateColNameToIdxMap() { +void OcciRset::populateColNameToIdxMap() { using namespace oracle; try { @@ -69,7 +70,7 @@ void OcciRsetImpl::populateColNameToIdxMap() { //------------------------------------------------------------------------------ // destructor //------------------------------------------------------------------------------ -OcciRsetImpl::~OcciRsetImpl() throw() { +OcciRset::~OcciRset() throw() { try { close(); // Idempotent close() } catch(...) { @@ -80,14 +81,14 @@ OcciRsetImpl::~OcciRsetImpl() throw() { //------------------------------------------------------------------------------ // getSql //------------------------------------------------------------------------------ -const std::string &OcciRsetImpl::getSql() const { +const std::string &OcciRset::getSql() const { return m_stmt.getSql(); } //------------------------------------------------------------------------------ // next //------------------------------------------------------------------------------ -bool OcciRsetImpl::next() { +bool OcciRset::next() { using namespace oracle; try { @@ -102,7 +103,7 @@ bool OcciRsetImpl::next() { //------------------------------------------------------------------------------ // columnIsNull //------------------------------------------------------------------------------ -bool OcciRsetImpl::columnIsNull(const std::string &colName) const { +bool OcciRset::columnIsNull(const std::string &colName) const { try { const int colIdx = m_colNameToIdx.getIdx(colName); return m_rset->isNull(colIdx); @@ -116,7 +117,7 @@ bool OcciRsetImpl::columnIsNull(const std::string &colName) const { //------------------------------------------------------------------------------ // close //------------------------------------------------------------------------------ -void OcciRsetImpl::close() { +void OcciRset::close() { threading::Mutex locker(m_mutex); if(nullptr != m_rset) { @@ -128,7 +129,7 @@ void OcciRsetImpl::close() { //------------------------------------------------------------------------------ // columnOptionalString //------------------------------------------------------------------------------ -optional<std::string> OcciRsetImpl::columnOptionalString(const std::string &colName) const { +optional<std::string> OcciRset::columnOptionalString(const std::string &colName) const { try { const int colIdx = m_colNameToIdx.getIdx(colName); const std::string stringValue = m_rset->getString(colIdx); @@ -148,7 +149,7 @@ optional<std::string> 
OcciRsetImpl::columnOptionalString(const std::string &colN //------------------------------------------------------------------------------ // columnOptionalUint64 //------------------------------------------------------------------------------ -optional<uint64_t> OcciRsetImpl::columnOptionalUint64(const std::string &colName) const { +optional<uint64_t> OcciRset::columnOptionalUint64(const std::string &colName) const { try { threading::Mutex locker(m_mutex); @@ -171,5 +172,6 @@ optional<uint64_t> OcciRsetImpl::columnOptionalUint64(const std::string &colName } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciRsetImpl.hpp b/rdbms/wrapper/OcciRset.hpp similarity index 91% rename from rdbms/OcciRsetImpl.hpp rename to rdbms/wrapper/OcciRset.hpp index d6cc3ca54827c2a849398b88f2473209ead03213..bad427edb7d5b0dfa3dfbc615b425920aeea4942 100644 --- a/rdbms/OcciRsetImpl.hpp +++ b/rdbms/wrapper/OcciRset.hpp @@ -19,14 +19,15 @@ #pragma once #include "common/threading/Mutex.hpp" -#include "rdbms/ColumnNameToIdx.hpp" -#include "rdbms/RsetImpl.hpp" +#include "rdbms/wrapper/ColumnNameToIdx.hpp" +#include "rdbms/wrapper/Rset.hpp" #include <memory> #include <occi.h> namespace cta { namespace rdbms { +namespace wrapper { /** * Forward declaration to avoid a circular dependency between OcciRset and @@ -37,7 +38,7 @@ class OcciStmt; /** * A convenience wrapper around an OCCI result set. */ -class OcciRsetImpl: public RsetImpl { +class OcciRset: public Rset { public: /** @@ -49,12 +50,12 @@ public: * @param stmt The OCCI statement. * @param rset The OCCI result set. */ - OcciRsetImpl(OcciStmt &stmt, oracle::occi::ResultSet *const rset); + OcciRset(OcciStmt &stmt, oracle::occi::ResultSet *const rset); /** * Destructor. */ - virtual ~OcciRsetImpl() throw() override; + virtual ~OcciRset() throw() override; /** * Returns the SQL statement. 
@@ -131,7 +132,8 @@ private: */ void populateColNameToIdxMap(); -}; // class OcciRsetImpl +}; // class OcciRset + +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciStmt.cpp b/rdbms/wrapper/OcciStmt.cpp similarity index 91% rename from rdbms/OcciStmt.cpp rename to rdbms/wrapper/OcciStmt.cpp index 320be053b6f8e77b2d2ddf80b3e85bfd8e01a78d..62f839926dd78dd34e923b7b602478a5b19b7c2d 100644 --- a/rdbms/OcciStmt.cpp +++ b/rdbms/wrapper/OcciStmt.cpp @@ -17,12 +17,13 @@ */ #include "common/exception/Exception.hpp" +#include "common/exception/LostDatabaseConnection.hpp" #include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" -#include "rdbms/OcciColumn.hpp" -#include "rdbms/OcciConn.hpp" -#include "rdbms/OcciRsetImpl.hpp" -#include "rdbms/OcciStmt.hpp" +#include "rdbms/wrapper/OcciColumn.hpp" +#include "rdbms/wrapper/OcciConn.hpp" +#include "rdbms/wrapper/OcciRset.hpp" +#include "rdbms/wrapper/OcciStmt.hpp" #include <cstring> #include <map> @@ -31,6 +32,7 @@ namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -79,6 +81,12 @@ OcciStmt::~OcciStmt() throw() { } } +//------------------------------------------------------------------------------ +// clear +//------------------------------------------------------------------------------ +void OcciStmt::clear() { +} + //------------------------------------------------------------------------------ // close //------------------------------------------------------------------------------ @@ -170,12 +178,15 @@ void OcciStmt::bindOptionalString(const std::string &paramName, const optional<s //------------------------------------------------------------------------------ // executeQuery //------------------------------------------------------------------------------ -Rset OcciStmt::executeQuery() { +std::unique_ptr<Rset> OcciStmt::executeQuery() { using namespace oracle; try { - return Rset(new OcciRsetImpl(*this, m_stmt->executeQuery())); + return cta::make_unique<OcciRset>(*this, m_stmt->executeQuery()); } catch(occi::SQLException &ex) { + std::ostringstream msg; + msg << std::string(__FUNCTION__) << " failed for SQL statement " << getSqlForException() << ": " << ex.what(); + if(connShouldBeClosed(ex)) { // Close the statement first and then the connection try { @@ -187,9 +198,9 @@ Rset OcciStmt::executeQuery() { m_conn.close(); } catch(...)
{ } + throw exception::LostDatabaseConnection(msg.str()); } - throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + - getSqlForException() + ": " + ex.what()); + throw exception::Exception(msg.str()); } } @@ -283,5 +294,6 @@ bool OcciStmt::connShouldBeClosed(const oracle::occi::SQLException &ex) { }; } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/OcciStmt.hpp b/rdbms/wrapper/OcciStmt.hpp similarity index 94% rename from rdbms/OcciStmt.hpp rename to rdbms/wrapper/OcciStmt.hpp index a180db9f3d877c0bec3f85af5415e75c5c45cd33..5b58998eed81a81ddb78ee77d939471bb992ce7b 100644 --- a/rdbms/OcciStmt.hpp +++ b/rdbms/wrapper/OcciStmt.hpp @@ -19,7 +19,7 @@ #pragma once #include "common/threading/Mutex.hpp" -#include "rdbms/Stmt.hpp" +#include "rdbms/wrapper/Stmt.hpp" #include <memory> #include <occi.h> @@ -27,6 +27,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * Forward declaration to avoid a circular dependency between OcciStmt and @@ -38,7 +39,7 @@ class OcciConn; * Forward declaration to avoid a circular dependency between OcciStmt and * OcciRset. */ -class OcciRsetImpl; +class OcciRset; class OcciColumn; @@ -72,6 +73,11 @@ public: */ OcciStmt(const OcciStmt &) = delete; + /** + * Clears the prepared statement so that it is ready to be reused. + */ + void clear() override; + /** * Idempotent close() method. The destructor calls this method. */ @@ -122,7 +128,7 @@ public: * * @return The result set. */ - Rset executeQuery() override; + std::unique_ptr<Rset> executeQuery() override; /** * Executes the statement. @@ -186,5 +192,6 @@ private: }; // class OcciStmt +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ParamNameToIdx.cpp b/rdbms/wrapper/ParamNameToIdx.cpp similarity index 97% rename from rdbms/ParamNameToIdx.cpp rename to rdbms/wrapper/ParamNameToIdx.cpp index 8173850517892b63508bd352093c2da16c3ce16a..00aeadafa6f2095678813aac9788f6ba4ad1a488 100644 --- a/rdbms/ParamNameToIdx.cpp +++ b/rdbms/wrapper/ParamNameToIdx.cpp @@ -16,13 +16,14 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "ParamNameToIdx.hpp" #include "common/exception/Exception.hpp" +#include "rdbms/wrapper/ParamNameToIdx.hpp" #include <sstream> namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -90,5 +91,6 @@ uint32_t ParamNameToIdx::getIdx(const std::string &paramName) const { return itor->second; } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ParamNameToIdx.hpp b/rdbms/wrapper/ParamNameToIdx.hpp similarity index 97% rename from rdbms/ParamNameToIdx.hpp rename to rdbms/wrapper/ParamNameToIdx.hpp index 3ab4d175da2f1a2650f20702d14c2962c3184883..79c79eb6c910a5484998bf267a335a1005a635bc 100644 --- a/rdbms/ParamNameToIdx.hpp +++ b/rdbms/wrapper/ParamNameToIdx.hpp @@ -22,6 +22,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * Map from SQL parameter name to parameter index.
@@ -65,5 +66,6 @@ private: }; // class ParamNameToIdx +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/ParamNameToIdxTest.cpp b/rdbms/wrapper/ParamNameToIdxTest.cpp similarity index 88% rename from rdbms/ParamNameToIdxTest.cpp rename to rdbms/wrapper/ParamNameToIdxTest.cpp index 7ded28ade1a364948f6f0e2cb76dec084ac4f9e8..380ab4a2bffb280df8440d5d31adb93cc037d787 100644 --- a/rdbms/ParamNameToIdxTest.cpp +++ b/rdbms/wrapper/ParamNameToIdxTest.cpp @@ -16,15 +16,15 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "ParamNameToIdx.hpp" #include "common/exception/Exception.hpp" +#include "rdbms/wrapper/ParamNameToIdx.hpp" #include <gtest/gtest.h> #include <sstream> namespace unitTests { -class cta_rdbms_ParamNameToIdxTest : public ::testing::Test { +class cta_rdbms_wrapper_ParamNameToIdxTest : public ::testing::Test { protected: virtual void SetUp() { @@ -34,8 +34,8 @@ protected: } }; -TEST_F(cta_rdbms_ParamNameToIdxTest, getIdx_existing_params) { - using namespace cta::rdbms; +TEST_F(cta_rdbms_wrapper_ParamNameToIdxTest, getIdx_existing_params) { + using namespace cta::rdbms::wrapper; const char *const sql = "INSERT INTO ADMIN_USER(" @@ -80,9 +80,9 @@ TEST_F(cta_rdbms_ParamNameToIdxTest, getIdx_existing_params) { ASSERT_EQ(10, paramNameToIdx.getIdx(":LAST_UPDATE_TIME")); } -TEST_F(cta_rdbms_ParamNameToIdxTest, getIdx_non_existing_param) { +TEST_F(cta_rdbms_wrapper_ParamNameToIdxTest, getIdx_non_existing_param) { using namespace cta; - using namespace cta::rdbms; + using namespace cta::rdbms::wrapper; const char *const sql = "String containing no bind parameters"; diff --git a/rdbms/RsetImpl.cpp b/rdbms/wrapper/Rset.cpp similarity index 90% rename from rdbms/RsetImpl.cpp rename to rdbms/wrapper/Rset.cpp index 6eb67a189cb3c47d91b1fa85adef93a08470ba82..9448304a56cda20a241676166549e19803924c96 100644 --- a/rdbms/RsetImpl.cpp +++ b/rdbms/wrapper/Rset.cpp @@ -16,16 +16,18 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "rdbms/RsetImpl.hpp" +#include "rdbms/wrapper/Rset.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // destructor //------------------------------------------------------------------------------ -RsetImpl::~RsetImpl() throw() { +Rset::~Rset() throw() { } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/RsetImpl.hpp b/rdbms/wrapper/Rset.hpp similarity index 95% rename from rdbms/RsetImpl.hpp rename to rdbms/wrapper/Rset.hpp index 72db6ed246e8dbd6eb9a4a9bbb3d763c0dcaa8d0..bcc98870d73496dc43e156732aaa3d3cb843d0bb 100644 --- a/rdbms/RsetImpl.hpp +++ b/rdbms/wrapper/Rset.hpp @@ -25,18 +25,19 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * Abstract class specifying the interface to an implementation of the result * set of an sql query. */ -class RsetImpl { +class Rset { public: /** * Destructor. */ - virtual ~RsetImpl() throw() = 0; + virtual ~Rset() throw() = 0; /** * Returns the SQL statement. 
@@ -81,7 +82,8 @@ public: */ virtual optional<uint64_t> columnOptionalUint64(const std::string &colName) const = 0; -}; // class RsetImpl +}; // class Rset +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/Sqlite.cpp b/rdbms/wrapper/Sqlite.cpp similarity index 96% rename from rdbms/Sqlite.cpp rename to rdbms/wrapper/Sqlite.cpp index 52ae52af15b6636c047478c0d894c77da0ccb16c..287aeab85b9933707c2759b735ec925bd47d9a72 100644 --- a/rdbms/Sqlite.cpp +++ b/rdbms/wrapper/Sqlite.cpp @@ -16,12 +16,13 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include "Sqlite.hpp" +#include "rdbms/wrapper/Sqlite.hpp" #include <sstream> namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // rcToStr @@ -33,7 +34,7 @@ std::string Sqlite::rcToStr(const int rc) { case SQLITE_AUTH: return "Authorization denied"; case SQLITE_BUSY: - return "Failed to take locks"; + return "Busy"; case SQLITE_CANTOPEN: return "Cannot open database file"; case SQLITE_CONSTRAINT: @@ -90,5 +91,6 @@ std::string Sqlite::rcToStr(const int rc) { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/Sqlite.hpp b/rdbms/wrapper/Sqlite.hpp similarity index 96% rename from rdbms/Sqlite.hpp rename to rdbms/wrapper/Sqlite.hpp index b69abf7991a649efac86a2ae712109c611eb7da2..93c46f975c9d20e7368a3290bd3fe61e23e02bff 100644 --- a/rdbms/Sqlite.hpp +++ b/rdbms/wrapper/Sqlite.hpp @@ -23,6 +23,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * A helper class for working with SQLite. @@ -40,5 +41,6 @@ public: }; // class SqlLiteStmt +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteConn.cpp b/rdbms/wrapper/SqliteConn.cpp similarity index 81% rename from rdbms/SqliteConn.cpp rename to rdbms/wrapper/SqliteConn.cpp index 3b08df7341d0cdb16c54a427f0f3907ed93be97f..98ad9a79c277754bb44d5f36bc47256708709aed 100644 --- a/rdbms/SqliteConn.cpp +++ b/rdbms/wrapper/SqliteConn.cpp @@ -19,14 +19,17 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" -#include "rdbms/SqliteConn.hpp" -#include "rdbms/SqliteStmt.hpp" +#include "rdbms/wrapper/Sqlite.hpp" +#include "rdbms/wrapper/SqliteConn.hpp" +#include "rdbms/wrapper/SqliteStmt.hpp" +#include <iostream> #include <stdexcept> #include <string> namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -66,7 +69,11 @@ SqliteConn::SqliteConn(const std::string &filename): // destructor //------------------------------------------------------------------------------ SqliteConn::~SqliteConn() throw() { - close(); + try { + close(); // Idempotent close() method + } catch(...) 
{ + // Destructor should not throw any exceptions + } } //------------------------------------------------------------------------------ @@ -76,7 +83,12 @@ void SqliteConn::close() { threading::MutexLocker locker(m_mutex); if(nullptr != m_sqliteConn) { - sqlite3_close(m_sqliteConn); + const int closeRc = sqlite3_close(m_sqliteConn); + if(SQLITE_OK != closeRc) { + exception::Exception ex; + ex.getMessage() << "Failed to close SQLite connection: " << Sqlite::rcToStr(closeRc); + throw ex; + } m_sqliteConn = nullptr; } } @@ -84,7 +96,7 @@ void SqliteConn::close() { //------------------------------------------------------------------------------ // createStmt //------------------------------------------------------------------------------ -std::unique_ptr<Stmt> SqliteConn::createStmt(const std::string &sql, const Stmt::AutocommitMode autocommitMode) { +std::unique_ptr<Stmt> SqliteConn::createStmt(const std::string &sql, const AutocommitMode autocommitMode) { try { threading::MutexLocker locker(m_mutex); @@ -170,14 +182,14 @@ void SqliteConn::printSchema(std::ostream &os) { "ORDER BY " "TYPE, " "NAME;"; - auto stmt = createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = createStmt(sql, AutocommitMode::ON); auto rset = stmt->executeQuery(); os << "NAME, TYPE" << std::endl; os << "==========" << std::endl; - while (rset.next()) { - const std::string name = rset.columnString("NAME"); - const std::string type = rset.columnString("TYPE"); - os << name << ", " << type << std::endl; + while (rset->next()) { + const auto name = rset->columnOptionalString("NAME"); + const auto type = rset->columnOptionalString("TYPE"); + os << (name ? name.value() : "NULL") << ", " << (type ? type.value() : "NULL") << std::endl; } } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); @@ -198,11 +210,14 @@ std::list<std::string> SqliteConn::getTableNames() { "TYPE = 'table' " "ORDER BY " "NAME;"; - auto stmt = createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = createStmt(sql, AutocommitMode::ON); auto rset = stmt->executeQuery(); std::list<std::string> names; - while (rset.next()) { - names.push_back(rset.columnString("NAME")); + while (rset->next()) { + auto name = rset->columnOptionalString("NAME"); + if(name) { + names.push_back(name.value()); + } } return names; } catch(exception::Exception &ex) { @@ -217,5 +232,17 @@ bool SqliteConn::isOpen() const { return nullptr != m_sqliteConn; } +//------------------------------------------------------------------------------ +// getSequenceNames +//------------------------------------------------------------------------------ +std::list<std::string> SqliteConn::getSequenceNames() { + try { + return std::list<std::string>(); + } catch(exception::Exception &ex) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); + } +} + +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteConn.hpp b/rdbms/wrapper/SqliteConn.hpp similarity index 85% rename from rdbms/SqliteConn.hpp rename to rdbms/wrapper/SqliteConn.hpp index d8f3df7bd997ec5a91e68d8eec42adb95b86996c..ee66e6749c80186e375446a710aa89e931c9748c 100644 --- a/rdbms/SqliteConn.hpp +++ b/rdbms/wrapper/SqliteConn.hpp @@ -19,12 +19,13 @@ #pragma once #include "common/threading/Mutex.hpp" -#include "rdbms/Conn.hpp" +#include "rdbms/wrapper/Conn.hpp" #include <sqlite3.h> namespace cta { namespace rdbms { +namespace wrapper { /** * Forward declaration to avoid a circular 
dependency between SqliteConn and @@ -68,7 +69,7 @@ public: * @param autocommitMode The autocommit mode of the statement. * @return The prepared statement. */ - std::unique_ptr<Stmt> createStmt(const std::string &sql, const Stmt::AutocommitMode autocommitMode) override; + std::unique_ptr<Stmt> createStmt(const std::string &sql, const AutocommitMode autocommitMode) override; /** * Commits the current transaction. @@ -94,6 +95,18 @@ public: */ bool isOpen() const override; + /** + * Returns the names of all the sequences in the database schema in + * alphabetical order. + * + * If the underlying database technology does not support sequences then + * this method simply returns an empty list. + * + * @return The names of all the sequences in the database schema in + * alphabetical order. + */ + std::list<std::string> getSequenceNames() override; + /** * This ia an SqliteConn specific method that prints the database schema to * the specified output stream. @@ -124,5 +137,6 @@ private: }; // class SqliteConn +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteConnFactory.cpp b/rdbms/wrapper/SqliteConnFactory.cpp similarity index 93% rename from rdbms/SqliteConnFactory.cpp rename to rdbms/wrapper/SqliteConnFactory.cpp index f7898e1087af491730f384d7905b510269a1fca9..72bdecd387f5f9aa66117c301330f1df0e634b72 100644 --- a/rdbms/SqliteConnFactory.cpp +++ b/rdbms/wrapper/SqliteConnFactory.cpp @@ -18,11 +18,12 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" -#include "SqliteConn.hpp" -#include "SqliteConnFactory.hpp" +#include "rdbms/wrapper/SqliteConn.hpp" +#include "rdbms/wrapper/SqliteConnFactory.hpp" namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -48,5 +49,6 @@ std::unique_ptr<Conn> SqliteConnFactory::create() { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteConnFactory.hpp b/rdbms/wrapper/SqliteConnFactory.hpp similarity index 94% rename from rdbms/SqliteConnFactory.hpp rename to rdbms/wrapper/SqliteConnFactory.hpp index a1838c9921cfc7a016d8e3b58a710dc6e0dfcc1d..b04c5e98a6fa856025c667b0f3e054658ce913b4 100644 --- a/rdbms/SqliteConnFactory.hpp +++ b/rdbms/wrapper/SqliteConnFactory.hpp @@ -18,10 +18,11 @@ #pragma once -#include "ConnFactory.hpp" +#include "rdbms/wrapper/ConnFactory.hpp" namespace cta { namespace rdbms { +namespace wrapper { /** * A concrete factory of Conn objects. @@ -57,5 +58,6 @@ private: }; // class SqliteConnFactory +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteRsetImpl.cpp b/rdbms/wrapper/SqliteRset.cpp similarity index 91% rename from rdbms/SqliteRsetImpl.cpp rename to rdbms/wrapper/SqliteRset.cpp index f16144e65dc01c403e2a1468d92b965b53176260..1a45e1ffb63dd7ef297afc3311d3a01755d17ae1 100644 --- a/rdbms/SqliteRsetImpl.cpp +++ b/rdbms/wrapper/SqliteRset.cpp @@ -16,12 +16,12 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ -#include "NullDbValue.hpp" -#include "Sqlite.hpp" -#include "SqliteRsetImpl.hpp" -#include "SqliteStmt.hpp" #include "common/exception/Exception.hpp" #include "common/exception/Errnum.hpp" +#include "rdbms/NullDbValue.hpp" +#include "rdbms/wrapper/Sqlite.hpp" +#include "rdbms/wrapper/SqliteRset.hpp" +#include "rdbms/wrapper/SqliteStmt.hpp" #include <cstring> #include <sstream> @@ -30,6 +30,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * A map from column name to column index and type. @@ -120,31 +121,31 @@ private: */ std::map<std::string, IdxAndType> m_nameToIdxAndType; -}; // class SqliteRsetImpl::ColNameToIdx +}; // class SqliteRset::ColNameToIdx //------------------------------------------------------------------------------ // constructor //------------------------------------------------------------------------------ -SqliteRsetImpl::SqliteRsetImpl(SqliteStmt &stmt): m_stmt(stmt) { +SqliteRset::SqliteRset(SqliteStmt &stmt): m_stmt(stmt) { } //------------------------------------------------------------------------------ // destructor. //------------------------------------------------------------------------------ -SqliteRsetImpl::~SqliteRsetImpl() throw() { +SqliteRset::~SqliteRset() throw() { } //------------------------------------------------------------------------------ // getSql //------------------------------------------------------------------------------ -const std::string &SqliteRsetImpl::getSql() const { +const std::string &SqliteRset::getSql() const { return m_stmt.getSql(); } //------------------------------------------------------------------------------ // next //------------------------------------------------------------------------------ -bool SqliteRsetImpl::next() { +bool SqliteRset::next() { try { const int stepRc = sqlite3_step(m_stmt.get()); @@ -159,7 +160,7 @@ bool SqliteRsetImpl::next() { return SQLITE_ROW == stepRc; } catch(exception::Exception &ex) { - throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + m_stmt.getSql() + + throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + m_stmt.getSqlForException() + ": " + ex.getMessage().str()); } } @@ -167,7 +168,7 @@ bool SqliteRsetImpl::next() { //------------------------------------------------------------------------------ // clearAndPopulateColNameToIdxMap //------------------------------------------------------------------------------ -void SqliteRsetImpl::clearAndPopulateColNameToIdxAndTypeMap() { +void SqliteRset::clearAndPopulateColNameToIdxAndTypeMap() { try { m_colNameToIdxAndType.clear(); @@ -197,7 +198,7 @@ void SqliteRsetImpl::clearAndPopulateColNameToIdxAndTypeMap() { //------------------------------------------------------------------------------ // columnIsNull //------------------------------------------------------------------------------ -bool SqliteRsetImpl::columnIsNull(const std::string &colName) const { +bool SqliteRset::columnIsNull(const std::string &colName) const { try { const ColumnNameToIdxAndType::IdxAndType idxAndType = m_colNameToIdxAndType.getIdxAndType(colName); return SQLITE_NULL == idxAndType.colType; @@ -209,7 +210,7 @@ bool SqliteRsetImpl::columnIsNull(const std::string &colName) const { //------------------------------------------------------------------------------ // columnOptionalString //------------------------------------------------------------------------------ -optional<std::string> SqliteRsetImpl::columnOptionalString(const std::string &colName) const { +optional<std::string> 
SqliteRset::columnOptionalString(const std::string &colName) const { try { const ColumnNameToIdxAndType::IdxAndType idxAndType = m_colNameToIdxAndType.getIdxAndType(colName); if(SQLITE_NULL == idxAndType.colType) { @@ -232,7 +233,7 @@ optional<std::string> SqliteRsetImpl::columnOptionalString(const std::string &co //------------------------------------------------------------------------------ // columnOptionalUint64 //------------------------------------------------------------------------------ -optional<uint64_t> SqliteRsetImpl::columnOptionalUint64(const std::string &colName) const { +optional<uint64_t> SqliteRset::columnOptionalUint64(const std::string &colName) const { try { const ColumnNameToIdxAndType::IdxAndType idxAndType = m_colNameToIdxAndType.getIdxAndType(colName); if(SQLITE_NULL == idxAndType.colType) { @@ -245,5 +246,6 @@ optional<uint64_t> SqliteRsetImpl::columnOptionalUint64(const std::string &colNa } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteRsetImpl.hpp b/rdbms/wrapper/SqliteRset.hpp similarity index 92% rename from rdbms/SqliteRsetImpl.hpp rename to rdbms/wrapper/SqliteRset.hpp index 8aaee093ec3fe6b053184b488e1e027fb9cb834b..0bda5b1e29543c6308096290212e46bdc2895d3e 100644 --- a/rdbms/SqliteRsetImpl.hpp +++ b/rdbms/wrapper/SqliteRset.hpp @@ -18,8 +18,8 @@ #pragma once -#include "rdbms/ColumnNameToIdxAndType.hpp" -#include "rdbms/RsetImpl.hpp" +#include "rdbms/wrapper/ColumnNameToIdxAndType.hpp" +#include "rdbms/wrapper/Rset.hpp" #include <memory> #include <stdint.h> @@ -27,6 +27,7 @@ namespace cta { namespace rdbms { +namespace wrapper { /** * Forward declaration. @@ -36,7 +37,7 @@ class SqliteStmt; /** * The result set of an sql query. */ -class SqliteRsetImpl: public RsetImpl { +class SqliteRset: public Rset { public: /** @@ -44,12 +45,12 @@ public: * * @param stmt The prepared statement. */ - SqliteRsetImpl(SqliteStmt &stmt); + SqliteRset(SqliteStmt &stmt); /** * Destructor. */ - ~SqliteRsetImpl() throw() override; + ~SqliteRset() throw() override; /** * Returns the SQL statement. 
@@ -113,5 +114,6 @@ private: }; // class SqlLiteRset +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteStmt.cpp b/rdbms/wrapper/SqliteStmt.cpp similarity index 82% rename from rdbms/SqliteStmt.cpp rename to rdbms/wrapper/SqliteStmt.cpp index 76b61a37945e315e273b180a4b93e1a5d49447f0..68886b2b4c32bc14fa4fe043ab524572eac6a104 100644 --- a/rdbms/SqliteStmt.cpp +++ b/rdbms/wrapper/SqliteStmt.cpp @@ -19,12 +19,13 @@ #include "common/exception/Exception.hpp" #include "common/make_unique.hpp" #include "common/threading/MutexLocker.hpp" -#include "rdbms/Sqlite.hpp" -#include "rdbms/SqliteConn.hpp" -#include "rdbms/SqliteRsetImpl.hpp" -#include "rdbms/SqliteStmt.hpp" +#include "rdbms/wrapper/Sqlite.hpp" +#include "rdbms/wrapper/SqliteConn.hpp" +#include "rdbms/wrapper/SqliteRset.hpp" +#include "rdbms/wrapper/SqliteStmt.hpp" #include <cstring> +#include <iostream> #include <stdexcept> #include <stdlib.h> #include <string> @@ -32,6 +33,7 @@ namespace cta { namespace rdbms { +namespace wrapper { //------------------------------------------------------------------------------ // constructor @@ -114,15 +116,47 @@ SqliteStmt::~SqliteStmt() throw() { } } +//------------------------------------------------------------------------------ +// clear +//------------------------------------------------------------------------------ +void SqliteStmt::clear() { + try { + threading::MutexLocker locker(m_mutex); + + if(nullptr != m_stmt) { + const int resetRc = sqlite3_reset(m_stmt); + if(SQLITE_OK != resetRc) { + exception::Exception ex; + ex.getMessage() << "sqlite3_reset failed: " << Sqlite::rcToStr(resetRc); + throw ex; + } + const int clearBindingsRc = sqlite3_clear_bindings(m_stmt); + if(SQLITE_OK != clearBindingsRc) { + exception::Exception ex; + ex.getMessage() << "sqlite3_clear_bindings failed: " << Sqlite::rcToStr(clearBindingsRc); + throw ex; + } + } + } catch(exception::Exception &ex) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); + } +} + //------------------------------------------------------------------------------ // close //------------------------------------------------------------------------------ void SqliteStmt::close() { - threading::MutexLocker locker(m_mutex); + try { + threading::MutexLocker locker(m_mutex); - if(nullptr != m_stmt) { - sqlite3_finalize(m_stmt); - m_stmt = nullptr; + if (nullptr != m_stmt) { + const int finalizeRc = sqlite3_finalize(m_stmt); + if (SQLITE_OK != finalizeRc) { + exception::Exception ex; + ex.getMessage() << "sqlite3_finalize failed: " << Sqlite::rcToStr(finalizeRc); + throw ex; + } + m_stmt = nullptr; + } + } catch(exception::Exception &ex) { + throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str()); } } @@ -161,7 +195,9 @@ void SqliteStmt::bindOptionalUint64(const std::string &paramName, const optional bindRc = sqlite3_bind_null(m_stmt, paramIdx); } if(SQLITE_OK != bindRc) { - throw exception::Exception("sqlite3_bind_int64() failed"); + exception::Exception ex; + ex.getMessage() << "sqlite3_bind_int64() failed: " << Sqlite::rcToStr(bindRc); + throw ex; } } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + @@ -197,7 +233,10 @@ void SqliteStmt::bindOptionalString(const std::string &paramName, const optional bindRc = sqlite3_bind_null(m_stmt, paramIdx); } if(SQLITE_OK != bindRc) { - throw exception::Exception("sqlite3_bind_text() failed"); + exception::Exception ex; + + ex.getMessage() << "sqlite3_bind_text() failed: 
" << Sqlite::rcToStr(bindRc); + throw ex; } } catch(exception::Exception &ex) { throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + @@ -208,8 +247,8 @@ void SqliteStmt::bindOptionalString(const std::string &paramName, const optional //------------------------------------------------------------------------------ // executeQuery //------------------------------------------------------------------------------ -Rset SqliteStmt::executeQuery() { - return Rset(new SqliteRsetImpl(*this)); +std::unique_ptr<Rset> SqliteStmt::executeQuery() { + return cta::make_unique<SqliteRset>(*this); } //------------------------------------------------------------------------------ @@ -262,5 +301,6 @@ void SqliteStmt::beginDeferredTransaction() { } } +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteStmt.hpp b/rdbms/wrapper/SqliteStmt.hpp similarity index 94% rename from rdbms/SqliteStmt.hpp rename to rdbms/wrapper/SqliteStmt.hpp index 3d0cee5ad7a5b209191612883703bedba070efcd..bf0bdb9b9965cb9ee8abb0cadb70ee9d6146f9ae 100644 --- a/rdbms/SqliteStmt.hpp +++ b/rdbms/wrapper/SqliteStmt.hpp @@ -19,7 +19,7 @@ #pragma once #include "common/threading/Mutex.hpp" -#include "rdbms/Stmt.hpp" +#include "rdbms/wrapper/Stmt.hpp" #include <map> #include <memory> @@ -28,9 +28,10 @@ namespace cta { namespace rdbms { +namespace wrapper { class SqliteConn; -class SqliteRsetImpl; +class SqliteRset; /** * A convenience wrapper around an SQLite prepared statement. @@ -55,6 +56,11 @@ public: */ ~SqliteStmt() throw() override; + /** + * Clears the prepared statement so that it is ready to be reused. + */ + void clear() override; + /** * Idempotent close() method. The destructor calls this method. */ @@ -115,7 +121,7 @@ public: * * @return The result set. */ - Rset executeQuery() override; + std::unique_ptr<Rset> executeQuery() override; /** * Executes the statement. @@ -163,5 +169,6 @@ private: }; // class SqlLiteStmt +} // namespace wrapper } // namespace rdbms } // namespace cta diff --git a/rdbms/SqliteStmtTest.cpp b/rdbms/wrapper/SqliteStmtTest.cpp similarity index 63% rename from rdbms/SqliteStmtTest.cpp rename to rdbms/wrapper/SqliteStmtTest.cpp index 9aaadca885731648adaf24faded12dc3479d12bf..0d7b7d32df283b5c8fe90d3771017f1c0e6a1c09 100644 --- a/rdbms/SqliteStmtTest.cpp +++ b/rdbms/wrapper/SqliteStmtTest.cpp @@ -16,16 +16,16 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ -#include "SqliteConn.hpp" -#include "SqliteRsetImpl.hpp" -#include "SqliteStmt.hpp" +#include "rdbms/wrapper/SqliteConn.hpp" +#include "rdbms/wrapper/SqliteRset.hpp" +#include "rdbms/wrapper/SqliteStmt.hpp" #include <gtest/gtest.h> #include <memory> namespace unitTests { -class cta_rdbms_SqliteStmtTest : public ::testing::Test { +class cta_rdbms_wrapper_SqliteStmtTest : public ::testing::Test { protected: virtual void SetUp() { @@ -35,8 +35,9 @@ protected: } }; -TEST_F(cta_rdbms_SqliteStmtTest, create_table) { - using namespace cta::rdbms; +TEST_F(cta_rdbms_wrapper_SqliteStmtTest, create_table) { + using namespace cta; + using namespace cta::rdbms::wrapper; // Create a connection a memory resident database SqliteConn conn(":memory:"); @@ -49,12 +50,13 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { "SQLITE_MASTER " "WHERE " "TYPE = 'table';"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); - const uint64_t nbTables = rset.columnUint64("NB_TABLES"); - ASSERT_EQ(0, nbTables); - ASSERT_FALSE(rset.next()); + ASSERT_TRUE(rset->next()); + const auto nbTables = rset->columnOptionalUint64("NB_TABLES"); + ASSERT_TRUE((bool)nbTables); + ASSERT_EQ(0, nbTables.value()); + ASSERT_FALSE(rset->next()); ASSERT_TRUE(conn.getTableNames().empty()); } @@ -65,7 +67,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); } @@ -78,12 +80,13 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { "WHERE " "NAME = 'TEST1' AND " "TYPE = 'table';"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); - const uint64_t nbTables = rset.columnUint64("NB_TABLES"); - ASSERT_EQ(1, nbTables); - ASSERT_FALSE(rset.next()); + ASSERT_TRUE(rset->next()); + const auto nbTables = rset->columnOptionalUint64("NB_TABLES"); + ASSERT_TRUE((bool)nbTables); + ASSERT_EQ(1, nbTables.value()); + ASSERT_FALSE(rset->next()); ASSERT_EQ(1, conn.getTableNames().size()); ASSERT_EQ("TEST1", conn.getTableNames().front()); } @@ -95,7 +98,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); } @@ -108,12 +111,13 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { "WHERE " "NAME = 'TEST2' AND " "TYPE = 'table';"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); - const uint64_t nbTables = rset.columnUint64("NB_TABLES"); - ASSERT_EQ(1, nbTables); - ASSERT_FALSE(rset.next()); + ASSERT_TRUE(rset->next()); + const auto nbTables = rset->columnOptionalUint64("NB_TABLES"); + ASSERT_TRUE((bool)nbTables); + ASSERT_EQ(1, nbTables.value()); + ASSERT_FALSE(rset->next()); const auto tableNames = conn.getTableNames(); ASSERT_EQ(2, tableNames.size()); auto nameItor = tableNames.begin(); @@ -125,8 +129,9 @@ TEST_F(cta_rdbms_SqliteStmtTest, create_table) { } } -TEST_F(cta_rdbms_SqliteStmtTest, select_from_empty_table) { - using namespace cta::rdbms; 
+TEST_F(cta_rdbms_wrapper_SqliteStmtTest, select_from_empty_table) { + using namespace cta; + using namespace cta::rdbms::wrapper; // Create a connection a memory resident database SqliteConn conn(":memory:"); @@ -139,7 +144,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, select_from_empty_table) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); ASSERT_EQ(1, conn.getTableNames().size()); ASSERT_EQ("TEST", conn.getTableNames().front()); @@ -154,14 +159,15 @@ TEST_F(cta_rdbms_SqliteStmtTest, select_from_empty_table) { "COL3 " "FROM " "TEST;"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_FALSE(rset.next()); + ASSERT_FALSE(rset->next()); } } -TEST_F(cta_rdbms_SqliteStmtTest, insert_without_bind) { - using namespace cta::rdbms; +TEST_F(cta_rdbms_wrapper_SqliteStmtTest, insert_without_bind) { + using namespace cta; + using namespace cta::rdbms::wrapper; // Create a connection a memory resident database SqliteConn conn(":memory:"); @@ -174,7 +180,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_without_bind) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); ASSERT_EQ(1, conn.getTableNames().size()); ASSERT_EQ("TEST", conn.getTableNames().front()); @@ -191,7 +197,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_without_bind) { "'one'," "'two'," "3);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); } @@ -204,24 +210,29 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_without_bind) { "COL3 AS COL3 " "FROM " "TEST;"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); + ASSERT_TRUE(rset->next()); - const std::string col1 = rset.columnString("COL1"); - const std::string col2 = rset.columnString("COL2"); - const uint64_t col3 = rset.columnUint64("COL3"); + const auto col1 = rset->columnOptionalString("COL1"); + const auto col2 = rset->columnOptionalString("COL2"); + const auto col3 = rset->columnOptionalUint64("COL3"); - ASSERT_EQ("one", col1); - ASSERT_EQ("two", col2); - ASSERT_EQ((uint64_t)3, col3); + ASSERT_TRUE((bool)col1); + ASSERT_TRUE((bool)col2); + ASSERT_TRUE((bool)col3); - ASSERT_FALSE(rset.next()); + ASSERT_EQ("one", col1.value()); + ASSERT_EQ("two", col2.value()); + ASSERT_EQ(3, col3.value()); + + ASSERT_FALSE(rset->next()); } } -TEST_F(cta_rdbms_SqliteStmtTest, insert_with_bind) { - using namespace cta::rdbms; +TEST_F(cta_rdbms_wrapper_SqliteStmtTest, insert_with_bind) { + using namespace cta; + using namespace cta::rdbms::wrapper; // Create a connection a memory resident database SqliteConn conn(":memory:"); @@ -235,7 +246,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_with_bind) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); ASSERT_EQ(1, conn.getTableNames().size()); ASSERT_EQ("TEST", conn.getTableNames().front()); @@ -252,7 +263,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_with_bind) { ":COL1," ":COL2," ":COL3);"; - auto stmt 
= conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); stmt->bindString(":COL1", "one"); stmt->bindString(":COL2", "two"); stmt->bindUint64(":COL3", 3); @@ -268,24 +279,29 @@ TEST_F(cta_rdbms_SqliteStmtTest, insert_with_bind) { "COL3 AS COL3 " "FROM " "TEST;"; - auto stmt = conn.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = conn.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); + ASSERT_TRUE(rset->next()); + + const auto col1 = rset->columnOptionalString("COL1"); + const auto col2 = rset->columnOptionalString("COL2"); + const auto col3 = rset->columnOptionalUint64("COL3"); - const std::string col1 = rset.columnString("COL1"); - const std::string col2 = rset.columnString("COL2"); - const uint64_t col3 = rset.columnUint64("COL3"); + ASSERT_TRUE((bool)col1); + ASSERT_TRUE((bool)col2); + ASSERT_TRUE((bool)col3); - ASSERT_EQ("one", col1); - ASSERT_EQ("two", col2); - ASSERT_EQ((uint64_t)3, col3); + ASSERT_EQ("one", col1.value()); + ASSERT_EQ("two", col2.value()); + ASSERT_EQ(3, col3.value()); - ASSERT_FALSE(rset.next()); + ASSERT_FALSE(rset->next()); } } -TEST_F(cta_rdbms_SqliteStmtTest, isolated_transaction) { - using namespace cta::rdbms; +TEST_F(cta_rdbms_wrapper_SqliteStmtTest, isolated_transaction) { + using namespace cta; + using namespace cta::rdbms::wrapper; const std::string dbFilename = "file::memory:?cache=shared"; @@ -298,7 +314,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, isolated_transaction) { "COL1 TEXT," "COL2 TEXT," "COL3 INTEGER);"; - auto stmt = connForCreate.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = connForCreate.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); ASSERT_EQ(1, connForCreate.getTableNames().size()); ASSERT_EQ("TEST", connForCreate.getTableNames().front()); @@ -316,7 +332,7 @@ TEST_F(cta_rdbms_SqliteStmtTest, isolated_transaction) { "'one'," "'two'," "3);"; - auto stmt = connForInsert.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = connForInsert.createStmt(sql, rdbms::AutocommitMode::ON); stmt->executeNonQuery(); } @@ -330,15 +346,15 @@ TEST_F(cta_rdbms_SqliteStmtTest, isolated_transaction) { "COUNT(*) AS NB_ROWS " "FROM " "TEST;"; - auto stmt = connForSelect.createStmt(sql, Stmt::AutocommitMode::ON); + auto stmt = connForSelect.createStmt(sql, rdbms::AutocommitMode::ON); auto rset = stmt->executeQuery(); - ASSERT_TRUE(rset.next()); - - const uint64_t nbRows = rset.columnUint64("NB_ROWS"); + ASSERT_TRUE(rset->next()); - ASSERT_EQ((uint64_t)1, nbRows); + const auto nbRows = rset->columnOptionalUint64("NB_ROWS"); + ASSERT_TRUE((bool)nbRows); + ASSERT_EQ((uint64_t)1, nbRows.value()); - ASSERT_FALSE(rset.next()); + ASSERT_FALSE(rset->next()); } } diff --git a/rdbms/wrapper/Stmt.cpp b/rdbms/wrapper/Stmt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..be47b1bf5dd63d7b0a47cae8b2a3f15661020270 --- /dev/null +++ b/rdbms/wrapper/Stmt.cpp @@ -0,0 +1,96 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "rdbms/wrapper/Stmt.hpp" + +namespace cta { +namespace rdbms { +namespace wrapper { + +//------------------------------------------------------------------------------ +// constructor +//------------------------------------------------------------------------------ +Stmt::Stmt(const std::string &sql, const AutocommitMode autocommitMode): + m_sql(sql), + m_autocommitMode(autocommitMode), + m_paramNameToIdx(sql) { +} + +//------------------------------------------------------------------------------ +// destructor +//------------------------------------------------------------------------------ +Stmt::~Stmt() throw() { +} + +//------------------------------------------------------------------------------ +// getSql +//------------------------------------------------------------------------------ +const std::string &Stmt::getSql() const { + return m_sql; +} + +//------------------------------------------------------------------------------ +// getAutocommitMode +//------------------------------------------------------------------------------ +AutocommitMode Stmt::getAutocommitMode() const noexcept { + return m_autocommitMode; +} + +//------------------------------------------------------------------------------ +// getParamIdx +//------------------------------------------------------------------------------ +uint32_t Stmt::getParamIdx(const std::string &paramName) const { + return m_paramNameToIdx.getIdx(paramName); +} + +//------------------------------------------------------------------------------ +// getSqlForException +//------------------------------------------------------------------------------ +std::string Stmt::getSqlForException() const { + if(m_sql.length() <= c_maxSqlLenInExceptions) { + return m_sql; + } else { + if(c_maxSqlLenInExceptions >= 3) { + return m_sql.substr(0, c_maxSqlLenInExceptions - 3) + "..."; + } else { + return std::string("...").substr(0, c_maxSqlLenInExceptions); + } + } +} + +//------------------------------------------------------------------------------ +// bindBool +//------------------------------------------------------------------------------ +void Stmt::bindBool(const std::string &paramName, const bool paramValue) { + bindOptionalBool(paramName, paramValue); +} + +//------------------------------------------------------------------------------ +// bindOptionalBool +//------------------------------------------------------------------------------ +void Stmt::bindOptionalBool(const std::string &paramName, const optional<bool> &paramValue) { + if(paramValue) { + bindOptionalUint64(paramName, paramValue.value() ? 1 : 0); + } else { + bindOptionalUint64(paramName, nullopt); + } +} + +} // namespace wrapper +} // namespace rdbms +} // namespace cta diff --git a/rdbms/wrapper/Stmt.hpp b/rdbms/wrapper/Stmt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3ee0a3a6dcafe8de91a61baef0ded6a97953c39f --- /dev/null +++ b/rdbms/wrapper/Stmt.hpp @@ -0,0 +1,221 @@ +/* + * The CERN Tape Archive (CTA) project + * Copyright (C) 2015 CERN + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include "common/optional.hpp" +#include "rdbms/AutocommitMode.hpp" +#include "rdbms/wrapper/ParamNameToIdx.hpp" +#include "rdbms/wrapper/Rset.hpp" + +#include <memory> +#include <stdint.h> +#include <string> + +namespace cta { +namespace rdbms { +namespace wrapper { + +/** + * Abstract class specifying the interface to a database statement. + */ +class Stmt { +public: + + /** + * Constructor. + * + * @param sql The SQL statement. + * @param autocommitMode The autocommit mode of the statement. + */ + Stmt(const std::string &sql, const AutocommitMode autocommitMode); + + /** + * Returns the autocommit mode of the statement. + * + * @return The autocommit mode of the statement. + */ + AutocommitMode getAutocommitMode() const noexcept; + + /** + * Destructor. + */ + virtual ~Stmt() throw() = 0; + + /** + * Deletion of the copy constructor. + */ + Stmt(Stmt &) = delete; + + /** + * Deletion of the move constructor. + */ + Stmt(Stmt &&) = delete; + + /** + * Deletion of the copy assignment operator. + */ + Stmt &operator=(const Stmt &) = delete; + + /** + * Deletion of the move assignment operator. + */ + Stmt &operator=(Stmt &&) = delete; + + /** + * Clears the prepared statement so that it is ready to be reused. + */ + virtual void clear() = 0; + + /** + * Idempotent close() method. The destructor calls this method. + */ + virtual void close() = 0; + + /** + * Returns the SQL statement. + * + * @return The SQL statement. + */ + const std::string &getSql() const; + + /** + * Returns the index of the specified SQL parameter. + * + * @param paramName The name of the SQL parameter. + * @return The index of the SQL parameter. + */ + uint32_t getParamIdx(const std::string &paramName) const; + + /** + * Binds an SQL parameter. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + virtual void bindUint64(const std::string &paramName, const uint64_t paramValue) = 0; + + /** + * Binds an SQL parameter. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + virtual void bindOptionalUint64(const std::string &paramName, const optional<uint64_t> &paramValue) = 0; + + /** + * Binds an SQL parameter. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + void bindBool(const std::string &paramName, const bool paramValue); + + /** + * Binds an SQL parameter. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + void bindOptionalBool(const std::string &paramName, const optional<bool> &paramValue); + + /** + * Binds an SQL parameter of type string. + * + * Please note that this method will throw an exception if the string + * parameter is empty. If a null value is to be bound then the + * bindOptionalString() method should be used. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + virtual void bindString(const std::string &paramName, const std::string &paramValue) = 0; + + /** + * Binds an SQL parameter of type optional-string.
+ * + * Please note that this method will throw an exception if the optional string + * parameter has the empty string as its value. An optional string parameter + * should either have a non-empty string value or no value at all. + * + * @param paramName The name of the parameter. + * @param paramValue The value to be bound. + */ + virtual void bindOptionalString(const std::string &paramName, const optional<std::string> &paramValue) = 0; + + /** + * Executes the statement and returns the result set. + * + * @return The result set. + */ + virtual std::unique_ptr<Rset> executeQuery() = 0; + + /** + * Executes the statement. + */ + virtual void executeNonQuery() = 0; + + /** + * Returns the number of rows affected by the last execution of this + * statement. + * + * @return The number of affected rows. + */ + virtual uint64_t getNbAffectedRows() const = 0; + + /** + * Returns the SQL string to be used in an exception message. The string + * will be clipped at a maximum of c_maxSqlLenInExceptions characters. If the + * string is actually clipped then the three last characters will be + * replaced by an ellipsis of three dots, in other words "...". These 3 + * characters will indicate to the reader of the exception message that the + * SQL statement has been clipped. + * + * @return The SQL string to be used in an exception message. + */ + std::string getSqlForException() const; + +protected: + + /** + * The maximum length an SQL statement can have in an exception error message. + */ + const uint32_t c_maxSqlLenInExceptions = 80; + +private: + + /** + * The SQL statement. + */ + std::string m_sql; + + /** + * The autocommit mode of the statement. + */ + AutocommitMode m_autocommitMode; + + /** + * Map from SQL parameter name to parameter index. + */ + ParamNameToIdx m_paramNameToIdx; + +}; // class Stmt + +} // namespace wrapper +} // namespace rdbms +} // namespace cta diff --git a/scheduler/OStoreDB/MemQueues.cpp b/scheduler/OStoreDB/MemQueues.cpp index 7ce519ecd748006dc2e95120cc1149bca5062824..d60f70f3f20708acd22f6f57ed2bae9d85135cfd 100644 --- a/scheduler/OStoreDB/MemQueues.cpp +++ b/scheduler/OStoreDB/MemQueues.cpp @@ -24,31 +24,46 @@ namespace cta { namespace ostoredb { template<> -void MemQueue<objectstore::ArchiveRequest, objectstore::ArchiveQueue>::specializedAddJobToQueue( - objectstore::ArchiveRequest::JobDump& job, objectstore::ArchiveRequest& request, objectstore::ArchiveQueue& queue) { - auto af = request.getArchiveFile(); - queue.addJob(job, request.getAddressIfSet(), af.archiveFileID, - af.fileSize, request.getMountPolicy(), request.getEntryLog().time); - // Back reference the queue in the job and archive request - job.owner = queue.getAddressIfSet(); - request.setJobOwner(job.copyNb, job.owner); +void MemQueue<objectstore::ArchiveRequest, objectstore::ArchiveQueue>::specializedAddJobsToQueueAndCommit( + std::list<MemQueue<objectstore::ArchiveRequest, objectstore::ArchiveQueue>::JobAndRequest> & jobsToAdd, + objectstore::ArchiveQueue& queue, objectstore::AgentReference & agentReference, log::LogContext & logContext) { + std::list<objectstore::ArchiveQueue::JobToAdd> jtal; + auto queueAddress = queue.getAddressIfSet(); + for (auto & j: jobsToAdd) { + jtal.push_back({j.job, j.request.getAddressIfSet(), j.request.getArchiveFile().archiveFileID, j.request.getArchiveFile().fileSize, + j.request.getMountPolicy(), j.request.getEntryLog().time}); + // We pre-mark (in memory) the request as being owned by the queue.
+ // The actual commit of the request will happen after the queue's, + // so the back reference will be valid. + j.job.owner = queueAddress; + j.request.setJobOwner(j.job.copyNb, j.job.owner); + } + queue.addJobsAndCommit(jtal, agentReference, logContext); } template<> -void MemQueue<objectstore::RetrieveRequest, objectstore::RetrieveQueue>::specializedAddJobToQueue( - objectstore::RetrieveRequest::JobDump& job, objectstore::RetrieveRequest& request, objectstore::RetrieveQueue& queue) { - // We need to find corresponding to the copyNb - for (auto & j: request.getArchiveFile().tapeFiles) { - if (j.second.copyNb == job.copyNb) { - auto criteria = request.getRetrieveFileQueueCriteria(); - queue.addJob(j.second.copyNb, j.second.fSeq, request.getAddressIfSet(), criteria.archiveFile.fileSize, - criteria.mountPolicy, request.getEntryLog().time); - request.setActiveCopyNumber(j.second.copyNb); - request.setOwner(queue.getAddressIfSet()); - goto jobAdded; +void MemQueue<objectstore::RetrieveRequest, objectstore::RetrieveQueue>::specializedAddJobsToQueueAndCommit( + std::list<MemQueue<objectstore::RetrieveRequest, objectstore::RetrieveQueue>::JobAndRequest> & jobsToAdd, + objectstore::RetrieveQueue &queue, objectstore::AgentReference & agentReference, log::LogContext & logContext) { + std::list<objectstore::RetrieveQueue::JobToAdd> jtal; + auto queueAddress = queue.getAddressIfSet(); + for (auto & jta: jobsToAdd) { + // We need to find the job corresponding to the copyNb + auto & job = jta.job; + auto & request = jta.request; + for (auto & j: request.getArchiveFile().tapeFiles) { + if (j.second.copyNb == job.copyNb) { + auto criteria = request.getRetrieveFileQueueCriteria(); + jtal.push_back({j.second.copyNb, j.second.fSeq, request.getAddressIfSet(), criteria.archiveFile.fileSize, + criteria.mountPolicy, request.getEntryLog().time}); + request.setActiveCopyNumber(j.second.copyNb); + request.setOwner(queueAddress); + goto jobAdded; + } } - } jobAdded:; + } + queue.addJobsAndCommit(jtal); } template<> diff --git a/scheduler/OStoreDB/MemQueues.hpp b/scheduler/OStoreDB/MemQueues.hpp index 682fe93dbed7958f032b52e5939f033b1cb63791..81ac26502d6a5fca47aab9d81588de48e2e6ad0f 100644 --- a/scheduler/OStoreDB/MemQueues.hpp +++ b/scheduler/OStoreDB/MemQueues.hpp @@ -178,8 +178,15 @@ private: static std::shared_ptr<SharedQueueLock<Queue, Request>> sharedAddToNewQueue(typename Request::JobDump & job, const std::string & queueIndex, Request & request, OStoreDB & oStoreDB, log::LogContext & logContext, threading::MutexLocker &globalLock); + /** Struct holding the job plus request data */ + struct JobAndRequest { + typename Request::JobDump & job; + Request & request; + }; + /** Helper function handling the difference between archive and retrieve (vid vs tapepool) */ - static void specializedAddJobToQueue(typename Request::JobDump & job, Request & request, Queue & queue); + static void specializedAddJobsToQueueAndCommit(std::list<JobAndRequest> & jobsToAdd, Queue & queue, + objectstore::AgentReference & agentReference, log::LogContext & logContext); /** Helper function updating the cached retrieve queue stats. 
Noop for archive queues */ static void specializedUpdateCachedQueueStats(Queue &queue); @@ -301,20 +308,21 @@ std::shared_ptr<SharedQueueLock<Queue, Request>> MemQueue<Request, Queue>::share qBytesBefore+=j.size; } size_t addedJobs=1; + // Build the list of jobs to add to the queue + std::list<JobAndRequest> jta; // First add the job for this thread - specializedAddJobToQueue(job, request, queue); + jta.push_back({job, request}); // We are done with the queue: release the lock to make helgrind happy. ulq.unlock(); // We do the same for all the queued requests for (auto &maqr: maq->m_requests) { // Add the job - specializedAddJobToQueue(maqr->m_job, maqr->m_request, queue); + jta.push_back({maqr->m_job, maqr->m_request}); addedJobs++; } - double inMemoryQueueProcessTime = timer.secs(utils::Timer::resetCounter); - // We can now commit the multi-request addition to the object store - queue.commit(); - double queueCommitTime = timer.secs(utils::Timer::resetCounter); + // Actually add the jobs. + specializedAddJobsToQueueAndCommit(jta, queue, *oStoreDB.m_agentReference, logContext); + double queueProcessAndCommitTime = timer.secs(utils::Timer::resetCounter); // Update the cache stats in memory as we hold the queue. specializedUpdateCachedQueueStats(queue); double cacheUpdateTime = timer.secs(utils::Timer::resetCounter); @@ -341,10 +349,9 @@ std::shared_ptr<SharedQueueLock<Queue, Request>> MemQueue<Request, Queue>::share .add("addedJobs", addedJobs) .add("waitTime", waitTime) .add("getFetchedQueueTime", getFetchedQueueTime) - .add("inMemoryQueueProcessTime", inMemoryQueueProcessTime) - .add("queueCommitTime", queueCommitTime) + .add("queueProcessAndCommitTime", queueProcessAndCommitTime) .add("cacheUpdateTime", cacheUpdateTime) - .add("totalEnqueueTime", getFetchedQueueTime + inMemoryQueueProcessTime + queueCommitTime + .add("totalEnqueueTime", getFetchedQueueTime + queueProcessAndCommitTime + cacheUpdateTime + timer.secs()); logContext.log(log::INFO, "In MemQueue::sharedAddToNewQueue(): added batch of jobs to the queue."); } diff --git a/scheduler/OStoreDB/OStoreDB.cpp b/scheduler/OStoreDB/OStoreDB.cpp index 870227844f4c113b9919d520fffc421e8bfaaec8..120f8e0e9c2831ff566bf197672f59a8ec76e9fe 100644 --- a/scheduler/OStoreDB/OStoreDB.cpp +++ b/scheduler/OStoreDB/OStoreDB.cpp @@ -108,13 +108,13 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro } // If there are files queued, we create an entry for this tape pool in the // mount candidates list.
- if (aqueue.getJobsSummary().files) { + if (aqueue.getJobsSummary().jobs) { tmdi.potentialMounts.push_back(SchedulerDatabase::PotentialMount()); auto & m = tmdi.potentialMounts.back(); m.tapePool = aqp.tapePool; m.type = cta::common::dataStructures::MountType::Archive; m.bytesQueued = aqueue.getJobsSummary().bytes; - m.filesQueued = aqueue.getJobsSummary().files; + m.filesQueued = aqueue.getJobsSummary().jobs; m.oldestJobStartTime = aqueue.getJobsSummary().oldestJobStartTime; m.priority = aqueue.getJobsSummary().priority; m.maxDrivesAllowed = aqueue.getJobsSummary().maxDrivesAllowed; @@ -304,7 +304,7 @@ void OStoreDB::trimEmptyQueues(log::LogContext& lc) { ArchiveQueue aq(a.address, m_objectStore); ScopedSharedLock aql(aq); aq.fetch(); - if (!aq.dumpJobs().size()) { + if (!aq.getJobsSummary().jobs) { aql.release(); re.removeArchiveQueueAndCommit(a.tapePool, lc); log::ScopedParamContainer params(lc); @@ -318,7 +318,7 @@ void OStoreDB::trimEmptyQueues(log::LogContext& lc) { RetrieveQueue rq(r.address, m_objectStore); ScopedSharedLock rql(rq); rq.fetch(); - if (!rq.dumpJobs().size()) { + if (!rq.getJobsSummary().files) { rql.release(); re.removeRetrieveQueueAndCommit(r.vid, lc); log::ScopedParamContainer params(lc); @@ -354,116 +354,6 @@ std::unique_ptr<SchedulerDatabase::RetrieveMount> OStoreDB::TapeMountDecisionInf //------------------------------------------------------------------------------ OStoreDB::TapeMountDecisionInfoNoLock::~TapeMountDecisionInfoNoLock() {} - -/* Old getMountInfo -//------------------------------------------------------------------------------ -// OStoreDB::getMountInfo() -//------------------------------------------------------------------------------ -std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> - OStoreDB::getMountInfo() { - //Allocate the getMountInfostructure to return. - assertAgentSet(); - std::unique_ptr<TapeMountDecisionInfo> privateRet (new TapeMountDecisionInfo( - m_objectStore, *m_agent)); - TapeMountDecisionInfo & tmdi=*privateRet; - // Get all the tape pools and tapes with queues (potential mounts) - objectstore::RootEntry re(m_objectStore); - objectstore::ScopedSharedLock rel(re); - re.fetch(); - // Take an exclusive lock on the scheduling and fetch it. - tmdi.m_schedulerGlobalLock.reset( - new SchedulerGlobalLock(re.getSchedulerGlobalLock(), m_objectStore)); - tmdi.m_lockOnSchedulerGlobalLock.lock(*tmdi.m_schedulerGlobalLock); - tmdi.m_lockTaken = true; - tmdi.m_schedulerGlobalLock->fetch(); - auto tpl = re.dumpTapePools(); - for (auto tpp=tpl.begin(); tpp!=tpl.end(); tpp++) { - // Get the tape pool object - objectstore::TapePool tpool(tpp->address, m_objectStore); - // debug utility variable - std::string __attribute__((__unused__)) poolName = tpp->tapePool; - objectstore::ScopedSharedLock tpl(tpool); - tpool.fetch(); - // If there are files queued, we create an entry for this tape pool in the - // mount candidates list. 
- if (tpool.getJobsSummary().files) { - tmdi.potentialMounts.push_back(SchedulerDatabase::PotentialMount()); - auto & m = tmdi.potentialMounts.back(); - m.tapePool = tpp->tapePool; - m.type = cta::MountType::ARCHIVE; - m.bytesQueued = tpool.getJobsSummary().bytes; - m.filesQueued = tpool.getJobsSummary().files; - m.oldestJobStartTime = tpool.getJobsSummary().oldestJobStartTime; - m.priority = tpool.getJobsSummary().priority; - - m.mountCriteria.maxFilesQueued = - tpool.getMountCriteriaByDirection().archive.maxFilesQueued; - m.mountCriteria.maxBytesQueued = - tpool.getMountCriteriaByDirection().archive.maxBytesQueued; - m.mountCriteria.maxAge = - tpool.getMountCriteriaByDirection().archive.maxAge; - m.mountCriteria.quota = - tpool.getMountCriteriaByDirection().archive.quota; - m.logicalLibrary = ""; - - } - // For each tape in the pool, list the tapes with work - auto tl = tpool.dumpTapesAndFetchStatus(); - for (auto tp = tl.begin(); tp!= tl.end(); tp++) { - objectstore::Tape t(tp->address, m_objectStore); - objectstore::ScopedSharedLock tl(t); - t.fetch(); - if (t.getJobsSummary().files) { - tmdi.potentialMounts.push_back(PotentialMount()); - auto & m = tmdi.potentialMounts.back(); - m.type = cta::MountType::RETRIEVE; - m.bytesQueued = t.getJobsSummary().bytes; - m.filesQueued = t.getJobsSummary().files; - m.oldestJobStartTime = t.getJobsSummary().oldestJobStartTime; - m.priority = t.getJobsSummary().priority; - m.vid = t.getVid(); - m.logicalLibrary = t.getLogicalLibrary(); - - m.mountCriteria.maxFilesQueued = - tpool.getMountCriteriaByDirection().retrieve.maxFilesQueued; - m.mountCriteria.maxBytesQueued = - tpool.getMountCriteriaByDirection().retrieve.maxBytesQueued; - m.mountCriteria.maxAge = - tpool.getMountCriteriaByDirection().retrieve.maxAge; - m.mountCriteria.quota = - tpool.getMountCriteriaByDirection().retrieve.quota; - m.logicalLibrary = t.getLogicalLibrary(); - } - } - } - // Dedication information comes here - // TODO - // - // Collect information about the existing mounts - objectstore::DriveRegister dr(re.getDriveRegisterAddress(), m_objectStore); - objectstore::ScopedSharedLock drl(dr); - dr.fetch(); - auto dl = dr.dumpDrives(); - using common::DriveStatus; - std::set<int> activeDriveStatuses = { - (int)DriveStatus::Starting, - (int)DriveStatus::Mounting, - (int)DriveStatus::Transferring, - (int)DriveStatus::Unloading, - (int)DriveStatus::Unmounting, - (int)DriveStatus::DrainingToDisk }; - for (auto d=dl.begin(); d!= dl.end(); d++) { - if (activeDriveStatuses.count((int)d->status)) { - tmdi.existingMounts.push_back(ExistingMount()); - tmdi.existingMounts.back().type = d->mountType; - tmdi.existingMounts.back().tapePool = d->currentTapePool; - } - } - std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> ret(std::move(privateRet)); - return ret; -} -*/ - //------------------------------------------------------------------------------ // OStoreDB::queueArchive() //------------------------------------------------------------------------------ @@ -550,8 +440,7 @@ void OStoreDB::queueArchive(const std::string &instanceName, const cta::common:: objectstore::ArchiveQueue aq(*tpa, m_objectStore); ScopedExclusiveLock aql(aq); aq.fetch(); - aq.removeJob(aReq.getAddressIfSet()); - aq.commit(); + aq.removeJobsAndCommit({aReq.getAddressIfSet()}); } aReq.remove(); log::ScopedParamContainer params(logContext); @@ -583,63 +472,6 @@ void OStoreDB::queueArchive(const std::string &instanceName, const cta::common:: logContext.log(log::INFO, "In OStoreDB::queueArchive(): Finished enqueuing 
request."); } -//------------------------------------------------------------------------------ -// OStoreDB::deleteArchiveRequest() -//------------------------------------------------------------------------------ -void OStoreDB::deleteArchiveRequest(const std::string &diskInstanceName, - uint64_t fileId) { - // First of, find the archive request from all the tape pools. - objectstore::RootEntry re(m_objectStore); - objectstore::ScopedSharedLock rel(re); - re.fetch(); - auto aql = re.dumpArchiveQueues(); - rel.release(); - for (auto & aqp: aql) { - objectstore::ArchiveQueue aq(aqp.address, m_objectStore); - ScopedSharedLock aqlock(aq); - aq.fetch(); - auto ajl=aq.dumpJobs(); - aqlock.release(); - for (auto & ajp: ajl) { - objectstore::ArchiveRequest ar(ajp.address, m_objectStore); - ScopedSharedLock arl(ar); - ar.fetch(); - if (ar.getArchiveFile().archiveFileID == fileId) { - // We found a job for the right file Id. - // We now need to dequeue it from all it archive queues (one per job). - // Upgrade the lock to an exclusive one. - arl.release(); - ScopedExclusiveLock arxl(ar); - m_agentReference->addToOwnership(ar.getAddressIfSet(), m_objectStore); - ar.fetch(); - ar.setAllJobsFailed(); - for (auto j:ar.dumpJobs()) { - // Dequeue the job from the queue. - // The owner might not be a queue, in which case the fetch will fail (and it's fine) - try { - // The queue on which we found the job is not locked anymore, so we can re-lock it. - ArchiveQueue aq2(j.owner, m_objectStore); - ScopedExclusiveLock aq2xl(aq2); - aq2.fetch(); - aq2.removeJob(ar.getAddressIfSet()); - aq2.commit(); - } catch (...) {} - ar.setJobOwner(j.copyNb, m_agentReference->getAgentAddress()); - } - ar.remove(); - log::LogContext lc(m_logger); - log::ScopedParamContainer params(lc); - params.add("archiveRequestObject", ar.getAddressIfSet()); - lc.log(log::INFO, "In OStoreDB::deleteArchiveRequest(): delete archive request."); - m_agentReference->removeFromOwnership(ar.getAddressIfSet(), m_objectStore); - // We found and deleted the job: return. - return; - } - } - } - throw NoSuchArchiveRequest("In OStoreDB::deleteArchiveRequest: ArchiveToFileRequest not found"); -} - //------------------------------------------------------------------------------ // OStoreDB::ArchiveToFileRequestCancelation::complete() //------------------------------------------------------------------------------ @@ -668,53 +500,6 @@ OStoreDB::ArchiveToFileRequestCancelation::~ArchiveToFileRequestCancelation() { } } - -//------------------------------------------------------------------------------ -// OStoreDB::getArchiveRequests() -//------------------------------------------------------------------------------ -//std::map<std::string, std::list<ArchiveToTapeCopyRequest> > -// OStoreDB::getArchiveRequests() const { -// objectstore::RootEntry re(m_objectStore); -// objectstore::ScopedSharedLock rel(re); -// re.fetch(); -// std::map<std::string, std::list<ArchiveToTapeCopyRequest> > ret; -// auto aql = re.dumpArchiveQueues(); -// rel.release(); -// for (auto & aqp:aql) { -// objectstore::ArchiveQueue osaq(aqp.address, m_objectStore); -// ScopedSharedLock osaql(osaq); -// osaq.fetch(); -// auto arl = osaq.dumpJobs(); -// osaql.release(); -// for (auto & ar: arl) { -// objectstore::ArchiveRequest osar(ar.address, m_objectStore); -// ScopedSharedLock osarl(osar); -// osar.fetch(); -// // Find which copy number is for this tape pool. 
-// // skip the request if not found -// auto jl = osar.dumpJobs(); -// uint16_t copynb; -// bool copyndFound=false; -// for (auto & j:jl) { -// if (j.tapePool == aqp.tapePool) { -// copynb = j.copyNb; -// copyndFound = true; -// break; -// } -// } -// if (!copyndFound) continue; -// ret[aqp.tapePool].push_back(cta::ArchiveToTapeCopyRequest( -// osar.getDiskFileID(), -// osar.getArchiveFileID(), -// copynb, -// aqp.tapePool, -// osar.getMountPolicy().archivePriority, -// osar.getCreationLog())); -// } -// } -// return ret; -//} - //------------------------------------------------------------------------------ // OStoreDB::getArchiveJobs() //------------------------------------------------------------------------------ @@ -1737,9 +1522,8 @@ std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > OStoreDB::ArchiveMoun double ownershipAdditionTime = 0; double asyncUpdateLaunchTime = 0; double jobsUpdateTime = 0; - double queueProcessTime = 0; + double queueProcessAndCommitTime = 0; double ownershipRemovalTime = 0; - double queueCommitTime = 0; // Find the next files to archive // First, check we should not forcibly go down. In such an occasion, we just find noting to do. // Get drive register @@ -1830,28 +1614,21 @@ std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > OStoreDB::ArchiveMoun log::ScopedParamContainer params(logContext); params.add("tapepool", mountInfo.tapePool) .add("queueObject", aq.getAddressIfSet()) - .add("queueSize", aq.dumpJobs().size()); + .add("queueSize", aq.getJobsSummary().jobs); logContext.log(log::INFO, "In ArchiveMount::getNextJobBatch(): archive queue found."); } - // We should build the list of jobs we intend to grab. We will attempt to + // The queue will give us a list of files to try and grab. We will attempt to // dequeue them in one go, updating jobs in parallel. If some jobs turn out // to not be there really, we will have to do several passes. // We build directly the return value in the process. - auto candidateDumps=aq.dumpJobs(); + auto candidateJobsFromQueue=aq.getCandidateList(bytesRequested, filesRequested, archiveRequestsToSkip); std::list<std::unique_ptr<OStoreDB::ArchiveJob>> candidateJobs; // If we fail to find jobs in one round, we will exit. - while (candidateDumps.size() && currentBytes < bytesRequested && currentFiles < filesRequested) { - auto job=candidateDumps.front(); - candidateDumps.pop_front(); - // If we saw an archive request we could not pop nor cleanup (really bad case), we - // will disregard it for the rest of this getNextJobBatch call. We will re-consider - // in the next call. - if (!archiveRequestsToSkip.count(job.address)) { - currentFiles++; - currentBytes+=job.size; - candidateJobs.emplace_back(new OStoreDB::ArchiveJob(job.address, m_oStoreDB, *this)); - candidateJobs.back()->tapeFile.copyNb = job.copyNb; - } + for (auto & cj: candidateJobsFromQueue.candidates) { + currentFiles++; + currentBytes+=cj.size; + candidateJobs.emplace_back(new OStoreDB::ArchiveJob(cj.address, m_oStoreDB, *this)); + candidateJobs.back()->tapeFile.copyNb = cj.copyNb; } { log::ScopedParamContainer params(logContext); @@ -1978,15 +1755,12 @@ std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > OStoreDB::ArchiveMoun j=candidateJobs.erase(j); } // All (most) jobs are now officially owned by our agent. We can hence remove them from the queue. 
- for (const auto &j: jobsToDequeue) aq.removeJob(j); - queueProcessTime += t.secs(utils::Timer::resetCounter); + aq.removeJobsAndCommit(jobsToDequeue); + queueProcessAndCommitTime += t.secs(utils::Timer::resetCounter); if (jobsToForget.size()) { m_oStoreDB.m_agentReference->removeBatchFromOwnership(jobsToForget, m_oStoreDB.m_objectStore); ownershipRemovalTime += t.secs(utils::Timer::resetCounter); } - // (Possibly intermediate) commit of the queue. We keep the lock for the moment. - aq.commit(); - queueCommitTime += t.secs(utils::Timer::resetCounter); // We can now add the validated jobs to the return value. auto vj = validatedJobs.begin(); while (vj != validatedJobs.end()) { @@ -2034,7 +1808,7 @@ std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > OStoreDB::ArchiveMoun break; // If we had exhausted the queue while selecting the jobs, we stop here, else we can go for another // round. - if (!candidateDumps.size()) + if (!candidateJobsFromQueue.remainingFilesAfterCandidates) break; } catch (cta::exception::Exception & ex) { log::ScopedParamContainer params (logContext); @@ -2072,9 +1846,8 @@ std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > OStoreDB::ArchiveMoun .add("ownershipAdditionTime", ownershipAdditionTime) .add("asyncUpdateLaunchTime", asyncUpdateLaunchTime) .add("jobsUpdateTime", jobsUpdateTime) - .add("queueProcessTime", queueProcessTime) + .add("queueProcessAndCommitTime", queueProcessAndCommitTime) .add("ownershipRemovalTime", ownershipRemovalTime) - .add("queueCommitTime", queueCommitTime) .add("schedulerDbTime", totalTime.secs()); logContext.log(log::INFO, "In ArchiveMount::getNextJobBatch(): jobs retrieval complete."); } @@ -2129,6 +1902,17 @@ const OStoreDB::RetrieveMount::MountInfo& OStoreDB::RetrieveMount::getMountInfo( //------------------------------------------------------------------------------ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMount::getNextJobBatch(uint64_t filesRequested, uint64_t bytesRequested, log::LogContext& logContext) { + utils::Timer t, totalTime; + double driveRegisterCheckTime = 0; + double findQueueTime = 0; + double lockFetchQueueTime = 0; + double emptyQueueCleanupTime = 0; + double jobSelectionTime = 0; + double ownershipAdditionTime = 0; + double asyncUpdateLaunchTime = 0; + double jobsUpdateTime = 0; + double queueProcessAndCommitTime = 0; + double ownershipRemovalTime = 0; // Find the next files to retrieve // First, check we should not forcibly go down. In such an occasion, we just find noting to do. // Get drive register @@ -2151,6 +1935,7 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo params.add("exceptionMessage", ex.getMessageValue()); logContext.log(log::INFO, "In OStoreDB::RetrieveMount::getNextJobBatch(): failed to check up/down status."); } + driveRegisterCheckTime = t.secs(utils::Timer::resetCounter); } // Now, we should repeatedly fetch jobs from the queue until we fulfilled the request or there is nothing to get form the // queue anymore. @@ -2167,8 +1952,7 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo uint64_t beforeFiles=currentFiles; // Try and get access to a queue. 
objectstore::RootEntry re(m_oStoreDB.m_objectStore); - objectstore::ScopedSharedLock rel(re); - re.fetch(); + re.fetchNoLock(); std::string rqAddress; auto rql = re.dumpRetrieveQueues(); for (auto & rqp : rql) { @@ -2179,16 +1963,16 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo // try and lock the archive queue. Any failure from here on means the end of the getting jobs. objectstore::RetrieveQueue rq(rqAddress, m_oStoreDB.m_objectStore); objectstore::ScopedExclusiveLock rqLock; + findQueueTime += t.secs(utils::Timer::resetCounter); try { try { rqLock.lock(rq); - rel.release(); rq.fetch(); + lockFetchQueueTime += t.secs(utils::Timer::resetCounter); } catch (cta::exception::Exception & ex) { // The queue is now absent. We can remove its reference in the root entry. // A new queue could have been added in the mean time, and be non-empty. // We will then fail to remove from the RootEntry (non-fatal). - if (rel.isLocked()) rel.release(); ScopedExclusiveLock rexl(re); re.fetch(); try { @@ -2205,6 +1989,7 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo .add("Message", ex.getMessageValue()); logContext.log(log::INFO, "In RetrieveMount::getNextJobBatch(): could not de-referenced missing queue from root entry"); } + emptyQueueCleanupTime += t.secs(utils::Timer::resetCounter); continue; } // We now have the queue. @@ -2212,28 +1997,21 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo log::ScopedParamContainer params(logContext); params.add("vid", mountInfo.tapePool) .add("queueObject", rq.getAddressIfSet()) - .add("queueSize", rq.dumpJobs().size()); + .add("queueSize", rq.getJobsSummary().files); logContext.log(log::INFO, "In RetrieveMount::getNextJobBatch(): retrieve queue found."); } // We should build the list of jobs we intend to grab. We will attempt to // dequeue them in one go, updating jobs in parallel. If some jobs turn out // to not be there really, we will have to do several passes. // We build directly the return value in the process. - auto candidateDumps=rq.dumpJobs(); + auto candidateJobsFromQueue=rq.getCandidateList(bytesRequested, filesRequested, retrieveRequestsToSkip); std::list<std::unique_ptr<OStoreDB::RetrieveJob>> candidateJobs; // If we fail to find jobs in one round, we will exit. - while (candidateDumps.size() && currentBytes < bytesRequested && currentFiles < filesRequested) { - auto job=candidateDumps.front(); - candidateDumps.pop_front(); - // If we saw an archive request we could not pop nor cleanup (really bad case), we - // will disregard it for the rest of this getNextJobBatch call. We will re-consider - // in the next call. 
- if (!retrieveRequestsToSkip.count(job.address)) { - currentFiles++; - currentBytes+=job.size; - candidateJobs.emplace_back(new OStoreDB::RetrieveJob(job.address, m_oStoreDB, *this)); - candidateJobs.back()->selectedCopyNb = job.copyNb; - } + for (auto & cj: candidateJobsFromQueue.candidates) { + currentFiles++; + currentBytes+=cj.size; + candidateJobs.emplace_back(new OStoreDB::RetrieveJob(cj.address, m_oStoreDB, *this)); + candidateJobs.back()->selectedCopyNb = cj.copyNb; } { log::ScopedParamContainer params(logContext); @@ -2246,17 +2024,20 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo .add("requestedBytes", bytesRequested); logContext.log(log::INFO, "In RetrieveMount::getNextJobBatch(): will process a set of candidate jobs."); } + jobSelectionTime += t.secs(utils::Timer::resetCounter); // We now have a batch of jobs to try and dequeue. Should not be empty. // First add the jobs to the owned list of the agent. std::list<std::string> addedJobs; for (const auto &j: candidateJobs) addedJobs.emplace_back(j->m_retrieveRequest.getAddressIfSet()); m_oStoreDB.m_agentReference->addBatchToOwnership(addedJobs, m_oStoreDB.m_objectStore); + ownershipAdditionTime += t.secs(utils::Timer::resetCounter); // We can now attempt to switch the ownership of the jobs. Depending on the type of failure (if any) we // will adapt the rest. // First, start the parallel updates of jobs std::list<std::unique_ptr<objectstore::RetrieveRequest::AsyncOwnerUpdater>> jobUpdates; for (const auto &j: candidateJobs) jobUpdates.emplace_back( j->m_retrieveRequest.asyncUpdateOwner(j->selectedCopyNb, m_oStoreDB.m_agentReference->getAgentAddress(), rqAddress)); + asyncUpdateLaunchTime += t.secs(utils::Timer::resetCounter); // Now run through the results of the asynchronous updates. Non-sucess results come in the form of exceptions. std::list<std::string> jobsToForget; // The jobs either absent or not owned, for which we should just remove references (agent). std::list<std::string> jobsToDequeue; // The jobs that should not be queued anymore. All of them indeed (invalid or successfully poped). @@ -2341,15 +2122,18 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo currentFiles--; currentBytes-=(*j)->archiveFile.fileSize; } + jobsUpdateTime += t.secs(utils::Timer::resetCounter); // In all cases: move to the nexts. ju=jobUpdates.erase(ju); j=candidateJobs.erase(j); } // All (most) jobs are now officially owned by our agent. We can hence remove them from the queue. - for (const auto &j: jobsToDequeue) rq.removeJob(j); - if (jobsToForget.size()) m_oStoreDB.m_agentReference->removeBatchFromOwnership(jobsToForget, m_oStoreDB.m_objectStore); - // (Possibly intermediate) commit of the queue. We keep the lock for the moment. - rq.commit(); + rq.removeJobsAndCommit(jobsToDequeue); + ownershipRemovalTime += t.secs(utils::Timer::resetCounter); + if (jobsToForget.size()) { + m_oStoreDB.m_agentReference->removeBatchFromOwnership(jobsToForget, m_oStoreDB.m_objectStore); + ownershipRemovalTime += t.secs(utils::Timer::resetCounter); + } // We can now add the validated jobs to the return value. auto vj = validatedJobs.begin(); while (vj != validatedJobs.end()) { @@ -2357,7 +2141,7 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo vj=validatedJobs.erase(vj); } // Before going for another round, we can release the queue and delete it if we emptied it. 
- auto remainingJobs=rq.dumpJobs().size(); + auto remainingJobs=rq.getJobsSummary().files; rqLock.release(); // If the queue is empty, we can get rid of it. if (!remainingJobs) { @@ -2397,7 +2181,7 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo break; // If we had exhausted the queue while selecting the jobs, we stop here, else we can go for another // round. - if (!candidateDumps.size()) + if (!candidateJobsFromQueue.remainingFilesAfterCandidates) break; } catch (cta::exception::Exception & ex) { log::ScopedParamContainer params (logContext); @@ -2426,7 +2210,18 @@ std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob> > OStoreDB::RetrieveMo log::ScopedParamContainer params(logContext); params.add("tapepool", mountInfo.tapePool) .add("files", nFiles) - .add("bytes", nBytes); + .add("bytes", nBytes) + .add("driveRegisterCheckTime", driveRegisterCheckTime) + .add("findQueueTime", findQueueTime) + .add("lockFetchQueueTime", lockFetchQueueTime) + .add("emptyQueueCleanupTime", emptyQueueCleanupTime) + .add("jobSelectionTime", jobSelectionTime) + .add("ownershipAdditionTime", ownershipAdditionTime) + .add("asyncUpdateLaunchTime", asyncUpdateLaunchTime) + .add("jobsUpdateTime", jobsUpdateTime) + .add("queueProcessAndCommitTime", queueProcessAndCommitTime) + .add("ownershipRemovalTime", ownershipRemovalTime) + .add("schedulerDbTime", totalTime.secs()); logContext.log(log::INFO, "In RetrieveMount::getNextJobBatch(): jobs retrieval complete."); } // We can construct the return value. @@ -2609,9 +2404,10 @@ void OStoreDB::ArchiveJob::fail(log::LogContext & lc) { auto jl = m_archiveRequest.dumpJobs(); for (auto & j:jl) { if (j.copyNb == tapeFile.copyNb) { - aq.addJobIfNecessary(j, m_archiveRequest.getAddressIfSet(), m_archiveRequest.getArchiveFile().archiveFileID, - m_archiveRequest.getArchiveFile().fileSize, m_archiveRequest.getMountPolicy(), m_archiveRequest.getEntryLog().time); - aq.commit(); + std::list<objectstore::ArchiveQueue::JobToAdd> jta; + jta.push_back({j, m_archiveRequest.getAddressIfSet(), m_archiveRequest.getArchiveFile().archiveFileID, + m_archiveRequest.getArchiveFile().fileSize, m_archiveRequest.getMountPolicy(), m_archiveRequest.getEntryLog().time}); + aq.addJobsIfNecessaryAndCommit(jta, *m_oStoreDB.m_agentReference, lc); aqlock.release(); // We have a pointer to the job, we can change the job ownership m_archiveRequest.setJobOwner(tapeFile.copyNb, aq.getAddressIfSet()); @@ -2757,7 +2553,9 @@ void OStoreDB::RetrieveJob::fail(log::LogContext &logContext) { auto & af=rfqc.archiveFile; auto & tf = af.tapeFiles.at(bestCopyNb); auto sr = m_retrieveRequest.getSchedulerRequest(); - rq.addJobIfNecessary(bestCopyNb, tf.fSeq, m_retrieveRequest.getAddressIfSet(), af.fileSize, rfqc.mountPolicy, sr.creationLog.time); + std::list<objectstore::RetrieveQueue::JobToAdd> jta; + jta.push_back({bestCopyNb, tf.fSeq, m_retrieveRequest.getAddressIfSet(), af.fileSize, rfqc.mountPolicy, sr.creationLog.time}); + rq.addJobsIfNecessaryAndCommit(jta); m_retrieveRequest.setOwner(rq.getAddressIfSet()); m_retrieveRequest.commit(); // We do not own the request anymore diff --git a/scheduler/OStoreDB/OStoreDB.hpp b/scheduler/OStoreDB/OStoreDB.hpp index 992f6d8db4d6ace16e9d8258f1efe054d2cb1956..f9d7e8e3a46b660dffb1671ae2122f1637621a03 100644 --- a/scheduler/OStoreDB/OStoreDB.hpp +++ b/scheduler/OStoreDB/OStoreDB.hpp @@ -204,10 +204,8 @@ public: void queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest &request, const 
cta::common::dataStructures::ArchiveFileQueueCriteria &criteria, log::LogContext &logContext) override; - - CTA_GENERATE_EXCEPTION_CLASS(NoSuchArchiveRequest); + CTA_GENERATE_EXCEPTION_CLASS(ArchiveRequestAlreadyDeleted); - virtual void deleteArchiveRequest(const std::string &diskInstanceName, uint64_t fileId) override; class ArchiveToFileRequestCancelation: public SchedulerDatabase::ArchiveToFileRequestCancelation { public: diff --git a/scheduler/OStoreDB/OStoreDBFactory.hpp b/scheduler/OStoreDB/OStoreDBFactory.hpp index 5579662c32d7fb2f2ded1d9969ec66c8e8cb07fa..6aeb7f25e3f5ea8370d85637e81a7cb27ab3af52 100644 --- a/scheduler/OStoreDB/OStoreDBFactory.hpp +++ b/scheduler/OStoreDB/OStoreDBFactory.hpp @@ -84,11 +84,6 @@ public: return m_OStoreDB.queueArchive(instanceName, request, criteria, logContext); } - - void deleteArchiveRequest(const std::string &diskInstanceName, uint64_t archiveFileId) override { - m_OStoreDB.deleteArchiveRequest(diskInstanceName, archiveFileId); - } - void deleteRetrieveRequest(const common::dataStructures::SecurityIdentity& cliIdentity, const std::string& remoteFile) override { m_OStoreDB.deleteRetrieveRequest(cliIdentity, remoteFile); } diff --git a/scheduler/SchedulerDatabase.hpp b/scheduler/SchedulerDatabase.hpp index f9e3062ed72225dc2edaee6264fd629a3e9697d4..6fe191d78599606b58297bf865e6b492f5df5769 100644 --- a/scheduler/SchedulerDatabase.hpp +++ b/scheduler/SchedulerDatabase.hpp @@ -126,16 +126,6 @@ public: virtual std::list<cta::common::dataStructures::ArchiveJob> getArchiveJobs( const std::string &tapePoolName) const = 0; - /** - * Deletes the specified archive request. - * - * @param archiveFile The ID of the destination file within the - * archive catalogue. - */ - virtual void deleteArchiveRequest( - const std::string &diskInstanceName, - uint64_t archiveFileId) = 0; - /* * Subclass allowing the tracking and automated cleanup of a * ArchiveToFile requests on the SchdulerDB for deletion. 
diff --git a/scheduler/SchedulerTest.cpp b/scheduler/SchedulerTest.cpp index fe45504f62be2440f6242c14cf7811fb813c5ad5..743b51e6f5a547e1ab0a6552c0ffaa9712b3e29c 100644 --- a/scheduler/SchedulerTest.cpp +++ b/scheduler/SchedulerTest.cpp @@ -101,8 +101,10 @@ public: m_db = param.dbFactory.create(); const uint64_t nbConns = 1; const uint64_t nbArchiveFileListingConns = 1; + const uint32_t maxTriesToConnect = 1; //m_catalogue = cta::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns); - m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns); + m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns, + maxTriesToConnect); m_scheduler = cta::make_unique<Scheduler>(*m_catalogue, *m_db, 5, 2*1000*1000); } diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp index ecfe79f902d1a9ae163009af58110aecc9183e5a..e02b2431debb129517ca06f331f48d8cbd95dace 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp @@ -118,8 +118,10 @@ public: m_db = param.dbFactory.create(); const uint64_t nbConns = 1; const uint64_t nbArchiveFileListingConns = 1; + const uint32_t maxTriesToConnect = 1; //m_catalogue = cta::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns); - m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns); + m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns, + maxTriesToConnect); m_scheduler = cta::make_unique<Scheduler>(*m_catalogue, *m_db, 5, 2*1000*1000); strncpy(m_tmpDir, "/tmp/DataTransferSessionTestXXXXXX", sizeof(m_tmpDir)); diff --git a/tests/cta-valgrindUnitTests.sh.in b/tests/cta-valgrindUnitTests.sh.in index b65af459392372df77d78c5925c2f7694dd98e61..6e9fc6262c360ec8625374bd9d5ced91a550a605 100644 --- a/tests/cta-valgrindUnitTests.sh.in +++ b/tests/cta-valgrindUnitTests.sh.in @@ -5,18 +5,16 @@ set -e /usr/bin/cta-unitTests-multiProcess -valgrind --track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes \ - --error-exitcode=1 --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/valgrind.suppr \ +valgrind @VALGRIND_OPTS@ --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/valgrind.suppr \ /usr/bin/cta-unitTests -valgrind --tool=helgrind -v --demangle=yes --gen-suppressions=all --conflict-cache-size=10000000 \ - --error-exitcode=1 --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/helgrind.suppr \ +valgrind --tool=helgrind @HELGRIND_OPTS@ \ + --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/helgrind.suppr \ /usr/bin/cta-unitTests -valgrind --track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes \ - --error-exitcode=1 --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/valgrind.suppr \ +valgrind @VALGRIND_OPTS@ --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/valgrind.suppr \ --child-silent-after-fork=yes /usr/bin/cta-unitTests-multiProcess -valgrind --tool=helgrind -v --demangle=yes --gen-suppressions=all --conflict-cache-size=10000000 \ - --error-exitcode=1 --suppressions=/usr/share/cta-@CTA_VERSION@/unittest/helgrind.suppr \ +valgrind --tool=helgrind @HELGRIND_OPTS@ \ + 
--suppressions=/usr/share/cta-@CTA_VERSION@/unittest/helgrind.suppr \ /usr/bin/cta-unitTests-multiProcess diff --git a/tests/helgrind.suppr b/tests/helgrind.suppr index 4644eb969cb193d0418855d44385ff60914dfdf2..45d6d9bf4a559cbb034789e0775aafb540348dbc 100644 --- a/tests/helgrind.suppr +++ b/tests/helgrind.suppr @@ -580,4 +580,17 @@ fun:_ZNSt10shared_ptrINSt6thread10_Impl_baseEED1Ev fun:_ZNSt6thread10_Impl_baseD1Ev ... +} + +{ + pthread_create_stack_creation + Helgrind:Race + fun:memset + fun:get_cached_stack + fun:allocate_stack + fun:pthread_create@@GLIBC_2.2.5 + fun:pthread_create_WRK + fun:pthread_create@* + fun:__gthread_create + ... } \ No newline at end of file
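
For reference, here is a minimal sketch (not part of the patch) of how the reworked rdbms wrapper API introduced above is driven, following the pattern of the updated SqliteStmtTest cases: createStmt() now takes rdbms::AutocommitMode, executeQuery() returns a std::unique_ptr to the wrapper Rset, and the column getters return cta::optional values. The header location of SqliteConn is an assumption inferred from the namespace layout in this diff.

// Sketch only: exercises the reworked wrapper API the way the updated
// SqliteStmtTest does. The SqliteConn include path is assumed; the other
// names are taken from the hunks above.
#include "rdbms/AutocommitMode.hpp"
#include "rdbms/wrapper/SqliteConn.hpp"  // assumed header location

#include <iostream>

int main() {
  using namespace cta;
  using namespace cta::rdbms::wrapper;

  // A memory resident database, as used by the unit tests.
  SqliteConn conn(":memory:");

  {
    // Statements now take rdbms::AutocommitMode instead of Stmt::AutocommitMode.
    auto stmt = conn.createStmt("CREATE TABLE TEST(COL1 TEXT, COL2 TEXT, COL3 INTEGER);",
      rdbms::AutocommitMode::ON);
    stmt->executeNonQuery();
  }

  {
    auto stmt = conn.createStmt(
      "INSERT INTO TEST(COL1, COL2, COL3) VALUES(:COL1, :COL2, :COL3);",
      rdbms::AutocommitMode::ON);
    stmt->bindString(":COL1", "one");
    stmt->bindString(":COL2", "two");
    stmt->bindUint64(":COL3", 3);
    stmt->executeNonQuery();
  }

  {
    auto stmt = conn.createStmt("SELECT COL1, COL2, COL3 FROM TEST;",
      rdbms::AutocommitMode::ON);
    // executeQuery() now returns a std::unique_ptr<Rset>, hence the arrow.
    auto rset = stmt->executeQuery();
    while (rset->next()) {
      // Column getters return cta::optional values: test before dereferencing.
      const auto col1 = rset->columnOptionalString("COL1");
      const auto col3 = rset->columnOptionalUint64("COL3");
      if (col1 && col3) {
        std::cout << col1.value() << " " << col3.value() << std::endl;
      }
    }
  }
  return 0;
}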
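
The clipping rule implemented by Stmt::getSqlForException() can also be illustrated in isolation. The standalone snippet below (again not part of the patch, using a local helper rather than the real class) reproduces the documented behaviour: SQL up to the limit is returned unchanged, longer SQL is truncated with its last three characters replaced by "...", and the default limit of 80 mirrors c_maxSqlLenInExceptions.

// Standalone illustration of the SQL clipping rule used for exception messages.
#include <iostream>
#include <string>

std::string clipSqlForException(const std::string &sql, const std::string::size_type maxLen = 80) {
  if (sql.length() <= maxLen) {
    return sql;                               // short SQL is returned unchanged
  }
  if (maxLen >= 3) {
    return sql.substr(0, maxLen - 3) + "...";  // clipped, ending in an ellipsis
  }
  return std::string("...").substr(0, maxLen); // degenerate tiny limits
}

int main() {
  const std::string shortSql = "SELECT 1;";
  const std::string longSql(200, 'X');
  std::cout << clipSqlForException(shortSql) << std::endl;          // printed unchanged
  std::cout << clipSqlForException(longSql).length() << std::endl;  // 80
  return 0;
}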
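
Finally, the MemQueues change replaces the old pattern of one specializedAddJobToQueue() call per request followed by a separate queue.commit() with a single specializedAddJobsToQueueAndCommit() that batches every job and commits the queue once. The toy model below (hypothetical stand-in types, not the real objectstore classes) shows the shape of that change: jobs are gathered first and the expensive commit happens exactly once per batch.

// Toy model of the batched add-and-commit pattern introduced in MemQueues.cpp.
// All types here are hypothetical stand-ins for illustration only.
#include <cstdint>
#include <iostream>
#include <list>
#include <string>

struct JobToAdd { std::string requestAddress; uint64_t copyNb; };

class ToyQueue {
public:
  // One commit for the whole batch, instead of addJob() per job plus commit().
  void addJobsAndCommit(const std::list<JobToAdd> &jobs) {
    for (const auto &j : jobs) {
      m_jobs.push_back(j);
    }
    m_commits++;  // stands in for a single (expensive) object-store round trip
  }
  std::size_t jobCount() const { return m_jobs.size(); }
  std::size_t commitCount() const { return m_commits; }
private:
  std::list<JobToAdd> m_jobs;
  std::size_t m_commits = 0;
};

int main() {
  ToyQueue queue;
  std::list<JobToAdd> batch;
  // Gather the job for this thread plus all queued requests first...
  for (uint64_t copyNb = 1; copyNb <= 3; copyNb++) {
    batch.push_back({"archiveRequest-" + std::to_string(copyNb), copyNb});
  }
  // ...then add and commit them in one go.
  queue.addJobsAndCommit(batch);
  std::cout << queue.jobCount() << " jobs, " << queue.commitCount()
            << " commit" << std::endl;  // 3 jobs, 1 commit
  return 0;
}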