diff --git a/.gitignore b/.gitignore
index bf4501f375f518b8f081500a2e15023454ac33b1..1c3bda0ddf2f1c316c1021ca0bddc5ae21ef5d91 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ nbproject/
 .cproject
 .settings/
 workbench.xmi
+.vscode/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 489c72c913368e650beaae8aa4b1823594035916..94bf760e170e715267053a8d6fb4f9f56d4af2dd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -122,6 +122,46 @@ cta_valgrind:
   tags:
     - docker
 
+cta_rdbms_unit_tests_postgresql:
+  except:
+    - tags
+  stage: test
+  variables:
+    CTAREPODIR: /tmp/repo
+  image: gitlab-registry.cern.ch/linuxsupport/cc7-base
+  script:
+    - cp -f continuousintegration/docker/ctafrontend/cc7/etc/yum.repos.d/* /etc/yum.repos.d/
+    - yum -y install yum-plugin-priorities createrepo
+    - cp -f continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list /etc/yum/pluginconf.d/
+    - mkdir -p ${CTAREPODIR}; cp -r build_rpm/RPM ${CTAREPODIR}
+    - createrepo ${CTAREPODIR}; echo -e "[cta-artifacts]\nname=CTA artifacts\nbaseurl=file://${CTAREPODIR}\ngpgcheck=0\nenabled=1\npriority=2" > /etc/yum.repos.d/cta-artifacts.repo
+    - yum -y --nogpgcheck install cta-systemtests cta-debuginfo sqlite-debuginfo --enablerepo=debug
+    - yum -y install cta-catalogueutils
+    - yum -y install rh-postgresql96-postgresql-server
+    - POSTGRESQL_DATA_DIR=/usr/local/cta_test_pgsql_data
+    - POSTGRESQL_LOG_DIR=/var/log/postgres
+    - echo POSTGRESQL_DATA_DIR=${POSTGRESQL_DATA_DIR}
+    - echo POSTGRESQL_LOG_DIR=${POSTGRESQL_LOG_DIR}
+    - mkdir -p ${POSTGRESQL_DATA_DIR}
+    - chown -R postgres:postgres ${POSTGRESQL_DATA_DIR}
+    - mkdir -p ${POSTGRESQL_LOG_DIR}
+    - chown -R postgres:postgres ${POSTGRESQL_LOG_DIR}
+    - export LD_LIBRARY_PATH=/opt/rh/rh-postgresql96/root/usr/lib64
+    - POSTGRES_BIN=/opt/rh/rh-postgresql96/root/usr/bin
+    - echo POSTGRES_BIN=${POSTGRES_BIN}
+    - runuser -u postgres -- ${POSTGRES_BIN}/initdb -D ${POSTGRESQL_DATA_DIR}
+    - runuser -u postgres -- ${POSTGRES_BIN}/pg_ctl start -w -t 10 -D ${POSTGRESQL_DATA_DIR} -l ${POSTGRESQL_LOG_DIR}/cta_test_postgres.log
+    - runuser -u postgres -- ${POSTGRES_BIN}/createdb cta
+    - runuser -u postgres -- ${POSTGRES_BIN}/createuser -E cta
+    - CTA_CATALOGUE_CONF=/etc/cta/cta-catalogue.conf
+    - echo CTA_CATALOGUE_CONF=${CTA_CATALOGUE_CONF}
+    - echo 'postgresql:postgresql://cta@localhost/cta' > ${CTA_CATALOGUE_CONF}
+    - /usr/bin/cta-catalogue-schema-create ${CTA_CATALOGUE_CONF}
+    - /usr/bin/cta-rdbmsUnitTests ${CTA_CATALOGUE_CONF}
+    - runuser -u postgres -- ${POSTGRES_BIN}/pg_ctl stop -D ${POSTGRESQL_DATA_DIR}
+
+  tags:
+    - docker
 
 ctageneric_docker:
   except:
@@ -136,7 +176,6 @@ ctageneric_docker:
     TO: gitlab-registry.cern.ch/cta/ctageneric:${CI_PIPELINE_ID}
     DOCKER_FILE: continuousintegration/docker/ctafrontend/cc7/ci_runner/Dockerfile
 
-
 ctageneric_docker_rename:
   except:
     - tags
@@ -151,7 +190,6 @@ ctageneric_docker_rename:
     - cd continuousintegration/ci_helpers/
     - ./rename_tag.sh
 
-
 archiveretrieve:
   except:
     - tags
@@ -191,7 +229,7 @@ nightly1:
   only:
     - triggers
   script:
-    - echo "Running  nightly tests"
+    - echo "Running nightly tests"
     - export NAMESPACE="archiveretrieve-${CTA_BUILD_ID}-$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
     - cd continuousintegration/orchestration/; ./run_systemtest.sh -n ${NAMESPACE} -p ${CI_PIPELINE_ID} -s tests/test_nightly1.sh -O -D
   artifacts:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 77f6db411a432a4c43ab1dabce6c64d6015dae31..fbe51177c9e334fa5d415b4ca626910171bb757c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -227,9 +227,7 @@ add_custom_target(helgrind
 add_custom_target(shortunittests
   tests/cta-unitTests
   COMMAND tests/cta-unitTests-multiProcess
-  COMMAND echo ${PROJECT_SOURCE_DIR}/python/eosfstgcd/test_ctafstgcd.py
-  COMMAND ${PROJECT_SOURCE_DIR}/python/eosfstgcd/test_ctafstgcd.py
-  
+
   DEPENDS tests/cta-unitTests tests/cta-unitTests-multiProcess
   COMMENT "Running unit tests" VERBATIM)
 
diff --git a/catalogue/CMakeLists.txt b/catalogue/CMakeLists.txt
index 58d78b5c4606c1404d09703a4d8910159dc8f885..3e6cd3b5176bf653d2ef4c647b15a93f389b245d 100644
--- a/catalogue/CMakeLists.txt
+++ b/catalogue/CMakeLists.txt
@@ -78,6 +78,9 @@ set (CATALOGUE_LIB_SRC_FILES
   UserSpecifiedAnEmptyStringVendor.cpp
   UserSpecifiedAnEmptyStringVid.cpp
   UserSpecifiedAnEmptyStringVo.cpp
+  UserSpecifiedAnEmptyTapePool.cpp
+  UserSpecifiedStorageClassUsedByArchiveFiles.cpp
+  UserSpecifiedStorageClassUsedByArchiveRoutes.cpp
   UserSpecifiedAZeroCapacity.cpp
   UserSpecifiedAZeroCopyNb.cpp)
 
@@ -166,7 +169,8 @@ add_custom_command(OUTPUT PostgresCatalogueSchema.cpp
 set(IN_MEMORY_CATALOGUE_UNIT_TESTS_LIB_SRC_FILES
   CatalogueTest.cpp
   InMemoryCatalogueTest.cpp
-  InMemoryVersionOfCatalogueTest.cpp)
+  InMemoryVersionOfCatalogueTest.cpp
+  TapeItemWrittenPointerTest.cpp)
 
 add_library (ctainmemorycatalogueunittests SHARED
   ${IN_MEMORY_CATALOGUE_UNIT_TESTS_LIB_SRC_FILES})
diff --git a/catalogue/CatalogueTest.cpp b/catalogue/CatalogueTest.cpp
index 9488ea21c34c5bc54db96755b48386ff6ac6fa55..ad6699eada78f1f91aa34ee39aae9cd1fa99a4b6 100644
--- a/catalogue/CatalogueTest.cpp
+++ b/catalogue/CatalogueTest.cpp
@@ -39,8 +39,11 @@
 #include "catalogue/UserSpecifiedAnEmptyStringVendor.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringVid.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringVo.hpp"
+#include "catalogue/UserSpecifiedAnEmptyTapePool.hpp"
 #include "catalogue/UserSpecifiedAZeroCapacity.hpp"
 #include "catalogue/UserSpecifiedAZeroCopyNb.hpp"
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp"
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp"
 #include "common/exception/Exception.hpp"
 #include "common/exception/UserError.hpp"
 #include "common/make_unique.hpp"
@@ -1031,6 +1034,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTapePool_notEmpty) {
     ASSERT_EQ(0, pool.nbPhysicalFiles);
   }
 
+  ASSERT_THROW(m_catalogue->deleteTapePool(tapePoolName), catalogue::UserSpecifiedAnEmptyTapePool);
   ASSERT_THROW(m_catalogue->deleteTapePool(tapePoolName), exception::UserError);
 }
 
@@ -2050,7 +2054,11 @@ TEST_P(cta_catalogue_CatalogueTest, createArchiveRoute_deleteStorageClass) {
     route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  ASSERT_THROW(m_catalogue->deleteStorageClass(storageClass.diskInstance, storageClass.name), exception::Exception);
+  ASSERT_THROW(m_catalogue->deleteStorageClass(storageClass.diskInstance, storageClass.name),
+    catalogue::UserSpecifiedStorageClassUsedByArchiveRoutes);
+
+  ASSERT_THROW(m_catalogue->deleteStorageClass(storageClass.diskInstance, storageClass.name),
+    exception::UserError);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, modifyArchiveRouteTapePoolName) {
@@ -10218,6 +10226,276 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
   }
 }
 
+TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_1_tape_copy) {
+  using namespace cta;
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
+    {
+      auto it = vidToTape.find(vid1);
+      const common::dataStructures::Tape &tape = it->second;
+      ASSERT_EQ(vid1, tape.vid);
+      ASSERT_EQ(mediaType, tape.mediaType);
+      ASSERT_EQ(vendor, tape.vendor);
+      ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+      ASSERT_EQ(tapePoolName, tape.tapePoolName);
+      ASSERT_EQ(vo, tape.vo);
+      ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+      ASSERT_TRUE(disabledValue == tape.disabled);
+      ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
+      ASSERT_EQ(comment, tape.comment);
+      ASSERT_FALSE(tape.labelLog);
+      ASSERT_FALSE(tape.lastReadLog);
+      ASSERT_FALSE(tape.lastWriteLog);
+
+      const common::dataStructures::EntryLog creationLog = tape.creationLog;
+      ASSERT_EQ(m_admin.username, creationLog.username);
+      ASSERT_EQ(m_admin.host, creationLog.host);
+
+      const common::dataStructures::EntryLog lastModificationLog =
+        tape.lastModificationLog;
+      ASSERT_EQ(creationLog, lastModificationLog);
+    }
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = "disk_instance";
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP = cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet);
+
+  {
+    catalogue::TapeSearchCriteria searchCriteria;
+    searchCriteria.vid = file1Written.vid;
+    std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes(searchCriteria);
+    ASSERT_EQ(1, tapes.size());
+    const common::dataStructures::Tape &tape = tapes.front();
+    ASSERT_EQ(1, tape.lastFSeq);
+  }
+
+  {
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+}
+
+TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_1_tape_copy_deleteStorageClass) {
+  using namespace cta;
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
+    {
+      auto it = vidToTape.find(vid1);
+      const common::dataStructures::Tape &tape = it->second;
+      ASSERT_EQ(vid1, tape.vid);
+      ASSERT_EQ(mediaType, tape.mediaType);
+      ASSERT_EQ(vendor, tape.vendor);
+      ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+      ASSERT_EQ(tapePoolName, tape.tapePoolName);
+      ASSERT_EQ(vo, tape.vo);
+      ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+      ASSERT_TRUE(disabledValue == tape.disabled);
+      ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
+      ASSERT_EQ(comment, tape.comment);
+      ASSERT_FALSE(tape.labelLog);
+      ASSERT_FALSE(tape.lastReadLog);
+      ASSERT_FALSE(tape.lastWriteLog);
+
+      const common::dataStructures::EntryLog creationLog = tape.creationLog;
+      ASSERT_EQ(m_admin.username, creationLog.username);
+      ASSERT_EQ(m_admin.host, creationLog.host);
+
+      const common::dataStructures::EntryLog lastModificationLog =
+        tape.lastModificationLog;
+      ASSERT_EQ(creationLog, lastModificationLog);
+    }
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = "disk_instance";
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP = cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet);
+
+  {
+    catalogue::TapeSearchCriteria searchCriteria;
+    searchCriteria.vid = file1Written.vid;
+    std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes(searchCriteria);
+    ASSERT_EQ(1, tapes.size());
+    const common::dataStructures::Tape &tape = tapes.front();
+    ASSERT_EQ(1, tape.lastFSeq);
+  }
+
+  {
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  ASSERT_TRUE(m_catalogue->getArchiveRoutes().empty());
+
+  ASSERT_THROW(m_catalogue->deleteStorageClass(storageClass.diskInstance, storageClass.name),
+    catalogue::UserSpecifiedStorageClassUsedByArchiveFiles);
+
+  ASSERT_THROW(m_catalogue->deleteStorageClass(storageClass.diskInstance, storageClass.name),
+    exception::UserError);
+}
+
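The two tests above are TEST_P (value-parameterised) gtest cases, so each body runs once per catalogue backend the suite is instantiated with. A generic, self-contained sketch of that mechanism, using the legacy gtest macro names of this era; the fixture name and parameter values below are illustrative, not CTA's actual instantiation code:

#include <gtest/gtest.h>

// Illustrative fixture: the parameter stands in for a catalogue backend name.
class BackendParamTest : public ::testing::TestWithParam<const char *> {};

TEST_P(BackendParamTest, runsOncePerBackend) {
  // GetParam() selects the backend for this run of the test body.
  ASSERT_NE(nullptr, GetParam());
}

// One instantiation per backend makes every TEST_P above run against each
// database type (in-memory, Oracle, PostgreSQL, ...).
INSTANTIATE_TEST_CASE_P(SketchBackends, BackendParamTest,
  ::testing::Values("InMemory", "PostgreSQL"));

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
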
 TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_copies) {
   using namespace cta;
 
diff --git a/catalogue/OracleCatalogue.cpp b/catalogue/OracleCatalogue.cpp
index df420ca72fa928ea69c60fe1e8460719178dd2d2..98b28965e2523285084a40695671d5aff17a5328 100644
--- a/catalogue/OracleCatalogue.cpp
+++ b/catalogue/OracleCatalogue.cpp
@@ -371,6 +371,19 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       } catch (std::bad_cast&) {}
     }
 
+    // Store the value of each field
+    i = 0;
+    for (const auto &event: fileEvents) {
+      tapeFileBatch.vid.setFieldValue(i, event.vid);
+      tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
+      tapeFileBatch.blockId.setFieldValue(i, event.blockId);
+      tapeFileBatch.fileSize.setFieldValue(i, event.size);
+      tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
+      tapeFileBatch.creationTime.setFieldValue(i, now);
+      tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
+      i++;
+    }
+
     // Update the tape because all the necessary information is now available
     auto lastEventItor = events.cend();
     lastEventItor--;
@@ -386,7 +399,62 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
     // Create the archive file entries, skipping those that already exist
     idempotentBatchInsertArchiveFiles(conn, fileEvents);
 
-    insertTapeFileBatchIntoTempTable(conn, fileEvents);
+    {
+      const char *const sql =
+        "INSERT INTO TEMP_TAPE_FILE_INSERTION_BATCH(" "\n"
+          "VID,"                                      "\n"
+          "FSEQ,"                                     "\n"
+          "BLOCK_ID,"                                 "\n"
+          "LOGICAL_SIZE_IN_BYTES,"                    "\n"
+          "COPY_NB,"                                  "\n"
+          "CREATION_TIME,"                            "\n"
+          "ARCHIVE_FILE_ID)"                          "\n"
+        "VALUES("                                     "\n"
+          ":VID,"                                     "\n"
+          ":FSEQ,"                                    "\n"
+          ":BLOCK_ID,"                                "\n"
+          ":LOGICAL_SIZE_IN_BYTES,"                   "\n"
+          ":COPY_NB,"                                 "\n"
+          ":CREATION_TIME,"                           "\n"
+          ":ARCHIVE_FILE_ID)"                         "\n";
+      auto stmt = conn.createStmt(sql);
+      rdbms::wrapper::OcciStmt &occiStmt = dynamic_cast<rdbms::wrapper::OcciStmt &>(stmt.getStmt());
+      occiStmt.setColumn(tapeFileBatch.vid);
+      occiStmt.setColumn(tapeFileBatch.fSeq);
+      occiStmt.setColumn(tapeFileBatch.blockId);
+      occiStmt.setColumn(tapeFileBatch.fileSize);
+      occiStmt.setColumn(tapeFileBatch.copyNb);
+      occiStmt.setColumn(tapeFileBatch.creationTime);
+      occiStmt.setColumn(tapeFileBatch.archiveFileId);
+      try {
+        occiStmt->executeArrayUpdate(tapeFileBatch.nbRows);
+      } catch(oracle::occi::SQLException &ex) {
+        std::ostringstream msg;
+        msg << std::string(__FUNCTION__) << " failed for SQL statement " << rdbms::getSqlForException(sql) << ": " <<
+          ex.what();
+
+        if(rdbms::wrapper::OcciStmt::connShouldBeClosed(ex)) {
+          // Close the statement first and then the connection
+          try {
+            occiStmt.close();
+          } catch(...) {
+          }
+
+          try {
+            conn.closeUnderlyingStmtsAndConn();
+          } catch(...) {
+          }
+          throw exception::LostDatabaseConnection(msg.str());
+        }
+        throw exception::Exception(msg.str());
+      } catch(std::exception &se) {
+        std::ostringstream msg;
+        msg << std::string(__FUNCTION__) << " failed for SQL statement " << rdbms::getSqlForException(sql) << ": " <<
+          se.what();
+
+        throw exception::Exception(msg.str());
+      }
+    }
 
     // Verify that the archive file entries in the catalogue database agree with
     // the tape file written events
@@ -417,89 +485,45 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       fileSizeAndChecksum.checksumBlob.validate(event.checksumBlob);
     }
 
-    // Store the value of each field
-    i = 0;
-    for (const auto &event: fileEvents) {
-      tapeFileBatch.vid.setFieldValue(i, event.vid);
-      tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
-      tapeFileBatch.blockId.setFieldValue(i, event.blockId);
-      tapeFileBatch.fileSize.setFieldValue(i, event.size);
-      tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
-      tapeFileBatch.creationTime.setFieldValue(i, now);
-      tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
-      i++;
+    {
+      const char *const sql =
+        "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"              "\n"
+           "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID)"                                     "\n"
+        "SELECT VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"                              "\n"
+           "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID FROM TEMP_TAPE_FILE_INSERTION_BATCH";
+      auto stmt = conn.createStmt(sql);
+      stmt.executeNonQuery();
     }
 
-    const char *const sql =
-    "BEGIN"                                                                             "\n"
-      "INSERT INTO TEMP_TAPE_FILE_INSERTION_BATCH("                                     "\n"
-        "VID,"                                                                          "\n"
-        "FSEQ,"                                                                         "\n"
-        "BLOCK_ID,"                                                                     "\n"
-        "LOGICAL_SIZE_IN_BYTES,"                                                        "\n"
-        "COPY_NB,"                                                                      "\n"
-        "CREATION_TIME,"                                                                "\n"
-        "ARCHIVE_FILE_ID)"                                                              "\n"
-      "VALUES("                                                                         "\n"
-        ":VID,"                                                                         "\n"
-        ":FSEQ,"                                                                        "\n"
-        ":BLOCK_ID,"                                                                    "\n"
-        ":LOGICAL_SIZE_IN_BYTES,"                                                       "\n"
-        ":COPY_NB,"                                                                     "\n"
-        ":CREATION_TIME,"                                                               "\n"
-        ":ARCHIVE_FILE_ID);"                                                            "\n"
-      "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"              "\n"
-         "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID)"                                     "\n"
-      "SELECT VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"                              "\n"
-         "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID FROM TEMP_TAPE_FILE_INSERTION_BATCH;" "\n"
-      "FOR TF IN (SELECT * FROM TEMP_TAPE_FILE_INSERTION_BATCH)"                        "\n"
-      "LOOP"                                                                            "\n"
-        "UPDATE TAPE_FILE SET"                                                          "\n"
-          "SUPERSEDED_BY_VID=TF.VID,"  /*VID of the new file*/                          "\n"
-          "SUPERSEDED_BY_FSEQ=TF.FSEQ" /*FSEQ of the new file*/                         "\n"
-        "WHERE"                                                                         "\n"
-          "TAPE_FILE.ARCHIVE_FILE_ID= TF.ARCHIVE_FILE_ID AND"                           "\n"
-          "TAPE_FILE.COPY_NB= TF.COPY_NB AND"                                           "\n"
-          "(TAPE_FILE.VID <> TF.VID OR TAPE_FILE.FSEQ <> TF.FSEQ);"                     "\n"
-      "END LOOP;"                                                                       "\n"
-      "COMMIT;"                                                                         "\n"
-    "END;";
-    auto stmt = conn.createStmt(sql);
-    rdbms::wrapper::OcciStmt &occiStmt = dynamic_cast<rdbms::wrapper::OcciStmt &>(stmt.getStmt());
-    occiStmt.setColumn(tapeFileBatch.vid);
-    occiStmt.setColumn(tapeFileBatch.fSeq);
-    occiStmt.setColumn(tapeFileBatch.blockId);
-    occiStmt.setColumn(tapeFileBatch.fileSize);
-    occiStmt.setColumn(tapeFileBatch.copyNb);
-    occiStmt.setColumn(tapeFileBatch.creationTime);
-    occiStmt.setColumn(tapeFileBatch.archiveFileId);
-    try {
-      occiStmt->executeArrayUpdate(tapeFileBatch.nbRows);
-    } catch(oracle::occi::SQLException &ex) {
-      std::ostringstream msg;
-      msg << std::string(__FUNCTION__) << " failed for SQL statement " << rdbms::getSqlForException(sql) << ": " <<
-        ex.what();
-
-      if(rdbms::wrapper::OcciStmt::connShouldBeClosed(ex)) {
-        // Close the statement first and then the connection
-        try {
-          occiStmt.close();
-        } catch(...) {
-        }
-
-        try {
-          conn.closeUnderlyingStmtsAndConn();
-        } catch(...) {
-        }
-        throw exception::LostDatabaseConnection(msg.str());
-      }
-      throw exception::Exception(msg.str());
-    } catch(std::exception &se) {
-      std::ostringstream msg;
-      msg << std::string(__FUNCTION__) << " failed for SQL statement " << rdbms::getSqlForException(sql) << ": " <<
-        se.what();
-
-      throw exception::Exception(msg.str());
+    {
+      const char *const sql =
+        "MERGE INTO"                                                                      "\n"
+          "TAPE_FILE"                                                                     "\n"
+        "USING("                                                                          "\n"
+          "SELECT"                                                                        "\n"
+            "ARCHIVE_FILE_ID,"                                                            "\n"
+            "COPY_NB,"                                                                    "\n"
+            "VID,"                                                                        "\n"
+            // Using MAX(FSEQ) to cover the same tape copy being written more than
+            // once.  The last one written supersedes the previous ones.
+            "MAX(FSEQ) AS MAX_FSEQ"                                                       "\n"
+          "FROM"                                                                          "\n"
+            "TEMP_TAPE_FILE_INSERTION_BATCH"                                              "\n"
+          "GROUP BY"                                                                      "\n"
+            "ARCHIVE_FILE_ID, COPY_NB, VID"                                               "\n"
+          ") TEMP"                                                                        "\n"
+        "ON("                                                                             "\n"
+          "TAPE_FILE.ARCHIVE_FILE_ID = TEMP.ARCHIVE_FILE_ID AND"                          "\n"
+          "TAPE_FILE.COPY_NB = TEMP.COPY_NB)"                                             "\n"
+        "WHEN MATCHED THEN"                                                               "\n"
+          "UPDATE SET"                                                                    "\n"
+            "TAPE_FILE.SUPERSEDED_BY_VID = TEMP.VID,"                                     "\n"
+            "TAPE_FILE.SUPERSEDED_BY_FSEQ = TEMP.MAX_FSEQ"                                "\n"
+          "WHERE"                                                                         "\n"
+            "NOT(TAPE_FILE.VID = TEMP.VID AND TAPE_FILE.FSEQ = TEMP.MAX_FSEQ)";
+      conn.setAutocommitMode(rdbms::AutocommitMode::AUTOCOMMIT_ON);
+      auto stmt = conn.createStmt(sql);
+      stmt.executeNonQuery();
     }
   } catch(exception::UserError &) {
     throw;
@@ -686,10 +710,9 @@ std::map<uint64_t, OracleCatalogue::FileSizeAndChecksum> OracleCatalogue::select
         "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32 "
       "FROM "
         "ARCHIVE_FILE "
-      "INNER JOIN TEMP_TAPE_FILE_BATCH ON "
-        "ARCHIVE_FILE.ARCHIVE_FILE_ID = TEMP_TAPE_FILE_BATCH.ARCHIVE_FILE_ID";
+      "INNER JOIN TEMP_TAPE_FILE_INSERTION_BATCH ON "
+        "ARCHIVE_FILE.ARCHIVE_FILE_ID = TEMP_TAPE_FILE_INSERTION_BATCH.ARCHIVE_FILE_ID";
     auto stmt = conn.createStmt(sql);
-
     auto rset = stmt.executeQuery();
 
     std::map<uint64_t, FileSizeAndChecksum> fileSizesAndChecksums;
@@ -717,48 +740,6 @@ std::map<uint64_t, OracleCatalogue::FileSizeAndChecksum> OracleCatalogue::select
   }
 }
 
-//------------------------------------------------------------------------------
-// insertArchiveFilesIntoTempTable
-//------------------------------------------------------------------------------
-void OracleCatalogue::insertTapeFileBatchIntoTempTable(rdbms::Conn &conn, const std::set<TapeFileWritten> &events) {
-  try {
-    TempTapeFileBatch tempTapeFileBatch(events.size());
-
-    // Store the length of each field and implicitly calculate the maximum field
-    // length of each column 
-    uint32_t i = 0;
-    for (const auto &event: events) {
-      tempTapeFileBatch.archiveFileId.setFieldLenToValueLen(i, event.archiveFileId);
-      i++;
-    }
-
-    // Store the value of each field
-    i = 0;
-    for (const auto &event: events) {
-      tempTapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
-      i++;
-    }
-
-    const char *const sql =
-      "INSERT INTO TEMP_TAPE_FILE_BATCH("
-        "ARCHIVE_FILE_ID)"
-      "VALUES("
-        ":ARCHIVE_FILE_ID)";
-    auto stmt = conn.createStmt(sql);
-    rdbms::wrapper::OcciStmt &occiStmt = dynamic_cast<rdbms::wrapper::OcciStmt &>(stmt.getStmt());
-    occiStmt->setBatchErrorMode(false);
-
-    occiStmt.setColumn(tempTapeFileBatch.archiveFileId);
-
-    occiStmt->executeArrayUpdate(tempTapeFileBatch.nbRows);
-  } catch(exception::UserError &) {
-    throw;
-  } catch(exception::Exception &ex) {
-    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
-    throw;
-  }
-}
-
 //------------------------------------------------------------------------------
 // deleteArchiveFile
 //------------------------------------------------------------------------------
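The anonymous PL/SQL block removed above (array insert, INSERT...SELECT, row-by-row superseding loop, explicit COMMIT) is split into three standalone statements, with the loop replaced by a single MERGE and the explicit COMMIT by enabling autocommit on the final statement. The MERGE groups the batch by (ARCHIVE_FILE_ID, COPY_NB, VID) keeping MAX(FSEQ), so the same tape copy written more than once in one batch resolves to the last write, and then marks every other TAPE_FILE row for the same (file, copy) pair as superseded. A minimal in-memory C++ model of that superseding rule, for intuition only; the Row type and applyBatch helper are hypothetical sketch code, not the production implementation:

#include <cstdint>
#include <map>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical stand-ins for rows of TAPE_FILE and
// TEMP_TAPE_FILE_INSERTION_BATCH.
struct Row {
  std::uint64_t archiveFileId = 0;
  std::uint32_t copyNb = 0;
  std::string vid;
  std::uint64_t fSeq = 0;
  std::string supersededByVid;        // empty while the row is current
  std::uint64_t supersededByFSeq = 0;
};

// Models the MERGE: GROUP BY (archiveFileId, copyNb, vid) keeping MAX(fSeq),
// then supersede every existing row for the same (archiveFileId, copyNb)
// that is not the freshly written (vid, maxFSeq) one.
void applyBatch(std::vector<Row> &tapeFile, const std::vector<Row> &batch) {
  std::map<std::tuple<std::uint64_t, std::uint32_t, std::string>,
    std::uint64_t> maxFSeq;
  for (const auto &b : batch) {
    auto &m = maxFSeq[std::make_tuple(b.archiveFileId, b.copyNb, b.vid)];
    if (m < b.fSeq) m = b.fSeq;
  }
  for (auto &row : tapeFile) {
    for (const auto &kv : maxFSeq) {
      const auto &k = kv.first;
      const bool sameCopy = row.archiveFileId == std::get<0>(k) &&
                            row.copyNb == std::get<1>(k);
      const bool isNewRow = row.vid == std::get<2>(k) && row.fSeq == kv.second;
      if (sameCopy && !isNewRow) {
        row.supersededByVid = std::get<2>(k);
        row.supersededByFSeq = kv.second;
      }
    }
  }
}
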
diff --git a/catalogue/OracleCatalogue.hpp b/catalogue/OracleCatalogue.hpp
index fe6fd8bc2e6745d51a54110df1f300637fd297b1..473bb9004cff8f59daba0fb76c189a22140b3e5b 100644
--- a/catalogue/OracleCatalogue.hpp
+++ b/catalogue/OracleCatalogue.hpp
@@ -159,15 +159,6 @@ private:
   std::map<uint64_t, FileSizeAndChecksum> selectArchiveFileSizesAndChecksums(rdbms::Conn &conn,
     const std::set<TapeFileWritten> &events);
 
-  /**
-   * Batch inserts rows into the TAPE_FILE_BATCH temporary table that correspond
-   * to the specified TapeFileWritten events.
-   *
-   * @param conn The database connection.
-   * @param events The tape file written events.
-   */
-  void insertTapeFileBatchIntoTempTable(rdbms::Conn &conn, const std::set<TapeFileWritten> &events);
-
 }; // class OracleCatalogue
 
 } // namespace catalogue
diff --git a/catalogue/RdbmsCatalogue.cpp b/catalogue/RdbmsCatalogue.cpp
index e9d97e68d667be749b333336f7cac241650bb42f..a155762f689665ce7b3729168d02ff90e98d3c3e 100644
--- a/catalogue/RdbmsCatalogue.cpp
+++ b/catalogue/RdbmsCatalogue.cpp
@@ -44,8 +44,11 @@
 #include "catalogue/UserSpecifiedAnEmptyStringVendor.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringVid.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringVo.hpp"
+#include "catalogue/UserSpecifiedAnEmptyTapePool.hpp"
 #include "catalogue/UserSpecifiedAZeroCapacity.hpp"
 #include "catalogue/UserSpecifiedAZeroCopyNb.hpp"
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp"
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp"
 #include "common/dataStructures/TapeFile.hpp"
 #include "common/exception/Exception.hpp"
 #include "common/exception/UserError.hpp"
@@ -414,13 +417,24 @@ bool RdbmsCatalogue::storageClassExists(rdbms::Conn &conn, const std::string &di
 //------------------------------------------------------------------------------
 void RdbmsCatalogue::deleteStorageClass(const std::string &diskInstanceName, const std::string &storageClassName) {
   try {
+    auto conn = m_connPool.getConn();
+
+    if(storageClassIsUsedByArchiveRoutes(conn, storageClassName)) {
+      throw UserSpecifiedStorageClassUsedByArchiveRoutes(std::string("The ") + storageClassName +
+        " storage class is being used by one or more archive routes");
+    }
+
+    if(storageClassIsUsedByArchiveFiles(conn, storageClassName)) {
+      throw UserSpecifiedStorageClassUsedByArchiveFiles(std::string("The ") + storageClassName +
+        " storage class is being used by one or more archive files");
+    }
+
     const char *const sql =
       "DELETE FROM "
         "STORAGE_CLASS "
       "WHERE "
         "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND "
         "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME";
-    auto conn = m_connPool.getConn();
     auto stmt = conn.createStmt(sql);
 
     stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
@@ -439,6 +453,62 @@ void RdbmsCatalogue::deleteStorageClass(const std::string &diskInstanceName, con
   }
 }
 
+//------------------------------------------------------------------------------
+// storageClassIsUsedByArchiveRoutes
+//------------------------------------------------------------------------------
+bool RdbmsCatalogue::storageClassIsUsedByArchiveRoutes(rdbms::Conn &conn, const std::string &storageClassName) const {
+  try {
+    const char *const sql =
+      "SELECT "
+        "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME "
+      "FROM "
+        "ARCHIVE_ROUTE "
+      "INNER JOIN "
+        "STORAGE_CLASS "
+      "ON "
+        "ARCHIVE_ROUTE.STORAGE_CLASS_ID = STORAGE_CLASS.STORAGE_CLASS_ID "
+      "WHERE "
+        "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME";
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":STORAGE_CLASS_NAME", storageClassName);
+    auto rset = stmt.executeQuery();
+    return rset.next();
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// storageClassIsUsedByArchiveFiles
+//------------------------------------------------------------------------------
+bool RdbmsCatalogue::storageClassIsUsedByArchiveFiles(rdbms::Conn &conn, const std::string &storageClassName) const {
+  try {
+    const char *const sql =
+      "SELECT "
+        "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME "
+      "FROM "
+        "ARCHIVE_FILE "
+      "INNER JOIN "
+        "STORAGE_CLASS "
+      "ON "
+        "ARCHIVE_FILE.STORAGE_CLASS_ID = STORAGE_CLASS.STORAGE_CLASS_ID "
+      "WHERE "
+        "STORAGE_CLASS_NAME = :STORAGE_CLASS_NAME";
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":STORAGE_CLASS_NAME", storageClassName);
+    auto rset = stmt.executeQuery();
+    return rset.next();
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 //------------------------------------------------------------------------------
 // getStorageClasses
 //------------------------------------------------------------------------------
@@ -885,7 +955,7 @@ void RdbmsCatalogue::deleteTapePool(const std::string &name) {
         throw exception::UserError(std::string("Cannot delete tape-pool ") + name + " because it does not exist");
       }
     } else {
-      throw exception::UserError(std::string("Cannot delete tape-pool ") + name + " because it is not empty");
+      throw UserSpecifiedAnEmptyTapePool(std::string("Cannot delete tape-pool ") + name + " because it is not empty");
     }
   } catch(exception::UserError &) {
     throw;
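Because both new exception types derive from cta::exception::UserError, existing callers that catch the base class keep working, while new callers can switch on the precise cause. A hedged caller-side sketch; the tryDeleteStorageClass helper is hypothetical, and the catch order matters because the derived types must precede their base class:

#include "catalogue/Catalogue.hpp"
#include "catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp"
#include "catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp"
#include "common/exception/UserError.hpp"

#include <iostream>
#include <string>

// Hypothetical helper: attempts the deletion and reports why it failed.
void tryDeleteStorageClass(cta::catalogue::Catalogue &catalogue,
  const std::string &diskInstance, const std::string &name) {
  try {
    catalogue.deleteStorageClass(diskInstance, name);
  } catch (cta::catalogue::UserSpecifiedStorageClassUsedByArchiveRoutes &) {
    std::cerr << name << " is still used by one or more archive routes\n";
  } catch (cta::catalogue::UserSpecifiedStorageClassUsedByArchiveFiles &) {
    std::cerr << name << " is still used by one or more archive files\n";
  } catch (cta::exception::UserError &ex) {
    // Any other user-level failure, e.g. the storage class does not exist.
    std::cerr << ex.getMessage().str() << "\n";
  }
}
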
diff --git a/catalogue/RdbmsCatalogue.hpp b/catalogue/RdbmsCatalogue.hpp
index beb9a57fbf5c1acff8b580fd787daba3f8b3a9c1..8de37183d37975c593b9b26a5a86e9daaad5ebdb 100644
--- a/catalogue/RdbmsCatalogue.hpp
+++ b/catalogue/RdbmsCatalogue.hpp
@@ -1411,6 +1411,24 @@ protected:
    */
   bool isSetAndEmpty(const optional<std::string> &optionalStr) const;
 
+  /**
+   * Returns true if the specified storage class is currently being used by one
+   * or more archive routes.
+   *
+   * @param conn The database connection.
+   * @param storageClassName The name of the storage class.
+   */
+  bool storageClassIsUsedByArchiveRoutes(rdbms::Conn &conn, const std::string &storageClassName) const;
+
+  /**
+   * Returns true if the specified storage class is currently being used by one
+   * or more archive files.
+   *
+   * @param conn The database connection.
+   * @param storageClassName The name of the storage class.
+   */
+  bool storageClassIsUsedByArchiveFiles(rdbms::Conn &conn, const std::string &storageClassName) const;
+
   /**
    * Cached versions of tape copy to tape tape pool mappings for specific
    * storage classes.
diff --git a/catalogue/TapeItemWrittenPointerTest.cpp b/catalogue/TapeItemWrittenPointerTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..84b4f9b2357f5eeb37f66328e31d26ce2aa83d4e
--- /dev/null
+++ b/catalogue/TapeItemWrittenPointerTest.cpp
@@ -0,0 +1,146 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "catalogue/TapeFileWritten.hpp"
+#include "catalogue/TapeItemWrittenPointer.hpp"
+#include "common/make_unique.hpp"
+
+#include <gtest/gtest.h>
+#include <set>
+
+namespace unitTests {
+
+class cta_catalogue_TapeItemWrittenPointerTest : public ::testing::Test {
+protected:
+
+  virtual void SetUp() {
+  }
+
+  virtual void TearDown() {
+  }
+};
+
+TEST_F(cta_catalogue_TapeItemWrittenPointerTest, check_set_order_after_set_fseq_using_unique_ptr) {
+  using namespace cta::catalogue;
+
+  std::set<TapeItemWrittenPointer> filesWrittenSet;
+
+  auto file1WrittenUP = cta::make_unique<TapeFileWritten>();
+  auto file2WrittenUP = cta::make_unique<TapeFileWritten>();
+
+  file1WrittenUP->fSeq = 1;
+  filesWrittenSet.insert(file1WrittenUP.release());
+
+  file2WrittenUP->fSeq = 2;
+  filesWrittenSet.insert(file2WrittenUP.release());
+
+  ASSERT_EQ(2, filesWrittenSet.size());
+
+  uint64_t expectedFSeq = 1;
+  for(const auto &event: filesWrittenSet) {
+    ASSERT_EQ(expectedFSeq, event->fSeq);
+    expectedFSeq++;
+  }
+}
+
+TEST_F(cta_catalogue_TapeItemWrittenPointerTest, DISABLED_check_set_order_after_set_fseq_using_reference) {
+  using namespace cta::catalogue;
+
+  std::set<TapeItemWrittenPointer> filesWrittenSet;
+
+  auto file1WrittenUP = cta::make_unique<TapeFileWritten>();
+  auto file2WrittenUP = cta::make_unique<TapeFileWritten>();
+
+  auto file1WrittenPtr = file1WrittenUP.get();
+  auto file2WrittenPtr = file2WrittenUP.get();
+
+  auto & file1Written = *file1WrittenUP;
+  filesWrittenSet.insert(file1WrittenUP.release());
+  file1Written.fSeq = 1;
+
+  auto & file2Written = *file2WrittenUP;
+  filesWrittenSet.insert(file2WrittenUP.release());
+  file2Written.fSeq = 2;
+
+  ASSERT_LT(file1Written, file2Written);
+
+  ASSERT_EQ(2, filesWrittenSet.size());
+
+  // Check the set contains the original objects
+  for(const auto &event: filesWrittenSet) {
+    ASSERT_TRUE(event.get() == file1WrittenPtr || event.get() == file2WrittenPtr);
+
+    if(event.get() == file1WrittenPtr) {
+      ASSERT_EQ(1, event->fSeq);
+    } else {
+      ASSERT_EQ(2, event->fSeq);
+    }
+  }
+
+  // Check the order of the set
+  uint64_t expectedFSeq = 1;
+  for(const auto &event: filesWrittenSet) {
+    ASSERT_EQ(expectedFSeq, event->fSeq);
+    expectedFSeq++;
+  }
+}
+
+TEST_F(cta_catalogue_TapeItemWrittenPointerTest, check_set_order_after_set_fseq_using_reference_delayed_insert) {
+  using namespace cta::catalogue;
+
+  std::set<TapeItemWrittenPointer> filesWrittenSet;
+
+  auto file1WrittenUP = cta::make_unique<TapeFileWritten>();
+  auto file2WrittenUP = cta::make_unique<TapeFileWritten>();
+
+  auto file1WrittenPtr = file1WrittenUP.get();
+  auto file2WrittenPtr = file2WrittenUP.get();
+
+  auto & file1Written = *file1WrittenUP;
+  file1Written.fSeq = 1;
+
+  auto & file2Written = *file2WrittenUP;
+  file2Written.fSeq = 2;
+
+  filesWrittenSet.insert(file1WrittenUP.release());
+  filesWrittenSet.insert(file2WrittenUP.release());
+
+  ASSERT_LT(file1Written, file2Written);
+
+  ASSERT_EQ(2, filesWrittenSet.size());
+
+  // Check the set contains the original objects
+  for(const auto &event: filesWrittenSet) {
+    ASSERT_TRUE(event.get() == file1WrittenPtr || event.get() == file2WrittenPtr);
+
+    if(event.get() == file1WrittenPtr) {
+      ASSERT_EQ(1, event->fSeq);
+    } else {
+      ASSERT_EQ(2, event->fSeq);
+    }
+  }
+
+  // Check the order of the set
+  uint64_t expectedFSeq = 1;
+  for(const auto &event: filesWrittenSet) {
+    ASSERT_EQ(expectedFSeq, event->fSeq);
+    expectedFSeq++;
+  }
+}
+
+} // namespace unitTests
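The DISABLED_ test above documents the pitfall that motivates the delayed-insert variant: a std::set positions an element using the comparator at insertion time and never re-sorts, so assigning fSeq through a retained reference after the insert leaves the set ordered by the stale keys. A self-contained sketch of the same effect with a plain dereferencing comparator; shared_ptr<int> stands in for TapeItemWrittenPointer:

#include <cassert>
#include <memory>
#include <set>

// Compares the pointees, as the TapeItemWrittenPointer comparator does.
struct DerefLess {
  bool operator()(const std::shared_ptr<int> &a,
                  const std::shared_ptr<int> &b) const {
    return *a < *b;
  }
};

int main() {
  std::set<std::shared_ptr<int>, DerefLess> s;
  auto a = std::make_shared<int>(0);
  auto b = std::make_shared<int>(0);

  s.insert(a);
  *a = 1;      // key mutated after insertion: the set does not re-sort
  s.insert(b); // b still compares as 0, so it lands before a
  *b = 2;

  // Iteration now yields 2 then 1 instead of the expected 1 then 2,
  // which is exactly why the "using_reference" test had to be disabled.
  auto it = s.begin();
  assert(**it == 2);
  assert(**++it == 1);
  return 0;
}
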
diff --git a/catalogue/UserSpecifiedAnEmptyTapePool.cpp b/catalogue/UserSpecifiedAnEmptyTapePool.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8882480a86f9573b52449bcff6064b0c66168cdc
--- /dev/null
+++ b/catalogue/UserSpecifiedAnEmptyTapePool.cpp
@@ -0,0 +1,32 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "UserSpecifiedAnEmptyTapePool.hpp"
+
+namespace cta {
+namespace catalogue {
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+UserSpecifiedAnEmptyTapePool::UserSpecifiedAnEmptyTapePool(const std::string &context, const bool embedBacktrace):
+  UserError(context, embedBacktrace) {
+}
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/UserSpecifiedAnEmptyTapePool.hpp b/catalogue/UserSpecifiedAnEmptyTapePool.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0b76caef8b0001eac36833f6969e0a66568dc796
--- /dev/null
+++ b/catalogue/UserSpecifiedAnEmptyTapePool.hpp
@@ -0,0 +1,46 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/UserError.hpp"
+
+namespace cta {
+namespace catalogue {
+
+/**
+ * User error thrown when the tape pool specified by the user is not empty
+ * when it should be.
+ */
+class UserSpecifiedAnEmptyTapePool: public exception::UserError {
+public:
+
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was thrown in the message
+   */
+  UserSpecifiedAnEmptyTapePool(const std::string &context = "", const bool embedBacktrace = true);
+
+}; // class UserSpecifiedAnEmptyTapePool
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.cpp b/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f96b4f7f4a010c3667fd4fe844a155865b18ce3e
--- /dev/null
+++ b/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.cpp
@@ -0,0 +1,39 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp"
+
+namespace cta {
+namespace catalogue {
+
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+UserSpecifiedStorageClassUsedByArchiveFiles::UserSpecifiedStorageClassUsedByArchiveFiles(const std::string &context,
+  const bool embedBacktrace): cta::exception::UserError(context, embedBacktrace) {
+}
+
+//------------------------------------------------------------------------------
+// destructor
+//------------------------------------------------------------------------------
+UserSpecifiedStorageClassUsedByArchiveFiles::~UserSpecifiedStorageClassUsedByArchiveFiles() {
+}
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp b/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..892340d0f9604efb7ea019bbe2c0ff8dc90cbac8
--- /dev/null
+++ b/catalogue/UserSpecifiedStorageClassUsedByArchiveFiles.hpp
@@ -0,0 +1,49 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/UserError.hpp"
+
+namespace cta {
+namespace catalogue {
+
+/**
+ * User specified a storage class which is currently being used by one or more
+ * archive files.
+ */
+class UserSpecifiedStorageClassUsedByArchiveFiles: public exception::UserError {
+public:
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was thrown in the message
+   */
+  UserSpecifiedStorageClassUsedByArchiveFiles(const std::string &context = "", const bool embedBacktrace = true);
+
+  /**
+   * Destructor.
+   */
+  ~UserSpecifiedStorageClassUsedByArchiveFiles() override;
+}; // class UserSpecifiedStorageClassUsedByArchiveFiles
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.cpp b/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fe4cc771ad55c2553505306babda8a655759a747
--- /dev/null
+++ b/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.cpp
@@ -0,0 +1,38 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp"
+
+namespace cta {
+namespace catalogue {
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+UserSpecifiedStorageClassUsedByArchiveRoutes::UserSpecifiedStorageClassUsedByArchiveRoutes(const std::string &context,
+  const bool embedBacktrace): cta::exception::UserError(context, embedBacktrace) {
+}
+
+//------------------------------------------------------------------------------
+// destructor
+//------------------------------------------------------------------------------
+UserSpecifiedStorageClassUsedByArchiveRoutes::~UserSpecifiedStorageClassUsedByArchiveRoutes() {
+}
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp b/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ddf1fc49984d6ef0636aeb4ef9b72bcc489e9e5e
--- /dev/null
+++ b/catalogue/UserSpecifiedStorageClassUsedByArchiveRoutes.hpp
@@ -0,0 +1,49 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/UserError.hpp"
+
+namespace cta {
+namespace catalogue {
+
+/**
+ * User specified a storage class which is currently being used by one or more
+ * archive routes.
+ */
+class UserSpecifiedStorageClassUsedByArchiveRoutes: public exception::UserError {
+public:
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was thrown in the message
+   */
+  UserSpecifiedStorageClassUsedByArchiveRoutes(const std::string &context = "", const bool embedBacktrace = true);
+
+  /**
+   * Destructor.
+   */
+  ~UserSpecifiedStorageClassUsedByArchiveRoutes() override;
+}; // class UserSpecifiedStorageClassUsedByArchiveRoutes
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/oracle_catalogue_schema_header.sql b/catalogue/oracle_catalogue_schema_header.sql
index 4235531d59e2061793d31e303e06681bc4fb56dd..6a6d22997f350a4fa4384e705e62213befb316b5 100644
--- a/catalogue/oracle_catalogue_schema_header.sql
+++ b/catalogue/oracle_catalogue_schema_header.sql
@@ -14,11 +14,6 @@ CREATE SEQUENCE STORAGE_CLASS_ID_SEQ
   NOCYCLE
   CACHE 20
   NOORDER;
-CREATE GLOBAL TEMPORARY TABLE TEMP_TAPE_FILE_BATCH(
-  ARCHIVE_FILE_ID NUMERIC(20, 0)
-)
-ON COMMIT DELETE ROWS;
-CREATE INDEX TEMP_T_F_B_ARCHIVE_FILE_ID_I ON TEMP_TAPE_FILE_BATCH(ARCHIVE_FILE_ID);
 CREATE GLOBAL TEMPORARY TABLE TEMP_TAPE_FILE_INSERTION_BATCH(
   VID                   VARCHAR(100),
   FSEQ                  NUMERIC(20, 0),
diff --git a/cmdline/CtaAdminCmdParse.hpp b/cmdline/CtaAdminCmdParse.hpp
index 6ff0ea7b1db0d1b1f722cf97a2e5202681ff3658..fa080f80849714aeff28ca1d95b1d1ec7a57ce17 100644
--- a/cmdline/CtaAdminCmdParse.hpp
+++ b/cmdline/CtaAdminCmdParse.hpp
@@ -256,7 +256,8 @@ const std::map<std::string, OptionBoolean::Key> boolOptions = {
    { "--justaddcopies",         OptionBoolean::JUSTADDCOPIES },
    { "--justretrieve",          OptionBoolean::JUSTRETRIEVE },
    { "--log",                   OptionBoolean::SHOW_LOG_ENTRIES },
-   { "--summary",               OptionBoolean::SUMMARY }
+   { "--summary",               OptionBoolean::SUMMARY },
+   { "--disabledtape",		OptionBoolean::DISABLED } 
 };
 
 
@@ -358,7 +359,8 @@ const std::map<AdminCmd::Cmd, CmdHelp> cmdHelp = {
 			   "     If the --justaddcopies option is set, new (or missing) copies (as defined by the storage class) of the files located on the tape to repack will be created and migrated.\n"
 			   "     By default, CTA will migrate AND add new (or missing) copies (as defined by the storage class) of the files located on the tape to repack.\n"
                            "   * The --mountpolicy option allows to give a specific mount policy that will be applied to the repack subrequests (retrieve and archive requests).\n"
-			   "     By default, a hardcoded mount policy is applied (every request priorities and minimum request ages = 1)."
+			   "     By default, a hardcoded mount policy is applied (all request priorities and minimum request ages = 1).\n"
+			   "   * If the --disabledtape flag is set, the tape to repack will be mounted for retrieval even if it is disabled."
 					"\n\n" 
 					 }},
    { AdminCmd::CMD_REQUESTERMOUNTRULE,   { "requestermountrule",   "rmr", { "add", "ch", "rm", "ls" } }},
@@ -441,6 +443,7 @@ const Option opt_vo                   { Option::OPT_STR,  "--vo",
 const Option opt_vidfile              { Option::OPT_STR_LIST, "--vidfile",           "-f",   " <filename>" };
 const Option opt_full                 { Option::OPT_BOOL, "--full",                  "-f",   " <\"true\" or \"false\">" };
 const Option opt_readonly             { Option::OPT_BOOL, "--readonly",              "-r",   " <\"true\" or \"false\">" };
+const Option opt_disabled_tape        { Option::OPT_FLAG, "--disabledtape",          "-d",   "" };
 
 const Option opt_disksystem           { Option::OPT_STR,  "--disksystem",            "-n", " <disk_system_name>" };
 const Option opt_file_regexp          { Option::OPT_STR,  "--fileregexp",            "-r", " <file_regexp>" };
@@ -514,7 +517,7 @@ const std::map<cmd_key_t, cmd_val_t> cmdOptions = {
    {{ AdminCmd::CMD_MOUNTPOLICY,          AdminCmd::SUBCMD_LS    }, { }},
    /*----------------------------------------------------------------------------------------------------*/
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_ADD   },
-      { opt_vid.optional(), opt_vidfile.optional(), opt_bufferurl.optional(), opt_justmove.optional(), opt_justaddcopies.optional(), opt_mountpolicy.optional() }},
+      { opt_vid.optional(), opt_vidfile.optional(), opt_bufferurl.optional(), opt_justmove.optional(), opt_justaddcopies.optional(), opt_mountpolicy.optional(), opt_disabled_tape.optional() }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_RM    }, { opt_vid }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_LS    }, { opt_vid.optional() }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_ERR   }, { opt_vid }},
diff --git a/cmdline/CtaAdminTextFormatter.cpp b/cmdline/CtaAdminTextFormatter.cpp
index 683b3c61fa1afae5bd8b3efaa2277b4fdd4a47a4..810c0c8a3de5362633dbf446d3cccc2676556a78 100644
--- a/cmdline/CtaAdminTextFormatter.cpp
+++ b/cmdline/CtaAdminTextFormatter.cpp
@@ -293,7 +293,8 @@ void TextFormatter::print(const DriveLsItem &drls_item)
   if(drls_item.drive_status() == DriveLsItem::TRANSFERRING) {
     filesTransferredInSession = std::to_string(drls_item.files_transferred_in_session());
     bytesTransferredInSession = dataSizeToStr(drls_item.bytes_transferred_in_session());
-    latestBandwidth = std::to_string(drls_item.latest_bandwidth());
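+    // Scale the raw bandwidth value down by 10^6 (presumably bytes/s to MB/s)
+    // before formatting it for display.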
+    const double bandwidth = static_cast<double>(drls_item.latest_bandwidth()) / 1.0e6;
+    latestBandwidth = doubleToStr(bandwidth,'\0');
   }
 
   if(drls_item.drive_status() != DriveLsItem::UP &&
diff --git a/common/dataStructures/DriveState.hpp b/common/dataStructures/DriveState.hpp
index 64afdd153d429decfa095e48e724065130709a45..7528e0aae830cf40e3e5ebe2d59ca4b8181b55f4 100644
--- a/common/dataStructures/DriveState.hpp
+++ b/common/dataStructures/DriveState.hpp
@@ -20,6 +20,7 @@
 
 #include <stdint.h>
 #include <string>
+#include <vector>
 
 #include "DriveStatus.hpp"
 #include "MountType.hpp"
@@ -39,10 +40,18 @@ struct DriveState {
   bool operator==(const DriveState &rhs) const;
 
   bool operator!=(const DriveState &rhs) const;
+  
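+  // One entry of the drive's configuration: the configuration category, the
+  // key/value pair, and the source the value was taken from.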
+  struct DriveConfigItem {
+    std::string category;
+    std::string key;
+    std::string value;
+    std::string source;
+  };
 
   std::string driveName;
   std::string host;
   std::string logicalLibrary;
+  std::string ctaVersion;
   uint64_t sessionId = 0;
   uint64_t bytesTransferredInSession = 0;
   uint64_t filesTransferredInSession = 0;
@@ -75,6 +84,9 @@ struct DriveState {
   std::string nextTapepool;
   uint64_t nextPriority = 0;
   optional<ActivityAndWeight> nextActivityAndWeight;
+  std::vector<DriveConfigItem> driveConfigItems;
+  std::string devFileName;
+  std::string rawLibrarySlot;
 }; // struct DriveState
 
 std::ostream &operator<<(std::ostream &os, const DriveState &obj);
diff --git a/common/dataStructures/RepackInfo.hpp b/common/dataStructures/RepackInfo.hpp
index 03b7666a0121a31a4d95419f2bbdcbdcd0ea18a4..7f9a9dc7b53f9df52a77dc2d5c6c4a11ed960ff7 100644
--- a/common/dataStructures/RepackInfo.hpp
+++ b/common/dataStructures/RepackInfo.hpp
@@ -61,6 +61,7 @@ struct RepackInfo {
   uint64_t retrievedFiles;
   uint64_t archivedFiles;
   bool isExpandFinished;
+  bool forceDisabledTape;
 //  std::string tag;
 //  uint64_t totalFiles;
 //  uint64_t totalSize;
diff --git a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf
index 6f3d411e8d398fcaefa2004a8cedd4b1fbeeab0e..ed58bb6ff5ff60bb35bf822930f9f5f5b7f9be2f 100644
--- a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf
+++ b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf
@@ -3,6 +3,7 @@ logfile = /var/log/eos/fst/cta-fst-gcd.log
 mgmhost = localhost.cern.ch
 minfreebytes = 0
 gcagesecs = 1
+absolutemaxagesecs = 604800
 queryperiodsecs = 20
 mainloopperiodsecs = 10
 xrdsecssskt = /etc/eos.keytab
diff --git a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
index 022a19576cad61680ab80d1952dec2e3502fa6bc..bcdc8db05bcd3118b8de1bc5be776e3e7e117cdb 100644
--- a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
+++ b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
@@ -9,7 +9,6 @@ export KRB5RCACHETYPE=none
 # e.g. /etc/sysconfig/xrd.<role>. The role based mechanism allows for
 # multiple xrd's running with different options to be controlled via
 # the same initd script
-
 XRD_ROLES="mq mgm fst"
 
 # ------------------------------------------------------------------
@@ -58,3 +57,6 @@ export EOS_FST_DELETE_QUERY_INTERVAL=5
 
 # Tell clients that there is a CTA backend by starting the sitename with "cern_tape_archive_"
 XRDSITE=cern_tape_archive_ci
+
+# Port to use for injecting into namespace using gRPC API
+EOS_MGM_GRPC_PORT=50051
diff --git a/continuousintegration/docker/ctafrontend/cc7/etc/yum.repos.d/castor.repo b/continuousintegration/docker/ctafrontend/cc7/etc/yum.repos.d/castor.repo
index 1a0bd6a90c9a1491dbfb280a7e578746e73d7f69..afd1ae5007d1dec2d072c2a501fccea2b807d9a5 100644
--- a/continuousintegration/docker/ctafrontend/cc7/etc/yum.repos.d/castor.repo
+++ b/continuousintegration/docker/ctafrontend/cc7/etc/yum.repos.d/castor.repo
@@ -4,3 +4,10 @@ baseurl=http://linuxsoft.cern.ch/internal/repos/castor7-testing/$basearch/os
 priority=2
 enabled=0
 gpgcheck=0
+
+[castor-dbtools]
+name=Core CASTOR Repository
+baseurl=http://castorrepo.web.cern.ch/castorrepo/castor-repo/qa/cc-7/$basearch
+enabled=1
+gpgcheck=0
+priority=1
diff --git a/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list b/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
index 4654466fe19b1333a2a3b387baf67fd05473b25e..b205886f2f61012507120001298bdb614611ead9 100644
--- a/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
+++ b/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
@@ -1,17 +1,18 @@
-0:eos-archive-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-cleanup-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-client-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-debuginfo-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fuse-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fuse-core-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fuse-sysv-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fusex-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fusex-core-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-fusex-selinux-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-server-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-srm-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-test-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
-0:eos-testkeytab-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-archive-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-cleanup-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-client-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-debuginfo-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fuse-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fuse-core-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fuse-sysv-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fusex-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fusex-core-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-fusex-selinux-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-ns-inspect-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-server-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-srm-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-test-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
+0:eos-testkeytab-4.5.8-20190910135615gitd3acb55.el7.cern.x86_64
 1:python2-xrootd-4.10.0-1.el7.*
 1:python3-xrootd-4.10.0-1.el7.*
 1:xrootd-4.10.0-1.el7.*
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/client.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/client.sh
index 05325855de7ff37c0c4bac8d7a044f164fee4e23..caf7640b24118f78d27b7a0eff52deb0c71c391d 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/client.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/client.sh
@@ -13,7 +13,6 @@ if [ ! -e /etc/buildtreeRunner ]; then
   # Install eos-protobuf3 separately as eos is OK with protobuf3 but cannot use it..
   # Andreas is fixing eos-(client|server) rpms to depend on eos-protobuf3 instead
   # yum -y install eos-protobuf3
-
 fi
 
 cat <<EOF > /etc/cta/cta-cli.conf
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
index 6917d8e37e6fb79917dab17dc59a053680dca832..789c31cde9904b3205350db3f7699cf9cf554863 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
@@ -9,7 +9,7 @@ if [ ! -e /etc/buildtreeRunner ]; then
   yum-config-manager --enable eos-citrine
 
   # Install missing RPMs
-  yum -y install eos-client eos-server xrootd-client xrootd-debuginfo xrootd-server cta-cli cta-debuginfo sudo logrotate cta-fst-gcd
+  yum -y install eos-client eos-server xrootd-client xrootd-debuginfo xrootd-server cta-cli cta-migration-tools cta-debuginfo sudo logrotate cta-fst-gcd
 
   ## Keep this temporary fix that may be needed if going to protobuf3-3.5.1 for CTA
   # Install eos-protobuf3 separately as eos is OK with protobuf3 but cannot use it..
@@ -17,9 +17,9 @@ if [ ! -e /etc/buildtreeRunner ]; then
 fi
 
 # Check that the /usr/bin/cta-fst-gcd executable has been installed
-test -e /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd EXISTS" || exit 1
-test -f /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd IS A REGULAR FILE" || exit 1
-test -x /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd IS EXECUTABLE" || exit 1
+test -e /usr/bin/cta-fst-gcd || { echo "/usr/bin/cta-fst-gcd MISSING" ; exit 1; }
+test -f /usr/bin/cta-fst-gcd || { echo "/usr/bin/cta-fst-gcd NOT A REGULAR FILE"; exit 1; }
+test -x /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd exists as a regular, executable file: OK" || { echo "/usr/bin/cta-fst-gcd NOT EXECUTABLE"; exit 1; }
 
 # create local users as the mgm is the only one doing the uid/user/group mapping in the full infrastructure
 groupadd --gid 1100 eosusers
@@ -48,6 +48,8 @@ CTA_PROC_DIR=/eos/${EOS_INSTANCE}/proc/cta
 CTA_WF_DIR=${CTA_PROC_DIR}/workflow
 # dir for cta tests only for eosusers and powerusers
 CTA_TEST_DIR=/eos/${EOS_INSTANCE}/cta
+# dir for gRPC tests, should be the same as eos.prefix in client.sh
+GRPC_TEST_DIR=/eos/grpctest
 # dir for eos instance basic tests writable and readable by anyone
 EOS_TMP_DIR=/eos/${EOS_INSTANCE}/tmp
 
@@ -149,9 +151,6 @@ if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
   systemctl status eos@{mq,mgm,fst} &>/dev/null && echo OK || echo FAILED
 
   systemctl status eos@{mq,mgm,fst}
-
-  systemctl start cta-fst-gcd
-
 else
   # Using jemalloc as specified in
   # it-puppet-module-eos:
@@ -167,27 +166,48 @@ else
     /usr/bin/xrootd -n mq -c /etc/xrd.cf.mq -l /var/log/eos/xrdlog.mq -b -Rdaemon
     /usr/bin/xrootd -n mgm -c /etc/xrd.cf.mgm -m -l /var/log/eos/xrdlog.mgm -b -Rdaemon
     /usr/bin/xrootd -n fst -c /etc/xrd.cf.fst -l /var/log/eos/xrdlog.fst -b -Rdaemon
-
-
-  runuser -u daemon setsid /usr/bin/cta-fst-gcd > /dev/null 2>&1 < /dev/null &
 fi
 
-echo "Giving cta-fst-gcd 1 second to start logging"
-sleep 1
+if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
+  if eos ns | grep 'In-flight FileMD' && eos ns | grep 'In-flight ContainerMD'; then
+    echo 'The EOS namespace backend is QuarkDB'
+  else
+    echo 'The EOS namespace backend is not QuarkDB'
+    exit 1
+  fi
 
-let EXPECTED_NB_STARTED_CTA_FST_GCD=NB_STARTED_CTA_FST_GCD+1
-ACTUAL_NB_STARTED_CTA_FST_GCD=0
-if test -f /var/log/eos/fst/cta-fst-gcd.log; then
-  ACTUAL_NB_STARTED_CTA_FST_GCD=`grep "cta-fst-gcd started" /var/log/eos/fst/cta-fst-gcd.log | wc -l`
-else
-  echo "/usr/bin/cta-fst-gcd DOES NOT EXIST"
-  exit 1
-fi
-if test ${EXPECTED_NB_STARTED_CTA_FST_GCD} = ${ACTUAL_NB_STARTED_CTA_FST_GCD}; then
-  echo "/usr/bin/cta-fst-gcd LOGGED 'cta-fst-gcd started'"
-else
-  echo "/usr/bin/cta-fst-gcd DID NOT LOG 'cta-fst-gcd started'"
-  exit 1
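+  # 4294967296 is 2^32: reserve the low ID range so that file and container IDs
+  # injected with self-assigned values (e.g. during migration) cannot collide
+  # with IDs handed out by the namespace itself.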
+  if eos ns reserve-ids 4294967296 4294967296; then
+    echo "Reserved EOS file and container IDs up to and including 4294967296"
+  else
+    echo "Failed to reserve EOS file and container IDs"
+    exit 1
+  fi
+  CID_TEST_DIR=/cid_test_dir
+  if eos mkdir ${CID_TEST_DIR}; then
+    echo "Created ${CID_TEST_DIR}"
+  else
+    echo "Failed to create ${CID_TEST_DIR}"
+    exit 1
+  fi
+  echo eos fileinfo ${CID_TEST_DIR}
+  eos fileinfo ${CID_TEST_DIR}
+  CID_TEST_DIR_CID=`eos fileinfo ${CID_TEST_DIR} | sed 's/Fid: /Fid:/' | sed 's/ /\n/g' | grep Fid: | sed 's/Fid://'`
+  if test x = "x${CID_TEST_DIR_CID}"; then
+    echo "Failed to determine the EOS container ID of ${CID_TEST_DIR}"
+    exit 1
+  else
+    echo "The EOS container ID of ${CID_TEST_DIR} is ${CID_TEST_DIR_CID}"
+  fi
+  if test 4294967296 -ge ${CID_TEST_DIR_CID}; then
+    echo "Container ID ${CID_TEST_DIR_CID} is illegal because it is within the reserverd set"
+    exit 1
+  fi
+  if eos rmdir ${CID_TEST_DIR}; then
+    echo "Deleted ${CID_TEST_DIR}"
+  else
+    echo "Failed to delete ${CID_TEST_DIR}"
+    exit 1
+  fi
 fi
 
   eos vid enable krb5
@@ -208,6 +228,35 @@ fi
   eos mkdir ${CTA_PROC_DIR}
   eos mkdir ${CTA_WF_DIR}
 
+  # Configure gRPC interface:
+  #
+  # 1. Map requests from the client to EOS virtual identities
+  eos -r 0 0 vid add gateway [:1] grpc
+  # 2. Add authorisation key
+  #
+  # Note: EOS_AUTH_KEY must be the same as the one specified in client.sh
+  EOS_AUTH_KEY=migration-test-token
+  eos -r 0 0 vid set map -grpc key:${EOS_AUTH_KEY} vuid:2 vgid:2
+  echo "eos vid ls:"
+  eos -r 0 0 vid ls
+  # 3. Create top-level directory and set permissions to writeable by all
+  eos mkdir ${GRPC_TEST_DIR}
+  eos chmod 777 ${GRPC_TEST_DIR}
+
+if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
+  CTA_PROC_DIR_CID=`eos fileinfo ${CTA_PROC_DIR} | sed 's/Fid: /Fid:/' | sed 's/ /\n/g' | grep Fid: | sed 's/Fid://'`
+  if test x = "x${CTA_PROC_DIR_CID}"; then
+    echo "Failed to determine the EOS container ID of ${CTA_PROC_DIR}"
+    exit 1
+  else
+    echo "The EOS container ID of ${CTA_PROC_DIR} is ${CTA_PROC_DIR_CID}"
+  fi
+  if test 4294967296 -ge ${CTA_PROC_DIR_CID}; then
+    echo "Container ID ${CTA_PROC_DIR_CID} is illegal because it is within the reserverd set"
+    exit 1
+  fi
+fi
+
   # ${CTA_TEST_DIR} must be writable by eosusers and powerusers
   # but as there is no sticky bit in eos, we need to remove deletion for non owner to eosusers members
   # this is achieved through the ACLs.
@@ -215,7 +264,6 @@ fi
   eos mkdir ${CTA_TEST_DIR}
   eos chmod 555 ${CTA_TEST_DIR}
   eos attr set sys.acl=g:eosusers:rwx!d,u:poweruser1:rwx+dp,u:poweruser2:rwx+dp /eos/ctaeos/cta
-
   eos attr set CTA_StorageClass=ctaStorageClass ${CTA_TEST_DIR}
     
   # Link the attributes of CTA worklow directory to the test directory
@@ -231,6 +279,22 @@ fi
     sleep 1
   done
 
+  # Start the FST garbage collector (the daemon user must be an EOS sudoer by now)
+  if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
+    systemctl start cta-fst-gcd
+  else
+    runuser -u daemon setsid /usr/bin/cta-fst-gcd > /dev/null 2>&1 < /dev/null &
+  fi
+  echo "Giving cta-fst-gcd 1 second to start"
+  sleep 1
+  FST_GCD_PID=`ps -ef | egrep '^daemon .* /bin/python /usr/bin/cta-fst-gcd$' | grep -v grep | awk '{print $2;}'`
+  if test "x${FST_GCD_PID}" = x; then
+    echo "cta-fst-gcd is not running"
+    exit 1
+  else
+    echo "cta-fst-gcd is running FST_GCD_PID=${FST_GCD_PID}"
+  fi
+
 # test EOS
   eos -b node ls
 
@@ -269,6 +333,22 @@ fi
 # configure preprod directory separately
 /opt/run/bin/eos_configure_preprod.sh
 
+# configuration for migration tools
+cat <<EOF >/etc/cta/castor-migration.conf
+castor.db_login               oracle:castor/<password>@castor
+castor.json                   true
+castor.max_num_connections    1
+castor.batch_size             100
+castor.prefix                 /castor/cern.ch
+eos.dry_run                   false
+eos.prefix                    /eos/grpctest
+eos.endpoint                  localhost:50051
+eos.token                     ${EOS_AUTH_KEY}
+EOF
+echo Migration tools configuration:
+cat /etc/cta/castor-migration.conf
+
+
 touch /EOSOK
 
 if [ "-${CI_CONTEXT}-" == '-nosystemd-' ]; then
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
index e620de65b92d48ca3f868184700e0afc493bc593..4dcef3daacc4d1a114435eb4575cfc861f887c2c 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
@@ -50,7 +50,6 @@ else
   echo "Reusing objectstore (no check)"
 fi
 
-
 echo "Configuring database:"
 /opt/run/bin/init_database.sh
 . /tmp/database-rc.sh
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/mkSymlinks.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/mkSymlinks.sh
index 683a0325c47db98fd7e29ebdc7bab0feb5da051c..7df4512d961e44de9752d8d961da0d8c971046c8 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/mkSymlinks.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/mkSymlinks.sh
@@ -2,12 +2,17 @@
 
 # make symbolic links to all CTA binaries.
 
-echo Creating symlinks for CTA binaries.
+echo "Creating symlinks for CTA binaries."
 ln -s -v -t /usr/bin `find ${BUILDTREE_BASE}/${CTA_BUILDTREE_SUBDIR} -type f -executable | egrep -v '\.so(\.|$)' | egrep -v '\.sh$' | grep -v RPM/BUILD | grep -v CMake | grep -v CPack`
 echo Creating symlinks for CTA libraries.
 find ${BUILDTREE_BASE}/${CTA_BUILDTREE_SUBDIR} | grep '.so$' | xargs -itoto ln -s -v -t /usr/lib64 toto
 echo Creating symlink for frontend configuration file.
-ln -s -v -t /etc/cta `perl -e 'while (<>) { if (/cta_SOURCE_DIR:STATIC=(.*)/ ) { print $1."\n"; } }' < ${BUILDTREE_BASE}/${CTA_BUILDTREE_SUBDIR}/CMakeCache.txt`/xroot_plugins/cta-frontend-xrootd.conf
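+# Extract the location of the CTA source tree from the CMake cache of the build tree.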
+CTA_SOURCE_TREE=`perl -e 'while (<>) { if (/cta_SOURCE_DIR:STATIC=(.*)/ ) { print $1."\n"; } }' < ${BUILDTREE_BASE}/${CTA_BUILDTREE_SUBDIR}/CMakeCache.txt`
+ln -s -v -t /etc/cta ${CTA_SOURCE_TREE}/xroot_plugins/cta-frontend-xrootd.conf
+echo "Copying cta-fst-gcd (requires a different name)"
+cp -v ${CTA_SOURCE_TREE}/python/eosfstgcd/ctafstgcd.py /usr/bin/cta-fst-gcd
 if [[ -n "${EOS_BUILDTREE_SUBDIR}" ]]; then
   echo Creating symlinks for EOS binaries.
   ln -s -v -t /usr/bin `find ${BUILDTREE_BASE}/${EOS_BUILDTREE_SUBDIR} -type f -executable | egrep -v '\.so(\.|$)' | egrep -v '\.sh$' | grep -v RPM/BUILD | grep -v CMake | grep -v CPack`
@@ -18,3 +23,4 @@ if [[ -n "${EOS_BUILDTREE_SUBDIR}" ]]; then
   mkdir /var/eos{,/wfe{,/bash},ns-queue{,/default}}
   chown -R daemon.daemon /var/log/eos /var/eos
 fi
+echo "Symlinks creation compete."
diff --git a/continuousintegration/orchestration/create_instance.sh b/continuousintegration/orchestration/create_instance.sh
index 534c7113fc3f09d6f1825feb3f15e51bae94681e..57825ecf9ce858c0ca50b1d96ed7b80497ed8d58 100755
--- a/continuousintegration/orchestration/create_instance.sh
+++ b/continuousintegration/orchestration/create_instance.sh
@@ -301,7 +301,6 @@ kubectl --namespace=${instance} exec kdc cat /root/eos-server.keytab | kubectl -
 kubectl --namespace=${instance} exec ctacli -- kinit -kt /root/ctaadmin1.keytab ctaadmin1@TEST.CTA
 kubectl --namespace=${instance} exec client -- kinit -kt /root/user1.keytab user1@TEST.CTA
 
-
 ## THE FILE IS MOVED THERE MUCH LATER AND OVERWRITES THIS
 # THIS HAS TO BE IMPROVED (DEFINITELY) SO THAT WE CAN ASYNCHRONOUSLY UPDATE THE CONFIGURATION FILES...
 # SYSTEMD IS THE WAY TO GO
diff --git a/continuousintegration/orchestration/tests/client_ar.sh b/continuousintegration/orchestration/tests/client_ar.sh
index ae13afd26a0e92c1830ee5d1c40e5c7dc7da06c1..2b34f1b40f6ec67006bd83aa4eceb73d2b554706 100644
--- a/continuousintegration/orchestration/tests/client_ar.sh
+++ b/continuousintegration/orchestration/tests/client_ar.sh
@@ -7,6 +7,7 @@ DATA_SOURCE=/dev/urandom
 ARCHIVEONLY=0 # Only archive files or do the full test?
 DONOTARCHIVE=0 # files were already archived in a previous run NEED TARGETDIR
 TARGETDIR=''
+LOGDIR='/var/log'
 
 COMMENT=''
 # id of the test so that we can track it
@@ -134,6 +135,9 @@ if [[ "x${TARGETDIR}" = "x" ]]; then
 else
     EOS_DIR="${EOS_BASEDIR}/${TARGETDIR}"
 fi
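+# Keep the xrootd error dumps of this run in a per-test directory under /var/log
+# so they end up in the client pod logs collected as CI artifacts.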
+LOGDIR="${LOGDIR}/$(basename ${EOS_DIR})"
+mkdir -p ${LOGDIR} || die "Cannot create directory LOGDIR: ${LOGDIR}"
+mkdir -p ${LOGDIR}/xrd_errors || die "Cannot create directory LOGDIR/xrd_errors: ${LOGDIR}/xrd_errors"
 
 STATUS_FILE=$(mktemp)
 ERROR_FILE=$(mktemp)
@@ -188,6 +192,12 @@ done | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump
 #  done | xargs -n ${BATCH_SIZE} --max-procs=${NB_BATCH_PROCS} ./batch_xrdcp /tmp/testfile root://${EOSINSTANCE}/${EOS_DIR}/${subdir}
   echo Done.
 done
+if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then
+  # there were some xrdcp errors
+  echo "Several xrdcp errors occured during archival!"
+  echo "Please check client pod logs in artifacts"
+  mv ${ERROR_DIR}/* ${LOGDIR}/xrd_errors/
+fi
 
 COPIED=0
 COPIED_EMPTY=0
@@ -223,6 +233,12 @@ while test 0 != ${ARCHIVING}; do
   echo "${ARCHIVED}/${TO_BE_ARCHIVED} archived"
 
   ARCHIVING=$((${TO_BE_ARCHIVED} - ${ARCHIVED}))
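+  # If every tape has been marked full, no further file can be archived, so stop waiting.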
+  NB_TAPE_NOT_FULL=`admin_cta --json ta ls --all | jq "[.[] | select(.full == false)] | length"`
+  if [[ ${NB_TAPE_NOT_FULL} == 0 ]]
+  then
+    echo "$(date +%s): All tapes are full, exiting archiving loop"
+    break
+  fi
 done
 
 
@@ -264,10 +280,16 @@ done
 # CAREFULL HERE: ${STATUS_FILE} contains lines like: 99/test9900001
 for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do
   echo -n "Recalling files to ${EOS_DIR}/${subdir} using ${NB_PROCS} processes..."
-  cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOSINSTANCE} prepare -s ${EOS_DIR}/${subdir}/TEST_FILE_NAME?activity=T0Reprocess 2>${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME && rm ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME || echo ERROR with xrootd transfer for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME" > /dev/null
+  cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOSINSTANCE} prepare -s ${EOS_DIR}/${subdir}/TEST_FILE_NAME?activity=T0Reprocess 2>${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME && rm ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME || echo ERROR with xrootd transfer for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME" | tee ${LOGDIR}/prepare_${subdir}.log | grep ^ERROR
   echo Done.
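+  # Verify that each prepared file now carries the sys.retrieve.req_id extended
+  # attribute; missing attributes are reported at the end of this script.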
+  cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOSINSTANCE} query opaquefile ${EOS_DIR}/${subdir}/TEST_FILE_NAME?mgm.pcmd=xattr\&mgm.subcmd=get\&mgm.xattrname=sys.retrieve.req_id 2>${ERROR_DIR}/XATTRGET_TEST_FILE_NAME && rm ${ERROR_DIR}/XATTRGET_TEST_FILE_NAME || echo ERROR with xrootd xattr get for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/XATTRGET_TEST_FILE_NAME" | tee ${LOGDIR}/prepare_sys.retrieve.req_id_${subdir}.log | grep ^ERROR
 done
-
+if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then
+  # there were some prepare errors
+  echo "Several prepare errors occured during retrieval!"
+  echo "Please check client pod logs in artifacts"
+  mv ${ERROR_DIR}/* ${LOGDIR}/xrd_errors/
+fi
 
 ARCHIVED=$(cat ${STATUS_FILE} | wc -l)
 TO_BE_RETRIEVED=$(( ${ARCHIVED} - $(ls ${ERROR_DIR}/RETRIEVE_* 2>/dev/null | wc -l) ))
@@ -434,9 +456,23 @@ test -z ${COMMENT} || annotate "test ${TESTID} FINISHED" "Summary:</br>NB_FILES:
 # stop tail
 test -z $TAILPID || kill ${TAILPID} &> /dev/null
 
-test ${LASTCOUNT} -eq $((${NB_FILES} * ${NB_DIRS})) && exit 0
+RC=0
+if [ ${LASTCOUNT} -ne $((${NB_FILES} * ${NB_DIRS})) ]; then
+  ((RC++))
+  echo "ERROR there were some lost files during the archive/retrieve test with ${NB_FILES} files (first 10):"
+  grep -v retrieved ${STATUS_FILE} | sed -e "s;^;${EOS_DIR}/;" | head -10
+fi
 
-echo "ERROR there were some lost files during the archive/retrieve test with ${NB_FILES} files (first 10):"
-grep -v retrieved ${STATUS_FILE} | sed -e "s;^;${EOS_DIR}/;" | head -10
+if [ $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | grep -v value= | wc -l) -ne 0 ]; then
+  # THIS IS NOT YET AN ERROR: UNCOMMENT THE FOLLOWING LINE WHEN https://gitlab.cern.ch/cta/CTA/issues/606 is fixed
+  # ((RC++))
+  echo "ERROR $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | grep -v value= | wc -l) files out of $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | wc -l) prepared files have no sys.retrieve.req_id extended attribute set"
+fi
 
-exit 1
+if [ $(ls ${LOGDIR}/xrd_errors | wc -l) -ne 0 ]; then
+  ((RC++))
+  echo "ERROR several xrootd failures occured during this run, please check client dumps in ${LOGDIR}/xrd_errors."
+fi
+
+exit ${RC}
diff --git a/continuousintegration/orchestration/tests/grpc_dir_inject.sh b/continuousintegration/orchestration/tests/grpc_dir_inject.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7cfb574abec6d0e5e638fee447a2cb0222923d59
--- /dev/null
+++ b/continuousintegration/orchestration/tests/grpc_dir_inject.sh
@@ -0,0 +1,97 @@
+#!/bin/sh
+
+# Migration tools parameters
+#EOSINSTANCE=ctaeos
+#EOS_CMD="/usr/bin/eos root://${EOSINSTANCE}"
+EOS_CMD="/usr/bin/eos"
+EOS_TEST_DIR_INJECT=/usr/bin/eos-test-dir-inject
+CONFIG_FILE=/etc/cta/castor-migration.conf
+TMPFILE=/tmp/eos-test-inject-sh.$$
+
+# Colours
+NC='\033[0m' # No colour
+RED='\033[0;31m'
+LT_RED='\033[1;31m'
+GREEN='\033[0;32m'
+LT_GREEN='\033[1;32m'
+ORANGE='\033[0;33m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+LT_BLUE='\033[1;34m'
+
+error()
+{
+  echo -e "${RED}$*${NC}" >&2
+  if [ -r ${TMPFILE} ]
+  then
+    json-pretty-print.sh ${TMPFILE}
+  fi
+  exit 1
+}
+
+echoc()
+{
+  COLOUR=$1
+  shift
+  echo -e "${COLOUR}$*${NC}"
+}
+
+[ -x ${EOS_TEST_DIR_INJECT} ] || error "Can't find executable ${EOS_TEST_DIR_INJECT}"
+[ -r ${CONFIG_FILE} ] || error "Can't find configuration file ${CONFIG_FILE}"
+CASTOR_PREFIX=$(awk '/^castor.prefix[ 	]/ { print $2 }' ${CONFIG_FILE})
+EOS_PREFIX=$(awk '/^eos.prefix[ 	]/ { print $2 }' ${CONFIG_FILE})
+
+echoc $LT_BLUE "gRPC configuration:"
+cat ${CONFIG_FILE}
+
+# Ping the gRPC interface
+${EOS_TEST_DIR_INJECT} ping || error "gRPC ping failed"
+
+# Create the top-level directory.
+#
+# Note: GNU coreutils "mkdir -p" does not return an error if the directory already exists;
+#       but "eos mkdir -p" does return an error.
+echo Creating ${EOS_PREFIX}...
+${EOS_CMD} mkdir -p ${EOS_PREFIX}
+
+# Create directory with system-assigned file id -- should succeed
+echoc $LT_BLUE "Creating directory with auto-assigned file id"
+${EOS_TEST_DIR_INJECT} --path ${CASTOR_PREFIX}/test_dir1 >${TMPFILE}
+[ $? -eq 0 ] || error "Creating directory with auto-assigned file id failed"
+json-pretty-print.sh ${TMPFILE}
+rm ${TMPFILE}
+${EOS_CMD} ls -l ${EOS_PREFIX}
+${EOS_CMD} fileinfo ${EOS_PREFIX}/test_dir1
+${EOS_CMD} attr ls ${EOS_PREFIX}/test_dir1
+${EOS_CMD} rmdir ${EOS_PREFIX}/test_dir1
+
+# Create directory with self-assigned file id -- should succeed
+TEST_FILE_ID=123456789
+echoc $LT_BLUE "Creating directory with self-assigned file id"
+${EOS_TEST_DIR_INJECT} --fileid ${TEST_FILE_ID} --path ${CASTOR_PREFIX}/test_dir2 >${TMPFILE}
+[ $? -eq 0 ] || error "Creating directory with self-assigned file id failed"
+json-pretty-print.sh ${TMPFILE}
+rm ${TMPFILE}
+${EOS_CMD} fileinfo ${EOS_PREFIX}/test_dir2
+
+# Try again -- should fail
+echoc $LT_GREEN "Creating directory with the same path (should fail)"
+${EOS_TEST_DIR_INJECT} --path ${CASTOR_PREFIX}/test_dir2 >/dev/null
+[ $? -ne 0 ] || error "Creating directory with self-assigned file id succeeded when it should have failed"
+
+# Try again -- should fail
+echoc $LT_GREEN "Creating directory with the same file id (should fail)"
+${EOS_TEST_DIR_INJECT} --fileid ${TEST_FILE_ID} --path ${CASTOR_PREFIX}/test_dir3 >/dev/null
+[ $? -ne 0 ] || error "Creating directory with self-assigned file id succeeded when it should have failed"
+
+# Remove and try again -- should succeed
+echoc $LT_GREEN "Remove the directory and tombstone"
+${EOS_CMD} rmdir ${EOS_PREFIX}/test_dir2
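+# Dropping the container from the namespace cache also removes the tombstone
+# left by the rmdir, so the same file id can be injected again below.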
+${EOS_CMD} ns cache drop-single-container ${TEST_FILE_ID}
+
+echoc $LT_BLUE "Recreate the directory with self-assigned file id (should succeed this time)"
+${EOS_TEST_DIR_INJECT} --fileid ${TEST_FILE_ID} --path ${CASTOR_PREFIX}/test_dir2 >/dev/null
+RC=$?
+[ ${RC} -eq 0 ] || error "Creating directory with self-assigned file id failed with error ${RC}"
+${EOS_CMD} fileinfo ${EOS_PREFIX}/test_dir2
+${EOS_CMD} rmdir ${EOS_PREFIX}/test_dir2
+
diff --git a/continuousintegration/orchestration/tests/migration.sh b/continuousintegration/orchestration/tests/migration.sh
new file mode 100755
index 0000000000000000000000000000000000000000..45e81f2c78850a911b3059769b5df5a89c2c31dd
--- /dev/null
+++ b/continuousintegration/orchestration/tests/migration.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+usage() { cat <<EOF 1>&2
+Usage: $0 -n <namespace>
+EOF
+exit 1
+}
+
+while getopts "n:" o; do
+    case "${o}" in
+        n)
+            NAMESPACE=${OPTARG}
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "${NAMESPACE}" ]; then
+    usage
+fi
+
+if [ ! -z "${error}" ]; then
+    echo -e "ERROR:\n${error}"
+    exit 1
+fi
+
+echo "Preparing namespace for the tests"
+./prepare_tests.sh -n ${NAMESPACE}
+if [ $? -ne 0 ]; then
+  echo "ERROR: failed to prepare namespace for the tests"
+  exit 1
+fi
+
+echo
+echo "Launching grpc_dir_inject.sh on ctaeos pod"
+kubectl -n ${NAMESPACE} cp grpc_dir_inject.sh ctaeos:/root/grpc_dir_inject.sh
+kubectl -n ${NAMESPACE} exec ctaeos -- bash /root/grpc_dir_inject.sh || exit 1
+
+exit 0
diff --git a/continuousintegration/orchestration/tests/multiple_repack.sh b/continuousintegration/orchestration/tests/multiple_repack.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c2b173eb79dd6e4058f22caa36bd0f756f94297d
--- /dev/null
+++ b/continuousintegration/orchestration/tests/multiple_repack.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+
+#default CI EOS instance
+EOSINSTANCE=ctaeos
+EOSBASEDIR=/eos/ctaeos/preprod
+
+#default Repack timeout
+WAIT_FOR_REPACK_TIMEOUT=300
+
+die() {
+  echo "$@" 1>&2
+  test -z $TAILPID || kill ${TAILPID} &> /dev/null
+  exit 1
+}
+
+usage() { cat <<EOF 1>&2
+Usage: $0 -s <size_of_tapes> -n <nb_files_per_tape> -b <repack_buffer_url> [-e <eosinstance>] [-d <eosbasedir>] [-t <timeout>]
+size_of_tape : in MB
+repack_buffer_url example : /eos/ctaeos/repack
+eosinstance : the name of the ctaeos instance to be used (default ctaeos)
+eosbasedir : the path in which files will be created for archival
+timeout : the timeout in seconds to wait for each repack request to be complete
+EOF
+exit 1
+}
+
+if [ $# -lt 6 ]
+then
+  usage
+fi;
+
+while getopts "s:n:e:t:b:d:" o; do
+  case "${o}" in
+    s)
+      SIZE_OF_TAPES=${OPTARG}
+      ;;
+    n)
+      NB_FILES_PER_TAPE=${OPTARG}
+      ;;
+    e)
+      EOSINSTANCE=${OPTARG}
+      ;;
+    t)
+      WAIT_FOR_REPACK_TIMEOUT=${OPTARG}
+      ;;
+    b)
+      REPACK_BUFFER_URL=${OPTARG}
+      ;;
+    d)
+      EOSBASEDIR=${OPTARG}
+      ;;
+    *)
+      usage
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+
+if [ -z "${SIZE_OF_TAPES}" ]; then
+    usage
+fi
+
+if [ -z "${NB_FILES_PER_TAPE}" ]; then
+    usage
+fi
+
+if [ -z "${REPACK_BUFFER_URL}" ]; then
+    usage
+fi
+
+echo "Starting multiple repack test"
+
+. /root/client_helper.sh
+
+# Get kerberos credentials for user1
+admin_kinit
+admin_klist > /dev/null 2>&1 || die "Cannot get kerberos credentials for user ${USER}"
+
+# Get the number of tapes available
+# availableTapes=`admin_cta --json ta ls --all | jq -r ".[] | select (.occupancy==\"0\") | .vid"`
+availableTapes=`admin_cta --json ta ls --all | jq -r ".[] | select (.full==false) | .vid"`
+
+# Save the available tapes in an array
+read -a arrayTapes <<< $availableTapes
+nbTapes=${#arrayTapes[@]}
+
+# Repack the first half of the available tapes; the second half will serve as destination tapes.
+nbTapesToRepack=$(($nbTapes/2))
+tapesToRepack=()
+for ((i=0; i<nbTapesToRepack; i++))
+do
+  tapesToRepack+=(${arrayTapes[$i]})
+done
+
+destinationTapes=()
+for (( i=$(($nbTapesToRepack)); i<nbTapes; i++ ))
+do
+  destinationTapes+=(${arrayTapes[$i]})
+done
+
+# Mark repack destination tape as full, we don't want to archive on them
+for vid in ${destinationTapes[@]}
+do
+  echo "Marking repack destination tape (${vid}) as full"
+  admin_cta tape ch --vid $vid --full true
+done
+
+nbDestinationTape=${#destinationTapes[@]}
+
+# Compute the number of files to copy and the size of each file
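+# The per-file size (in KB) is the tape capacity (MB -> KB) minus a small
+# allowance (presumably the 6 x 80-byte label blocks), divided by the number of
+# files per tape.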
+fileSizeToCopy=`perl -e "use POSIX; print int( ceil((( (${SIZE_OF_TAPES} * 1000) - ((6 * 80) / 1000)) / ${NB_FILES_PER_TAPE})) )"` 
+nbFilesToCopy=$(($NB_FILES_PER_TAPE * $nbTapesToRepack))
+
+echo
+echo "File size to copy (in KB) = $fileSizeToCopy"
+echo "Number of files to copy = $nbFilesToCopy"
+
+bash /root/client_ar.sh -n ${nbFilesToCopy} -s ${fileSizeToCopy} -p 100 -d ${EOSBASEDIR} -v -A || exit 1
+
+for vid in ${destinationTapes[@]}
+do
+  echo "Marking destination tape (${vid}) as not full"
+  admin_cta tape ch --vid $vid --full false
+done
+
+allPid=()
+for vid in ${tapesToRepack[@]}
+do
+  echo "Launching repack requests on vid $vid"
+  bash /root/repack_systemtest.sh -v $vid -b ${REPACK_BUFFER_URL} -t 500 &
+  allPid+=($!)
+done
+
+oneRepackFailed=0
+for pid in ${allPid[@]}
+do
+  wait $pid || oneRepackFailed=1
+done
+
+if [[ $oneRepackFailed == 1 ]]
+then
+  die "Fail of multiple_repack test"
+fi
+
+echo "End of multiple_repack test"
+
+#WAIT_FOR_REPACK_TIMEOUT=300
+#
+#while test $nbTapesToRepack != `admin_cta --json re ls | jq "[.[] | select(.status == \"Complete\" or .status == \"Failed\")] | length"`; do
+#  echo "Waiting for repack request on all tapes to be complete: Seconds passed = $SECONDS_PASSED"
+#  sleep 1
+#  let SECONDS_PASSED=SECONDS_PASSED+1
+#
+#  if test ${SECONDS_PASSED} == ${WAIT_FOR_REPACK_TIMEOUT}; then
+#    echo "Timed out after ${WAIT_FOR_REPACK_TIMEOUT} seconds waiting all tapes to be repacked"
+#    exit 1
+#  fi
+#done
+#
+#successfulRepackTapes=`admin_cta --json re ls | jq ".[] | select(.status == \"Complete\") | .vid"`
+#failedToRepackTapes=`admin_cta --json re ls | jq ".[] | select(.status == \"Failed\") | .vid"`
+#
+#read -a arrayFailedToRepackTapes <<< $failedToRepackTapes
+#
+#if  test 0 != ${#arrayFailedToRepackTapes[@]} then
+#    echo "Repack failed for tapes ${arrayFailedToRepackTapes[@]}."
+#    exit 1
+#else
+#  for vid in $successfulRepackTapes[@]
+#  do
+#    bash /root/repack_generate_report.sh -v $vid 
+#  done
+#  echo "End of multiple repack test"
+#  exit 0
+#fi
+
+# echo $nb_tapes_to_fill
\ No newline at end of file
diff --git a/continuousintegration/orchestration/tests/multiple_repack_wrapper.sh b/continuousintegration/orchestration/tests/multiple_repack_wrapper.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b601751ad2866c4a614204e5f4dedf99f3dabad8
--- /dev/null
+++ b/continuousintegration/orchestration/tests/multiple_repack_wrapper.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#default CI EOS instance
+EOSINSTANCE=ctaeos
+#default Repack timeout
+WAIT_FOR_REPACK_TIMEOUT=300
+
+die() {
+  echo "$@" 1>&2
+  test -z $TAILPID || kill ${TAILPID} &> /dev/null
+  exit 1
+}
+
+usage() { cat <<EOF 1>&2
+Usage: $0 -n <namespace>
+EOF
+exit 1
+}
+
+while getopts "n:" o; do
+    case "${o}" in
+        n)
+            NAMESPACE=${OPTARG}
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ -z "${NAMESPACE}" ]; then
+    usage
+fi
+
+if [ ! -z "${error}" ]; then
+    echo -e "ERROR:\n${error}"
+    exit 1
+fi
+
+echo "Preparing namespace for the tests"
+./prepare_tests.sh -n ${NAMESPACE}
+
+kubectl -n ${NAMESPACE} cp client_helper.sh client:/root/client_helper.sh
+kubectl -n ${NAMESPACE} cp client_ar.sh client:/root/client_ar.sh
+kubectl -n ${NAMESPACE} cp multiple_repack.sh client:/root/multiple_repack.sh
+kubectl -n ${NAMESPACE} cp repack_systemtest.sh client:/root/repack_systemtest.sh
+kubectl -n ${NAMESPACE} cp repack_generate_report.sh client:/root/repack_generate_report.sh
+
+NB_FILES_PER_TAPE=1000
+
+SIZE_OF_TAPES=10
+
+REPACK_BUFFER_URL=/eos/ctaeos/repack
+echo "Creating the repack buffer URL directory (${REPACK_BUFFER_URL})"
+kubectl -n ${NAMESPACE} exec ctaeos -- eos mkdir ${REPACK_BUFFER_URL}
+kubectl -n ${NAMESPACE} exec ctaeos -- eos chmod 1777 ${REPACK_BUFFER_URL}
+
+echo "Enabling all drives"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin dr up ".*"
+
+kubectl -n ${NAMESPACE} exec client -- bash /root/multiple_repack.sh -n ${NB_FILES_PER_TAPE} -s ${SIZE_OF_TAPES} -b ${REPACK_BUFFER_URL} || exit 1
\ No newline at end of file
diff --git a/continuousintegration/orchestration/tests/prepare_tests.sh b/continuousintegration/orchestration/tests/prepare_tests.sh
index 2f80787d510d0508cf3f69a75a37250e8074f2b4..35ebeeebd7cbc9561089541cb84eb6c536ea405b 100755
--- a/continuousintegration/orchestration/tests/prepare_tests.sh
+++ b/continuousintegration/orchestration/tests/prepare_tests.sh
@@ -72,6 +72,28 @@ echo "Preparing CTA configuration for tests"
       exit 1
     fi
   kubectl --namespace ${NAMESPACE} exec ctafrontend -- cta-catalogue-admin-user-create /etc/cta/cta-catalogue.conf --username ctaadmin1 -m "docker cli"
+
+  echo "Cleaning up leftovers from potential previous runs."
+  kubectl --namespace ${NAMESPACE} exec ctaeos -- eos rm /eos/ctaeos/cta/*
+  kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin --json tape ls --all  |             \
+    jq -r '.[] | .vid ' | xargs -I{} kubectl --namespace ${NAMESPACE} exec ctacli --            \
+    cta-admin tape rm -v {}
+
+  kubectl --namespace ${NAMESPACE}  exec ctacli -- cta-admin --json archiveroute ls |           \
+    jq '.[] |  "-i "  + .instance + " -s " + .storageClass + " -c " + (.copyNumber|tostring)' | \
+    xargs -I{} bash -c "kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin archiveroute rm {}"
+
+  kubectl --namespace ${NAMESPACE}  exec ctacli -- cta-admin --json tapepool ls  |              \
+    jq -r '.[] | .name' |                                                                       \
+    xargs -I{} kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin tapepool rm -n {} 
+
+  kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin --json storageclass ls  |           \
+    jq -r '.[] | "-i " + .diskInstance + " -n  " + .name'  |                                    \
+    xargs -I{} bash -c "kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin storageclass rm {}"
+
   for ((i=0; i<${#TAPEDRIVES_IN_USE[@]}; i++)); do
     kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin logicallibrary add \
       --name ${TAPEDRIVES_IN_USE[${i}]}                                            \
diff --git a/continuousintegration/orchestration/tests/repack_generate_report.sh b/continuousintegration/orchestration/tests/repack_generate_report.sh
index 8182ca5c79ec8233d1cf0cacb832149d8c1eaa99..ad537eff7815828a8ff253449d9a48c9248cd03a 100755
--- a/continuousintegration/orchestration/tests/repack_generate_report.sh
+++ b/continuousintegration/orchestration/tests/repack_generate_report.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 
 REPORT_DIRECTORY=/var/log
+ADD_COPIES_ONLY=0
 
 die() {
   echo "$@" 1>&2
@@ -9,8 +10,9 @@ die() {
 }
 
 usage() { cat <<EOF 1>&2
-Usage: $0 -v <vid> [-r <report_directory>]
+Usage: $0 -v <vid> [-r <report_directory>] [-a]
 Default report_directory = ${REPORT_DIRECTORY}
+-a: Specify if the repack is add copies only or not
 EOF
 exit 1
 }
@@ -20,7 +22,7 @@ then
   usage
 fi;
 
-while getopts "v:r:" o; do
+while getopts "v:r:a" o; do
     case "${o}" in
         v)
             VID=${OPTARG}
@@ -28,6 +30,9 @@ while getopts "v:r:" o; do
         r)
             REPORT_DIRECTORY=${OPTARG}
             ;;
+        a)
+            ADD_COPIES_ONLY=1
+            ;;
         *)
             usage
             ;;
@@ -42,56 +47,97 @@ shift $((OPTIND-1))
 admin_kinit
 admin_klist > /dev/null 2>&1 || die "Cannot get kerberos credentials for user ${USER}"
 
-echo "Generation of a repack report"
+repackRequest=`admin_cta --json repack ls --vid ${VID}`
+
+if [ "$repackRequest" == "" ];
+then
+  die "No repack request for this VID."
+fi;
+
+echo "Creating the report directory ${REPORT_DIRECTORY}"
+mkdir -p ${REPORT_DIRECTORY} || die "Unable to create the directory for report generation"
+
+echo "Generation of a repack report of the vid ${VID}"
 
 DATE=`date +%d-%m-%y-%H:%M:%S`
 
-ARCHIVE_FILE_LS_RESULT_PATH=${REPORT_DIRECTORY}/${VID}_af_ls_${DATE}.json
+ARCHIVE_FILE_LS_RESULT_PATH=${REPORT_DIRECTORY}/af_ls_${DATE}.json
+ARCHIVE_FILE_LS_VID_RESULT_PATH=${REPORT_DIRECTORY}/${VID}_af_ls_${DATE}.json
 NOT_REPACKED_JSON_PATH=${REPORT_DIRECTORY}/${VID}_report_not_repacked_${DATE}.json
 SELF_REPACKED_JSON_PATH=${REPORT_DIRECTORY}/${VID}_report_self_repacked_${DATE}.json
-REPACKED_JSON_PATH=${REPORT_DIRECTORY}/${VID}_report_repacked_${DATE}.json
+REPACKED_MOVE_JSON_PATH=${REPORT_DIRECTORY}/${VID}_report_repacked_move_${DATE}.json
+REPACK_ADD_COPIES_JSON_PATH=${REPORT_DIRECTORY}/${VID}_report_repack_add_copies_${DATE}.json
+STDOUT_REPORT_PATH=${REPORT_DIRECTORY}/${VID}_stdout_report.txt
 
 
 echo "1. Generate archive file ls result into ${ARCHIVE_FILE_LS_RESULT_PATH} file..."
-admin_cta --json archivefile ls --vid ${VID} > ${ARCHIVE_FILE_LS_RESULT_PATH}
+admin_cta --json archivefile ls --all > ${ARCHIVE_FILE_LS_RESULT_PATH}
 echo "OK"
 
-echo "2. Generate the non-repacked files report into ${NOT_REPACKED_JSON_PATH} file..."
-jq -r '[.[] | select(.tf.supersededByVid == "")]' ${ARCHIVE_FILE_LS_RESULT_PATH} > ${NOT_REPACKED_JSON_PATH}
+echo "2. Generate all the archive files that are on vid ${VID} into ${ARCHIVE_FILE_LS_VID_RESULT_PATH} file..."
+jq -r "[.[] | select (.tf.vid == \"${VID}\")]" ${ARCHIVE_FILE_LS_RESULT_PATH} > ${ARCHIVE_FILE_LS_VID_RESULT_PATH}
 echo "OK"
 
-echo "3. Generating the self-repacked files report into ${SELF_REPACKED_JSON_PATH} file..."
-jq -r  '[.[] | select((.tf.supersededByVid == .tf.vid) and (.tf.fSeq < .tf.supersededByFSeq))]' ${ARCHIVE_FILE_LS_RESULT_PATH} > ${SELF_REPACKED_JSON_PATH}
+echo "3. Generate the non-repacked files report into ${NOT_REPACKED_JSON_PATH} file..."
+jq -r "[.[] | select (.tf.supersededByVid == \"\" and .tf.vid == \"${VID}\")]" ${ARCHIVE_FILE_LS_VID_RESULT_PATH} > ${NOT_REPACKED_JSON_PATH}
 echo "OK"
 
-echo "4. Generate the repacked files report into ${REPACKED_JSON_PATH} file..."
-jq -r  '[.[] | select((.tf.supersededByVid != "") and (.tf.supersededByVid != .tf.vid))]' ${ARCHIVE_FILE_LS_RESULT_PATH} > ${REPACKED_JSON_PATH}
+echo "4. Generating the self-repacked files report into ${SELF_REPACKED_JSON_PATH} file..."
+jq -r  '[.[] | select((.tf.supersededByVid == .tf.vid) and (.tf.fSeq < .tf.supersededByFSeq))]' ${ARCHIVE_FILE_LS_VID_RESULT_PATH} > ${SELF_REPACKED_JSON_PATH}
 echo "OK"
 
-echo "5. Report of the repacked tape"
-echo
-NB_NON_REPACKED_FILES=$(jq '[.[]] | length' ${NOT_REPACKED_JSON_PATH} || 0)
-echo "Number of non repacked files : ${NB_NON_REPACKED_FILES}" 
-if [ ${NB_NON_REPACKED_FILES} -ne 0 ]
+echo "5. Generate the repacked (moved) files report into ${REPACKED_MOVE_JSON_PATH} file..."
+jq -r  '[.[] | select((.tf.supersededByVid != "") and (.tf.supersededByVid != .tf.vid))]' ${ARCHIVE_FILE_LS_VID_RESULT_PATH} > ${REPACKED_MOVE_JSON_PATH}
+echo "OK"
+
+echo "6. Generate the repack \"just add copies\" report into ${REPACK_ADD_COPIES_JSON_PATH} file..."
+storageClass=`jq ".[0] | .af.storageClass" ${ARCHIVE_FILE_LS_VID_RESULT_PATH}`
+copyNbToExclude=`jq -r ".[0] | .copyNb" ${ARCHIVE_FILE_LS_VID_RESULT_PATH}`
+copyNbs=`admin_cta --json archiveroute ls | jq -r ".[] | select(.storageClass == $storageClass and .copyNumber != $copyNbToExclude) | .copyNumber"`
+
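+# Keep the archive files whose copy number matches one of the other copy numbers
+# defined by the archive routes, i.e. the copies added by the repack.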
+jq -r "[.[] | select(.copyNb == (\"$copyNbs\" | split(\"\n\")[]))]" ${ARCHIVE_FILE_LS_RESULT_PATH} > ${REPACK_ADD_COPIES_JSON_PATH}
+echo "OK"
+
+echo "7. Report of the repacked tape"
+
+if [ ${ADD_COPIES_ONLY} -eq 0 ]
 then
-  header="ArchiveID\tFSeq\tSize"
-  { echo -e $header; jq -r '.[] | [.af.archiveId,.tf.fSeq, .af.size] | @tsv' ${NOT_REPACKED_JSON_PATH}; } | column -t
-fi; 
+  echo
+  NB_NON_REPACKED_FILES=$(jq '[.[]] | length' ${NOT_REPACKED_JSON_PATH} || echo 0)
+  echo "Number of non-repacked files : ${NB_NON_REPACKED_FILES}"
+  if [ ${NB_NON_REPACKED_FILES} -ne 0 ]
+  then
+    header="ArchiveID\tFSeq\tSize"
+    { echo -e $header; jq -r '.[] | [.af.archiveId,.tf.fSeq, .af.size] | @tsv' ${NOT_REPACKED_JSON_PATH}; } | column -t | tee --append ${STDOUT_REPORT_PATH}
+  fi;
+fi;
+ 
 echo
 NB_SELF_REPACKED_FILES=$(jq '[.[]] | length' ${SELF_REPACKED_JSON_PATH} || 0)
 echo "Number of self-repacked files : ${NB_SELF_REPACKED_FILES}"
 if [ ${NB_SELF_REPACKED_FILES} -ne 0 ]
 then
   header="ArchiveID\tFSeq\tSize"
-  { echo -e $header; jq -r '.[] | [.af.archiveId, .tf.fSeq, .af.size] | @tsv' ${SELF_REPACKED_JSON_PATH}; } | column -t
+  { echo -e $header; jq -r '.[] | [.af.archiveId, .tf.fSeq, .af.size] | @tsv' ${SELF_REPACKED_JSON_PATH}; } | column -t | tee --append ${STDOUT_REPORT_PATH}
 fi; 
 echo
-NB_REPACKED_FILES=$(jq '[.[]] | length' ${REPACKED_JSON_PATH} || 0)
-echo "Number of repacked files : ${NB_REPACKED_FILES}"
-if [ ${NB_REPACKED_FILES} -ne 0 ]
+
+NB_REPACKED_MOVE_FILES=$(jq '[.[]] | length' ${REPACKED_MOVE_JSON_PATH} || echo 0)
+echo "Number of repacked (moved) files : ${NB_REPACKED_MOVE_FILES}"
+if [ ${NB_REPACKED_MOVE_FILES} -ne 0 ]
+then
+  header="DestinationVID\tNbFiles\ttotalSize\n"
+  { echo -e $header; jq -r 'group_by(.tf.supersededByVid)[] | [(.[0].tf.supersededByVid),([.[] | .tf.supersededByFSeq] | length),(reduce [.[] | .af.size | tonumber][] as $currentSize (0; . + $currentSize))] | @tsv' ${REPACKED_MOVE_JSON_PATH}; } | column -t | tee --append ${STDOUT_REPORT_PATH}
+fi;
+echo
+
+NB_COPIED_FILES=$(jq '[.[]] | length' ${REPACK_ADD_COPIES_JSON_PATH} || echo 0)
+echo "Number of copied files : $NB_COPIED_FILES"
+if [ ${NB_COPIED_FILES} -ne 0 ]
 then
   header="DestinationVID\tNbFiles\ttotalSize\n"
-  { echo -e $header; jq -r 'group_by(.tf.supersededByVid)[] | [(.[0].tf.supersededByVid),([.[] | .tf.supersededByFSeq] | length),(reduce [.[] | .af.size | tonumber][] as $currentSize (0; . + $currentSize))] | @tsv' ${REPACKED_JSON_PATH}; } | column -t
+  { echo -e $header; jq -r 'group_by(.tf.vid)[] | [(.[0].tf.vid),([.[] | .tf.fSeq] | length),(reduce [.[] | .af.size | tonumber][] as $currentSize (0; . + $currentSize))] | @tsv' ${REPACK_ADD_COPIES_JSON_PATH}; } | column -t | tee --append ${STDOUT_REPORT_PATH}
 fi;
 
-echo "End of the repack report"
\ No newline at end of file
+echo "End of the repack report"
+exit 0
\ No newline at end of file
diff --git a/continuousintegration/orchestration/tests/repack_systemtest.sh b/continuousintegration/orchestration/tests/repack_systemtest.sh
index ab48ed6e93ca4f1a9f17ff80a2b14ab3079272fa..dd6d8c01cdd3464cdc915c18c160ad15493da5e3 100755
--- a/continuousintegration/orchestration/tests/repack_systemtest.sh
+++ b/continuousintegration/orchestration/tests/repack_systemtest.sh
@@ -5,6 +5,8 @@ EOSINSTANCE=ctaeos
 #default Repack timeout
 WAIT_FOR_REPACK_TIMEOUT=300
 
+REPORT_DIRECTORY=/var/log
+
 die() {
   echo "$@" 1>&2
   test -z $TAILPID || kill ${TAILPID} &> /dev/null
@@ -12,10 +14,14 @@ die() {
 }
 
 usage() { cat <<EOF 1>&2
-Usage: $0 -v <vid> -b <bufferURL> [-e <eosinstance>] [-t <timeout>]
+Usage: $0 -v <vid> -b <bufferURL> [-e <eosinstance>] [-t <timeout>] [-r <reportDirectory>] [-a] [-m] [-d]
 (bufferURL example : /eos/ctaeos/repack)
-eosinstance : the name of the ctaeos instance to be used (default ctaeos)
+eosinstance : the name of the ctaeos instance to be used (default : $EOSINSTANCE)
 timeout : the timeout in seconds to wait for the repack to be done
+reportDirectory : the directory to generate the report of the repack test (default : $REPORT_DIRECTORY)
+-a : Launch a repack just add copies workflow
+-m : Launch a repack just move workflow
+-d : Force a repack on a disabled tape (adds --disabled to the repack add command)
 EOF
 exit 1
 }
@@ -38,7 +44,8 @@ then
   usage
 fi;
 
-while getopts "v:e:b:t:" o; do
+DISABLED_TAPE_FLAG=""
+while getopts "v:e:b:t:r:amd" o; do
   case "${o}" in
     v)
       VID_TO_REPACK=${OPTARG}
@@ -52,6 +59,18 @@ while getopts "v:e:b:t:" o; do
     t)
       WAIT_FOR_REPACK_TIMEOUT=${OPTARG}
       ;;
+    a)
+      ADD_COPIES_ONLY="-a"
+      ;;
+    m)
+      MOVE_ONLY="-m"
+      ;;
+    r)
+      REPORT_DIRECTORY=${OPTARG}
+      ;;
+    d)
+      DISABLED_TAPE_FLAG="--disabledtape"
+      ;;
     *)
       usage
       ;;
@@ -69,6 +88,14 @@ if [ "x${VID_TO_REPACK}" = "x" ]; then
   die "No vid to repack provided."
 fi
 
+REPACK_OPTION=""
+
+if [ "x${ADD_COPIES_ONLY}" != "x" ] && [ "x${MOVE_ONLY}" != "x" ]; then
+  die "-a and -m options are mutually exclusive"
+fi
+
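+# -a and -m are mutually exclusive, so whichever one was supplied (if any)
+# becomes the repack option passed to cta-admin.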
+[[ "x${ADD_COPIES_ONLY}" == "x" ]] && REPACK_OPTION=${MOVE_ONLY} || REPACK_OPTION=${ADD_COPIES_ONLY}
+
 # get some common useful helpers for krb5
 . /root/client_helper.sh
 
@@ -96,7 +123,8 @@ fi
 admin_cta ds ls
 
 echo "Launching repack request for VID ${VID_TO_REPACK}, bufferURL = ${FULL_REPACK_BUFFER_URL}"
-admin_cta re add --vid ${VID_TO_REPACK} --justmove --bufferurl ${FULL_REPACK_BUFFER_URL}
+
+admin_cta repack add --vid ${VID_TO_REPACK} ${REPACK_OPTION} --bufferurl ${FULL_REPACK_BUFFER_URL} ${DISABLED_TAPE_FLAG}
 
 echo "Backpressure test: waiting to see a report of sleeping retrieve queue."
 SECONDS_PASSED=0
@@ -124,12 +152,20 @@ while test 0 = `admin_cta --json repack ls --vid ${VID_TO_REPACK} | jq -r '.[0]
 
   if test ${SECONDS_PASSED} == ${WAIT_FOR_REPACK_TIMEOUT}; then
     echo "Timed out after ${WAIT_FOR_REPACK_TIMEOUT} seconds waiting for tape ${VID_TO_REPACK} to be repacked"
+    /root/repack_generate_report.sh -v ${VID_TO_REPACK} -r ${REPORT_DIRECTORY} ${ADD_COPIES_ONLY}
     exit 1
   fi
 done
-if test 1 = `admin_cta --json repack ls --vid ${VID_TO_REPACK} | jq -r '.[0] | select(.status == "Failed")' | wc -l`; then
+if test 1 = `admin_cta --json repack ls --vid ${VID_TO_REPACK} | jq -r '[.[0] | select (.status == "Failed")] | length'`; then
     echo "Repack failed for tape ${VID_TO_REPACK}."
+    /root/repack_generate_report.sh -v ${VID_TO_REPACK} -r ${REPORT_DIRECTORY} ${ADD_COPIES_ONLY}
     exit 1
 fi
 
-exec /root/repack_generate_report.sh -v ${VID_TO_REPACK}
\ No newline at end of file
+echo "Repack request on VID ${VID_TO_REPACK} succeeded."
+
+/root/repack_generate_report.sh -v ${VID_TO_REPACK} -r ${REPORT_DIRECTORY} ${ADD_COPIES_ONLY}
+exit 0
diff --git a/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh b/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
index 9b9babf38fde92c45765b365ec306c9f48551e45..3c939d7d5f4221444b89bf8e11d9972709659a7b 100755
--- a/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
+++ b/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
@@ -1,5 +1,7 @@
 #!/bin/bash
 
+BASE_REPORT_DIRECTORY=/var/log
+
 usage() { cat <<EOF 1>&2
 Usage: $0 -n <namespace>
 EOF
@@ -52,14 +54,16 @@ kubectl -n ${NAMESPACE} cp repack_systemtest.sh client:/root/repack_systemtest.s
 kubectl -n ${NAMESPACE} cp repack_generate_report.sh client:/root/repack_generate_report.sh
 
 echo
-echo "Launching a round trip repack request"
+echo "***********************************************************"
+echo "STEP 1. Launching a round trip repack \"just move\" request"
+echo "***********************************************************"
 
 VID_TO_REPACK=$(getFirstVidContainingFiles)
 if [ "$VID_TO_REPACK" != "null" ] 
 then
 echo
-  echo "Launching the repack test on VID ${VID_TO_REPACK}"
-  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} || exit 1
+  echo "Launching the repack \"just move\" test on VID ${VID_TO_REPACK}"
+  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -m -r ${BASE_REPORT_DIRECTORY}/Step1-RoundTripRepack || exit 1
 else
   echo "No vid found to repack"
   exit 1
@@ -72,8 +76,8 @@ VID_TO_REPACK=$(getFirstVidContainingFiles)
 if [ "$VID_TO_REPACK" != "null" ] 
 then
 echo
-  echo "Launching the repack test on VID ${VID_TO_REPACK}"
-  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} || exit 1
+  echo "Launching the repack \"just move\" test on VID ${VID_TO_REPACK}"
+  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -m -r ${BASE_REPORT_DIRECTORY}/Step1-RoundTripRepack || exit 1
 else
   echo "No vid found to repack"
   exit 1
@@ -81,17 +85,166 @@ fi
 
 echo "Reclaiming tape ${VID_TO_REPACK}"
 kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape reclaim --vid ${VID_TO_REPACK}
+echo 
+echo "*******************************************************************"
+echo "STEP 1. Launching a round trip repack \"just move\" request TEST OK"
+echo "*******************************************************************"
+echo 
+echo "*****************************************************"
+echo "STEP 2. Launching a Repack Request on a disabled tape"
+echo "*****************************************************"
+
+VID_TO_REPACK=$(getFirstVidContainingFiles)
+
+if [ "$VID_TO_REPACK" != "null" ]
+then
+  echo "Marking the tape ${VID_TO_REPACK} as disabled"
+  kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape ch --disabled true --vid ${VID_TO_REPACK}
+  echo "Waiting 20 seconds so that the RetrieveQueueStatisticsCache is flushed"
+  sleep 20
+  echo "Launching the repack request test on VID ${VID_TO_REPACK}"
+  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -r ${BASE_REPORT_DIRECTORY}/Step2-RepackDisabledTape && echo "The repack request succeeded, but it should have failed because the tape is disabled" && exit 1 || echo "Repack failed as expected: the tape is disabled. Test OK"
+else
+  echo "No vid found to repack"
+  exit 1
+fi;
 
-NB_FILES=1153
+echo
+echo "Launching the repack request test on VID ${VID_TO_REPACK} with the --disabledtape flag"
+kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -d -r ${BASE_REPORT_DIRECTORY}/Step2-RepackDisabledTape || exit 1
+
+echo "Reclaiming tape ${VID_TO_REPACK}"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape reclaim --vid ${VID_TO_REPACK}
+
+echo
+echo "*************************************************************"
+echo "STEP 2. Launching a Repack Request on a disabled tape TEST OK"
+echo "*************************************************************"
+echo 
+echo "*********************************************"
+echo "STEP 3. Testing Repack \"Just move\" workflow"
+echo "*********************************************"
+
+NB_FILES=1152
 kubectl -n ${NAMESPACE} exec client -- bash /root/client_ar.sh -n ${NB_FILES} -s ${FILE_SIZE_KB} -p 100 -d /eos/ctaeos/preprod -v -A || exit 1
 
 VID_TO_REPACK=$(getFirstVidContainingFiles)
 if [ "$VID_TO_REPACK" != "null" ] 
 then
 echo
-  echo "Launching the repack test on VID ${VID_TO_REPACK}"
-  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} || exit 1
+  echo "Launching the repack test \"just move\" on VID ${VID_TO_REPACK}"
+  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -m -r ${BASE_REPORT_DIRECTORY}/Step3-RepackJustMove || exit 1
 else
   echo "No vid found to repack"
   exit 1
 fi
+
+echo "Reclaiming tape ${VID_TO_REPACK}"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape reclaim --vid ${VID_TO_REPACK}
+echo 
+echo "*****************************************************"
+echo "STEP 3. Testing Repack \"Just move\" workflow TEST OK"
+echo "*****************************************************"
+echo
+echo "**************************************************************************"
+echo "STEP 4. Testing Repack \"Just Add copies\" workflow with all copies on CTA"
+echo "**************************************************************************"
+
+VID_TO_REPACK=$(getFirstVidContainingFiles)
+if [ "$VID_TO_REPACK" != "null" ] 
+then
+  echo "Launching the repack \"just add copies\" test on VID ${VID_TO_REPACK} with all copies already on CTA"
+  kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -a -r ${BASE_REPORT_DIRECTORY}/Step4-JustAddCopiesAllCopiesInCTA || exit 1
+else
+  echo "No vid found to repack"
+  exit 1
+fi
+
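+# With all copies already on tape, a "just add copies" repack should have nothing to retrieve and nothing to archive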
+repackJustAddCopiesResult=`kubectl -n ${NAMESPACE} exec ctacli -- cta-admin --json re ls | jq -r ". [] | select (.vid == \"${VID_TO_REPACK}\")"`
+
+nbRetrievedFiles=`echo ${repackJustAddCopiesResult} | jq -r ".retrievedFiles"`
+nbArchivedFiles=`echo ${repackJustAddCopiesResult} | jq -r ".archivedFiles"`
+
+if [ $nbArchivedFiles == 0 ] && [ $nbRetrievedFiles == 0 ] 
+then
+  echo "Nb retrieved files = 0 and nb archived files = 0. Test OK"
+else
+  echo "Repack \"just add copies\" on VID ${VID_TO_REPACK} failed : nbRetrievedFiles = $nbRetrievedFiles, nbArchivedFiles = $nbArchivedFiles"
+  exit 1
+fi
+
+echo
+echo "**********************************************************************************"
+echo "STEP 4. Testing Repack \"Just Add copies\" workflow with all copies on CTA TEST OK"
+echo "**********************************************************************************"
+
+echo
+echo "*******************************************************"
+echo "STEP 5. Testing Repack \"Move and Add copies\" workflow"
+echo "*******************************************************"
+
+tapepoolDestination1="ctasystest2"
+tapepoolDestination2="ctasystest3"
+
+echo "Creating two destination tapepool : $tapepoolDestination1 and $tapepoolDestination2"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tapepool add --name $tapepoolDestination1 --vo vo --partialtapesnumber 2 --encrypted false --comment "$tapepoolDestination1 tapepool"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tapepool add --name $tapepoolDestination2 --vo vo --partialtapesnumber 2 --encrypted false --comment "$tapepoolDestination2 tapepool"
+echo "OK"
+
+echo "Creating archive routes for adding two copies of the file"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin archiveroute add --instance ctaeos --storageclass ctaStorageClass --copynb 2 --tapepool $tapepoolDestination1 --comment "ArchiveRoute2"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin archiveroute add --instance ctaeos --storageclass ctaStorageClass --copynb 3 --tapepool $tapepoolDestination2 --comment "ArchiveRoute3"
+echo "OK"
+
+echo "Will change the tapepool of the tapes"
+
+allVID=`kubectl -n ${NAMESPACE}  exec ctacli -- cta-admin --json tape ls --all | jq -r ". [] | .vid"`
+read -a allVIDTable <<< $allVID
+
+nbVid=${#allVIDTable[@]}
+
+allTapepool=`kubectl -n ${NAMESPACE} exec ctacli -- cta-admin --json tapepool ls | jq -r ". [] .name"`
+
+read -a allTapepoolTable <<< $allTapepool
+
+nbTapepool=${#allTapepoolTable[@]}
+
+nbTapePerTapepool=$(($nbVid / $nbTapepool))
+
+
+countChanging=0
+tapepoolIndice=1 # We only change the tapepool of the remaining tapes
+
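+# The first tapepool keeps nbTapePerTapepool tapes plus the remainder of the division; e.g. with 7 tapes and 3 tapepools the loop starts at index 3 and moves 2 tapes to each of the other 2 tapepools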
+for ((i=$(($nbTapePerTapepool+$(($nbVid%$nbTapepool)))); i<$nbVid; i++));
+do
+  echo "kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape ch --vid ${allVIDTable[$i]} --tapepool ${allTapepoolTable[$tapepoolIndice]}"
+  kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape ch --vid ${allVIDTable[$i]} --tapepool ${allTapepoolTable[$tapepoolIndice]}
+  countChanging=$((countChanging + 1))
+  if [ $countChanging != 0 ] && [ $((countChanging % nbTapePerTapepool)) == 0 ]
+  then
+    tapepoolIndice=$((tapepoolIndice + 1))
+  fi
+done
+
+echo "OK"
+
+storageClassName=`kubectl -n ${NAMESPACE} exec ctacli -- cta-admin --json storageclass ls | jq -r ". [0] | .name"`
+instanceName=`kubectl -n ${NAMESPACE} exec ctacli -- cta-admin --json storageclass ls | jq -r ". [0] | .diskInstance"`
+
+echo "Changing the storage class $storageClassName nb copies"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin storageclass ch --instance $instanceName --name $storageClassName --copynb 3
+echo "OK"
+
+echo "Putting all drives up"
+kubectl -n ${NAMESPACE} exec ctacli -- cta-admin dr up VD.*
+echo "OK"
+
+echo "Launching the repack \"Move and add copies\" test on VID ${VID_TO_REPACK}"
+kubectl -n ${NAMESPACE} exec client -- bash /root/repack_systemtest.sh -v ${VID_TO_REPACK} -b ${REPACK_BUFFER_URL} -t 600 -r ${BASE_REPORT_DIRECTORY}/Step5-MoveAndAddCopies || exit 1
+echo
+echo "***************************************************************"
+echo "STEP 5. Testing Repack \"Move and Add copies\" workflow TEST OK"
+echo "***************************************************************"
diff --git a/cta.spec.in b/cta.spec.in
index 3671dfc5234b536e8bde7f43039213c171761f22..681a118d3bf0a322b66274511d0cd9f79bedef3d 100644
--- a/cta.spec.in
+++ b/cta.spec.in
@@ -311,8 +311,6 @@ directory metadata into the EOS namespace.
 %attr(0755,root,root) %{_bindir}/eos-import-files
 %attr(0755,root,root) %{_bindir}/eos-test-dir-inject
 %attr(0755,root,root) %{_bindir}/eos-test-file-inject
-%attr(0755,root,root) %{_bindir}/eos-test-inject.sh
-%attr(0755,root,root) %{_bindir}/eos-insert-missing-dirs
 %attr(0755,root,root) %{_bindir}/json-pretty-print.sh
 %attr(0755,root,root) %{_bindir}/startvoexport.sh
 %attr(0755,root,root) %{_bindir}/exporttapepool.sh
diff --git a/disk/DiskFile.cpp b/disk/DiskFile.cpp
index 18667be23094059e3ae3f8897a1ca488bb44979a..32a31aa32cdb2587498b23504f0b97150e1f2de0 100644
--- a/disk/DiskFile.cpp
+++ b/disk/DiskFile.cpp
@@ -774,11 +774,7 @@ bool XRootdDirectory::exist() {
   if(statStatus.errNo == XErrorCode::kXR_NotFound){
     return false;
   }
-  cta::exception::XrootCl::throwOnError(statStatus,"In XrootdDirectory::exist(): fail to determine if directory exists.");
-  if(statInfo->GetSize() !=  0){
-    return true;
-  }
-  return false;
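+  // The stat did not return kXR_NotFound, so consider that the directory exists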
+  return true;
 }
 
 std::set<std::string> XRootdDirectory::getFilesName(){
diff --git a/objectstore/ArchiveRequest.cpp b/objectstore/ArchiveRequest.cpp
index c4a72d0dd1fb313c958639c90923a1989ea98922..3d42dd084c322636e8384dc92caf87ca90c8bac1 100644
--- a/objectstore/ArchiveRequest.cpp
+++ b/objectstore/ArchiveRequest.cpp
@@ -65,6 +65,20 @@ void ArchiveRequest::initialize() {
   m_payloadInterpreted = true;
 }
 
+void ArchiveRequest::commit(){
+  checkPayloadWritable();
+  checkPayloadReadable();
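+  // Sanity check: an ArchiveRequest must not contain two jobs with the same destination tapepool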
+  for(auto & job: m_payload.jobs()){
+    int nbTapepool = std::count_if(m_payload.jobs().begin(),m_payload.jobs().end(),[&job](const cta::objectstore::serializers::ArchiveJob & archiveJob){
+      return archiveJob.tapepool() == job.tapepool();
+    });
+    if(nbTapepool != 1){
+      throw cta::exception::Exception("In ArchiveRequest::commit(), cannot insert an ArchiveRequest containing archive jobs with the same destination tapepool");
+    }
+  }
+  ObjectOps<serializers::ArchiveRequest, serializers::ArchiveRequest_t>::commit();
+}
+
 //------------------------------------------------------------------------------
 // ArchiveRequest::addJob()
 //------------------------------------------------------------------------------
@@ -139,24 +153,24 @@ auto ArchiveRequest::addTransferFailure(uint32_t copyNumber,
       }
       j.set_totalretries(j.totalretries() + 1);
       * j.mutable_failurelogs()->Add() = failureReason;
-    }
-    if (j.totalretries() >= j.maxtotalretries()) {
-      // We have to determine if this was the last copy to fail/succeed.
-      return determineNextStep(copyNumber, JobEvent::TransferFailed, lc);
-    } else {
-      EnqueueingNextStep ret;
-      bool isRepack =  m_payload.isrepack();
-      ret.nextStatus = isRepack ? serializers::ArchiveJobStatus::AJS_ToTransferForRepack : serializers::ArchiveJobStatus::AJS_ToTransferForUser;
-      // Decide if we want the job to have a chance to come back to this mount (requeue) or not. In the latter
-      // case, the job will remain owned by this session and get garbage collected.
-      if (j.retrieswithinmount() >= j.maxretrieswithinmount())
-        ret.nextStep = EnqueueingNextStep::NextStep::Nothing;
-      else
-        ret.nextStep = isRepack ? EnqueueingNextStep::NextStep::EnqueueForTransferForRepack : EnqueueingNextStep::NextStep::EnqueueForTransferForUser;
-      return ret;
+      if (j.totalretries() >= j.maxtotalretries()) {
+        // We have to determine if this was the last copy to fail/succeed.
+        return determineNextStep(copyNumber, JobEvent::TransferFailed, lc);
+      } else {
+        EnqueueingNextStep ret;
+        bool isRepack =  m_payload.isrepack();
+        ret.nextStatus = isRepack ? serializers::ArchiveJobStatus::AJS_ToTransferForRepack : serializers::ArchiveJobStatus::AJS_ToTransferForUser;
+        // Decide if we want the job to have a chance to come back to this mount (requeue) or not. In the latter
+        // case, the job will remain owned by this session and get garbage collected.
+        if (j.retrieswithinmount() >= j.maxretrieswithinmount())
+          ret.nextStep = EnqueueingNextStep::NextStep::Nothing;
+        else
+          ret.nextStep = isRepack ? EnqueueingNextStep::NextStep::EnqueueForTransferForRepack : EnqueueingNextStep::NextStep::EnqueueForTransferForUser;
+        return ret;
+      }
     }
   }
-  throw NoSuchJob ("In ArchiveRequest::addJobFailure(): could not find job");
+  throw NoSuchJob ("In ArchiveRequest::addTransferFailure(): could not find job");
 }
 
 //------------------------------------------------------------------------------
@@ -822,7 +836,7 @@ auto ArchiveRequest::determineNextStep(uint32_t copyNumberUpdated, JobEvent jobE
   for (auto &j:jl) { if (j.copynb() == copyNumberUpdated) currentStatus = j.status(); }
   if (!currentStatus) {
     std::stringstream err;
-    err << "In ArchiveRequest::updateJobStatus(): copynb not found : " << copyNumberUpdated
+    err << "In ArchiveRequest::determineNextStep(): copynb not found : " << copyNumberUpdated
         << "existing ones: ";
     for (auto &j: jl) err << j.copynb() << "  ";
     throw cta::exception::Exception(err.str());
@@ -830,13 +844,13 @@ auto ArchiveRequest::determineNextStep(uint32_t copyNumberUpdated, JobEvent jobE
   // Check status compatibility with event.
   switch (jobEvent) {
   case JobEvent::TransferFailed:
-    if (*currentStatus != ArchiveJobStatus::AJS_ToTransferForUser) {
+    if (*currentStatus != ArchiveJobStatus::AJS_ToTransferForUser && *currentStatus != ArchiveJobStatus::AJS_ToTransferForRepack) {
       // Wrong status, but the context leaves no ambiguity. Just warn.
       log::ScopedParamContainer params(lc);
       params.add("event", eventToString(jobEvent))
             .add("status", statusToString(*currentStatus))
             .add("fileId", m_payload.archivefileid());
-      lc.log(log::WARNING, "In ArchiveRequest::updateJobStatus(): unexpected status. Assuming ToTransfer.");
+      lc.log(log::WARNING, "In ArchiveRequest::determineNextStep(): unexpected status. Assuming ToTransfer.");
     }
     break;
   case JobEvent::ReportFailed:
@@ -846,7 +860,7 @@ auto ArchiveRequest::determineNextStep(uint32_t copyNumberUpdated, JobEvent jobE
       params.add("event", eventToString(jobEvent))
               .add("status", statusToString(*currentStatus))
             .add("fileId", m_payload.archivefileid());
-      lc.log(log::WARNING, "In ArchiveRequest::updateJobStatus(): unexpected status. Failing the job.");
+      lc.log(log::WARNING, "In ArchiveRequest::determineNextStep(): unexpected status. Failing the job.");
     }
   }
   // We are in the normal cases now.
@@ -854,18 +868,14 @@ auto ArchiveRequest::determineNextStep(uint32_t copyNumberUpdated, JobEvent jobE
   switch (jobEvent) {  
   case JobEvent::TransferFailed:
   {
+    bool isRepack = m_payload.isrepack();
     if (!m_payload.reportdecided()) {
       m_payload.set_reportdecided(true);
-      if(!m_payload.isrepack()){
-        ret.nextStep = EnqueueingNextStep::NextStep::EnqueueForReportForUser;
-        ret.nextStatus = serializers::ArchiveJobStatus::AJS_ToReportToUserForFailure;
-      } else {
-        ret.nextStep = EnqueueingNextStep::NextStep::EnqueueForReportForRepack;
-        ret.nextStatus = serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure;
-      }
+      ret.nextStep = isRepack ? EnqueueingNextStep::NextStep::EnqueueForReportForRepack : EnqueueingNextStep::NextStep::EnqueueForReportForUser;
+      ret.nextStatus = isRepack ? serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure : serializers::ArchiveJobStatus::AJS_ToReportToUserForFailure;
     } else {
-      ret.nextStep = EnqueueingNextStep::NextStep::StoreInFailedJobsContainer;
-      ret.nextStatus = serializers::ArchiveJobStatus::AJS_Failed;
+      ret.nextStep = isRepack ? EnqueueingNextStep::NextStep::EnqueueForReportForRepack : EnqueueingNextStep::NextStep::StoreInFailedJobsContainer;
+      ret.nextStatus = isRepack ? serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure : serializers::ArchiveJobStatus::AJS_Failed;
     }
   }
   break;
diff --git a/objectstore/ArchiveRequest.hpp b/objectstore/ArchiveRequest.hpp
index 6936f967a1c55b695804566d571279b364bfc9e8..8d925e8150ec27b0a8633a900bc913bf6a0ec149 100644
--- a/objectstore/ArchiveRequest.hpp
+++ b/objectstore/ArchiveRequest.hpp
@@ -44,6 +44,7 @@ public:
   ArchiveRequest(Backend & os);
   ArchiveRequest(GenericObject & go);
   void initialize();
+  void commit();
   // Ownership of archive requests is managed per job. Object level owner has no meaning.
   std::string getOwner() = delete;
   void setOwner(const std::string &) = delete;
diff --git a/objectstore/DriveState.cpp b/objectstore/DriveState.cpp
index 7434180bad6c01642bc49f78577ee6b70296078e..7d673c678a1aec891b1987d022407da3096912e3 100644
--- a/objectstore/DriveState.cpp
+++ b/objectstore/DriveState.cpp
@@ -19,6 +19,9 @@
 #include "DriveState.hpp"
 #include "GenericObject.hpp"
 #include <google/protobuf/util/json_util.h>
+#include <version.h>
+#include "common/SourcedParameter.hpp"
+#include "tapeserver/daemon/FetchReportOrFlushLimits.hpp"
 
 namespace cta { namespace objectstore {
 
@@ -105,6 +108,12 @@ cta::common::dataStructures::DriveState DriveState::getState() {
   ret.currentVid                  = m_payload.currentvid();
   ret.currentTapePool             = m_payload.currenttapepool();
   ret.currentPriority             = m_payload.current_priority();
+  ret.ctaVersion                  = m_payload.cta_version();
+  for(auto & driveConfigItem: m_payload.drive_config()){
+    ret.driveConfigItems.push_back({driveConfigItem.category(),driveConfigItem.key(),driveConfigItem.value(),driveConfigItem.source()});
+  }
+  ret.devFileName = m_payload.dev_file_name();
+  ret.rawLibrarySlot = m_payload.raw_library_slot();
   if (m_payload.has_current_activity())
     ret.currentActivityAndWeight = 
       cta::common::dataStructures::DriveState::ActivityAndWeight{
@@ -174,6 +183,96 @@ void DriveState::setState(cta::common::dataStructures::DriveState& state) {
   }
 }
 
+template <>
+void DriveState::setConfigValue<std::string>(cta::objectstore::serializers::DriveConfig * item, const std::string& value){
+  item->set_value(value);
+}
+
+template<>
+void DriveState::setConfigValue<uint64_t>(cta::objectstore::serializers::DriveConfig * item,const uint64_t & value){
+  item->set_value(std::to_string(value));
+}
+
+template<>
+void DriveState::setConfigValue<time_t>(cta::objectstore::serializers::DriveConfig * item,const time_t & value){
+  item->set_value(std::to_string(value));
+}
+
+template <typename T>
+cta::objectstore::serializers::DriveConfig * DriveState::createAndInitDriveConfig(cta::SourcedParameter<T>& sourcedParameter) {
+  auto item = m_payload.mutable_drive_config()->Add();
+  item->set_source(sourcedParameter.source());
+  item->set_category(sourcedParameter.category());
+  item->set_key(sourcedParameter.key());
+  return item;
+}
+
+template<>
+void DriveState::fillConfig<std::string>(cta::SourcedParameter<std::string> & sourcedParameter){
+  auto item = createAndInitDriveConfig(sourcedParameter);
+  setConfigValue(item,sourcedParameter.value());
+}
+
+template <>
+void DriveState::fillConfig<cta::tape::daemon::FetchReportOrFlushLimits>(cta::SourcedParameter<cta::tape::daemon::FetchReportOrFlushLimits>& sourcedParameter){
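+  // A FetchReportOrFlushLimits parameter holds a files value and a bytes value, so it is published as two DriveConfig items keyed "...Files" and "...Bytes"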
+  auto itemFiles = createAndInitDriveConfig(sourcedParameter);
+  std::string key = sourcedParameter.key();
+  cta::utils::searchAndReplace(key,"Bytes","");
+  cta::utils::searchAndReplace(key,"Files","");
+  itemFiles->set_key(key.append("Files"));
+  setConfigValue(itemFiles, sourcedParameter.value().maxFiles);
+  
+  cta::utils::searchAndReplace(key,"Files","");
+  auto itemBytes = createAndInitDriveConfig(sourcedParameter);
+  itemBytes->set_key(key.append("Bytes"));
+  setConfigValue(itemBytes,sourcedParameter.value().maxBytes);
+}
+
+template<>
+void DriveState::fillConfig<uint64_t>(cta::SourcedParameter<uint64_t>& sourcedParameter){
+  auto item = createAndInitDriveConfig(sourcedParameter);
+  setConfigValue(item,sourcedParameter.value());
+}
+
+template<>
+void DriveState::fillConfig<time_t>(cta::SourcedParameter<time_t>& sourcedParameter){
+  auto item = createAndInitDriveConfig(sourcedParameter);
+  setConfigValue(item,sourcedParameter.value());
+}
+
+//------------------------------------------------------------------------------
+// DriveState::setConfig())
+//------------------------------------------------------------------------------
+void DriveState::setConfig(const cta::tape::daemon::TapedConfiguration& tapedConfiguration) {
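+  // fillConfig() takes non-const references, so constness is cast away here; the configuration is only read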
+  cta::tape::daemon::TapedConfiguration * config = const_cast<cta::tape::daemon::TapedConfiguration*>(&tapedConfiguration);
+  
+  m_payload.mutable_drive_config()->Clear();
+  
+  fillConfig(config->daemonUserName);
+  fillConfig(config->daemonGroupName);
+  fillConfig(config->logMask);
+  fillConfig(config->tpConfigPath);
+  fillConfig(config->bufferSizeBytes);
+  fillConfig(config->bufferCount);
+  fillConfig(config->archiveFetchBytesFiles);
+  fillConfig(config->archiveFlushBytesFiles);
+  fillConfig(config->retrieveFetchBytesFiles);
+  fillConfig(config->mountCriteria);
+  fillConfig(config->nbDiskThreads);
+  fillConfig(config->useRAO);
+  fillConfig(config->wdScheduleMaxSecs);
+  fillConfig(config->wdMountMaxSecs);
+  fillConfig(config->wdNoBlockMoveMaxSecs);
+  fillConfig(config->wdIdleSessionTimer);
+  fillConfig(config->backendPath);
+  fillConfig(config->fileCatalogConfigFile);
+}
+
+void DriveState::setTpConfig(const cta::tape::daemon::TpconfigLine& configLine){
+  m_payload.set_dev_file_name(configLine.devFilename);
+  m_payload.set_raw_library_slot(configLine.rawLibrarySlot);
+}
+
 //------------------------------------------------------------------------------
 // DriveState::getDiskSpaceReservations())
 //------------------------------------------------------------------------------
@@ -251,8 +350,10 @@ std::string DriveState::dump() {
   return headerDump;
 }
 
-}} // namespace cta::objectstore
-
-
-
+void DriveState::commit(){
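+  // Stamp the drive state with the current software version on every commit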
+  checkPayloadWritable();
+  m_payload.set_cta_version(CTA_VERSION);
+  ObjectOps<serializers::DriveState, serializers::DriveState_t>::commit();
+}
 
+}} // namespace cta::objectstore
diff --git a/objectstore/DriveState.hpp b/objectstore/DriveState.hpp
index afe22a049c90c1fb586b68c4fe410c59c44f023a..ab405df93a61b9cfc812bfd543f6fdd18ce9c1e0 100644
--- a/objectstore/DriveState.hpp
+++ b/objectstore/DriveState.hpp
@@ -21,6 +21,7 @@
 #include "ObjectOps.hpp"
 #include "common/dataStructures/DriveState.hpp"
 #include "common/dataStructures/DriveNextState.hpp"
+#include "tapeserver/daemon/TapedConfiguration.hpp"
 
 namespace cta { namespace objectstore {
 
@@ -52,11 +53,41 @@ public:
   void substractDiskSpaceReservation(const std::string & diskSystemName, uint64_t bytes);
   void resetDiskSpaceReservation();
   
+  void setConfig(const cta::tape::daemon::TapedConfiguration &tapedConfiguration);
+  void setTpConfig(const cta::tape::daemon::TpconfigLine &tpConfigLine);
   /**
    * JSON dump of the drive state
    * @return 
    */
   std::string dump();
+  
+  void commit();
+  
+private:
+  /**
+   * Sets a configuration value on the DriveConfig item passed as parameter
+   * @param item the objectstore DriveConfig item
+   * @param value the value to set to the item
+   */
+  template <typename T>
+  void setConfigValue(cta::objectstore::serializers::DriveConfig * item,const T& value);
+  
+  /**
+   * Adds a DriveConfig to the DriveState's DriveConfig list and returns a pointer
+   * to it, so that the new DriveConfig can be modified afterwards
+   * @param sourcedParameter the configuration used to initialize the DriveConfig item
+   * @return a pointer to the newly added DriveConfig item
+   */
+  template <typename T>
+  cta::objectstore::serializers::DriveConfig * createAndInitDriveConfig(cta::SourcedParameter<T>& sourcedParameter);
+  
+  /**
+   * Copies the content of the given SourcedParameter into the DriveState's
+   * protobuf list of DriveConfig items (used by the cta-admin --json dr ls command)
+   * @param sourcedParameter the SourcedParameter to save into the protobuf list of DriveConfig items
+   */
+  template <typename T>
+  void fillConfig(cta::SourcedParameter<T>& sourcedParameter);
 };
 
 }} // namespace cta::objectstore
\ No newline at end of file
diff --git a/objectstore/GarbageCollector.cpp b/objectstore/GarbageCollector.cpp
index a2626922492b5b057526eea2540bf67fbfaa883f..052f4bb2098d62aec6c2664243bc89c31c6499da 100644
--- a/objectstore/GarbageCollector.cpp
+++ b/objectstore/GarbageCollector.cpp
@@ -336,6 +336,7 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
         obj.reset();
         // Get the list of vids for non failed tape files.
         std::set<std::string> candidateVids;
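+        // A repack request may be allowed to mount a disabled tape; propagate the forceDisabledTape flag to the queue selection below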
+        bool disabledTape = rr->getRepackInfo().forceDisabledTape;
         for (auto & j: rr->dumpJobs()) {
           if(j.status==RetrieveJobStatus::RJS_ToTransfer) {
             for (auto &tf: rr->getArchiveFile().tapeFiles) {
@@ -369,7 +370,7 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
         // Back to the transfer case.
         std::string vid;
         try {
-          vid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, objectStore);
+          vid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, objectStore, disabledTape);
         } catch (Helpers::NoTapeAvailableForRetrieve & ex) {
           log::ScopedParamContainer params3(lc);
           params3.add("fileId", rr->getArchiveFile().archiveFileID);
diff --git a/objectstore/GarbageCollectorTest.cpp b/objectstore/GarbageCollectorTest.cpp
index ada1a4bce885e7609b6a55e06ed85e718048ba40..e9ff8227f4bd415f4d4068ea0e4f368b7b83b7cb 100644
--- a/objectstore/GarbageCollectorTest.cpp
+++ b/objectstore/GarbageCollectorTest.cpp
@@ -1558,6 +1558,167 @@ TEST(ObjectStore, GarbageCollectorRetrieveAllStatusesAndQueues) {
   }
 }
 
+TEST(ObjectStore, GarbageCollectorRetrieveRequestRepackDisabledTape){
+// We will need a log object
+#ifdef STDOUT_LOGGING
+  cta::log::StdoutLogger dl("dummy", "unitTest");
+#else
+  cta::log::DummyLogger dl("dummy", "unitTest");
+#endif
+  cta::log::LogContext lc(dl);
+  // We need a dummy catalogue
+  cta::catalogue::DummyCatalogue catalogue;
+  // Here we check that we can successfully call the RetrieveRequest's garbage collector
+  cta::objectstore::BackendVFS be;
+  // Create the root entry
+  cta::objectstore::RootEntry re(be);
+  re.initialize();
+  re.insert();
+  // Create the agent register
+  cta::objectstore::EntryLogSerDeser el("user0",
+      "unittesthost", time(NULL));
+  cta::objectstore::ScopedExclusiveLock rel(re);
+  // Create the agent for objects creation
+  cta::objectstore::AgentReference agentRef("unitTestCreateEnv", dl);
+  // Finish root creation.
+  re.addOrGetAgentRegisterPointerAndCommit(agentRef, el, lc);
+  rel.release();
+  // continue agent creation.
+  cta::objectstore::Agent agent(agentRef.getAgentAddress(), be);
+  agent.initialize();
+  agent.setTimeout_us(10000);
+  agent.insertAndRegisterSelf(lc);
+  // Create all agents to be garbage collected
+  cta::objectstore::AgentReference agentRefToTransferForUser("ToTransferForUser", dl);
+  cta::objectstore::Agent agentToTransferForUser(agentRefToTransferForUser.getAgentAddress(), be);
+  agentToTransferForUser.initialize();
+  agentToTransferForUser.setTimeout_us(0);
+  agentToTransferForUser.insertAndRegisterSelf(lc);
+  
+  std::string retrieveRequestAddress = agentRefToTransferForUser.nextId("RetrieveRequest");
+  agentRefToTransferForUser.addToOwnership(retrieveRequestAddress, be);
+  
+  cta::objectstore::RetrieveRequest rr(retrieveRequestAddress, be);
+  
+  rr.initialize();
+  cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
+  rqc.archiveFile.archiveFileID = 123456789L;
+  rqc.archiveFile.diskFileId = "eos://diskFile";
+  rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
+  rqc.archiveFile.creationTime = 0;
+  rqc.archiveFile.reconciliationTime = 0;
+  rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
+  rqc.archiveFile.diskInstance = "eoseos";
+  rqc.archiveFile.fileSize = 1000;
+  rqc.archiveFile.storageClass = "sc";
+  {
+    cta::common::dataStructures::TapeFile tf;
+    tf.blockId=0;
+    tf.fileSize=1;
+    tf.copyNb=2;
+    tf.creationTime=time(nullptr);
+    tf.fSeq=1;
+    tf.vid="Tape0";
+    rqc.archiveFile.tapeFiles.push_back(tf);
+  }
+  rqc.mountPolicy.archiveMinRequestAge = 1;
+  rqc.mountPolicy.archivePriority = 1;
+  rqc.mountPolicy.creationLog.time = time(nullptr);
+  rqc.mountPolicy.lastModificationLog.time = time(nullptr);
+  rqc.mountPolicy.maxDrivesAllowed = 1;
+  rqc.mountPolicy.retrieveMinRequestAge = 1;
+  rqc.mountPolicy.retrievePriority = 1;
+  rr.setRetrieveFileQueueCriteria(rqc);
+  cta::common::dataStructures::RetrieveRequest sReq;
+  sReq.archiveFileID = rqc.archiveFile.archiveFileID;
+  sReq.creationLog.time=time(nullptr);
+  rr.setSchedulerRequest(sReq);
+  rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToTransfer);
+  rr.setOwner(agentToTransferForUser.getAddressIfSet());
+  rr.setActiveCopyNumber(0);
+  
+  cta::objectstore::RetrieveRequest::RepackInfo ri;
+  ri.isRepack = true;
+  ri.forceDisabledTape = true;
+  ri.fSeq = 1;
+  ri.fileBufferURL = "testFileBufferURL";
+  ri.repackRequestAddress = "repackRequestAddress";
+  rr.setRepackInfo(ri);
+  
+  rr.insert();
+  
+  // Create the garbage collector and run it once.
+  cta::objectstore::AgentReference gcAgentRef("unitTestGarbageCollector", dl);
+  cta::objectstore::Agent gcAgent(gcAgentRef.getAgentAddress(), be);
+  gcAgent.initialize();
+  gcAgent.setTimeout_us(0);
+  gcAgent.insertAndRegisterSelf(lc);
+  
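+  // Tape0 is disabled: the garbage collector should still requeue the request because forceDisabledTape is set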
+  catalogue.addDisabledTape("Tape0");
+
+  cta::objectstore::GarbageCollector gc(be, gcAgentRef, catalogue);
+  gc.runOnePass(lc);
+  
+  {
+    //The Retrieve Request should now be queued in the RetrieveQueueToTransferForUser
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    auto jobs = rq.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+    
+    rr.fetchNoLock();
+    ASSERT_EQ(rr.getOwner(),rq.getAddressIfSet());
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for RJS_ToTransferForUser job and a disabled tape
+    cta::objectstore::AgentReference agentRefToTransferDisabledTapeAutoGc("ToReportToRepackForFailureAutoGC", dl);
+    cta::objectstore::Agent agentToReportToRepackForFailureJobAutoGc(agentRefToTransferDisabledTapeAutoGc.getAgentAddress(), be);
+    agentToReportToRepackForFailureJobAutoGc.initialize();
+    agentToReportToRepackForFailureJobAutoGc.setTimeout_us(0);
+    agentToReportToRepackForFailureJobAutoGc.insertAndRegisterSelf(lc);
+    
+    
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToTransferDisabledTapeAutoGc.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToTransfer);
+      rr.commit();
+
+      agentRefToTransferDisabledTapeAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+
+      ASSERT_NO_THROW(rr.garbageCollect(agentRefToTransferDisabledTapeAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    }
+    
+    //The Retrieve Request should now be queued in the RetrieveQueueToTransferForUser
+    
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToTransferForUser(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    rqToTransferForUser.fetchNoLock();
+    
+    auto jobs = rqToTransferForUser.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+
+    rr.fetchNoLock();
+    ASSERT_EQ(rqToTransferForUser.getAddressIfSet(),rr.getOwner());
+  }
+}
+
 TEST(ObjectStore, GarbageCollectorArchiveAllStatusesAndQueues) {
   // We will need a log object
 #ifdef STDOUT_LOGGING
diff --git a/objectstore/Helpers.cpp b/objectstore/Helpers.cpp
index a8d1489f6783ec8ff8f2956e6e90f8ad56c88921..5486920ca7f6177c7e57f74cd4d2e9fea4853aff 100644
--- a/objectstore/Helpers.cpp
+++ b/objectstore/Helpers.cpp
@@ -364,7 +364,7 @@ void Helpers::getLockedAndFetchedRepackQueue(RepackQueue& queue, ScopedExclusive
 // Helpers::selectBestRetrieveQueue()
 //------------------------------------------------------------------------------
 std::string Helpers::selectBestRetrieveQueue(const std::set<std::string>& candidateVids, cta::catalogue::Catalogue & catalogue,
-    objectstore::Backend & objectstore) {
+    objectstore::Backend & objectstore, bool forceDisabledTape) {
   // We will build the retrieve stats of the non-disable candidate vids here
   std::list<SchedulerDatabase::RetrieveQueueStatistics> candidateVidsStats;
   // A promise we create so we can make users wait on it.
@@ -378,20 +378,29 @@ std::string Helpers::selectBestRetrieveQueue(const std::set<std::string>& candid
       // If an update is in progress, we wait on it, and get the result after.
       // We have to release the global lock while doing so.
       if (g_retrieveQueueStatistics.at(v).updating) {
+        logUpdateCacheIfNeeded(false,g_retrieveQueueStatistics.at(v),"g_retrieveQueueStatistics.at(v).updating");
         // Cache is updating, we wait on update.
         auto updateFuture = g_retrieveQueueStatistics.at(v).updateFuture;
         grqsmLock.unlock();
         updateFuture.wait();
         grqsmLock.lock();
-        if (!g_retrieveQueueStatistics.at(v).tapeStatus.disabled) {
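+        // A disabled tape stays in the candidate list only when the caller explicitly forces it (repack of a disabled tape)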
+        if(!g_retrieveQueueStatistics.at(v).tapeStatus.disabled || (g_retrieveQueueStatistics.at(v).tapeStatus.disabled && forceDisabledTape)) {
+          logUpdateCacheIfNeeded(false,g_retrieveQueueStatistics.at(v),"!g_retrieveQueueStatistics.at(v).tapeStatus.disabled || (g_retrieveQueueStatistics.at(v).tapeStatus.disabled && forceDisabledTape)");
           candidateVidsStats.emplace_back(g_retrieveQueueStatistics.at(v).stats);
         }
       } else {
         // We have a cache hit, check it's not stale.
-        if (g_retrieveQueueStatistics.at(v).updateTime + c_retrieveQueueCacheMaxAge > time(nullptr))
+        time_t timeSinceLastUpdate = time(nullptr) - g_retrieveQueueStatistics.at(v).updateTime;
+        if (timeSinceLastUpdate > c_retrieveQueueCacheMaxAge){
+          logUpdateCacheIfNeeded(false,g_retrieveQueueStatistics.at(v),"timeSinceLastUpdate ("+std::to_string(timeSinceLastUpdate)+")> c_retrieveQueueCacheMaxAge ("
+                  +std::to_string(c_retrieveQueueCacheMaxAge)+"), cache needs to be updated");
           throw std::out_of_range("");
+        }
+        
+        logUpdateCacheIfNeeded(false,g_retrieveQueueStatistics.at(v),"Cache is not updated, timeSinceLastUpdate ("+std::to_string(timeSinceLastUpdate)+
+        ") <= c_retrieveQueueCacheMaxAge ("+std::to_string(c_retrieveQueueCacheMaxAge)+")");
         // We're lucky: cache hit (and not stale)
-        if (!g_retrieveQueueStatistics.at(v).tapeStatus.disabled)
+        if (!g_retrieveQueueStatistics.at(v).tapeStatus.disabled || (g_retrieveQueueStatistics.at(v).tapeStatus.disabled && forceDisabledTape))
           candidateVidsStats.emplace_back(g_retrieveQueueStatistics.at(v).stats);
       }
     } catch (std::out_of_range &) {
@@ -427,11 +436,14 @@ std::string Helpers::selectBestRetrieveQueue(const std::set<std::string>& candid
         throw cta::exception::Exception("In Helpers::selectBestRetrieveQueue(): unexpected vid in tapeStatus.");
       g_retrieveQueueStatistics[v].stats = queuesStats.front();
       g_retrieveQueueStatistics[v].tapeStatus = tapeStatus.at(v);
+      g_retrieveQueueStatistics[v].updateTime = time(nullptr);
+      logUpdateCacheIfNeeded(true,g_retrieveQueueStatistics[v]);
       // Signal to potential waiters
       updatePromise.set_value();
       // Update our own candidate list if needed.
-      if(!g_retrieveQueueStatistics.at(v).tapeStatus.disabled)
+      if(!g_retrieveQueueStatistics.at(v).tapeStatus.disabled || (g_retrieveQueueStatistics.at(v).tapeStatus.disabled && forceDisabledTape)) {
         candidateVidsStats.emplace_back(g_retrieveQueueStatistics.at(v).stats);
+      }
     }
   }
   // We now have all the candidates listed (if any).
@@ -471,6 +483,7 @@ void Helpers::updateRetrieveQueueStatisticsCache(const std::string& vid, uint64_
     g_retrieveQueueStatistics.at(vid).stats.filesQueued=files;
     g_retrieveQueueStatistics.at(vid).stats.bytesQueued=bytes;
     g_retrieveQueueStatistics.at(vid).stats.currentPriority = priority;
+    logUpdateCacheIfNeeded(false,g_retrieveQueueStatistics.at(vid));
   } catch (std::out_of_range &) {
     // The entry is missing. We just create it.
     g_retrieveQueueStatistics[vid].stats.bytesQueued=bytes;
@@ -481,9 +494,15 @@ void Helpers::updateRetrieveQueueStatisticsCache(const std::string& vid, uint64_
     g_retrieveQueueStatistics[vid].tapeStatus.full=false;
     g_retrieveQueueStatistics[vid].updating = false;
     g_retrieveQueueStatistics[vid].updateTime = time(nullptr);
+    logUpdateCacheIfNeeded(true,g_retrieveQueueStatistics[vid]);
   }
 }
 
+void Helpers::flushRetrieveQueueStatisticsCache(){
+  threading::MutexLocker ml(g_retrieveQueueStatisticsMutex);
+  g_retrieveQueueStatistics.clear();
+}
+
 //------------------------------------------------------------------------------
 // Helpers::g_retrieveQueueStatistics
 //------------------------------------------------------------------------------
@@ -703,4 +722,16 @@ void Helpers::removeRepackRequestToIndex(const std::string& vid, Backend& backen
   ri.commit();
 }
 
+void Helpers::logUpdateCacheIfNeeded(const bool entryCreation, const RetrieveQueueStatisticsWithTime& tapeStatistic, std::string message){
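+  // The body compiles away unless HELPERS_CACHE_UPDATE_LOGGING is defined in Helpers.hpp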
+  #ifdef HELPERS_CACHE_UPDATE_LOGGING
+    std::ofstream logFile(HELPERS_CACHE_UPDATE_LOGGING_FILE, std::ofstream::app);
+    std::time_t end_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
+    // Chomp the trailing newline added by std::ctime()
+    std::string date=std::ctime(&end_time);
+    date.erase(std::remove(date.begin(), date.end(), '\n'), date.end());
+    logFile << date << " pid=" << ::getpid() << " tid=" << syscall(SYS_gettid) << " message=" << message << " entryCreation="<< entryCreation <<" vid=" 
+            << tapeStatistic.tapeStatus.vid << " disabled=" << tapeStatistic.tapeStatus.disabled << " filesQueued=" << tapeStatistic.stats.filesQueued <<  std::endl;
+  #endif //HELPERS_CACHE_UPDATE_LOGGING
+}
+
 }} // namespace cta::objectstore.
diff --git a/objectstore/Helpers.hpp b/objectstore/Helpers.hpp
index debd836b1e6c5b720ac4c23a04f873b4df48f8c1..c1c44ace2932d83b1ecd945d2145cadfdb88dd3d 100644
--- a/objectstore/Helpers.hpp
+++ b/objectstore/Helpers.hpp
@@ -28,6 +28,12 @@
 #include <string>
 #include <set>
 #include <future>
+#include <fstream>
+#include <syscall.h>
+
+// Uncomment to activate logging of helper cache updates for debugging
+//#define HELPERS_CACHE_UPDATE_LOGGING
+#define HELPERS_CACHE_UPDATE_LOGGING_FILE "/var/tmp/cta-helpers-update-cache.log"
 
 /**
  * A collection of helper functions for commonly used multi-object operations
@@ -78,7 +84,7 @@ public:
    * to the algorithm, but will help performance drastically for a very similar result
    */
   static std::string selectBestRetrieveQueue (const std::set<std::string> & candidateVids, cta::catalogue::Catalogue & catalogue, 
-  objectstore::Backend & objectstore);
+  objectstore::Backend & objectstore, bool forceDisabledTape = false);
   
   /**
    * Gets the retrieve queue statistics for a set of Vids (extracted from the OStoreDB
@@ -95,6 +101,12 @@ public:
    */
   static void updateRetrieveQueueStatisticsCache(const std::string & vid, uint64_t files, uint64_t bytes, uint64_t priority);
   
+  /**
+   * Flushes the RetrieveQueueStatisticsCache
+   * TO BE USED BY UNIT TESTS ONLY!
+   */
+  static void flushRetrieveQueueStatisticsCache();
+  
 private:
   /** Lock for the retrieve queues stats */
   static cta::threading::Mutex g_retrieveQueueStatisticsMutex;
@@ -112,6 +124,7 @@ private:
   static std::map<std::string, RetrieveQueueStatisticsWithTime> g_retrieveQueueStatistics;
   /** Time between cache updates */
   static const time_t c_retrieveQueueCacheMaxAge = 10;
+  static void logUpdateCacheIfNeeded(const bool entryCreation,const RetrieveQueueStatisticsWithTime& tapeStatistic, std::string message="");
   
 public:
   
diff --git a/objectstore/RepackRequest.cpp b/objectstore/RepackRequest.cpp
index 3e3e52b53502465f4d9f9c20ed5456570ed4e9fd..a766a86f2cc367be61ff2905bfc5499254c77721 100644
--- a/objectstore/RepackRequest.cpp
+++ b/objectstore/RepackRequest.cpp
@@ -78,6 +78,7 @@ void RepackRequest::initialize() {
   m_payload.set_lastexpandedfseq(0);
   m_payload.set_is_expand_finished(false);
   m_payload.set_is_expand_started(false);
+  m_payload.set_force_disabled_tape(false);
   // This object is good to go (to storage)
   m_payloadInterpreted = true;
 }
@@ -145,6 +146,7 @@ common::dataStructures::RepackInfo RepackRequest::getInfo() {
   ret.lastExpandedFseq = m_payload.lastexpandedfseq();
   ret.userProvidedFiles = m_payload.userprovidedfiles();
   ret.isExpandFinished = m_payload.is_expand_finished();
+  ret.forceDisabledTape = m_payload.force_disabled_tape();
   if (m_payload.move_mode()) {
     if (m_payload.add_copies_mode()) {
       ret.type = RepackInfo::Type::MoveAndAddCopies;
@@ -192,6 +194,15 @@ common::dataStructures::MountPolicy RepackRequest::getMountPolicy(){
   return mpSerDeser;
 }
 
+void RepackRequest::setForceDisabledTape(const bool disabledTape){
+  checkPayloadWritable();
+  m_payload.set_force_disabled_tape(disabledTape);
+}
+
+bool RepackRequest::getForceDisabledTape() {
+  checkPayloadReadable();
+  return m_payload.force_disabled_tape();
+}
 void RepackRequest::setStatus(){
   checkPayloadWritable();
   checkPayloadReadable();
@@ -538,7 +549,8 @@ auto RepackRequest::getStats() -> std::map<StatsType, StatsValues> {
 //------------------------------------------------------------------------------
 void RepackRequest::reportRetrieveCreationFailures(const std::list<cta::SchedulerDatabase::RepackRequest::Subrequest>& notCreatedSubrequests){
   checkPayloadWritable();
-  uint64_t failedToRetrieveFiles, failedToRetrieveBytes, failedToCreateArchiveReq = 0;
+  checkPayloadReadable();
+  uint64_t failedToRetrieveFiles = 0, failedToRetrieveBytes = 0, failedToCreateArchiveReq = 0;
   for(auto & subreq: notCreatedSubrequests){
     failedToRetrieveFiles++;
     failedToRetrieveBytes+=subreq.archiveFile.fileSize;
@@ -636,6 +648,7 @@ RepackRequest::AsyncOwnerAndStatusUpdater* RepackRequest::asyncUpdateOwnerAndSta
       retRef.m_repackInfo.status = (RepackInfo::Status) payload.status();
       retRef.m_repackInfo.vid = payload.vid();
       retRef.m_repackInfo.repackBufferBaseURL = payload.buffer_url();
+      retRef.m_repackInfo.forceDisabledTape = payload.force_disabled_tape();
       if (payload.move_mode()) {
         if (payload.add_copies_mode()) {
           retRef.m_repackInfo.type = RepackInfo::Type::MoveAndAddCopies;
diff --git a/objectstore/RepackRequest.hpp b/objectstore/RepackRequest.hpp
index bfa7c812d8e30ba3b8d8a062fef6009b7493685c..9f2601da0f6427db14b1b57ef6dbd9524f10a048 100644
--- a/objectstore/RepackRequest.hpp
+++ b/objectstore/RepackRequest.hpp
@@ -50,6 +50,14 @@ public:
   cta::SchedulerDatabase::RepackRequest::TotalStatsFiles getTotalStatsFile();
   void setMountPolicy(const common::dataStructures::MountPolicy &mp);
   common::dataStructures::MountPolicy getMountPolicy();
+  /**
+   * Sets the disabledTape flag to allow the mounting of a
+   * disabled tape for file retrieval
+   * @param disabledTape if true, the tape will be mounted for retrieval even if it is
+   * disabled; if false, a disabled tape will not be mounted
+   */
+  void setForceDisabledTape(const bool disabledTape);
+  bool getForceDisabledTape();
   
   /**
    * Automatically set the new status of the Repack Request
diff --git a/objectstore/RetrieveRequest.cpp b/objectstore/RetrieveRequest.cpp
index 704eb05e735a3d4ba74e5452e789836e5bef17da..2a77db5af1630508a78f6d9848e8c1d48389d107 100644
--- a/objectstore/RetrieveRequest.cpp
+++ b/objectstore/RetrieveRequest.cpp
@@ -153,7 +153,7 @@ void RetrieveRequest::garbageCollect(const std::string& presumedOwner, AgentRefe
   // filter on tape availability.
   try {
     // If we have to fetch the status of the tapes and queued for the non-disabled vids.
-    bestVid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, m_objectStore);
+    bestVid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, m_objectStore,m_payload.repack_info().force_disabled_tape());
     goto queueForTransfer;
   } catch (Helpers::NoTapeAvailableForRetrieve &) {}
 queueForFailure:;
@@ -489,6 +489,14 @@ void RetrieveRequest::setRetrieveFileQueueCriteria(const cta::common::dataStruct
   ArchiveFileSerDeser(criteria.archiveFile).serialize(*m_payload.mutable_archivefile());
   for (auto &tf: criteria.archiveFile.tapeFiles) {
     MountPolicySerDeser(criteria.mountPolicy).serialize(*m_payload.mutable_mountpolicy());
+    /*
+     * Explanation of these hardcoded retries:
+     * The hardcoded RetriesWithinMount ensures that we will try to retrieve the file at most 3 times
+     * within the same mount.
+     * The hardcoded TotalRetries ensures that we will never try more than 6 times in total to retrieve a file.
+     * As totalretries = 6 and retrieswithinmount = 3, the file will be retried by at most 2 mounts
+     * (2 mounts * 3 retrieswithinmount = 6 totalretries).
+     */
     const uint32_t hardcodedRetriesWithinMount = 3;
     const uint32_t hardcodedTotalRetries = 6;
     const uint32_t hardcodedReportRetries = 2;
@@ -644,6 +652,7 @@ void RetrieveRequest::setRepackInfo(const RepackInfo& repackInfo) {
     for (auto cntr: repackInfo.copyNbsToRearchive) {
       m_payload.mutable_repack_info()->mutable_copy_nbs_to_rearchive()->Add(cntr);
     }
+    m_payload.mutable_repack_info()->set_force_disabled_tape(repackInfo.forceDisabledTape);
     m_payload.mutable_repack_info()->set_file_buffer_url(repackInfo.fileBufferURL);
     m_payload.mutable_repack_info()->set_repack_request_address(repackInfo.repackRequestAddress);
     m_payload.mutable_repack_info()->set_fseq(repackInfo.fSeq);
@@ -941,6 +950,7 @@ auto RetrieveRequest::asyncUpdateJobOwner(uint32_t copyNumber, const std::string
               ri.isRepack = true;
               ri.repackRequestAddress = payload.repack_info().repack_request_address();
               ri.fSeq = payload.repack_info().fseq();
+              ri.forceDisabledTape = payload.repack_info().force_disabled_tape();
             }
             // TODO serialization of payload maybe not necessary
             oh.set_payload(payload.SerializePartialAsString());
diff --git a/objectstore/RetrieveRequest.hpp b/objectstore/RetrieveRequest.hpp
index b5a11ff62d5663aeca6090a5cd4516f687046b05..63c37f74a2efd7b3f7b8d76518723668bba00620 100644
--- a/objectstore/RetrieveRequest.hpp
+++ b/objectstore/RetrieveRequest.hpp
@@ -151,6 +151,7 @@ public:
   };
   struct RepackInfo {
     bool isRepack = false;
+    bool forceDisabledTape = false;
     std::map<uint32_t, std::string> archiveRouteMap;
     std::set<uint32_t> copyNbsToRearchive;
     std::string repackRequestAddress;
@@ -173,6 +174,7 @@ public:
       rrri.set_file_buffer_url(fileBufferURL);
       rrri.set_repack_request_address(repackRequestAddress);
       rrri.set_fseq(fSeq);
+      rrri.set_force_disabled_tape(forceDisabledTape);
     }
     
     void deserialize(const cta::objectstore::serializers::RetrieveRequestRepackInfo & rrri) {
@@ -182,6 +184,7 @@ public:
       fileBufferURL = rrri.file_buffer_url();
       repackRequestAddress = rrri.repack_request_address();
       fSeq = rrri.fseq();
+      forceDisabledTape = rrri.force_disabled_tape();
     }
   };
 private:
diff --git a/objectstore/Sorter.cpp b/objectstore/Sorter.cpp
index 8f5945084e51fae2ea5a25fb9a7ffe07a1253c57..54b0809cca7cadb23be59a19fb58a675ccd16b3d 100644
--- a/objectstore/Sorter.cpp
+++ b/objectstore/Sorter.cpp
@@ -318,7 +318,7 @@ std::set<std::string> Sorter::getCandidateVidsToTransfer(RetrieveRequestInfosAcc
 std::string Sorter::getBestVidForQueueingRetrieveRequest(RetrieveRequestInfosAccessorInterface &requestAccessor, std::set<std::string>& candidateVids, log::LogContext &lc){
   std::string vid;
   try{
-    vid = Helpers::selectBestRetrieveQueue(candidateVids,m_catalogue,m_objectstore);
+    vid = Helpers::selectBestRetrieveQueue(candidateVids,m_catalogue,m_objectstore,requestAccessor.getForceDisabledTape());
   } catch (Helpers::NoTapeAvailableForRetrieve & ex) {
     log::ScopedParamContainer params(lc);
     params.add("fileId", requestAccessor.getArchiveFile().archiveFileID);
@@ -414,6 +414,10 @@ std::string OStoreRetrieveRequestAccessor::getRepackAddress(){
   return m_retrieveRequest->getRepackInfo().repackRequestAddress;
 }
 
+bool OStoreRetrieveRequestAccessor::getForceDisabledTape(){
+  return m_retrieveRequest->getRepackInfo().forceDisabledTape;
+}
+
 /* END OF RetrieveRequestAccessor CLASS */
 
 
@@ -448,6 +452,10 @@ std::string SorterRetrieveRequestAccessor::getRepackAddress(){
   return m_retrieveRequest.repackRequestAddress;
 }
 
+bool SorterRetrieveRequestAccessor::getForceDisabledTape(){
+  return m_retrieveRequest.forceDisabledTape;
+}
+
 /* END OF SorterRetrieveRequestAccessor CLASS*/
 
 }}
diff --git a/objectstore/Sorter.hpp b/objectstore/Sorter.hpp
index 064b924cbf40a01b217c4374a92b7b1c3ffbd02e..d13967be86d0e7716257d71c1c6bcae4ea50dd91 100644
--- a/objectstore/Sorter.hpp
+++ b/objectstore/Sorter.hpp
@@ -118,7 +118,7 @@ public:
     common::dataStructures::ArchiveFile archiveFile;
     std::map<uint32_t, RetrieveJob> retrieveJobs;
     std::string repackRequestAddress;
-    bool isRepack;
+    bool forceDisabledTape = false;
   };
   
   /* Retrieve-related methods */
@@ -231,6 +231,7 @@ class RetrieveRequestInfosAccessorInterface{
     virtual ~RetrieveRequestInfosAccessorInterface();
     virtual serializers::RetrieveJobStatus getJobStatus(const uint32_t copyNb) = 0;
     virtual std::string getRepackAddress() = 0;
+    virtual bool getForceDisabledTape() = 0;
 };
 
 class OStoreRetrieveRequestAccessor: public RetrieveRequestInfosAccessorInterface{
@@ -243,6 +244,7 @@ class OStoreRetrieveRequestAccessor: public RetrieveRequestInfosAccessorInterfac
         const uint32_t copyNb, const uint64_t fSeq, AgentReferenceInterface* previousOwner);
     serializers::RetrieveJobStatus getJobStatus(const uint32_t copyNb);
     std::string getRepackAddress();
+    bool getForceDisabledTape();
   private:
     std::shared_ptr<RetrieveRequest> m_retrieveRequest;
 };
@@ -257,6 +259,7 @@ class SorterRetrieveRequestAccessor: public RetrieveRequestInfosAccessorInterfac
         const uint32_t copyNb, const uint64_t fSeq, AgentReferenceInterface* previousOwner);
     serializers::RetrieveJobStatus getJobStatus(const uint32_t copyNb);
     std::string getRepackAddress();
+    bool getForceDisabledTape();
   private:
     Sorter::SorterRetrieveRequest& m_retrieveRequest;
 };
diff --git a/objectstore/cta.proto b/objectstore/cta.proto
index 1c90c23a9a99d4e88ae2a7dbf5fdbc2723670a6d..7d0efbe22f9a7bc41fd0ab8eb44ca38da5f1ea8c 100644
--- a/objectstore/cta.proto
+++ b/objectstore/cta.proto
@@ -218,6 +218,13 @@ message DiskSpaceReservation {
   required uint64 reserved_bytes = 5110;
 }
 
+message DriveConfig {
+  required string category = 13000;
+  required string key = 13001;
+  required string value = 13002;
+  required string source = 13003;
+}
+
 message DriveState {
   required string drivename = 5000;
   required string host = 5001;
@@ -243,6 +250,7 @@ message DriveState {
   required bool desiredUp = 5019;
   required bool desiredForceDown = 5020;
   optional string currentvid = 5021;
+  optional string cta_version = 5035;
   optional uint64 current_priority = 5028;
   optional string current_activity = 5029;
   optional double current_activity_weight = 5030;
@@ -254,6 +262,9 @@ message DriveState {
   optional string next_activity = 5032;
   optional double next_activity_weight = 5033;
   repeated DiskSpaceReservation disk_space_reservations = 5034;
+  optional string dev_file_name = 5036;
+  optional string raw_library_slot = 5037;
+  repeated DriveConfig drive_config = 5038;
 // TODO: implement or remove  required EntryLog creationlog = 5023;
 }
 
@@ -406,6 +417,7 @@ message RetrieveRequestRepackInfo {
   required string repack_request_address = 9520;
   required string file_buffer_url = 9530;
   required uint64 fseq = 9540;
+  required bool force_disabled_tape = 9560;
 }
 
 // The different timings of the lifecycle of a RetrieveRequest (creation time, first select time, request complete)
@@ -591,6 +603,7 @@ message RepackRequest {
   required bool is_expand_finished = 11561;
   required bool is_expand_started = 11562;
   required MountPolicy mount_policy = 11563;
+  required bool force_disabled_tape = 11564;
   repeated RepackSubRequestPointer subrequests = 11570;
 }
 
diff --git a/python/eosfstgcd/cta-fst-gcd.conf.example b/python/eosfstgcd/cta-fst-gcd.conf.example
index eab5852edbbb8c814c5a19aa63667cc9f18edaaf..700a92df8d615bfda71ce7eb299b057e2bcd17d1 100644
--- a/python/eosfstgcd/cta-fst-gcd.conf.example
+++ b/python/eosfstgcd/cta-fst-gcd.conf.example
@@ -27,6 +27,7 @@ logfile = /var/log/eos/fst/cta-fst-gcd.log ; Path of garbage collector log file
 mgmhost = HOSTNAME.2NDLEVEL.TOPLEVEL ; Fully qualified host name of EOS MGM
 minfreebytes = 10000000000 ; Minimum number of free bytes a filesystem should have
 gcagesecs = 7200 ; Age at which a file can be considered for garbage collection
+absolutemaxagesecs = 604800 ; Age at which a file will be considered for garbage collection regardless of the amount of free space
 queryperiodsecs = 310 ; Delay in seconds between free space queries to the local file systems
 mainloopperiodsecs = 300 ; Period in seconds of the main loop of the cta-fst-gcd daemon
 xrdsecssskt = /etc/eos.keytab ; Path to simple shared secret to authenticate with EOS MGM
diff --git a/python/eosfstgcd/ctafstgcd.py b/python/eosfstgcd/ctafstgcd.py
index 3a4b9c4342f2806bae2f70113eec3aa022342a08..90cca7738366960f4a5081a18bd2b4c84bf10d95 100755
--- a/python/eosfstgcd/ctafstgcd.py
+++ b/python/eosfstgcd/ctafstgcd.py
@@ -37,6 +37,9 @@ class UserError(Exception):
 class StagerrmError(Exception):
   pass
 
+class AttrsetError(Exception):
+  pass
+
 class NoMgmHost(UserError):
   pass
 
@@ -142,6 +145,22 @@ class RealEos:
     if 0 != process.returncode:
       raise StagerrmError("'{}' returned non zero: returncode={}".format(cmd, process.returncode))
 
+  def attrset(self, name, value, fxid):
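+    # Set the EOS extended attribute name=value on the file identified by its hexadecimal file id (fxid)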
+    mgmurl = "root://{}".format(self.mgmhost)
+    args = ["eos", "-r", "0", "0", mgmurl, "attr", "set", '{}={}'.format(name, value), "fxid:{}".format(fxid)]
+    env = os.environ.copy()
+    env["XrdSecPROTOCOL"] = "sss"
+    env["XrdSecSSSKT"] = self.xrdsecssskt
+    process = None
+    try:
+      process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+    except Exception as err:
+      raise Exception("Failed to execute '{}': {}".format(" ".join(args), err))
+    stdout,stderr = process.communicate()
+
+    if 0 != process.returncode:
+      raise AttrsetError("'{}' returned non zero: returncode={}".format(" ".join(args), process.returncode))
+
 class SpaceTracker:
   '''Calculates the amount of effective free space in the file system of a given
   file or directory by querying the OS and taking into account the pending
@@ -252,54 +271,73 @@ class Gc:
     self.log.info("Config: mgmhost={}".format(self.config.mgmhost))
     self.log.info("Config: minfreebytes={}".format(self.config.minfreebytes))
     self.log.info("Config: gcagesecs={}".format(self.config.gcagesecs))
+    self.log.info("Config: absolutemaxagesecs={}".format(self.config.absolutemaxagesecs))
     self.log.info("Config: queryperiodsecs={}". format(self.config.queryperiodsecs))
     self.log.info("Config: mainloopperiodsecs={}". format(self.config.mainloopperiodsecs))
     self.log.info("Config: xrdsecssskt={}".format(self.config.xrdsecssskt))
 
   def processfile(self, subdir, fstfile):
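+    '''Considers the given file for garbage collection. The file is collected if it has passed the absolute maximum age, or if space must be freed and it has passed the normal gc age.'''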
+    fullpath = os.path.join(subdir, fstfile)
+    filesizeandctime = None
+    try:
+      filesizeandctime = self.disk.getfilesizeandctime(fullpath)
+    except Exception as err:
+      self.log.error(err)
+
+    if not filesizeandctime:
+      return
+
+    now = time.time()
+    agesecs = now - filesizeandctime.ctime
+    absolutemaxagereached = agesecs > self.config.absolutemaxagesecs
+    gcagereached = agesecs > self.config.gcagesecs
     spacetracker = self.spacetrackers.gettracker(subdir)
     totalfreebytes = spacetracker.getfreebytes()
     shouldfreespace = totalfreebytes < self.config.minfreebytes
 
-    if shouldfreespace:
-      fullpath = os.path.join(subdir,fstfile)
-
-      filesizeandctime = None
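+    # An absolutely old file is garbage collected even when there is enough free space.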
+    if absolutemaxagereached or (shouldfreespace and gcagereached):
       try:
-        filesizeandctime = self.disk.getfilesizeandctime(fullpath)
+        bytesrequiredbefore = 0
+        if self.config.minfreebytes > totalfreebytes:
+          bytesrequiredbefore = self.config.minfreebytes - totalfreebytes
+        self.eos.stagerrm(fstfile)
+        spacetracker.stagerrmqueued(filesizeandctime.sizebytes)
+        self.log.info("stagerrm: subdir={}, fxid={}, bytesrequiredbefore={}, filesizebytes={}, absolutemaxagereached={}, shouldfreespace={}, gcagereached={}"
+          .format(subdir, fstfile, bytesrequiredbefore, filesizeandctime.sizebytes, absolutemaxagereached, shouldfreespace, gcagereached))
+        nowstr = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f")
+        attrname = "sys.retrieve.error"
+        attrvalue = "Garbage collected at {}".format(nowstr)
+        self.eos.attrset(attrname, attrvalue, fstfile)
+      except StagerrmError as err:
+        pass
       except Exception as err:
         self.log.error(err)
 
-      if filesizeandctime:
-        now = time.time()
-        agesecs = now - filesizeandctime.ctime
-        if agesecs > self.config.gcagesecs:
-          try:
-            bytesrequiredbefore = self.config.minfreebytes - totalfreebytes
-            self.eos.stagerrm(fstfile)
-            spacetracker.stagerrmqueued(filesizeandctime.sizebytes)
-            self.log.info("stagerrm: subdir={}, fxid={}, bytesrequiredbefore={}, filesizebytes={}"
-              .format(subdir, fstfile, bytesrequiredbefore, filesizeandctime.sizebytes))
-          except StagerrmError as err:
-            pass
-          except Exception as err:
-            self.log.error(err)
-
   def processfssubdir(self, subdir):
-    spacetracker = self.spacetrackers.gettracker(subdir)
-    totalfreebytes = spacetracker.getfreebytes()
-    shouldfreespace = totalfreebytes < self.config.minfreebytes
-
-    if shouldfreespace:
-      subdirfiles = []
-      try:
-        subdirfiles = self.disk.listdir(subdir)
-      except Exception as err:
-        self.log.error("Failed to list contents of sub directory: subdir={}: {}".format(subdir, err))
+    subdirfiles = []
+    try:
+      subdirfiles = self.disk.listdir(subdir)
+    except Exception as err:
+      self.log.error("Failed to list contents of sub directory: subdir={}: {}".format(subdir, err))
 
-      fstfiles = [f for f in subdirfiles if re.match('^[0-9A-Fa-f]{8}$', f) and self.disk.isfile(os.path.join(subdir, f))]
-      for fstfile in fstfiles:
-        self.processfile(subdir, fstfile)
+    fstfiles = [f for f in subdirfiles if re.match('^[0-9A-Fa-f]{8}$', f) and self.disk.isfile(os.path.join(subdir, f))]
+    for fstfile in fstfiles:
+      self.processfile(subdir, fstfile)
 
   def processfs(self, path):
     fsfiles = []
@@ -401,6 +439,7 @@ def parseconf(conffile):
     config.mgmhost = parser.get('main', 'mgmhost')
     config.minfreebytes = parser.getint('main', 'minfreebytes')
     config.gcagesecs = parser.getint('main', 'gcagesecs')
+    config.absolutemaxagesecs = parser.getint('main', 'absolutemaxagesecs')
     config.queryperiodsecs = parser.getint('main', 'queryperiodsecs')
     config.mainloopperiodsecs = parser.getint('main', 'mainloopperiodsecs')
     config.xrdsecssskt = parser.get('main', 'xrdsecssskt')
diff --git a/python/eosfstgcd/test_ctafstgcd.py b/python/eosfstgcd/test_ctafstgcd.py
index 206e720360b6dfe1ce4acbe8a693085944c82265..5dd063af12e285be6eda412cfbcdd7062feb392f 100755
--- a/python/eosfstgcd/test_ctafstgcd.py
+++ b/python/eosfstgcd/test_ctafstgcd.py
@@ -57,6 +57,7 @@ class MockDisk:
     self.nbgetfreebytes = 0
 
   def listdir(self, path):
+    print "listdir path={}".format(path)
     self.nblistdir = self.nblistdir + 1
 
     pathlist = self.pathtolist(path)
@@ -113,6 +114,7 @@ class MockEos:
     self.filesystems = filesystems
     self.nbfsls = 0
     self.nbstagerrm = 0
+    self.nbattrset = 0
 
   def fsls(self):
     self.nbfsls = self.nbfsls + 1
@@ -121,6 +123,9 @@ class MockEos:
   def stagerrm(self, fxid):
     self.nbstagerrm = self.nbstagerrm + 1
 
+  def attrset(self, name, value, fxid):
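+    '''Mock of RealEos.attrset that only counts invocations.'''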
+    self.nbattrset = self.nbattrset + 1
+
 class RealDiskCase(unittest.TestCase):
   def setUp(self):
     self.log = DummyLogger()
@@ -216,6 +221,7 @@ class GcTestCase(unittest.TestCase):
     self.config.mgmhost = 'mgmhost'
     self.config.minfreebytes = 0
     self.config.gcagesecs = 1000
+    self.config.absolutemaxagesecs = 604800
     self.config.queryperiodsecs = 0
     self.config.mainloopperiodsecs = 0
     self.config.xrdsecssskt = ''
@@ -234,6 +240,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
 
@@ -244,6 +251,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_no_fs(self):
     disk = MockDisk(self.mocktree, self.freebytes, self.filesizeandctime)
@@ -256,6 +264,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
 
@@ -266,6 +275,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
@@ -277,6 +287,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs(self):
     mockfs = MockTreeNode("filesystem1")
@@ -297,6 +308,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
 
@@ -307,6 +319,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
@@ -318,6 +331,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_no_free_space(self):
     mocksubdir = MockTreeNode("12345678")
@@ -339,6 +353,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     self.config.minfreebytes = self.freebytes + 1
 
@@ -351,6 +366,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
@@ -358,10 +374,11 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(2, disk.nblistdir) # ls of fs and subdir
     self.assertEqual(1, disk.nbisdir)
     self.assertEqual(0, disk.nbisfile)
-    self.assertEqual(1, disk.nbgetfreebytes) # subdir
+    self.assertEqual(0, disk.nbgetfreebytes)
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_free_space(self):
     mocksubdir = MockTreeNode("12345678")
@@ -383,6 +400,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
 
@@ -393,17 +411,19 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
 
-    self.assertEqual(1, disk.nblistdir)
+    self.assertEqual(2, disk.nblistdir)
     self.assertEqual(1, disk.nbisdir)
     self.assertEqual(0, disk.nbisfile)
-    self.assertEqual(1, disk.nbgetfreebytes) # subdir
+    self.assertEqual(0, disk.nbgetfreebytes)
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_one_file_free_space(self):
     mockfile = MockTreeNode("90abcdef")
@@ -426,6 +446,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
 
@@ -436,17 +457,19 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
 
-    self.assertEqual(1, disk.nblistdir)
+    self.assertEqual(2, disk.nblistdir)
     self.assertEqual(1, disk.nbisdir)
-    self.assertEqual(0, disk.nbisfile)
-    self.assertEqual(1, disk.nbgetfreebytes) # subdir
-    self.assertEqual(0, disk.nbgetfilesizeandctime)
+    self.assertEqual(1, disk.nbisfile)
+    self.assertEqual(1, disk.nbgetfreebytes)
+    self.assertEqual(1, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_one_file_no_free_space_young_file(self):
     mockfile = MockTreeNode("90abcdef")
@@ -469,6 +492,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     self.config.minfreebytes = self.freebytes + 1
 
@@ -481,6 +505,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
@@ -488,10 +513,11 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(2, disk.nblistdir)
     self.assertEqual(1, disk.nbisdir)
     self.assertEqual(1, disk.nbisfile)
-    self.assertEqual(2, disk.nbgetfreebytes) # subdir and file
+    self.assertEqual(1, disk.nbgetfreebytes) # file
     self.assertEqual(1, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_one_file_no_free_space_old_file(self):
     mockfile = MockTreeNode("90abcdef")
@@ -517,6 +543,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     self.config.minfreebytes = self.freebytes + 1
 
@@ -529,6 +556,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
@@ -536,10 +564,62 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(2, disk.nblistdir)
     self.assertEqual(1, disk.nbisdir)
     self.assertEqual(1, disk.nbisfile)
-    self.assertEqual(2, disk.nbgetfreebytes) # subdir and file
+    self.assertEqual(1, disk.nbgetfreebytes) # file
     self.assertEqual(1, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(1, eos.nbstagerrm)
+    self.assertEqual(1, eos.nbattrset)
+
+  def test_run_only_once_one_fs_one_subdir_one_file_free_space_absolutely_old_file(self):
+    mockfile = MockTreeNode("90abcdef")
+    mocksubdir = MockTreeNode("12345678", [mockfile])
+    mockfs = MockTreeNode("filesystem1", [mocksubdir])
+    mocktree = MockTreeNode("/", [mockfs])
+    filesizeandctime = ctafstgcd.FileSizeAndCtime()
+    filesizeandctime.sizebytes = 1000
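+    # Make the file older than absolutemaxagesecs so that it is collected even though free space is sufficient.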
+    filesizeandctime.ctime = time.time() - self.config.absolutemaxagesecs - 1
+    disk = MockDisk(mocktree, self.freebytes, filesizeandctime)
+
+    filesystem1 = {
+      "path" : "/filesystem1",
+      "host" : self.fqdn
+    }
+    filesystems = [filesystem1]
+    eos = MockEos(filesystems)
+
+    self.assertEqual(0, disk.nblistdir)
+    self.assertEqual(0, disk.nbisdir)
+    self.assertEqual(0, disk.nbisfile)
+    self.assertEqual(0, disk.nbgetfreebytes)
+    self.assertEqual(0, disk.nbgetfilesizeandctime)
+    self.assertEqual(0, eos.nbfsls)
+    self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
+
+    self.config.minfreebytes = self.freebytes
+
+    gc = ctafstgcd.Gc(self.log, self.fqdn, disk, eos, self.config)
+
+    self.assertEqual(0, disk.nblistdir)
+    self.assertEqual(0, disk.nbisdir)
+    self.assertEqual(0, disk.nbisfile)
+    self.assertEqual(0, disk.nbgetfreebytes)
+    self.assertEqual(0, disk.nbgetfilesizeandctime)
+    self.assertEqual(0, eos.nbfsls)
+    self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
+
+    runonlyonce = True
+    gc.run(runonlyonce)
+
+    self.assertEqual(2, disk.nblistdir)
+    self.assertEqual(1, disk.nbisdir)
+    self.assertEqual(1, disk.nbisfile)
+    self.assertEqual(1, disk.nbgetfreebytes) # file
+    self.assertEqual(1, disk.nbgetfilesizeandctime)
+    self.assertEqual(1, eos.nbfsls)
+    self.assertEqual(1, eos.nbstagerrm)
+    self.assertEqual(1, eos.nbattrset)
 
   def test_run_only_once_one_fs_one_subdir_one_file_free_space_old_file(self):
     mockfile = MockTreeNode("90abcdef")
@@ -565,6 +645,7 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     self.config.minfreebytes = self.freebytes
 
@@ -577,17 +658,19 @@ class GcTestCase(unittest.TestCase):
     self.assertEqual(0, disk.nbgetfilesizeandctime)
     self.assertEqual(0, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
     runonlyonce = True
     gc.run(runonlyonce)
 
-    self.assertEqual(1, disk.nblistdir)
+    self.assertEqual(2, disk.nblistdir)
     self.assertEqual(1, disk.nbisdir)
-    self.assertEqual(0, disk.nbisfile)
+    self.assertEqual(1, disk.nbisfile)
     self.assertEqual(1, disk.nbgetfreebytes)
-    self.assertEqual(0, disk.nbgetfilesizeandctime)
+    self.assertEqual(1, disk.nbgetfilesizeandctime)
     self.assertEqual(1, eos.nbfsls)
     self.assertEqual(0, eos.nbstagerrm)
+    self.assertEqual(0, eos.nbattrset)
 
 if __name__ == '__main__':
   suites = []
diff --git a/rdbms/Rset.cpp b/rdbms/Rset.cpp
index 7950e08c95560c5c804cd4b667dc7f3095efe783..4c36b22cd6f15ceaef636320e708a253e5c99cf7 100644
--- a/rdbms/Rset.cpp
+++ b/rdbms/Rset.cpp
@@ -62,10 +62,15 @@ Rset &Rset::operator=(Rset &&rhs) {
 // columnString
 //------------------------------------------------------------------------------
 std::string Rset::columnBlob(const std::string &colName) const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->columnBlob(colName);
+  } catch(exception::Exception &ex) {
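+    // Prepend the function name to the message in place and rethrow, preserving the concrete exception type.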
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->columnBlob(colName);
 }
 
 //------------------------------------------------------------------------------
@@ -74,7 +79,7 @@ std::string Rset::columnBlob(const std::string &colName) const {
 std::string Rset::columnString(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+      throw InvalidResultSet("This result set is invalid");
     }
 
     const optional<std::string> col = columnOptionalString(colName);
@@ -84,7 +89,8 @@ std::string Rset::columnString(const std::string &colName) const {
       throw NullDbValue(std::string("Database column ") + colName + " contains a null value");
     }
   } catch(exception::Exception &ex) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -94,7 +100,7 @@ std::string Rset::columnString(const std::string &colName) const {
 uint64_t Rset::columnUint64(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+      throw InvalidResultSet("This result set is invalid");
     }
 
     const optional<uint64_t> col = columnOptionalUint64(colName);
@@ -104,7 +110,8 @@ uint64_t Rset::columnUint64(const std::string &colName) const {
       throw NullDbValue(std::string("Database column ") + colName + " contains a null value");
     }
   } catch(exception::Exception &ex) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -114,7 +121,7 @@ uint64_t Rset::columnUint64(const std::string &colName) const {
 bool Rset::columnBool(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+      throw InvalidResultSet("This result set is invalid");
     }
 
     const optional<bool> col = columnOptionalBool(colName);
@@ -124,7 +131,8 @@ bool Rset::columnBool(const std::string &colName) const {
       throw NullDbValue(std::string("Database column ") + colName + " contains a null value");
     }
   } catch(exception::Exception &ex) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -134,7 +142,7 @@ bool Rset::columnBool(const std::string &colName) const {
 optional<bool> Rset::columnOptionalBool(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+      throw InvalidResultSet("This result set is invalid");
     }
 
     const auto column = columnOptionalUint64(colName);
@@ -144,7 +152,8 @@ optional<bool> Rset::columnOptionalBool(const std::string &colName) const {
       return nullopt;
     }
   } catch(exception::Exception &ex) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -152,28 +161,38 @@ optional<bool> Rset::columnOptionalBool(const std::string &colName) const {
 // getSql
 //------------------------------------------------------------------------------
 const std::string &Rset::getSql() const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->getSql();
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->getSql();
 }
 
 //------------------------------------------------------------------------------
 // next
 //------------------------------------------------------------------------------
 bool Rset::next() {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
-  }
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
 
-  const bool aRowHasBeenRetrieved = m_impl->next();
+    const bool aRowHasBeenRetrieved = m_impl->next();
 
-  // Release resources of result set when its end has been reached
-  if(!aRowHasBeenRetrieved) {
-    m_impl.reset(nullptr);
-  }
+    // Release resources of result set when its end has been reached
+    if(!aRowHasBeenRetrieved) {
+      m_impl.reset(nullptr);
+    }
 
-  return aRowHasBeenRetrieved;
+    return aRowHasBeenRetrieved;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
+  }
 }
 
 //------------------------------------------------------------------------------
@@ -188,30 +207,45 @@ bool Rset::isEmpty() const
 // columnIsNull
 //------------------------------------------------------------------------------
 bool Rset::columnIsNull(const std::string &colName) const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->columnIsNull(colName);
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->columnIsNull(colName);
 }
 
 //------------------------------------------------------------------------------
 // columnOptionalString
 //------------------------------------------------------------------------------
 optional<std::string> Rset::columnOptionalString(const std::string &colName) const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->columnOptionalString(colName);
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->columnOptionalString(colName);
 }
 
 //------------------------------------------------------------------------------
 // columnOptionalUint64
 //------------------------------------------------------------------------------
 optional<uint64_t> Rset::columnOptionalUint64(const std::string &colName) const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->columnOptionalUint64(colName);
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->columnOptionalUint64(colName);
 }
 
 //------------------------------------------------------------------------------
@@ -220,7 +254,7 @@ optional<uint64_t> Rset::columnOptionalUint64(const std::string &colName) const
 double Rset::columnDouble(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+      throw InvalidResultSet("This result set is invalid");
     }
 
     const optional<double> col = columnOptionalDouble(colName);
@@ -230,7 +264,8 @@ double Rset::columnDouble(const std::string &colName) const {
       throw NullDbValue(std::string("Database column ") + colName + " contains a null value");
     }
   } catch(exception::Exception &ex) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -238,10 +273,15 @@ double Rset::columnDouble(const std::string &colName) const {
 // columnOptionalDouble
 //------------------------------------------------------------------------------
 optional<double> Rset::columnOptionalDouble(const std::string &colName) const {
-  if(nullptr == m_impl) {
-    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  try {
+    if(nullptr == m_impl) {
+      throw InvalidResultSet("This result set is invalid");
+    }
+    return m_impl->columnOptionalDouble(colName);
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
-  return m_impl->columnOptionalDouble(colName);
 }
 
 } // namespace rdbms
diff --git a/rdbms/Stmt.cpp b/rdbms/Stmt.cpp
index c465d85f6f5fd7742d9b919e45d24b85baea6bc9..e0bb8d295bed6b4dbedb713c98680e318ff56911 100644
--- a/rdbms/Stmt.cpp
+++ b/rdbms/Stmt.cpp
@@ -88,10 +88,15 @@ Stmt &Stmt::operator=(Stmt &&rhs) {
 // getSql
 //-----------------------------------------------------------------------------
 const std::string &Stmt::getSql() const {
-  if(nullptr != m_stmt) {
-    return m_stmt->getSql();
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->getSql();
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
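+    // As in Rset: prepend the function name to the message in place and rethrow, preserving the concrete exception type.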
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -99,10 +104,15 @@ const std::string &Stmt::getSql() const {
 // getParamIdx
 //-----------------------------------------------------------------------------
 uint32_t Stmt::getParamIdx(const std::string &paramName) const {
-  if(nullptr != m_stmt) {
-    return m_stmt->getParamIdx(paramName);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->getParamIdx(paramName);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -110,10 +120,15 @@ uint32_t Stmt::getParamIdx(const std::string &paramName) const {
 // bindUint64
 //-----------------------------------------------------------------------------
 void Stmt::bindUint64(const std::string &paramName, const uint64_t paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindUint64(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindUint64(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -121,10 +136,15 @@ void Stmt::bindUint64(const std::string &paramName, const uint64_t paramValue) {
 // bindOptionalUint64
 //-----------------------------------------------------------------------------
 void Stmt::bindOptionalUint64(const std::string &paramName, const optional<uint64_t> &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindOptionalUint64(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindOptionalUint64(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -132,10 +152,15 @@ void Stmt::bindOptionalUint64(const std::string &paramName, const optional<uint6
 // bindDouble
 //-----------------------------------------------------------------------------
 void Stmt::bindDouble(const std::string &paramName, const double paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindDouble(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindDouble(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -143,10 +168,15 @@ void Stmt::bindDouble(const std::string &paramName, const double paramValue) {
 // bindOptionalDouble
 //-----------------------------------------------------------------------------
 void Stmt::bindOptionalDouble(const std::string &paramName, const optional<double> &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindOptionalDouble(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindOptionalDouble(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -154,10 +184,15 @@ void Stmt::bindOptionalDouble(const std::string &paramName, const optional<doubl
 // bindBool
 //-----------------------------------------------------------------------------
 void Stmt::bindBool(const std::string &paramName, const bool paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindBool(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindBool(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -165,10 +200,15 @@ void Stmt::bindBool(const std::string &paramName, const bool paramValue) {
 // bindOptionalBool
 //-----------------------------------------------------------------------------
 void Stmt::bindOptionalBool(const std::string &paramName, const optional<bool> &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindOptionalBool(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindOptionalBool(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -176,10 +216,15 @@ void Stmt::bindOptionalBool(const std::string &paramName, const optional<bool> &
 // bindString
 //-----------------------------------------------------------------------------
 void Stmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindBlob(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindBlob(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -187,10 +232,15 @@ void Stmt::bindBlob(const std::string &paramName, const std::string &paramValue)
 // bindString
 //-----------------------------------------------------------------------------
 void Stmt::bindString(const std::string &paramName, const std::string &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindString(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindString(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -198,10 +248,15 @@ void Stmt::bindString(const std::string &paramName, const std::string &paramValu
 // bindOptionalString
 //-----------------------------------------------------------------------------
 void Stmt::bindOptionalString(const std::string &paramName, const optional<std::string> &paramValue) {
-  if(nullptr != m_stmt) {
-    return m_stmt->bindOptionalString(paramName, paramValue);
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->bindOptionalString(paramName, paramValue);
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -209,10 +264,15 @@ void Stmt::bindOptionalString(const std::string &paramName, const optional<std::
 // executeQuery
 //-----------------------------------------------------------------------------
 Rset Stmt::executeQuery() {
-  if(nullptr != m_stmt) {
-    return Rset(m_stmt->executeQuery());
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return Rset(m_stmt->executeQuery());
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -220,10 +280,15 @@ Rset Stmt::executeQuery() {
 // executeNonQuery
 //-----------------------------------------------------------------------------
 void Stmt::executeNonQuery() {
-  if(nullptr != m_stmt) {
-    return m_stmt->executeNonQuery();
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->executeNonQuery();
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -231,10 +296,15 @@ void Stmt::executeNonQuery() {
 // getNbAffectedRows
 //-----------------------------------------------------------------------------
 uint64_t Stmt::getNbAffectedRows() const {
-  if(nullptr != m_stmt) {
-    return m_stmt->getNbAffectedRows();
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return m_stmt->getNbAffectedRows();
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
@@ -242,10 +312,15 @@ uint64_t Stmt::getNbAffectedRows() const {
 // getStmt
 //-----------------------------------------------------------------------------
 wrapper::StmtWrapper &Stmt::getStmt() {
-  if(nullptr != m_stmt) {
-    return *m_stmt;
-  } else {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  try {
+    if(nullptr != m_stmt) {
+      return *m_stmt;
+    } else {
+      throw exception::Exception("Stmt does not contain a cached statement");
+    }
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+    throw;
   }
 }
 
diff --git a/scheduler/ArchiveMount.cpp b/scheduler/ArchiveMount.cpp
index fbfe60f96f809b8b48d790412c6f28a77bc64a87..6578013c0b4347288dbd8e9bf40a486c1ae17d10 100644
--- a/scheduler/ArchiveMount.cpp
+++ b/scheduler/ArchiveMount.cpp
@@ -154,7 +154,7 @@ std::list<std::unique_ptr<cta::ArchiveJob> > cta::ArchiveMount::getNextJobBatch(
 // reportJobsBatchWritten
 //------------------------------------------------------------------------------
 void cta::ArchiveMount::reportJobsBatchTransferred(std::queue<std::unique_ptr<cta::ArchiveJob> > & successfulArchiveJobs,
-    std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, cta::log::LogContext& logContext) {
+    std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, std::queue<std::unique_ptr<cta::ArchiveJob>>& failedToReportArchiveJobs, cta::log::LogContext& logContext) {
   std::set<cta::catalogue::TapeItemWrittenPointer> tapeItemsWritten;
   std::list<std::unique_ptr<cta::ArchiveJob> > validatedSuccessfulArchiveJobs;
   std::unique_ptr<cta::ArchiveJob> job;
@@ -169,6 +169,12 @@ void cta::ArchiveMount::reportJobsBatchTransferred(std::queue<std::unique_ptr<ct
       job = std::move(successfulArchiveJobs.front());
       successfulArchiveJobs.pop();
       if (!job.get()) continue;
+      cta::log::ScopedParamContainer params(logContext);
+      params.add("tapeVid",job->tapeFile.vid)
+            .add("mountType",cta::common::dataStructures::toString(job->m_mount->getMountType()))
+            .add("fileId",job->archiveFile.archiveFileID)
+            .add("type", "ReportSuccessful");
+      logContext.log(cta::log::INFO, "In cta::ArchiveMount::reportJobsBatchTransferred(), archive job successful.");
       tapeItemsWritten.emplace(job->validateAndGetTapeFileWritten().release());
       files++;
       bytes+=job->archiveFile.fileSize;
@@ -224,6 +230,9 @@ void cta::ArchiveMount::reportJobsBatchTransferred(std::queue<std::unique_ptr<ct
     }
     const std::string msg_error="In ArchiveMount::reportJobsBatchWritten(): got an exception";
     logContext.log(cta::log::ERR, msg_error);
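+    // Reporting failed: hand the validated jobs back to the caller via failedToReportArchiveJobs so they are not lost.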
+    for(auto &aj: validatedSuccessfulArchiveJobs){
+      failedToReportArchiveJobs.push(std::move(aj));
+    }
     throw cta::ArchiveMount::FailedMigrationRecallResult(msg_error);
   } catch(const std::exception& e){
     cta::log::ScopedParamContainer params(logContext);
@@ -236,6 +245,9 @@ void cta::ArchiveMount::reportJobsBatchTransferred(std::queue<std::unique_ptr<ct
     }
     const std::string msg_error="In ArchiveMount::reportJobsBatchWritten(): got an standard exception";
     logContext.log(cta::log::ERR, msg_error);
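+    // Same as above: hand the validated jobs back to the caller so they are not lost.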
+    for(auto &aj: validatedSuccessfulArchiveJobs){
+      failedToReportArchiveJobs.push(std::move(aj));
+    }
     throw cta::ArchiveMount::FailedMigrationRecallResult(msg_error);
   }
 }
diff --git a/scheduler/ArchiveMount.hpp b/scheduler/ArchiveMount.hpp
index 06bf04077baacaa8076075e71a3a5a0c1f9f5737..df62d10ae41afc3ad4a38ab3105728c4bf485595 100644
--- a/scheduler/ArchiveMount.hpp
+++ b/scheduler/ArchiveMount.hpp
@@ -149,7 +149,7 @@ namespace cta {
      * @param logContext
      */
     virtual void reportJobsBatchTransferred (std::queue<std::unique_ptr<cta::ArchiveJob> > & successfulArchiveJobs,
-        std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, cta::log::LogContext &logContext);
+        std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, std::queue<std::unique_ptr<cta::ArchiveJob>>& failedToReportArchiveJobs, cta::log::LogContext &logContext);
     
     /**
      * Returns the tape pool of the tape to be mounted.
diff --git a/scheduler/OStoreDB/OStoreDB.cpp b/scheduler/OStoreDB/OStoreDB.cpp
index 2272862e60e3c4ea8b67f5b21d6e2b189013cc11..66c4b9330f31fbb720e862d1da0c08ed9ae58570 100644
--- a/scheduler/OStoreDB/OStoreDB.cpp
+++ b/scheduler/OStoreDB/OStoreDB.cpp
@@ -1422,7 +1422,7 @@ OStoreDB::RetrieveQueueItor_t* OStoreDB::getRetrieveJobItorPtr(const std::string
 // OStoreDB::queueRepack()
 //------------------------------------------------------------------------------
 void OStoreDB::queueRepack(const std::string& vid, const std::string& bufferURL,
-    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy& mountPolicy, log::LogContext & lc) {
+    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy& mountPolicy, const bool forceDisabledTape, log::LogContext & lc) {
   // Prepare the repack request object in memory.
   assertAgentAddressSet();
   cta::utils::Timer t;
@@ -1434,6 +1434,7 @@ void OStoreDB::queueRepack(const std::string& vid, const std::string& bufferURL,
   rr->setType(repackType);
   rr->setBufferURL(bufferURL);
   rr->setMountPolicy(mountPolicy);
+  rr->setForceDisabledTape(forceDisabledTape);
   // Try to reference the object in the index (will fail if there is already a request with this VID.
   try {
     Helpers::registerRepackRequestToIndex(vid, rr->getAddressIfSet(), *m_agentReference, m_objectStore, lc);
@@ -1656,6 +1657,7 @@ std::unique_ptr<SchedulerDatabase::RepackRequest> OStoreDB::getNextRepackJobToEx
     ret->repackInfo.type = repackInfo.type;
     ret->repackInfo.status = repackInfo.status;
     ret->repackInfo.repackBufferBaseURL = repackInfo.repackBufferBaseURL;
+    ret->repackInfo.forceDisabledTape = repackInfo.forceDisabledTape;
     return std::move(ret);
   }
 }
@@ -1823,7 +1825,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
     // As we are popping from a single report queue, all requests should concern only one repack request.
     if (repackRequestAddresses.size() != 1) {
       std::stringstream err;
-      err << "In OStoreDB::getNextSuccessfulRetrieveRepackReportBatch(): reports for several repack requests in the same queue. ";
+      err << "In OStoreDB::getNextSuccessfulArchiveRepackReportBatch(): reports for several repack requests in the same queue. ";
       for (auto & rr: repackRequestAddresses) { err << rr << " "; }
       throw exception::Exception(err.str());
     }
@@ -2207,11 +2209,12 @@ void OStoreDB::RepackRequest::setLastExpandedFSeq(uint64_t fseq){
 //------------------------------------------------------------------------------
 // OStoreDB::RepackRequest::addSubrequests()
 //------------------------------------------------------------------------------
-void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, 
+uint64_t OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, 
     cta::common::dataStructures::ArchiveRoute::FullMap& archiveRoutesMap, uint64_t maxFSeqLowBound, 
     const uint64_t maxAddedFSeq, const cta::SchedulerDatabase::RepackRequest::TotalStatsFiles &totalStatsFiles, 
     disk::DiskSystemList diskSystemList, log::LogContext& lc) {
   // We need to prepare retrieve requests names and reference them, create them, enqueue them.
+  uint64_t nbRetrieveSubrequestsCreated = 0;
   objectstore::ScopedExclusiveLock rrl (m_repackRequest);
   m_repackRequest.fetch();
   std::set<uint64_t> fSeqs;
@@ -2220,6 +2223,7 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
   m_repackRequest.setTotalStats(totalStatsFiles);
   uint64_t fSeq = std::max(maxFSeqLowBound + 1, maxAddedFSeq + 1);
   common::dataStructures::MountPolicy mountPolicy = m_repackRequest.getMountPolicy();
+  bool forceDisabledTape = m_repackRequest.getInfo().forceDisabledTape;
   // We make sure the references to subrequests exist persistently before creating them.
   m_repackRequest.commit();
   // We keep holding the repack request lock: we need to ensure de deleted boolean of each subrequest does
@@ -2266,6 +2270,15 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
         for (auto & ar: archiveRoutesMap.at(std::make_tuple(rsr.archiveFile.diskInstance, rsr.archiveFile.storageClass))) {
           rRRepackInfo.archiveRouteMap[ar.second.copyNb] = ar.second.tapePoolName;
         }
+        //Check that no two copyNbs have the same destination tapepool
+        for(auto & currentCopyNbTapePool: rRRepackInfo.archiveRouteMap){
+          int nbTapepool = std::count_if(rRRepackInfo.archiveRouteMap.begin(),rRRepackInfo.archiveRouteMap.end(),[&currentCopyNbTapePool](const std::pair<uint64_t,std::string> & copyNbTapepool){
+            return copyNbTapepool.second == currentCopyNbTapePool.second;
+          });
+          if(nbTapepool != 1){
+            throw cta::ExpandRepackRequestException("In OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(), found the same destination tapepool for different copyNb.");
+          }
+        }
       } catch (std::out_of_range &) {
         notCreatedSubrequests.emplace_back(rsr);
         failedCreationStats.files++;
@@ -2289,6 +2302,7 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
       rRRepackInfo.fileBufferURL = rsr.fileBufferURL;
       rRRepackInfo.fSeq = rsr.fSeq;
       rRRepackInfo.isRepack = true;
+      rRRepackInfo.forceDisabledTape = forceDisabledTape;
       rRRepackInfo.repackRequestAddress = m_repackRequest.getAddressIfSet();
       rr->setRepackInfo(rRRepackInfo);
       // Set the queueing parameters
@@ -2305,7 +2319,7 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
         if (tc.vid == repackInfo.vid) {
           try {
             // Try to select the repack VID from a one-vid list.
-            Helpers::selectBestRetrieveQueue({repackInfo.vid}, m_oStoreDB.m_catalogue, m_oStoreDB.m_objectStore);
+            Helpers::selectBestRetrieveQueue({repackInfo.vid}, m_oStoreDB.m_catalogue, m_oStoreDB.m_objectStore, repackInfo.forceDisabledTape);
             bestVid = repackInfo.vid;
             activeCopyNumber = tc.copyNb;
           } catch (Helpers::NoTapeAvailableForRetrieve &) {}
@@ -2317,7 +2331,7 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
         std::set<std::string> candidateVids;
         for (auto & tc: rsr.archiveFile.tapeFiles) candidateVids.insert(tc.vid);
         try {
-          bestVid = Helpers::selectBestRetrieveQueue(candidateVids, m_oStoreDB.m_catalogue, m_oStoreDB.m_objectStore);
+          bestVid = Helpers::selectBestRetrieveQueue(candidateVids, m_oStoreDB.m_catalogue, m_oStoreDB.m_objectStore, forceDisabledTape);
         } catch (Helpers::NoTapeAvailableForRetrieve &) {
           // Count the failure for this subrequest. 
           notCreatedSubrequests.emplace_back(rsr);
@@ -2438,11 +2452,13 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
       is.request->fetch();
       sorter.insertRetrieveRequest(is.request, *m_oStoreDB.m_agentReference, is.activeCopyNb, lc);
     }
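+    // Record how many retrieve subrequests were actually queued; this count is returned to the caller.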
+    nbRetrieveSubrequestsCreated = sorter.getAllRetrieve().size();
     locks.clear();
     sorter.flushAll(lc);
   }
   m_repackRequest.setLastExpandedFSeq(fSeq);
   m_repackRequest.commit();
+  return nbRetrieveSubrequestsCreated;
 }
 
 //------------------------------------------------------------------------------
@@ -2762,6 +2778,18 @@ void OStoreDB::reportDriveStatus(const common::dataStructures::DriveInfo& driveI
   updateDriveStatus(driveInfo, inputs, lc);
 }
 
+//------------------------------------------------------------------------------
+// OStoreDB::reportDriveConfig()
+//------------------------------------------------------------------------------
+void OStoreDB::reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine, const cta::tape::daemon::TapedConfiguration& tapedConfig, log::LogContext& lc){
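+  // Persist the taped configuration and the drive's TPCONFIG line in its drive state object.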
+  objectstore::DriveState ds(m_objectStore);
+  ScopedExclusiveLock dsl;
+  Helpers::getLockedAndFetchedDriveState(ds, dsl, *m_agentReference, tpConfigLine.unitName, lc, Helpers::CreateIfNeeded::doNotCreate);
+  ds.setConfig(tapedConfig);
+  ds.setTpConfig(tpConfigLine);
+  ds.commit();
+}
+
 //------------------------------------------------------------------------------
 // OStoreDB::updateDriveStatus()
 //------------------------------------------------------------------------------
@@ -3797,6 +3825,14 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
     if (osdbJob->isRepack) {
       try {
         osdbJob->m_jobSucceedForRepackReporter->wait();
+        {
+          cta::log::ScopedParamContainer spc(lc);
+          std::string vid = osdbJob->archiveFile.tapeFiles.at(osdbJob->selectedCopyNb).vid;
+          spc.add("tapeVid",vid)
+             .add("mountType","RetrieveForRepack")
+             .add("fileId",osdbJob->archiveFile.archiveFileID);
+          lc.log(cta::log::INFO,"In OStoreDB::RetrieveMount::flushAsyncSuccessReports(), retrieve job successful");
+        }
         mountPolicy = osdbJob->m_jobSucceedForRepackReporter->m_MountPolicy;
         jobsToRequeueForRepackMap[osdbJob->m_repackInfo.repackRequestAddress].emplace_back(osdbJob);
       } catch (cta::exception::Exception & ex) {
@@ -3811,6 +3847,15 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
     } else {
       try {
         osdbJob->m_jobDelete->wait();
+        {
+          //Log for monitoring
+          cta::log::ScopedParamContainer spc(lc);
+          std::string vid = osdbJob->archiveFile.tapeFiles.at(osdbJob->selectedCopyNb).vid;
+          spc.add("tapeVid",vid)
+             .add("mountType","RetrieveForUser")
+             .add("fileId",osdbJob->archiveFile.archiveFileID);
+          lc.log(cta::log::INFO,"In OStoreDB::RetrieveMount::flushAsyncSuccessReports(), retrieve job successful");
+        }
         osdbJob->retrieveRequest.lifecycleTimings.completed_time = time(nullptr);
         std::string requestAddress = osdbJob->m_retrieveRequest.getAddressIfSet();
         
@@ -3819,7 +3864,7 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
         cta::common::dataStructures::LifecycleTimings requestTimings = osdbJob->retrieveRequest.lifecycleTimings;
         log::ScopedParamContainer params(lc);
         params.add("requestAddress",requestAddress)
-              .add("archiveFileID",osdbJob->archiveFile.archiveFileID)
+              .add("fileId",osdbJob->archiveFile.archiveFileID)
               .add("vid",osdbJob->m_retrieveMount->mountInfo.vid)
               .add("timeForSelection",requestTimings.getTimeForSelection())
               .add("timeForCompletion", requestTimings.getTimeForCompletion());
@@ -4192,7 +4237,7 @@ void OStoreDB::ArchiveJob::failTransfer(const std::string& failureReason, log::L
             .add("totalRetries", retryStatus.totalRetries)
             .add("maxTotalRetries", retryStatus.maxTotalRetries);
       lc.log(log::INFO,
-          "In ArchiveJob::failTransfer(): requeued job for (potentially in-mount) retry.");
+          "In ArchiveJob::failTransfer(): requeued job for (potentially in-mount) retry (repack).");
       return;
   }
   case NextStep::StoreInFailedJobsContainer: {
@@ -4413,12 +4458,13 @@ objectstore::RepackRequest::SubrequestStatistics::List OStoreDB::RepackArchiveRe
     ssl.back().fSeq = sri.repackInfo.fSeq;
     ssl.back().copyNb = sri.archivedCopyNb;
     for(auto &j: sri.archiveJobsStatusMap){
-      if(j.first != sri.archivedCopyNb && 
-        (j.second != objectstore::serializers::ArchiveJobStatus::AJS_Complete) && 
-        (j.second != objectstore::serializers::ArchiveJobStatus::AJS_Failed)){
-        break;
-      } else {
-        ssl.back().subrequestDeleted = true;
+      if(j.first != sri.archivedCopyNb){
+        if((j.second != objectstore::serializers::ArchiveJobStatus::AJS_Complete) && (j.second != objectstore::serializers::ArchiveJobStatus::AJS_Failed)){
+          break;
+        } else {
+          ssl.back().subrequestDeleted = true;
+          break;
+        }
       }
     }
   }
@@ -4459,21 +4505,35 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
   for (auto &sri: m_subrequestList) {
     bufferURL = sri.repackInfo.fileBufferURL;
     bool moreJobsToDo = false;
+    //Check if the ArchiveRequest contains other jobs that are not finished
     for (auto &j: sri.archiveJobsStatusMap) {
-      if ((j.first != sri.archivedCopyNb) && 
-          (j.second != serializers::ArchiveJobStatus::AJS_Complete) && 
-          (j.second != serializers::ArchiveJobStatus::AJS_Failed)) {
-        moreJobsToDo = true;
-        break;
+      //Getting the sibling jobs (i.e. copy nb != current one)
+      if (j.first != sri.archivedCopyNb) {
+        //A sibling job is unfinished if its status is neither AJS_Complete nor AJS_Failed
+        if ((j.second != serializers::ArchiveJobStatus::AJS_Complete) &&
+            (j.second != serializers::ArchiveJobStatus::AJS_Failed)) {
+          //The sibling job is not finished, but its status may be about to change: check jobOwnerUpdatersList,
+          //the list of jobs whose status we want to change to AJS_Complete
+          bool copyNbStatusUpdating = (std::find_if(jobOwnerUpdatersList.begin(), jobOwnerUpdatersList.end(), [&j,&sri](JobOwnerUpdaters &jou){
+            return ((jou.subrequestInfo.archiveFile.archiveFileID == sri.archiveFile.archiveFileID) && (jou.subrequestInfo.archivedCopyNb == j.first));
+          }) != jobOwnerUpdatersList.end());
+          if(!copyNbStatusUpdating){
+            //The sibling job is not in jobOwnerUpdatersList, so it is not finished yet: there are more jobs to do
+            moreJobsToDo = true;
+            break;
+          }
+        }
       }
     }
     objectstore::ArchiveRequest & ar = *sri.subrequest;
     if (moreJobsToDo) {
       try {
-        jobOwnerUpdatersList.push_back(JobOwnerUpdaters{std::unique_ptr<objectstore::ArchiveRequest::AsyncJobOwnerUpdater> (
-              ar.asyncUpdateJobOwner(sri.archivedCopyNb, "", m_oStoreDb.m_agentReference->getAgentAddress(),
-              newStatus)), 
-            sri});
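+        //Only schedule the owner update if the ArchiveRequest still exists in the object store
+        //(a plausible reading of the new exists() guard: the request may have been deleted concurrently).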
+        if(ar.exists()){
+          jobOwnerUpdatersList.push_back(JobOwnerUpdaters{std::unique_ptr<objectstore::ArchiveRequest::AsyncJobOwnerUpdater> (
+                ar.asyncUpdateJobOwner(sri.archivedCopyNb, "", m_oStoreDb.m_agentReference->getAgentAddress(),
+                newStatus)), 
+              sri});
+        }
       } catch (cta::exception::Exception & ex) {
         // Log the error
         log::ScopedParamContainer params(lc);
@@ -4580,7 +4640,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
       params.add("fileId", jou.subrequestInfo.archiveFile.archiveFileID)
             .add("subrequestAddress", jou.subrequestInfo.subrequest->getAddressIfSet())
             .add("exceptionMsg", ex.getMessageValue());
-      lc.log(log::ERR, "In OStoreDB::RepackArchiveReportBatch::report(): async job update.");
+      lc.log(log::ERR, "In OStoreDB::RepackArchiveReportBatch::report(): async job update failed.");
     }    
   }
   timingList.insertAndReset("asyncUpdateOrDeleteCompletionTime", t);
@@ -4782,12 +4842,13 @@ void OStoreDB::RetrieveJob::failTransfer(const std::string &failureReason, log::
           "In OStoreDB::RetrieveJob::failTransfer(): no active job after addJobFailure() returned false."
         );
       }
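+      //Propagate the repack's forceDisabledTape flag to the queue selection, so a disabled tape
+      //can still be chosen for the retry when the repack allows it.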
+      bool disabledTape = m_retrieveRequest.getRepackInfo().forceDisabledTape;
       m_retrieveRequest.commit();
       rel.release();
 
       // Check that the requested retrieve job (for the provided VID) exists, and record the copy number
       std::string bestVid = Helpers::selectBestRetrieveQueue(candidateVids, m_oStoreDB.m_catalogue,
-        m_oStoreDB.m_objectStore);
+        m_oStoreDB.m_objectStore,disabledTape);
 
       auto tf_it = af.tapeFiles.begin();
       for( ; tf_it != af.tapeFiles.end() && tf_it->vid != bestVid; ++tf_it) ;
diff --git a/scheduler/OStoreDB/OStoreDB.hpp b/scheduler/OStoreDB/OStoreDB.hpp
index 3b1d75cdc34c5e6e3df1e06eb4a25c0a3c507554..6ff3839517269ca05f4ede1b19aa5a4f5147fde3 100644
--- a/scheduler/OStoreDB/OStoreDB.hpp
+++ b/scheduler/OStoreDB/OStoreDB.hpp
@@ -348,7 +348,7 @@ public:
   
   /* === Repack requests handling =========================================== */
   void queueRepack(const std::string& vid, const std::string& bufferURL, 
-    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext &logContext) override;
+    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, const bool forceDisabledTape, log::LogContext &logContext) override;
   
   std::list<common::dataStructures::RepackInfo> getRepackInfo() override;
   CTA_GENERATE_EXCEPTION_CLASS(NoSuchRepackRequest);
@@ -361,7 +361,7 @@ public:
   public:
     RepackRequest(const std::string &jobAddress, OStoreDB &oStoreDB) :
     m_oStoreDB(oStoreDB), m_repackRequest(jobAddress, m_oStoreDB.m_objectStore){}
-    void addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, cta::common::dataStructures::ArchiveRoute::FullMap& archiveRoutesMap,
+    uint64_t addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, cta::common::dataStructures::ArchiveRoute::FullMap& archiveRoutesMap,
       uint64_t maxFSeqLowBound, const uint64_t maxAddedFSeq, const TotalStatsFiles &totalStatsFiles, disk::DiskSystemList diskSystemList, 
       log::LogContext& lc) override;
     void expandDone() override;
@@ -550,6 +550,8 @@ public:
     common::dataStructures::DriveStatus status, time_t reportTime, log::LogContext & lc, uint64_t mountSessionId, uint64_t byteTransfered, 
     uint64_t filesTransfered, double latestBandwidth, const std::string& vid, const std::string& tapepool) override;
   
+  void reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine, const cta::tape::daemon::TapedConfiguration& tapedConfig,log::LogContext& lc) override;
+  
   /* --- Private helper part implementing state transition logic -------------*/
   /*
    * The drive register should gracefully handle reports of status from the drive
diff --git a/scheduler/OStoreDB/OStoreDBFactory.hpp b/scheduler/OStoreDB/OStoreDBFactory.hpp
index 5e3a6345c0123b51ccd0ad917a8a932da1fc1eb7..7696929677d19d9327df4e180db3a07a1bb42175 100644
--- a/scheduler/OStoreDB/OStoreDBFactory.hpp
+++ b/scheduler/OStoreDB/OStoreDBFactory.hpp
@@ -68,7 +68,7 @@ namespace {
 template <class BackendType>
 class OStoreDBWrapper: public cta::objectstore::OStoreDBWrapperInterface {
 public:
-  OStoreDBWrapper(const std::string &context, const std::string &URL = "");
+  OStoreDBWrapper(const std::string &context, std::unique_ptr<cta::catalogue::Catalogue>& catalogue, const std::string &URL = "");
   
   ~OStoreDBWrapper() throw () {}
   
@@ -226,8 +226,8 @@ public:
   }
   
 
-  void queueRepack(const std::string& vid, const std::string& bufferURL, common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext& lc) override {
-    m_OStoreDB.queueRepack(vid, bufferURL, repackType, mountPolicy, lc);
+  void queueRepack(const std::string& vid, const std::string& bufferURL, common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, const bool forceDisabledTape, log::LogContext& lc) override {
+    m_OStoreDB.queueRepack(vid, bufferURL, repackType, mountPolicy, forceDisabledTape, lc);
   }
   
   std::list<common::dataStructures::RepackInfo> getRepackInfo() override {
@@ -272,20 +272,24 @@ public:
     m_OStoreDB.reportDriveStatus(driveInfo, mountType, status, reportTime, lc, mountSessionId, byteTransfered, filesTransfered,
        latestBandwidth, vid, tapepool);
   }
+  
+  void reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine, const cta::tape::daemon::TapedConfiguration& tapedConfig,log::LogContext& lc) override {
+    m_OStoreDB.reportDriveConfig(tpConfigLine, tapedConfig,lc);
+  }
 
 private:
   std::unique_ptr <cta::log::Logger> m_logger;
   std::unique_ptr <cta::objectstore::Backend> m_backend;
-  std::unique_ptr <cta::catalogue::Catalogue> m_catalogue;
+  std::unique_ptr <cta::catalogue::Catalogue> & m_catalogue;
   cta::OStoreDB m_OStoreDB;
   std::unique_ptr<objectstore::AgentReference> m_agentReferencePtr;
 };
 
 template <>
 OStoreDBWrapper<cta::objectstore::BackendVFS>::OStoreDBWrapper(
-        const std::string &context, const std::string &URL) :
+        const std::string &context, std::unique_ptr<cta::catalogue::Catalogue> & catalogue, const std::string &URL) :
 m_logger(new cta::log::DummyLogger("", "")), m_backend(new cta::objectstore::BackendVFS()), 
-m_catalogue(new cta::catalogue::DummyCatalogue),
+m_catalogue(catalogue),
 m_OStoreDB(*m_backend, *m_catalogue, *m_logger),
   m_agentReferencePtr(new objectstore::AgentReference("OStoreDBFactory", *m_logger))
 {
@@ -312,9 +316,9 @@ m_OStoreDB(*m_backend, *m_catalogue, *m_logger),
 
 template <>
 OStoreDBWrapper<cta::objectstore::BackendRados>::OStoreDBWrapper(
-        const std::string &context, const std::string &URL) :
+        const std::string &context,std::unique_ptr<cta::catalogue::Catalogue> & catalogue, const std::string &URL) :
 m_logger(new cta::log::DummyLogger("", "")), m_backend(cta::objectstore::BackendFactory::createBackend(URL, *m_logger).release()), 
-m_catalogue(new cta::catalogue::DummyCatalogue),
+m_catalogue(catalogue),
 m_OStoreDB(*m_backend, *m_catalogue, *m_logger),
   m_agentReferencePtr(new objectstore::AgentReference("OStoreDBFactory", *m_logger))
 {
@@ -370,8 +374,8 @@ public:
    *
    * @return A newly created scheduler database object.
    */
-  std::unique_ptr<SchedulerDatabase> create() const {
-    return std::unique_ptr<SchedulerDatabase>(new OStoreDBWrapper<BackendType>("UnitTest", m_URL));
+  std::unique_ptr<SchedulerDatabase> create(std::unique_ptr<cta::catalogue::Catalogue>& catalogue) const {
+    return std::unique_ptr<SchedulerDatabase>(new OStoreDBWrapper<BackendType>("UnitTest", catalogue, m_URL));
   }
   
   private:
diff --git a/scheduler/OStoreDB/OStoreDBTest.cpp b/scheduler/OStoreDB/OStoreDBTest.cpp
index 42469fc0fd1057a4e9c5221b9fdeac39eb4d40b8..4954c16b2dc4f205feb7a1b6c678e4a82c90e3de 100644
--- a/scheduler/OStoreDB/OStoreDBTest.cpp
+++ b/scheduler/OStoreDB/OStoreDBTest.cpp
@@ -27,6 +27,7 @@
 #include "OStoreDB.hpp"
 #include "objectstore/BackendRadosTestSwitch.hpp"
 #include "MemQueues.hpp"
+#include "catalogue/InMemoryCatalogue.hpp"
 
 namespace unitTests {
 
@@ -63,8 +64,9 @@ public:
     // We do a deep reference to the member as the C++ compiler requires the function to be 
     // already defined if called implicitly.
     const auto &factory = GetParam().dbFactory;
+    m_catalogue = cta::make_unique<cta::catalogue::DummyCatalogue>();
     // Get the OStore DB from the factory.
-    auto osdb = std::move(factory.create());
+    auto osdb = std::move(factory.create(m_catalogue));
     // Make sure the type of the SchedulerDatabase is correct (it should be an OStoreDBWrapperInterface).
     dynamic_cast<cta::objectstore::OStoreDBWrapperInterface *> (osdb.get());
     // We know the cast will not fail, so we can safely do it (otherwise we could leak memory).
@@ -73,6 +75,7 @@ public:
 
   virtual void TearDown() {
     m_db.reset();
+    m_catalogue.reset();
   }
 
   cta::objectstore::OStoreDBWrapperInterface &getDb() {
@@ -108,7 +111,8 @@ private:
   OStoreDBTest & operator= (const OStoreDBTest &) = delete;
 
   std::unique_ptr<cta::objectstore::OStoreDBWrapperInterface> m_db;
-
+  
+  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
 }; // class SchedulerDatabaseTest
 
 TEST_P(OStoreDBTest, getBatchArchiveJob) {
diff --git a/scheduler/RepackRequestManager.cpp b/scheduler/RepackRequestManager.cpp
index 62a615b5f2c1189cda4f81845d0f18879da19a25..7f7146793d46699bcea90dc380e284149840d772 100644
--- a/scheduler/RepackRequestManager.cpp
+++ b/scheduler/RepackRequestManager.cpp
@@ -41,7 +41,10 @@ void RepackRequestManager::runOnePass(log::LogContext& lc) {
       //We have a RepackRequest that has the status ToExpand, expand it
       try{
         m_scheduler.expandRepackRequest(repackRequest,timingList,t,lc);
-      } catch(const cta::exception::Exception &e){
+      } catch (const ExpandRepackRequestException& ex){
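+        //An expansion error is final for this request: fail it without rethrowing, unlike the generic case below.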
+        lc.log(log::ERR,ex.what());
+        repackRequest->fail();
+      } catch (const cta::exception::Exception &e){
         lc.log(log::ERR,e.what());
         repackRequest->fail();
         throw(e);
diff --git a/scheduler/RetrieveJob.hpp b/scheduler/RetrieveJob.hpp
index d85f48bdc13371370cdfe683074a3a70a656e23f..7bb4057195f428fbf6fbfe53e691b4c959b5d34d 100644
--- a/scheduler/RetrieveJob.hpp
+++ b/scheduler/RetrieveJob.hpp
@@ -27,6 +27,13 @@
 #include <limits>
 #include <memory>
 
+namespace castor {
+namespace tape {
+namespace tapeserver {
+namespace daemon {
+  class TapeReadTask;
+}}}}
+
 namespace cta {
 
 class RetrieveMount;
@@ -42,7 +49,7 @@ class RetrieveJob {
    */
   friend class RetrieveMount;
   friend class Scheduler;
-  
+  friend class castor::tape::tapeserver::daemon::TapeReadTask;
 public:
   /**
    * Constructor. It is not public as it is generated by the RetrieveMount.
diff --git a/scheduler/RetrieveMount.hpp b/scheduler/RetrieveMount.hpp
index 5a3495f31a7d7013e3c70f9ede114de58439d4a5..e5cb7b9028187436ff6ab4f596b0b6a5b2b1d06c 100644
--- a/scheduler/RetrieveMount.hpp
+++ b/scheduler/RetrieveMount.hpp
@@ -238,7 +238,7 @@ namespace cta {
      * A pointer to the file catalogue.
      */
     cta::catalogue::Catalogue &m_catalogue; 
-
+    
   }; // class RetrieveMount
 
 } // namespace cta
diff --git a/scheduler/Scheduler.cpp b/scheduler/Scheduler.cpp
index d7106341f133df264766ed74beb792e2bdb84a89..bb95ba90f967e126e7839876004ebce8473af574 100644
--- a/scheduler/Scheduler.cpp
+++ b/scheduler/Scheduler.cpp
@@ -344,18 +344,19 @@ void Scheduler::checkTapeFullBeforeRepack(std::string vid){
 // repack
 //------------------------------------------------------------------------------
 void Scheduler::queueRepack(const common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, 
-    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc) {
+    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, const bool forceDisabledTape, log::LogContext & lc) {
   // Check request sanity
   if (vid.empty()) throw exception::UserError("Empty VID name.");
   if (bufferURL.empty()) throw exception::UserError("Empty buffer URL.");
   utils::Timer t;
   checkTapeFullBeforeRepack(vid);
-  m_db.queueRepack(vid, bufferURL, repackType, mountPolicy, lc);
+  m_db.queueRepack(vid, bufferURL, repackType, mountPolicy, forceDisabledTape, lc);
   log::TimingList tl;
   tl.insertAndReset("schedulerDbTime", t);
   log::ScopedParamContainer params(lc);
   params.add("tapeVid", vid)
         .add("repackType", toString(repackType))
+        .add("disabledTape", forceDisabledTape)
         .add("bufferURL", bufferURL);
   tl.addToLog(params);
   lc.log(log::INFO, "In Scheduler::queueRepack(): success.");
@@ -453,17 +454,10 @@ double Scheduler::getRepackRequestExpansionTimeLimit() const {
 // expandRepackRequest
 //------------------------------------------------------------------------------
 void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackRequest, log::TimingList& timingList, utils::Timer& t, log::LogContext& lc) {
-  std::list<common::dataStructures::ArchiveFile> files;
   auto repackInfo = repackRequest->getRepackInfo();
   
   typedef cta::common::dataStructures::RepackInfo::Type RepackType;
-  if (repackInfo.type != RepackType::MoveOnly) {
-    log::ScopedParamContainer params(lc);
-    params.add("tapeVid", repackInfo.vid);
-    lc.log(log::ERR, "In Scheduler::expandRepackRequest(): failing repack request with unsupported (yet) type.");
-    repackRequest->fail();
-    return;
-  }
+  
   //We need to get the ArchiveRoutes to allow the retrieval of the tapePool in which the
   //tape where the file is is located
   std::list<common::dataStructures::ArchiveRoute> routes = m_catalogue.getArchiveRoutes();
@@ -485,26 +479,33 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
   std::stringstream dirBufferURL;
   dirBufferURL << repackInfo.repackBufferBaseURL << "/" << repackInfo.vid << "/";
   std::set<std::string> filesInDirectory;
+  std::unique_ptr<cta::disk::Directory> dir;
   if(archiveFilesForCatalogue.hasMore()){
     //We only create the folder if there are some files to repack
     cta::disk::DirectoryFactory dirFactory;
-    std::unique_ptr<cta::disk::Directory> dir;
     dir.reset(dirFactory.createDirectory(dirBufferURL.str()));
     if(dir->exist()){
-      filesInDirectory = dir->getFilesName();
+      //TODO : Repack tape repair workflow
+      //filesInDirectory = dir->getFilesName();
     } else {
       dir->mkdir();
     }
   }
   double elapsedTime = 0;
-  bool stopExpansion = false;
+  bool expansionTimeReached = false;
+  
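+  //The storage classes are only needed when new copies may have to be created,
+  //to know how many copies each file should have.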
+  std::list<common::dataStructures::StorageClass> storageClasses;
+  if(repackInfo.type == RepackType::AddCopiesOnly || repackInfo.type == RepackType::MoveAndAddCopies)
+    storageClasses = m_catalogue.getStorageClasses();
+  
   repackRequest->m_dbReq->setExpandStartedAndChangeStatus();
-  while(archiveFilesForCatalogue.hasMore() && !stopExpansion) {
+  uint64_t nbRetrieveSubrequestsQueued = 0;
+  
+  while(archiveFilesForCatalogue.hasMore() && !expansionTimeReached) {
     size_t filesCount = 0;
     uint64_t maxAddedFSeq = 0;
     std::list<SchedulerDatabase::RepackRequest::Subrequest> retrieveSubrequests;
-    while(filesCount < c_defaultMaxNbFilesForRepack && !stopExpansion && archiveFilesForCatalogue.hasMore())
-    {
+    while(filesCount < c_defaultMaxNbFilesForRepack && !expansionTimeReached && archiveFilesForCatalogue.hasMore()){
       filesCount++;
       fSeq++;
       retrieveSubrequests.push_back(cta::SchedulerDatabase::RepackRequest::Subrequest());
@@ -533,6 +534,61 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
           //retrieveSubRequest.fSeq = (retrieveSubRequest.fSeq == std::numeric_limits<decltype(retrieveSubRequest.fSeq)>::max()) ? tc.fSeq : std::max(tc.fSeq, retrieveSubRequest.fSeq);
         }
       }
+      
+      if(repackInfo.type == RepackType::AddCopiesOnly || repackInfo.type == RepackType::MoveAndAddCopies){
+        //We may need to create new copies here: the number of copies required by the storage class of the current
+        //ArchiveFile can be greater than the number of tape files the ArchiveFile currently has
+        auto archiveFileRoutes = archiveRoutesMap[std::make_pair(archiveFile.diskInstance,archiveFile.storageClass)];
+        auto storageClassItor = std::find_if(storageClasses.begin(),storageClasses.end(),[&archiveFile](const common::dataStructures::StorageClass& sc){
+          return sc.name == archiveFile.storageClass;
+        });
+        if(storageClassItor != storageClasses.end()){
+          common::dataStructures::StorageClass sc = *storageClassItor;
+          uint64_t nbFilesAlreadyArchived = getNbFilesAlreadyArchived(archiveFile);
+          uint64_t nbCopiesInStorageClass = sc.nbCopies;
+          uint64_t filesToArchive = nbCopiesInStorageClass - nbFilesAlreadyArchived;
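+          //Note (assumption): a file is expected to have at most nbCopiesInStorageClass active copies;
+          //filesToArchive is unsigned, so the subtraction would wrap if this did not hold.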
+          if(filesToArchive > 0){
+            totalStatsFile.totalFilesToArchive += filesToArchive;
+            totalStatsFile.totalBytesToArchive += (filesToArchive * archiveFile.fileSize);
+            std::set<uint64_t> copyNbsAlreadyInCTA;
+            for (auto & tc: archiveFile.tapeFiles) {
+              copyNbsAlreadyInCTA.insert(tc.copyNb);
+              if (tc.vid == repackInfo.vid) {
+                // We make the (reasonable) assumption that the archive file only has one copy on this tape.
+                // If not, we will ensure the subrequest is filed under the lowest fSeq existing on this tape.
+                // This will prevent double subrequest creation (we already have such a mechanism in case of crash and 
+                // restart of expansion).
+                //We found the copy of the file we want to retrieve and archive
+                //retrieveSubRequest.fSeq = tc.fSeq;
+                if(repackInfo.type == RepackType::AddCopiesOnly)
+                  retrieveSubRequest.fSeq = (retrieveSubRequest.fSeq == std::numeric_limits<decltype(retrieveSubRequest.fSeq)>::max()) ? tc.fSeq : std::max(tc.fSeq, retrieveSubRequest.fSeq);
+              }
+            }
+            for(auto archiveFileRoutesItor = archiveFileRoutes.begin(); archiveFileRoutesItor != archiveFileRoutes.end(); ++archiveFileRoutesItor){
+              if(copyNbsAlreadyInCTA.find(archiveFileRoutesItor->first) == copyNbsAlreadyInCTA.end()){
+                //We need to archive the missing copy
+                retrieveSubRequest.copyNbsToRearchive.insert(archiveFileRoutesItor->first);
+              }
+            }
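+            //Sanity check: every new copy needs an archive route; if fewer routes than required
+            //copies were found, the expansion cannot proceed.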
+            if(retrieveSubRequest.copyNbsToRearchive.size() < filesToArchive){
+              deleteRepackBuffer(std::move(dir));
+              throw ExpandRepackRequestException("In Scheduler::expandRepackRequest(): Missing archive routes for the creation of the new copies of the files");
+            }
+          } else {
+            if(repackInfo.type == RepackType::AddCopiesOnly){
+              //Nothing to archive, so nothing to retrieve either
+              retrieveSubrequests.pop_back();
+              continue;
+            }
+          }
+        } else {
+          //No storage class has been found for the current tape file: throw an exception
+          deleteRepackBuffer(std::move(dir));
+          throw ExpandRepackRequestException("In Scheduler::expandRepackRequest(): No storage class have been found for the file to add copies");
+        }
+      }
+      
+      
       std::stringstream fileName;
       fileName << std::setw(9) << std::setfill('0') << retrieveSubRequest.fSeq;
       bool createArchiveSubrequest = false;
@@ -549,16 +605,13 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
       if(!createArchiveSubrequest){
         totalStatsFile.totalBytesToRetrieve += retrieveSubRequest.archiveFile.fileSize;
         totalStatsFile.totalFilesToRetrieve += 1;
-        if (repackInfo.type == RepackType::MoveAndAddCopies || repackInfo.type == RepackType::AddCopiesOnly) {
-          // We should not get here are the type is filtered at the beginning of the function.
-          // TODO: add support for expand.
-          throw cta::exception::Exception("In Scheduler::expandRepackRequest(): expand not yet supported.");
-        }
-        if ((retrieveSubRequest.fSeq == std::numeric_limits<decltype(retrieveSubRequest.fSeq)>::max()) || retrieveSubRequest.copyNbsToRearchive.empty()) {
+        if (retrieveSubRequest.fSeq == std::numeric_limits<decltype(retrieveSubRequest.fSeq)>::max()) {
           log::ScopedParamContainer params(lc);
           params.add("fileId", retrieveSubRequest.archiveFile.archiveFileID)
                 .add("repackVid", repackInfo.vid);
           lc.log(log::ERR, "In Scheduler::expandRepackRequest(): no fSeq found for this file on this tape.");
+          totalStatsFile.totalBytesToRetrieve -= retrieveSubRequest.archiveFile.fileSize;
+          totalStatsFile.totalFilesToRetrieve -= 1;
           retrieveSubrequests.pop_back();
         } else {
           // We found some copies to rearchive. We still have to decide which file path we are going to use.
@@ -570,18 +623,23 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
           retrieveSubRequest.fileBufferURL = dirBufferURL.str() + fileName.str();
         }
       }
-      stopExpansion = (elapsedTime >= m_repackRequestExpansionTimeLimit);
-    }
+      expansionTimeReached = (elapsedTime >= m_repackRequestExpansionTimeLimit);
+    }   
     // Note: the highest fSeq will be recorded internally in the following call.
     // We know that the fSeq processed on the tape are >= initial fSeq + filesCount - 1 (or fSeq - 1 as we counted). 
     // We pass this information to the db for recording in the repack request. This will allow restarting from the right
     // value in case of crash.
     auto diskSystemList = m_catalogue.getAllDiskSystems();
     timingList.insertAndReset("getDisksystemsListTime",t);
-    repackRequest->m_dbReq->addSubrequestsAndUpdateStats(retrieveSubrequests, archiveRoutesMap, fSeq, maxAddedFSeq, totalStatsFile, diskSystemList, lc);
+    try{
+      nbRetrieveSubrequestsQueued = repackRequest->m_dbReq->addSubrequestsAndUpdateStats(retrieveSubrequests, archiveRoutesMap, fSeq, maxAddedFSeq, totalStatsFile, diskSystemList, lc);
+    } catch(const cta::ExpandRepackRequestException& e){
+      deleteRepackBuffer(std::move(dir));
+      throw e;
+    }
     timingList.insertAndReset("addSubrequestsAndUpdateStatsTime",t);
     {
-      if(!stopExpansion && archiveFilesForCatalogue.hasMore()){
+      if(!expansionTimeReached && archiveFilesForCatalogue.hasMore()){
         log::ScopedParamContainer params(lc);
         params.add("tapeVid",repackInfo.vid);
         timingList.addToLog(params);
@@ -593,11 +651,14 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
   params.add("tapeVid",repackInfo.vid);
   timingList.addToLog(params);
   if(archiveFilesForCatalogue.hasMore()){
-    if(stopExpansion){
-      repackRequest->m_dbReq->requeueInToExpandQueue(lc);
-      lc.log(log::INFO,"Expansion time reached, Repack Request requeued in ToExpand queue.");
-    }
+    repackRequest->m_dbReq->requeueInToExpandQueue(lc);
+    lc.log(log::INFO,"Repack Request requeued in ToExpand queue.");
   } else {
+    if(totalStatsFile.totalFilesToRetrieve == 0 || nbRetrieveSubrequestsQueued == 0){
+      //If no files have been retrieved, the repack buffer has to be deleted
+      //TODO : in case of Repack tape repair, we should not try to delete the buffer
+      deleteRepackBuffer(std::move(dir));      
+    }
     repackRequest->m_dbReq->expandDone();
     lc.log(log::INFO,"In Scheduler::expandRepackRequest(), repack request expanded");
   }
@@ -742,6 +803,19 @@ void Scheduler::removeDrive(const common::dataStructures::SecurityIdentity &cliI
   lc.log(log::INFO, "In Scheduler::removeDrive(): success.");   
 }
 
+//------------------------------------------------------------------------------
+// reportDriveConfig
+//------------------------------------------------------------------------------
+void Scheduler::reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine,const cta::tape::daemon::TapedConfiguration& tapedConfig,log::LogContext& lc) {
+  utils::Timer t;
+  m_db.reportDriveConfig(tpConfigLine,tapedConfig,lc);
+  auto schedulerDbTime = t.secs();
+  log::ScopedParamContainer spc(lc);
+   spc.add("drive", tpConfigLine.unitName)
+      .add("schedulerDbTime", schedulerDbTime);
+  lc.log(log::INFO,"In Scheduler::reportDriveConfig(): success.");
+}
+
 //------------------------------------------------------------------------------
 // setDesiredDriveState
 //------------------------------------------------------------------------------
@@ -1021,6 +1095,25 @@ cta::optional<common::dataStructures::LogicalLibrary> Scheduler::getLogicalLibra
   return ret;
 }
 
+void Scheduler::deleteRepackBuffer(std::unique_ptr<cta::disk::Directory> repackBuffer) {
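+  //No-op when the buffer was never created (nullptr) or has already disappeared.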
+  if(repackBuffer != nullptr && repackBuffer->exist()){
+    repackBuffer->rmdir();
+  }
+}
+
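+//Counts the "active" (non-superseded) tape copies of the given archive file.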
+uint64_t Scheduler::getNbFilesAlreadyArchived(const common::dataStructures::ArchiveFile& archiveFile) {
+  uint64_t nbFilesAlreadyArchived = 0;
+  for(auto &tf: archiveFile.tapeFiles){
+    if(tf.supersededByVid == ""){
+      //We only want the "active" copies of the archive file
+      nbFilesAlreadyArchived++;
+    }
+  }
+  return nbFilesAlreadyArchived;
+}
+
 //------------------------------------------------------------------------------
 // getNextMountDryRun
 //------------------------------------------------------------------------------
diff --git a/scheduler/Scheduler.hpp b/scheduler/Scheduler.hpp
index 341052b588c8590732ac0b10d75c1a3069c83001..d378ce5a398ff468ae7bd013d9ff9ea5754245d5 100644
--- a/scheduler/Scheduler.hpp
+++ b/scheduler/Scheduler.hpp
@@ -50,6 +50,9 @@
 #include "objectstore/RetrieveRequest.hpp"
 #include "objectstore/ArchiveRequest.hpp"
 
+#include "tapeserver/daemon/TapedConfiguration.hpp"
+
+#include "disk/DiskFile.hpp"
 #include "disk/DiskReporter.hpp"
 #include "disk/DiskReporterFactory.hpp"
 
@@ -72,6 +75,8 @@ class RetrieveJob;
  * The scheduler is the unique entry point to the central storage for taped. It is 
  * 
  */
+CTA_GENERATE_EXCEPTION_CLASS(ExpandRepackRequestException);
+
 class Scheduler {
   
 public:
@@ -200,7 +205,7 @@ public:
     const bool force);
 
   void queueRepack(const common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, 
-    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc);
+    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy,const bool disabledTape, log::LogContext & lc);
   void cancelRepack(const cta::common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, log::LogContext & lc);
   std::list<cta::common::dataStructures::RepackInfo> getRepacks();
   cta::common::dataStructures::RepackInfo getRepack(const std::string &vid);
@@ -253,6 +258,13 @@ public:
    */
   void reportDriveStatus(const common::dataStructures::DriveInfo& driveInfo, cta::common::dataStructures::MountType type, 
     cta::common::dataStructures::DriveStatus status, log::LogContext & lc);
+  
+  /**
+   * Reports the configuration of the drive to the objectstore.
+   * @param driveName the name of the drive to report the config to the objectstore
+   * @param tapedConfig the config of the drive to report to the objectstore.
+   */
+  void reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine, const cta::tape::daemon::TapedConfiguration& tapedConfig, log::LogContext& lc);
 
   /**
    * Dumps the states of all drives for display
@@ -301,6 +313,10 @@ private:
   
   cta::optional<common::dataStructures::LogicalLibrary> getLogicalLibrary(const std::string &libraryName, double &getLogicalLibraryTime);
   
+  void deleteRepackBuffer(std::unique_ptr<cta::disk::Directory> repackBuffer);
+  
+  uint64_t getNbFilesAlreadyArchived(const common::dataStructures::ArchiveFile& archiveFile);
+  
 public:
   /**
    * Run the mount decision logic lock free, so we have no contention in the 
diff --git a/scheduler/SchedulerDatabase.hpp b/scheduler/SchedulerDatabase.hpp
index cbadd29cc2b1b56a2bd22affe1db55f1a29f9dcb..1abc8dac00828faafc27448e59adf477ef51a085 100644
--- a/scheduler/SchedulerDatabase.hpp
+++ b/scheduler/SchedulerDatabase.hpp
@@ -39,6 +39,7 @@
 #include "common/log/LogContext.hpp"
 #include "catalogue/TapeForWriting.hpp"
 #include "scheduler/TapeMount.hpp"
+#include "tapeserver/daemon/TapedConfiguration.hpp"
 
 #include <list>
 #include <limits>
@@ -398,7 +399,7 @@ public:
 
   /*============ Repack management: user side ================================*/
   virtual void queueRepack(const std::string & vid, const std::string & bufferURL,
-      common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc) = 0;
+      common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, const bool forceDisabledTape, log::LogContext & lc) = 0;
   virtual std::list<common::dataStructures::RepackInfo> getRepackInfo() = 0;
   virtual common::dataStructures::RepackInfo getRepackInfo(const std::string & vid) = 0;
   virtual void cancelRepack(const std::string & vid, log::LogContext & lc) = 0;
@@ -461,7 +462,11 @@ public:
       //TODO : userprovidedfiles and userprovidedbytes
     };
     
-    virtual void addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, 
+    /**
+     * Add Retrieve subrequests to the repack request and update its statistics
+     * @return the number of retrieve subrequests queued
+     */    
+    virtual uint64_t addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests, 
       cta::common::dataStructures::ArchiveRoute::FullMap & archiveRoutesMap, uint64_t maxFSeqLowBound, 
       const uint64_t maxAddedFSeq, const TotalStatsFiles &totalStatsFiles, disk::DiskSystemList diskSystemList,
       log::LogContext & lc) = 0;
@@ -604,19 +609,26 @@ public:
         if (activityNameAndWeightedMountCount.value().weightedMountCount < other.activityNameAndWeightedMountCount.value().weightedMountCount)
           return false;
       }
-      if(minRequestAge < other.minRequestAge)
+      //The smaller the oldest job start time is, the bigger the age is, hence the inverted comparison
+      if(oldestJobStartTime > other.oldestJobStartTime)
 	return true;
-      if(minRequestAge > other.minRequestAge)
+      if(oldestJobStartTime < other.oldestJobStartTime)
 	return false;
       /**
        * For the tests, we try to have the priority by 
-       * alphabetical order : vid1 should be treated before vid2,
+       * alphabetical order: vid1/tapepool1 should be treated before vid2/tapepool2,
        * so if this->vid < other.vid : then this > other.vid, so return false
        */
       if(vid < other.vid)
 	return false;
       if(vid > other.vid)
 	return true;
+      
+      if(tapePool < other.tapePool)
+	return false;
+      if(tapePool > other.tapePool)
+	return true;
+      
       return false;
     }
   };
@@ -756,6 +768,8 @@ public:
     double latestBandwidth = std::numeric_limits<double>::max(),
     const std::string & vid = "",
     const std::string & tapepool = "") = 0;
+  
+  virtual void reportDriveConfig(const cta::tape::daemon::TpconfigLine& tpConfigLine, const cta::tape::daemon::TapedConfiguration& tapedConfig,log::LogContext& lc) = 0;
 }; // class SchedulerDatabase
 
 } // namespace cta
diff --git a/scheduler/SchedulerDatabaseFactory.hpp b/scheduler/SchedulerDatabaseFactory.hpp
index 7063d8929cc8470920972e02806423a9dccde061..f94e07d28fc1921b24d657498d373d0db8647c1a 100644
--- a/scheduler/SchedulerDatabaseFactory.hpp
+++ b/scheduler/SchedulerDatabaseFactory.hpp
@@ -19,6 +19,7 @@
 #pragma once
 
 #include <memory>
+#include "catalogue/Catalogue.hpp"
 
 namespace cta {
 
@@ -42,7 +43,7 @@ public:
    *
    * @return A newly created scheduler database object.
    */
-  virtual std::unique_ptr<SchedulerDatabase> create() const = 0;
+  virtual std::unique_ptr<SchedulerDatabase> create(std::unique_ptr<cta::catalogue::Catalogue>& catalogue) const = 0;
 
 }; // class SchedulerDatabaseFactory
 
diff --git a/scheduler/SchedulerDatabaseTest.cpp b/scheduler/SchedulerDatabaseTest.cpp
index b1bf033919133d5c55c17bd55aa4a069f552788d..3fa680ab30fb02e38490d8441689e533eb7b6032 100644
--- a/scheduler/SchedulerDatabaseTest.cpp
+++ b/scheduler/SchedulerDatabaseTest.cpp
@@ -25,6 +25,7 @@
 #include "objectstore/BackendRados.hpp"
 #include "common/log/DummyLogger.hpp"
 #include "common/range.hpp"
+#include "common/make_unique.hpp"
 #ifdef STDOUT_LOGGING
 #include "common/log/StdoutLogger.hpp"
 #endif
@@ -72,11 +73,13 @@ public:
     using namespace cta;
 
     const SchedulerDatabaseFactory &factory = GetParam().dbFactory;
-    m_db.reset(factory.create().release());
+    m_catalogue = cta::make_unique<cta::catalogue::DummyCatalogue>();
+    m_db.reset(factory.create(m_catalogue).release());
   }
 
   virtual void TearDown() {
     m_db.reset();
+    m_catalogue.reset();
   }
 
   cta::SchedulerDatabase &getDb() {
@@ -112,6 +115,8 @@ private:
   SchedulerDatabaseTest & operator= (const SchedulerDatabaseTest &);
 
   std::unique_ptr<cta::SchedulerDatabase> m_db;
+  
+  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
 
 }; // class SchedulerDatabaseTest
 
diff --git a/scheduler/SchedulerTest.cpp b/scheduler/SchedulerTest.cpp
index 1bf432a28ea8ca130d0488ed2a01d3fd17d2e24b..0d35428ba4da9a1f735bab6af76b7e52c73509b1 100644
--- a/scheduler/SchedulerTest.cpp
+++ b/scheduler/SchedulerTest.cpp
@@ -114,18 +114,18 @@ public:
 
     // We do a deep reference to the member as the C++ compiler requires the function to be already defined if called implicitly
     const auto &factory = GetParam().dbFactory;
+    const uint64_t nbConns = 1;
+    const uint64_t nbArchiveFileListingConns = 1;
+    //m_catalogue = cta::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns);
+    m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns);
     // Get the OStore DB from the factory
-    auto osdb = std::move(factory.create());
+    auto osdb = std::move(factory.create(m_catalogue));
     // Make sure the type of the SchedulerDatabase is correct (it should be an OStoreDBWrapperInterface)
     dynamic_cast<cta::objectstore::OStoreDBWrapperInterface*>(osdb.get());
     // We know the cast will not fail, so we can safely do it (otherwise we could leak memory)
     m_db.reset(dynamic_cast<cta::objectstore::OStoreDBWrapperInterface*>(osdb.release()));
-
-    const uint64_t nbConns = 1;
-    const uint64_t nbArchiveFileListingConns = 1;
-    //m_catalogue = cta::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns);
-    m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns);
     m_scheduler = cta::make_unique<Scheduler>(*m_catalogue, *m_db, 5, 2*1000*1000);
+    objectstore::Helpers::flushRetrieveQueueStatisticsCache();
   }
 
   virtual void TearDown() {
@@ -248,11 +248,12 @@ protected:
   const std::string s_diskInstance = "disk_instance";
   const std::string s_storageClassName = "TestStorageClass";
   const cta::common::dataStructures::SecurityIdentity s_adminOnAdminHost = { "admin1", "host1" };
-  const std::string s_tapePoolName = "TestTapePool";
+  const std::string s_tapePoolName = "TapePool";
   const std::string s_libraryName = "TestLogicalLibrary";
   const std::string s_vid = "TestVid";
   const std::string s_mediaType = "TestMediaType";
   const std::string s_vendor = "TestVendor";
+  const bool s_defaultRepackDisabledTapeFlag = false;
   //TempFile m_tempSqliteFile;
 
 }; // class SchedulerTest
@@ -475,7 +476,7 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     auto & osdb=getSchedulerDB();
     auto mi=osdb.getMountInfo(lc);
     ASSERT_EQ(1, mi->existingOrNextMounts.size());
-    ASSERT_EQ("TestTapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
     ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
     std::unique_ptr<cta::ArchiveMount> archiveMount;
     archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
@@ -491,8 +492,9 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     archiveJob->validate();
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
     std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::ArchiveJob >> failedToReportArchiveJobs;
     sDBarchiveJobBatch.emplace(std::move(archiveJob));
-    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, lc);
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
     archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
     ASSERT_EQ(0, archiveJobBatch.size());
     archiveMount->complete();
@@ -674,7 +676,7 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
     auto & osdb=getSchedulerDB();
     auto mi=osdb.getMountInfo(lc);
     ASSERT_EQ(1, mi->existingOrNextMounts.size());
-    ASSERT_EQ("TestTapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
     ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
     std::unique_ptr<cta::ArchiveMount> archiveMount;
     archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
@@ -690,8 +692,9 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
     archiveJob->validate();
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
     std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::ArchiveJob >> failedToReportArchiveJobs;
     sDBarchiveJobBatch.emplace(std::move(archiveJob));
-    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, lc);
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
     archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
     ASSERT_EQ(0, archiveJobBatch.size());
     archiveMount->complete();
@@ -924,7 +927,7 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
     auto & osdb=getSchedulerDB();
     auto mi=osdb.getMountInfo(lc);
     ASSERT_EQ(1, mi->existingOrNextMounts.size());
-    ASSERT_EQ("TestTapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
     ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
     std::unique_ptr<cta::ArchiveMount> archiveMount;
     archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
@@ -941,7 +944,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
     std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
     sDBarchiveJobBatch.emplace(std::move(archiveJob));
-    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, lc);
+    std::queue<std::unique_ptr<cta::ArchiveJob>> failedToReportArchiveJobs;
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
     archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
     ASSERT_EQ(0, archiveJobBatch.size());
     archiveMount->complete();
@@ -1280,14 +1284,14 @@ TEST_P(SchedulerTest, repack) {
   catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,false, notReadOnly, "Comment");
   
   //The queueing of a repack request should fail if the tape to repack is not full
-  ASSERT_THROW(scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,lc),cta::exception::UserError);
+  ASSERT_THROW(scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag,lc),cta::exception::UserError);
   //The queueing of a repack request in a vid that does not exist should throw an exception
-  ASSERT_THROW(scheduler.queueRepack(cliId, "NOT_EXIST", "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc),cta::exception::UserError);
+  ASSERT_THROW(scheduler.queueRepack(cliId, "NOT_EXIST", "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc),cta::exception::UserError);
   
   catalogue.setTapeFull(cliId,tape1,true);
   
   // Create and then cancel repack
-  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc);
   {
     auto repacks = scheduler.getRepacks();
     ASSERT_EQ(1, repacks.size());
@@ -1299,7 +1303,7 @@ TEST_P(SchedulerTest, repack) {
   // Recreate a repack and get it moved to ToExpand
   std::string tape2 = "Tape2";
   catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
-  scheduler.queueRepack(cliId, tape2, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+  scheduler.queueRepack(cliId, tape2, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc);
   {
     auto repacks = scheduler.getRepacks();
     ASSERT_EQ(1, repacks.size());
@@ -1340,13 +1344,13 @@ TEST_P(SchedulerTest, getNextRepackRequestToExpand) {
   catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
   
   //Queue the first repack request
-  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,  lc);
+  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc);
   
   std::string tape2 = "Tape2";
   catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
   
   //Queue the second repack request
-  scheduler.queueRepack(cliId,tape2,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::AddCopiesOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+  scheduler.queueRepack(cliId,tape2,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::AddCopiesOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag,lc);
   
   //Test the repack request queued has status Pending
   ASSERT_EQ(scheduler.getRepack(tape1).status,common::dataStructures::RepackInfo::Status::Pending);
@@ -1478,7 +1482,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   {
     for(uint64_t i = 0; i < nbTapesToRepack ; ++i) {
-      scheduler.queueRepack(admin,allVid.at(i),"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+      scheduler.queueRepack(admin,allVid.at(i),"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag, lc);
     }
     scheduler.waitSchedulerDbSubthreadsComplete();
     //scheduler.waitSchedulerDbSubthreadsComplete();
@@ -1795,7 +1799,7 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag,lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
  
     log::TimingList tl;
@@ -2034,7 +2038,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag,lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
     //scheduler.waitSchedulerDbSubthreadsComplete();
  
@@ -2280,7 +2284,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
 
     log::TimingList tl;
@@ -2574,7 +2578,7 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   //one retrieve request
   scheduler.waitSchedulerDbSubthreadsComplete();
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
 
     log::TimingList tl;
@@ -2598,6 +2602,135 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   }
 }
 
+TEST_P(SchedulerTest, expandRepackRequestDisabledTape) {
+  using namespace cta;
+  using namespace cta::objectstore;
+  unitTests::TempDirectory tempDirectory;
+  auto &catalogue = getCatalogue();
+  auto &scheduler = getScheduler();
+  auto &schedulerDB = getSchedulerDB();
+
+  cta::objectstore::Backend& backend = schedulerDB.getBackend();
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+  
+  //Create an agent to represent this test process
+  cta::objectstore::AgentReference agentReference("expandRepackRequestTest", dl);
+  cta::objectstore::Agent agent(agentReference.getAgentAddress(), backend);
+  agent.initialize();
+  agent.setTimeout_us(0);
+  agent.insertAndRegisterSelf(lc);
+  
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = true;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
+  cta::common::dataStructures::SecurityIdentity admin;
+  admin.username = "admin_user_name";
+  admin.host = "admin_host";
+  
+  //Create a logical library in the catalogue
+  const bool logicalLibraryIsDisabled = false;
+  catalogue.createLogicalLibrary(admin, s_libraryName, logicalLibraryIsDisabled, "Create logical library");
+  
+  std::ostringstream ossVid;
+  ossVid << s_vid << "_" << 1;
+  std::string vid = ossVid.str();
+  catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  
+  //Create a storage class in the catalogue
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = s_diskInstance;
+  storageClass.name = s_storageClassName;
+  storageClass.nbCopies = 2;
+  storageClass.comment = "Create storage class";
+
+  const std::string tapeDrive = "tape_drive";
+  const uint64_t nbArchiveFilesPerTape = 10;
+  const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
+  
+  //Simulate the writing of 10 files to 1 tape in the catalogue
+  std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  {
+    uint64_t archiveFileId = 1;
+    std::string currentVid = vid;
+    for(uint64_t j = 1; j <= nbArchiveFilesPerTape; ++j) {
+      std::ostringstream diskFileId;
+      diskFileId << (12345677 + archiveFileId);
+      std::ostringstream diskFilePath;
+      diskFilePath << "/public_dir/public_file_"<<1<<"_"<< j;
+      auto fileWrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+      auto & fileWritten = *fileWrittenUP;
+      fileWritten.archiveFileId = archiveFileId++;
+      fileWritten.diskInstance = storageClass.diskInstance;
+      fileWritten.diskFileId = diskFileId.str();
+      fileWritten.diskFilePath = diskFilePath.str();
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
+      fileWritten.size = archiveFileSize;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
+      fileWritten.storageClassName = s_storageClassName;
+      fileWritten.vid = currentVid;
+      fileWritten.fSeq = j;
+      fileWritten.blockId = j * 100;
+      fileWritten.size = archiveFileSize;
+      fileWritten.copyNb = 1;
+      fileWritten.tapeDrive = tapeDrive;
+      tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
+    }
+    //update the DB tape
+    catalogue.filesWrittenToTape(tapeFilesWrittenCopy1);
+    tapeFilesWrittenCopy1.clear();
+  }
+  //Test that the expansion requeues the Repack request after the creation of
+  //one retrieve request
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  {
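+    //The repack is queued with the default (false) forceDisabledTape flag, so the disabled tape must never be mounted.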
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+
+    log::TimingList tl;
+    utils::Timer t;
+
+    scheduler.promoteRepackRequestsToToExpand(lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+
+    auto repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+    scheduler.expandRepackRequest(repackRequestToExpand,tl,t,lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    
+    ASSERT_EQ(vid,repackRequestToExpand->getRepackInfo().vid);
+    repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+    ASSERT_EQ(nullptr,repackRequestToExpand);
+  }
+  {
+    //Check that no mount exists because the tape is disabled
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+    ASSERT_EQ(nullptr, mount.get());
+    
+    //Check that the repack request has failed, with 10 failed retrieve jobs and 0 failed archive jobs (the archive jobs were never created, so they cannot be failed)
+    objectstore::RootEntry re(schedulerDB.getBackend());
+    re.fetchNoLock();
+    objectstore::RepackIndex ri(re.getRepackIndexAddress(), schedulerDB.getBackend());
+    ri.fetchNoLock();
+    
+    objectstore::RepackRequest rr(ri.getRepackRequestAddress(vid),schedulerDB.getBackend());
+    rr.fetchNoLock();
+    
+    ASSERT_EQ(10,rr.getStats().at(objectstore::RepackRequest::StatsType::RetrieveFailure).files);
+    ASSERT_EQ(0,rr.getStats().at(objectstore::RepackRequest::StatsType::ArchiveFailure).files);
+    ASSERT_EQ(common::dataStructures::RepackInfo::Status::Failed,rr.getInfo().status);
+  }
+}
+
 TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
   using namespace cta;
 
@@ -2703,7 +2836,7 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
       auto & osdb=getSchedulerDB();
       auto mi=osdb.getMountInfo(lc);
       ASSERT_EQ(1, mi->existingOrNextMounts.size());
-      ASSERT_EQ("TestTapePool", mi->existingOrNextMounts.front().tapePool);
+      ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
       std::unique_ptr<cta::ArchiveMount> archiveMount;
       archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
       ASSERT_NE(nullptr, archiveMount.get());
@@ -2719,8 +2852,9 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
       archiveJob->validate();
       std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
       std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+      std::queue<std::unique_ptr <cta::ArchiveJob >> failedToReportArchiveJobs;
       sDBarchiveJobBatch.emplace(std::move(archiveJob));
-      archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, lc);
+      archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, failedToReportArchiveJobs, lc);
       // Mark the tape full so we get one file per tape.
       archiveMount->setTapeFull();
       archiveMount->complete();
@@ -2842,6 +2976,511 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
   }
 }
 
+TEST_P(SchedulerTest, expandRepackRequestAddCopiesOnly) {
+  using namespace cta;
+  using namespace cta::objectstore;
+  unitTests::TempDirectory tempDirectory;
+  auto &catalogue = getCatalogue();
+  auto &scheduler = getScheduler();
+  auto &schedulerDB = getSchedulerDB();
+  cta::objectstore::Backend& backend = schedulerDB.getBackend();
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+  
+  //Create an agent to represent this test process
+  cta::objectstore::AgentReference agentReference("expandRepackRequestTest", dl);
+  cta::objectstore::Agent agent(agentReference.getAgentAddress(), backend);
+  agent.initialize();
+  agent.setTimeout_us(0);
+  agent.insertAndRegisterSelf(lc);
+  
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = true;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
+  cta::common::dataStructures::SecurityIdentity admin;
+  admin.username = "admin_user_name";
+  admin.host = "admin_host";
+  
+  //Create a logical library in the catalogue
+  const bool logicalLibraryIsDisabled = false;
+  catalogue.createLogicalLibrary(admin, s_libraryName, logicalLibraryIsDisabled, "Create logical library");
+  
+  //Create the source tape
+  std::string vid = "vidSource";
+  catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  
+  //Create two different destination tapepools
+  std::string tapepool2Name = "tapepool2";
+  const cta::optional<std::string> supply;
+  catalogue.createTapePool(admin,tapepool2Name,"vo",1,false,supply,"comment");
+  
+  std::string tapepool3Name = "tapepool3";
+  catalogue.createTapePool(admin,tapepool3Name,"vo",1,false,supply,"comment"); 
+  
+  //Update the storage class in the catalogue so that it requires 3 copies
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = s_diskInstance;
+  storageClass.name = s_storageClassName;
+  storageClass.nbCopies = 3;
+  storageClass.comment = "Create storage class";
+  catalogue.modifyStorageClassNbCopies(admin,storageClass.diskInstance,storageClass.name,storageClass.nbCopies);
+  
+  //Create the two archive routes for the new copies
+  catalogue.createArchiveRoute(admin,storageClass.diskInstance,storageClass.name,2,tapepool2Name,"ArchiveRoute2");
+  catalogue.createArchiveRoute(admin,storageClass.diskInstance,storageClass.name,3,tapepool3Name,"ArchiveRoute3");
+  
+  //Create two other destination tapes
+  std::string vidDestination1 = "vidDestination1";
+  catalogue.createTape(s_adminOnAdminHost,vidDestination1, s_mediaType, s_vendor, s_libraryName, tapepool2Name, capacityInBytes,
+    disabledValue, false, readOnlyValue, comment);
+  
+  std::string vidDestination2 = "vidDestination2";
+  catalogue.createTape(s_adminOnAdminHost,vidDestination2, s_mediaType, s_vendor, s_libraryName, tapepool3Name, capacityInBytes,
+    disabledValue, false, readOnlyValue, comment);
+
+  const std::string tapeDrive = "tape_drive";
+  const uint64_t nbArchiveFilesPerTape = 10;
+  const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
+  
+  //Simulate the writing of 10 files to the source tape in the catalogue
+  std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  {
+    uint64_t archiveFileId = 1;
+    std::string currentVid = vid;
+    for(uint64_t j = 1; j <= nbArchiveFilesPerTape; ++j) {
+      std::ostringstream diskFileId;
+      diskFileId << (12345677 + archiveFileId);
+      std::ostringstream diskFilePath;
+      diskFilePath << "/public_dir/public_file_"<<1<<"_"<< j;
+      auto fileWrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+      auto & fileWritten = *fileWrittenUP;
+      fileWritten.archiveFileId = archiveFileId++;
+      fileWritten.diskInstance = storageClass.diskInstance;
+      fileWritten.diskFileId = diskFileId.str();
+      fileWritten.diskFilePath = diskFilePath.str();
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
+      fileWritten.size = archiveFileSize;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
+      fileWritten.storageClassName = s_storageClassName;
+      fileWritten.vid = currentVid;
+      fileWritten.fSeq = j;
+      fileWritten.blockId = j * 100;
+      fileWritten.copyNb = 1;
+      fileWritten.tapeDrive = tapeDrive;
+      tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
+    }
+    //Update the catalogue with the files written to tape
+    catalogue.filesWrittenToTape(tapeFilesWrittenCopy1);
+    tapeFilesWrittenCopy1.clear();
+  }
+  //Test that the expansion of the Repack request creates the Retrieve requests
+  //and requeues the Repack request
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  {
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::AddCopiesOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, s_defaultRepackDisabledTapeFlag,lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    
+    //Get the address of the Repack Request
+    cta::objectstore::RootEntry re(backend);
+    re.fetchNoLock();
+    
+    std::string repackQueueAddress = re.getRepackQueueAddress(RepackQueueType::Pending);
+    
+    cta::objectstore::RepackQueuePending repackQueuePending(repackQueueAddress,backend);
+    repackQueuePending.fetchNoLock();
+    
+    std::string repackRequestAddress = repackQueuePending.getCandidateList(1,{}).candidates.front().address;
+
+    log::TimingList tl;
+    utils::Timer t;
+
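+    //The repack request is queued as Pending; promote it to ToExpand so it can be picked up for expansion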
+    scheduler.promoteRepackRequestsToToExpand(lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+
+    auto repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    
+    ASSERT_EQ(vid,repackRequestToExpand->getRepackInfo().vid);
+    
+    scheduler.expandRepackRequest(repackRequestToExpand,tl,t,lc);
+    
+    {
+      cta::objectstore::RepackRequest rr(repackRequestAddress,backend);
+      rr.fetchNoLock();
+      //As the storage class has nbCopies = 3 and the 10 files already archived have only 1 copy in CTA,
+      //the repack request should have 20 files to archive
+      ASSERT_EQ(20,rr.getTotalStatsFile().totalFilesToArchive);
+      ASSERT_EQ(20*archiveFileSize, rr.getTotalStatsFile().totalBytesToArchive);
+      //The number of files to Retrieve remains the same
+      ASSERT_EQ(10,rr.getTotalStatsFile().totalFilesToRetrieve);
+      ASSERT_EQ(10*archiveFileSize,rr.getTotalStatsFile().totalBytesToRetrieve);
+    }
+  }
+  
+  {
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+
+    std::list<std::unique_ptr<cta::RetrieveJob>> executedJobs;
+    //For each file, check that the retrieve job is not null
+    for(uint64_t j = 1; j<=nbArchiveFilesPerTape; ++j)
+    {
+      auto jobBatch = retrieveMount->getNextJobBatch(1,archiveFileSize,lc);
+      retrieveJob.reset(jobBatch.front().release());
+      ASSERT_NE(nullptr, retrieveJob.get());
+      executedJobs.push_back(std::move(retrieveJob));
+    }
+    //Now report the retrieve jobs as completed
+    castor::tape::tapeserver::daemon::RecallReportPacker rrp(retrieveMount.get(),lc);
+
+    rrp.startThreads();
+    
+    //Report all jobs as succeeded
+    for(auto it = executedJobs.begin(); it != executedJobs.end(); ++it)
+    {
+      rrp.reportCompletedJob(std::move(*it));
+    }
+   
+    rrp.setDiskDone();
+    rrp.setTapeDone();
+
+    rrp.reportDriveStatus(cta::common::dataStructures::DriveStatus::Unmounting);
+
+    rrp.reportEndOfSession();
+    rrp.waitThread();
+
+    ASSERT_TRUE(rrp.allThreadsDone());
+  }
+  {
+    //Do the reporting of the RetrieveJobs; this will transform the Retrieve requests into Archive requests
+    while (true) {
+      auto rep = schedulerDB.getNextRepackReportBatch(lc);
+      if (nullptr == rep) break;
+      rep->report(lc);
+    }
+  }
+  //All retrieves have been successfully executed; check that there are 2 mounts for different vids, with 10 files
+  //per batch
+  {
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    {
+      //The first mount given by the scheduler should be for vidDestination1, which belongs to tapepool2
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
+
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+
+      {
+        auto jobBatch = archiveMount->getNextJobBatch(20,20 * archiveFileSize,lc);
+        ASSERT_EQ(10,jobBatch.size());
+        ASSERT_EQ(vidDestination1,archiveMount->getVid());
+      }
+    }
+    
+    {
+      //The second mount should be for vidDestination2, which belongs to tapepool3
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
+
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+
+      {
+        auto jobBatch = archiveMount->getNextJobBatch(20,20 * archiveFileSize,lc);
+        ASSERT_EQ(10,jobBatch.size());
+        ASSERT_EQ(vidDestination2,archiveMount->getVid());
+      }
+    }
+  }
+}
+
+TEST_P(SchedulerTest, expandRepackRequestMoveAndAddCopies) {
+  using namespace cta;
+  using namespace cta::objectstore;
+  unitTests::TempDirectory tempDirectory;
+  auto &catalogue = getCatalogue();
+  auto &scheduler = getScheduler();
+  auto &schedulerDB = getSchedulerDB();
+  cta::objectstore::Backend& backend = schedulerDB.getBackend();
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+  
+  //Create an agent to represent this test process
+  cta::objectstore::AgentReference agentReference("expandRepackRequestTest", dl);
+  cta::objectstore::Agent agent(agentReference.getAgentAddress(), backend);
+  agent.initialize();
+  agent.setTimeout_us(100);
+  agent.insertAndRegisterSelf(lc);
+  
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = true;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
+  cta::common::dataStructures::SecurityIdentity admin;
+  admin.username = "admin_user_name";
+  admin.host = "admin_host";
+  
+  //Create a logical library in the catalogue
+  const bool logicalLibraryIsDisabled = false;
+  catalogue.createLogicalLibrary(admin, s_libraryName, logicalLibraryIsDisabled, "Create logical library");
+  
+  //Create the source tape
+  std::string vid = "vidSource";
+  catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  
+  //Create two different destination tapepools
+  std::string tapepool2Name = "tapepool2";
+  const cta::optional<std::string> supply;
+  catalogue.createTapePool(admin,tapepool2Name,"vo",1,false,supply,"comment");
+  
+  std::string tapepool3Name = "tapepool3";
+  catalogue.createTapePool(admin,tapepool3Name,"vo",1,false,supply,"comment"); 
+  
+  //Update the storage class in the catalogue so that it requires 3 copies
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = s_diskInstance;
+  storageClass.name = s_storageClassName;
+  storageClass.nbCopies = 3;
+  storageClass.comment = "Create storage class";
+  catalogue.modifyStorageClassNbCopies(admin,storageClass.diskInstance,storageClass.name,storageClass.nbCopies);
+  
+  //Create the two archive routes for the new copies
+  catalogue.createArchiveRoute(admin,storageClass.diskInstance,storageClass.name,2,tapepool2Name,"ArchiveRoute2");
+  catalogue.createArchiveRoute(admin,storageClass.diskInstance,storageClass.name,3,tapepool3Name,"ArchiveRoute3");
+  
+  //Create two other destination tapes and one tape for the move workflow
+  std::string vidDestination1 = "vidDestination1";
+  catalogue.createTape(s_adminOnAdminHost,vidDestination1, s_mediaType, s_vendor, s_libraryName, tapepool2Name, capacityInBytes,
+    disabledValue, false, readOnlyValue, comment);
+  
+  std::string vidDestination2 = "vidDestination2";
+  catalogue.createTape(s_adminOnAdminHost,vidDestination2, s_mediaType, s_vendor, s_libraryName, tapepool3Name, capacityInBytes,
+    disabledValue, false, readOnlyValue, comment);
+  
+  std::string vidMove = "vidMove";
+  catalogue.createTape(s_adminOnAdminHost,vidMove, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
+    disabledValue, false, readOnlyValue, comment);
+  
+  const std::string tapeDrive = "tape_drive";
+  const uint64_t nbArchiveFilesPerTape = 10;
+  const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
+  
+  //Simulate the writing of 10 files to the source tape in the catalogue
+  std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  {
+    uint64_t archiveFileId = 1;
+    std::string currentVid = vid;
+    for(uint64_t j = 1; j <= nbArchiveFilesPerTape; ++j) {
+      std::ostringstream diskFileId;
+      diskFileId << (12345677 + archiveFileId);
+      std::ostringstream diskFilePath;
+      diskFilePath << "/public_dir/public_file_"<<1<<"_"<< j;
+      auto fileWrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+      auto & fileWritten = *fileWrittenUP;
+      fileWritten.archiveFileId = archiveFileId++;
+      fileWritten.diskInstance = storageClass.diskInstance;
+      fileWritten.diskFileId = diskFileId.str();
+      fileWritten.diskFilePath = diskFilePath.str();
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
+      fileWritten.size = archiveFileSize;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
+      fileWritten.storageClassName = s_storageClassName;
+      fileWritten.vid = currentVid;
+      fileWritten.fSeq = j;
+      fileWritten.blockId = j * 100;
+      fileWritten.copyNb = 1;
+      fileWritten.tapeDrive = tapeDrive;
+      tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
+    }
+    //Update the catalogue with the files written to tape
+    catalogue.filesWrittenToTape(tapeFilesWrittenCopy1);
+    tapeFilesWrittenCopy1.clear();
+  }
+  //Test that the expansion of the Repack request creates the Retrieve requests
+  //and requeues the Repack request
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  {
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveAndAddCopies,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    
+    //Get the address of the Repack Request
+    cta::objectstore::RootEntry re(backend);
+    re.fetchNoLock();
+    
+    std::string repackQueueAddress = re.getRepackQueueAddress(RepackQueueType::Pending);
+    
+    cta::objectstore::RepackQueuePending repackQueuePending(repackQueueAddress,backend);
+    repackQueuePending.fetchNoLock();
+    
+    std::string repackRequestAddress = repackQueuePending.getCandidateList(1,{}).candidates.front().address;
+
+    log::TimingList tl;
+    utils::Timer t;
+
+    scheduler.promoteRepackRequestsToToExpand(lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+
+    auto repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    
+    ASSERT_EQ(vid,repackRequestToExpand->getRepackInfo().vid);
+    
+    scheduler.expandRepackRequest(repackRequestToExpand,tl,t,lc);
+    
+    {
+      cta::objectstore::RepackRequest rr(repackRequestAddress,backend);
+      rr.fetchNoLock();
+      //As the storage class has nbCopies = 3, each of the 10 files gets 1 moved copy and 2 additional copies,
+      //so the repack request should have 30 files to archive
+      ASSERT_EQ(30,rr.getTotalStatsFile().totalFilesToArchive);
+      ASSERT_EQ(30*archiveFileSize, rr.getTotalStatsFile().totalBytesToArchive);
+      //The number of files to Retrieve remains the same
+      ASSERT_EQ(10,rr.getTotalStatsFile().totalFilesToRetrieve);
+      ASSERT_EQ(10*archiveFileSize,rr.getTotalStatsFile().totalBytesToRetrieve);
+    }
+  }
+  
+  {
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+
+    std::list<std::unique_ptr<cta::RetrieveJob>> executedJobs;
+    //For each file, check that the retrieve job is not null
+    for(uint64_t j = 1; j<=nbArchiveFilesPerTape; ++j)
+    {
+      auto jobBatch = retrieveMount->getNextJobBatch(1,archiveFileSize,lc);
+      retrieveJob.reset(jobBatch.front().release());
+      ASSERT_NE(nullptr, retrieveJob.get());
+      executedJobs.push_back(std::move(retrieveJob));
+    }
+    //Now report the retrieve jobs as completed
+    castor::tape::tapeserver::daemon::RecallReportPacker rrp(retrieveMount.get(),lc);
+
+    rrp.startThreads();
+    
+    //Report all jobs as succeeded
+    for(auto it = executedJobs.begin(); it != executedJobs.end(); ++it)
+    {
+      rrp.reportCompletedJob(std::move(*it));
+    }
+   
+    rrp.setDiskDone();
+    rrp.setTapeDone();
+
+    rrp.reportDriveStatus(cta::common::dataStructures::DriveStatus::Unmounting);
+
+    rrp.reportEndOfSession();
+    rrp.waitThread();
+
+    ASSERT_TRUE(rrp.allThreadsDone());
+  }
+  {
+    //Do the reporting of the RetrieveJobs; this will transform the Retrieve requests into Archive requests
+    while (true) {
+      auto rep = schedulerDB.getNextRepackReportBatch(lc);
+      if (nullptr == rep) break;
+      rep->report(lc);
+    }
+  }
+  //All retrieves have been successfully executed; check that there are 3 mounts for different vids, with 10 files
+  //per batch
+  {
+    scheduler.waitSchedulerDbSubthreadsComplete();
+    {
+      //The first mount given by the scheduler should be for vidMove, which belongs to the same tape pool as the repacked tape (the move part of the workflow)
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
+
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+
+      {
+        auto jobBatch = archiveMount->getNextJobBatch(20,20 * archiveFileSize,lc);
+        ASSERT_EQ(10,jobBatch.size());
+        ASSERT_EQ(vidMove,archiveMount->getVid());
+      }
+    }
+    
+    {
+      //The second mount should be for vidDestination1, which belongs to tapepool2
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
+
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+
+      {
+        auto jobBatch = archiveMount->getNextJobBatch(20,20 * archiveFileSize,lc);
+        ASSERT_EQ(10,jobBatch.size());
+        ASSERT_EQ(vidDestination1,archiveMount->getVid());
+      }
+    }
+    
+    {
+      //The third mount should be for vidDestination2, which belongs to tapepool3
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
+
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+
+      {
+        auto jobBatch = archiveMount->getNextJobBatch(20,20 * archiveFileSize,lc);
+        ASSERT_EQ(10,jobBatch.size());
+        ASSERT_EQ(vidDestination2,archiveMount->getVid());
+      }
+    }
+  }
+}
 
 #undef TEST_MOCK_DB
 #ifdef TEST_MOCK_DB
diff --git a/scheduler/testingMocks/MockArchiveMount.hpp b/scheduler/testingMocks/MockArchiveMount.hpp
index 48593ff7cd003e3bfa76dcabc18bf149c0407ced..fac5e6a11cd7abfcc36c9e2cb288bd530f7efe54 100644
--- a/scheduler/testingMocks/MockArchiveMount.hpp
+++ b/scheduler/testingMocks/MockArchiveMount.hpp
@@ -50,7 +50,7 @@ namespace cta {
       }
       
       void reportJobsBatchTransferred(std::queue<std::unique_ptr<cta::ArchiveJob> >& successfulArchiveJobs, 
-          std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, cta::log::LogContext& logContext) override {
+          std::queue<cta::catalogue::TapeItemWritten> & skippedFiles, std::queue<std::unique_ptr<cta::ArchiveJob>>& failedToReportArchiveJobs, cta::log::LogContext& logContext) override {
         try {
           std::set<cta::catalogue::TapeItemWrittenPointer> tapeItemsWritten;
           std::list<std::unique_ptr<cta::ArchiveJob> > validatedSuccessfulArchiveJobs;
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
index 83184c131c203ff385d655058ee73abe637830e1..8da9b7cf33549269734f569ec379bf84d436beb5 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
@@ -131,7 +131,6 @@ public:
     using namespace cta;
 
     const DataTransferSessionTestParam &param = GetParam();
-    m_db = param.dbFactory.create();
     const uint64_t nbConns = 1;
     const uint64_t nbArchiveFileListingConns = 1;
 #ifdef USE_ORACLE_CATALOGUE
@@ -153,6 +152,7 @@ public:
     //m_catalogue = cta::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns);
     m_catalogue = cta::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns);
 #endif
+    m_db = param.dbFactory.create(m_catalogue);
     m_scheduler = cta::make_unique<Scheduler>(*m_catalogue, *m_db, 5, 2*1000*1000);
     
     strncpy(m_tmpDir, "/tmp/DataTransferSessionTestXXXXXX", sizeof(m_tmpDir));
diff --git a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
index 6f37b52595d4a0fabc5e1e1f286eb44db50c1137..6acac09a9650eb38ad7cb6d8ce7e7cb2431718f3 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
@@ -56,7 +56,7 @@ MigrationReportPacker::~MigrationReportPacker(){
 //reportCompletedJob
 //------------------------------------------------------------------------------ 
 void MigrationReportPacker::reportCompletedJob(
-std::unique_ptr<cta::ArchiveJob> successfulArchiveJob, cta::log::LogContext & lc) {
+  std::unique_ptr<cta::ArchiveJob> successfulArchiveJob, cta::log::LogContext & lc) {
   std::unique_ptr<Report> rep(new ReportSuccessful(std::move(successfulArchiveJob)));
   cta::log::ScopedParamContainer params(lc);
   params.add("type", "ReportSuccessful");
@@ -245,8 +245,18 @@ void MigrationReportPacker::ReportFlush::execute(MigrationReportPacker& reportPa
       reportPacker.m_lc.log(cta::log::INFO,"Received a flush report from tape, but had no file to report to client. Doing nothing.");
       return;
     }
-    reportPacker.m_archiveMount->reportJobsBatchTransferred(reportPacker.m_successfulArchiveJobs, reportPacker.m_skippedFiles, 
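+    // Jobs that could not be reported to the client are handed back in
+    // failedToReportArchiveJobs so they can be failed individually below.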
+    std::queue<std::unique_ptr<cta::ArchiveJob>> failedToReportArchiveJobs;
+    try{
+      reportPacker.m_archiveMount->reportJobsBatchTransferred(reportPacker.m_successfulArchiveJobs, reportPacker.m_skippedFiles, failedToReportArchiveJobs, 
         reportPacker.m_lc);
+    } catch(const cta::ArchiveMount::FailedMigrationRecallResult &ex){
+      while(!failedToReportArchiveJobs.empty()){
+        auto archiveJob = std::move(failedToReportArchiveJobs.front());
+        archiveJob->transferFailed(ex.getMessageValue(),reportPacker.m_lc);
+        failedToReportArchiveJobs.pop();
+      }
+      throw;
+    }
   } else {
     // This is an abnormal situation: we should never flush after an error!
     reportPacker.m_lc.log(cta::log::ALERT,"Received a flush after an error: sending file errors to client");
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeReadTask.hpp b/tapeserver/castor/tape/tapeserver/daemon/TapeReadTask.hpp
index 23a830da4b55ec4291b9dd899e99083a1f41bdd1..861bbe2cf6b34a080018163211084b8cb41d87ca 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TapeReadTask.hpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TapeReadTask.hpp
@@ -66,12 +66,14 @@ public:
 
     using cta::log::Param;
     
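+    // Remember whether this file is being recalled for a repack request, so
+    // the session statistics can be split between repack and user traffic.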
+    bool isRepack = m_retrieveJob->m_dbJob->isRepack;
     // Set the common context for all the coming logs (file info)
     cta::log::ScopedParamContainer params(lc);
     params.add("fileId", m_retrieveJob->archiveFile.archiveFileID)
           .add("BlockId", m_retrieveJob->selectedTapeFile().blockId)
           .add("fSeq", m_retrieveJob->selectedTapeFile().fSeq)
-          .add("dstURL", m_retrieveJob->retrieveRequest.dstURL);
+          .add("dstURL", m_retrieveJob->retrieveRequest.dstURL)
+          .add("isRepack",isRepack);
     
     // We will clock the stats for the file itself, and eventually add those
     // stats to the session's.
@@ -129,6 +131,11 @@ public:
         localStats.readWriteTime += timer.secs(cta::utils::Timer::resetCounter);
         auto blockSize = mb->m_payload.size();
         localStats.dataVolume += blockSize;
+        if(isRepack){
+          localStats.repackBytesCount += blockSize;
+        } else {
+          localStats.userBytesCount += blockSize;
+        }
         // Pass the block to the disk write task
         m_fifo.pushDataBlock(mb);
         mb=NULL;
@@ -143,6 +150,11 @@ public:
       localStats.headerVolume += TapeSessionStats::trailerVolumePerFile;
       // We now transmitted one file:
       localStats.filesCount++;
+      if(isRepack){
+        localStats.repackFilesCount++;
+      } else {
+        localStats.userFilesCount++;
+      }
       params.add("positionTime", localStats.positionTime)
             .add("readWriteTime", localStats.readWriteTime)
             .add("waitFreeMemoryTime",localStats.waitFreeMemoryTime)
@@ -156,7 +168,11 @@ public:
                      /1000/1000/localStats.totalTime:0)
             .add("payloadTransferSpeedMBps",
                      localStats.totalTime?1.0*localStats.dataVolume/1000/1000/localStats.totalTime:0)
-            .add("LBPMode", LBPMode);
+            .add("LBPMode", LBPMode)
+            .add("repackFilesCount",localStats.repackFilesCount)
+            .add("repackBytesCount",localStats.repackBytesCount)
+            .add("userFilesCount",localStats.userFilesCount)
+            .add("userBytesCount",localStats.userBytesCount);
       lc.log(cta::log::INFO, "File successfully read from tape");
       // Add the local counts to the session's
       stats.add(localStats);
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp b/tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp
index 05c08dfae930f4844dec3a75dcc9205b29869610..b6715c9cc6d7b12637b6781cacd295f655fce18a 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp
@@ -92,6 +92,18 @@ namespace daemon {
     /** Count of files actually transfered in the session. */
     uint64_t filesCount;
     
+    /** Count of files coming from repack retrieve requests transferred in the session. */
+    uint64_t repackFilesCount;
+
+    /** Count of files coming from user retrieve requests transferred in the session. */
+    uint64_t userFilesCount;
+
+    /** Count of bytes coming from repack retrieve requests transferred in the session. */
+    uint64_t repackBytesCount;
+
+    /** Count of bytes coming from user retrieve requests transferred in the session. */
+    uint64_t userBytesCount;
+    
     static const uint64_t headerVolumePerFile = 3*80;
     static const uint64_t trailerVolumePerFile = 3*80;
     
@@ -100,7 +112,8 @@ namespace daemon {
     readWriteTime(0.0), flushTime(0.0), unloadTime(0.0), unmountTime(0.0),
     encryptionControlTime(0.0), waitDataTime(0.0), waitFreeMemoryTime(0.0),
     waitInstructionsTime(0.0), waitReportingTime(0.0), totalTime(0.0),
-    deliveryTime(0.0), dataVolume(0), headerVolume(0), filesCount(0) {}
+    deliveryTime(0.0), dataVolume(0), headerVolume(0), filesCount(0), repackFilesCount(0), 
+    userFilesCount(0), repackBytesCount(0), userBytesCount(0) {}
     
     /** Accumulate contents of another stats block */
     void add(const TapeSessionStats& other) {
@@ -121,6 +134,10 @@ namespace daemon {
       dataVolume += other.dataVolume;
       headerVolume += other.headerVolume;
       filesCount += other.filesCount;
+      repackFilesCount += other.repackFilesCount;
+      userFilesCount += other.userFilesCount;
+      repackBytesCount += other.repackBytesCount;
+      userBytesCount += other.userBytesCount;
     }
   };
   
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TaskWatchDog.hpp b/tapeserver/castor/tape/tapeserver/daemon/TaskWatchDog.hpp
index 16dd263f9680063652b68ee00e5a7426f8db7060..3309d1d5011a11e72b09c6ba1a1958f8b40c1c1e 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TaskWatchDog.hpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TaskWatchDog.hpp
@@ -199,6 +199,12 @@ protected:
                 /1000/1000/totalTime:0.0));
       paramList.push_back(Param("driveTransferSpeedMBps", totalTime?1.0*(m_stats.dataVolume+m_stats.headerVolume)
                 /1000/1000/totalTime:0.0));
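+      // The repack/user counters are only accumulated on the recall path, so
+      // they are only reported for retrieve mounts.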
+      if(m_mount.getMountType() == cta::common::dataStructures::MountType::Retrieve){
+        paramList.push_back(Param("repackFilesCount",m_stats.repackFilesCount));
+        paramList.push_back(Param("userFilesCount",m_stats.userFilesCount));
+        paramList.push_back(Param("repackBytesCount",m_stats.repackBytesCount));
+        paramList.push_back(Param("userBytesCount",m_stats.userBytesCount));
+      }
       // Ship the logs to the initial process
       m_initialProcess.addLogParams(m_driveUnitName, paramList);
     }
diff --git a/tapeserver/daemon/DriveHandler.cpp b/tapeserver/daemon/DriveHandler.cpp
index 35f8bb9cd8f3d6005ba82ba6ea7f90c60a11eea4..82c030e63b389c95961bcc82ea17d340177144a0 100644
--- a/tapeserver/daemon/DriveHandler.cpp
+++ b/tapeserver/daemon/DriveHandler.cpp
@@ -1074,14 +1074,17 @@ int DriveHandler::runChild() {
         scheduler.reportDriveStatus(driveInfo, common::dataStructures::MountType::NoMount, common::dataStructures::DriveStatus::Down, lc);
         cta::common::dataStructures::SecurityIdentity securityIdentity;
         scheduler.setDesiredDriveState(securityIdentity, m_configLine.unitName, false /* down */, false /* no force down*/, lc);
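+        // Also report the drive and taped configuration to the scheduler, so it can be surfaced when listing drives.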
+        scheduler.reportDriveConfig(m_configLine,m_tapedConfig,lc);
       } catch (cta::exception::Exception & ex) {
-        params.add("Message", ex.getMessageValue());
+        params.add("Message", ex.getMessageValue())
+              .add("Backtrace",ex.backtrace());
         lc.log(log::CRIT, "In DriveHandler::runChild(): failed to set drive down");
         // This is a fatal error (failure to access the scheduler). Shut daemon down.
         driveHandlerProxy.reportState(tape::session::SessionState::Fatal, tape::session::SessionType::Undetermined, "");
         return castor::tape::tapeserver::daemon::Session::MARK_DRIVE_AS_DOWN;
       }
     }
+    
     castor::tape::tapeserver::daemon::DataTransferSession dataTransferSession(
       cta::utils::getShortHostname(),
       lc.logger(),
@@ -1092,7 +1095,7 @@ int DriveHandler::runChild() {
       capUtils,
       dataTransferConfig,
       scheduler);
-
+    
     auto ret = dataTransferSession.execute();
     agentHeartbeat.stopAndWaitThread();
     return ret;
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 051ac24e6bbffe8c37c3b6391f42ee2eb7d5c188..fd7308506026681fd3f92a69fda01c0818b81153 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -68,6 +68,7 @@ target_link_libraries(cta-rdbmsUnitTests
   pthread
   ${PROTOBUF3_LIBRARIES})
 
+set_property (TARGET cta-rdbmsUnitTests APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
 if (OCCI_SUPPORT)
   set_property (TARGET cta-rdbmsUnitTests APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 endif (OCCI_SUPPORT)
diff --git a/upgrade_db/OracleDbConn.hpp b/upgrade_db/OracleDbConn.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..15e37912b21fd63e821aaad1c4a6b5a37df8ccc1
--- /dev/null
+++ b/upgrade_db/OracleDbConn.hpp
@@ -0,0 +1,91 @@
+/*!
+ * @project        The CERN Tape Archive (CTA)
+ * @brief          Access Oracle DB for migration operations
+ * @copyright      Copyright 2019 CERN
+ * @license        This program is free software: you can redistribute it and/or modify
+ *                 it under the terms of the GNU General Public License as published by
+ *                 the Free Software Foundation, either version 3 of the License, or
+ *                 (at your option) any later version.
+ *
+ *                 This program is distributed in the hope that it will be useful,
+ *                 but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *                 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *                 GNU General Public License for more details.
+ *
+ *                 You should have received a copy of the GNU General Public License
+ *                 along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <common/make_unique.hpp>
+#include <rdbms/Login.hpp>
+#include <rdbms/ConnPool.hpp>
+
+
+namespace cta {
+namespace migration {
+
+/*!
+ * Manage a single database query over a single database connection
+ */
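+/*!
+ * Minimal usage sketch (the SQL statement, table and column names below are
+ * illustrative only, not taken from the migration code). Note that connect()
+ * must be given a connection string at least once, to initialise the shared
+ * connection pool:
+ *
+ *   cta::migration::OracleDbConn conn;
+ *   conn.connect("oracle:user/password@db");   // initialise the pool and take a connection
+ *   conn.query("SELECT VID FROM TAPE");        // executes the query and fetches the first row
+ *   while(!conn.isQueryEmpty()) {
+ *     std::string vid = conn.getResultColumnString("VID");
+ *     // ... process vid ...
+ *     if(!conn.nextRow()) break;               // advance, or stop once the result set is exhausted
+ *   }
+ */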
+class OracleDbConn
+{
+public:
+  OracleDbConn() : m_queryIsEmpty(true) {}
+
+  void connect(const std::string &dbconn = "", unsigned int max_num_conns = 1) {
+    // Initialise the connection pool
+    if(!dbconn.empty()) {
+      m_connPool.reset(new rdbms::ConnPool(rdbms::Login::parseString(dbconn), max_num_conns));
+    }
+
+    // Initialise this connection
+    m_conn = m_connPool->getConn();
+  }
+
+  void execute(const std::string &sqlString) {
+    auto sql = m_conn.createStmt(sqlString);
+    sql.executeNonQuery();
+    sql.getStmt().close();
+  }
+
+  void query(const std::string &sqlString) {
+    auto sql = m_conn.createStmt(sqlString);
+    m_rSet = cta::make_unique<rdbms::Rset>(sql.executeQuery());
+    m_queryIsEmpty = !m_rSet->next();
+  }
+
+  void reset() {
+    m_rSet.reset();
+    m_queryIsEmpty = true;
+  }
+
+  bool nextRow() {
+    if(m_rSet->next()) return true;
+    reset();
+    return false;
+  }
+
+  std::string getResultColumnString(const std::string &col) const {
+    return m_rSet->columnString(col);
+  }
+
+  uint64_t getResultColumnUint64(const std::string &col) const {
+    return m_rSet->columnUint64(col);
+  }
+
+  std::string getResultColumnBlob(const std::string &col) const {
+    return m_rSet->columnBlob(col);
+  }
+
+  bool isQueryEmpty() const {
+    return m_queryIsEmpty;
+  }
+
+private:
+  static std::unique_ptr<rdbms::ConnPool> m_connPool;    //!< The pool of connections to the database
+  rdbms::Conn m_conn;                                    //!< The connection we are using
+  std::unique_ptr<rdbms::Rset> m_rSet;                   //!< Result set for the last query executed
+  bool m_queryIsEmpty;                                   //!< Track whether the last query had an empty result set
+};
+
+}} // namespace cta::migration
diff --git a/upgrade_db/UpgradeDB.cpp b/upgrade_db/UpgradeDB.cpp
index d53f38227972200ad868f42e30462078cf57611e..fd37622bb82e35398a94ccc9964445d2e9646713 100644
--- a/upgrade_db/UpgradeDB.cpp
+++ b/upgrade_db/UpgradeDB.cpp
@@ -23,7 +23,7 @@
 #include <XrdSsiPbConfig.hpp>
 #include <common/exception/Exception.hpp>
 #include <common/checksum/ChecksumBlob.hpp>
-#include <migration/gRPC/OracleDbConn.hpp>
+#include "OracleDbConn.hpp"
 
 namespace cta {
 namespace migration {
diff --git a/xroot_plugins/XrdCtaDriveLs.hpp b/xroot_plugins/XrdCtaDriveLs.hpp
index 58aeb8000aa711978e99cd0eefdd512c9ea91e92..62aecbf139877cce1318cf0e415aed45bdc1b30d 100644
--- a/xroot_plugins/XrdCtaDriveLs.hpp
+++ b/xroot_plugins/XrdCtaDriveLs.hpp
@@ -105,7 +105,8 @@ int DriveLsStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
 
     auto &dr      = m_driveList.front();
     auto  dr_item = record.mutable_drls_item();
-
+    
+    dr_item->set_cta_version(dr.ctaVersion);
     dr_item->set_logical_library(dr.logicalLibrary);
     dr_item->set_drive_name(dr.driveName);
     dr_item->set_host(dr.host);
@@ -121,7 +122,17 @@ int DriveLsStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
     dr_item->set_time_since_last_update(time(nullptr)-dr.lastUpdateTime);
     dr_item->set_current_priority(dr.currentPriority);
     dr_item->set_current_activity(dr.currentActivityAndWeight ? dr.currentActivityAndWeight.value().activity : "");
-
+    dr_item->set_dev_file_name(dr.devFileName);
+    dr_item->set_raw_library_slot(dr.rawLibrarySlot);
+    
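+    // Stream the per-drive configuration entries (category, key, value, source) to the client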
+    auto driveConfig = dr_item->mutable_drive_config();
+    for(auto & driveConfigItem: dr.driveConfigItems){
+      auto driveConfigItemProto = driveConfig->Add();
+      driveConfigItemProto->set_category(driveConfigItem.category);
+      driveConfigItemProto->set_key(driveConfigItem.key);
+      driveConfigItemProto->set_value(driveConfigItem.value);
+      driveConfigItemProto->set_source(driveConfigItem.source);
+    }
     // set the time spent in the current state
     uint64_t drive_time = time(nullptr);
 
diff --git a/xroot_plugins/XrdSsiCtaRequestMessage.cpp b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
index 86d27f6bdd99bf5e8e570605014457bbb6ddf6d3..d84a42cc1f7345e9d89bc5ffac794564e587d67a 100644
--- a/xroot_plugins/XrdSsiCtaRequestMessage.cpp
+++ b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
@@ -460,9 +460,9 @@ void RequestMessage::processPREPARE(const cta::eos::Notification &notification,
    }
    
    // Activity value is a string. The parameter might be present or not.
-   try {
+   if(notification.file().xattr().find("activity") != notification.file().xattr().end()) {
      request.activity = notification.file().xattr().at("activity");
-   } catch (...) {}
+   }
 
    cta::utils::Timer t;
 
@@ -472,10 +472,9 @@ void RequestMessage::processPREPARE(const cta::eos::Notification &notification,
    // Create a log entry
    cta::log::ScopedParamContainer params(m_lc);
    params.add("fileId", request.archiveFileID).add("schedulerTime", t.secs());
-   try {
-     // Print out the received activity in the logs for the moment.
-     params.add("activity", notification.file().xattr().at("activity"));
-   } catch (...) {}
+   if(static_cast<bool>(request.activity)) {
+     params.add("activity", request.activity.value());
+   }
    m_lc.log(cta::log::INFO, "In RequestMessage::processPREPARE(): queued file for retrieve.");
 
    // Set response type
@@ -1131,10 +1130,12 @@ void RequestMessage::processRepack_Add(cta::xrd::Response &response)
    } else {
       type = cta::common::dataStructures::RepackInfo::Type::MoveAndAddCopies;
    }
+   
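+   // The DISABLED boolean option allows queueing a repack request for a tape that is disabled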
+   bool forceDisabledTape = has_flag(OptionBoolean::DISABLED);
 
    // Process each item in the list
    for(auto it = vid_list.begin(); it != vid_list.end(); ++it) {
-      m_scheduler.queueRepack(m_cliIdentity, *it, bufferURL,  type, mountPolicy , m_lc);
+      m_scheduler.queueRepack(m_cliIdentity, *it, bufferURL, type, mountPolicy, forceDisabledTape, m_lc);
    }
 
    response.set_type(cta::xrd::Response::RSP_SUCCESS);
diff --git a/xrootd-ssi-protobuf-interface b/xrootd-ssi-protobuf-interface
index 13695b0a06fc03c4c1e1afd9e8fc37a8503984f3..e40c45028c42a208cfd820fb8eec598314a35c23 160000
--- a/xrootd-ssi-protobuf-interface
+++ b/xrootd-ssi-protobuf-interface
@@ -1 +1 @@
-Subproject commit 13695b0a06fc03c4c1e1afd9e8fc37a8503984f3
+Subproject commit e40c45028c42a208cfd820fb8eec598314a35c23