diff --git a/.gitmodules b/.gitmodules
index 379fa04386848a16d5df28cf221b123b0a15e9a3..ba41b979ed63ed6879c2056f1483d14b52eb309d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,6 @@
 	path = xrootd-ssi-protobuf-interface
    url = https://:@gitlab.cern.ch:8443/eos/xrootd-ssi-protobuf-interface.git
 #   branch = stable
+[submodule "migration/grpc-proto"]
+	path = migration/grpc-proto
+	url = https://:@gitlab.cern.ch:8443/eos/grpc-proto.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b4565bf9925aa57455c2a4d0adcc99ca326fefff..77f6db411a432a4c43ab1dabce6c64d6015dae31 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -134,6 +134,10 @@ ELSE(DEFINED PackageOnly)
   set(XRD_SSI_PB_DIR ${PROJECT_SOURCE_DIR}/xrootd-ssi-protobuf-interface)
 
   add_subdirectory(eos_cta)
+
+  add_subdirectory(upgrade_db)
+  add_subdirectory(migration)
+
   add_subdirectory(cmdline)
   add_subdirectory(xroot_plugins)
 
@@ -147,7 +151,7 @@ ELSE(DEFINED PackageOnly)
   add_subdirectory(scheduler)
   add_subdirectory(tapeserver)
   add_subdirectory(XRootdSSiRmcd)
-  
+
   #Generate version information
   configure_file(${PROJECT_SOURCE_DIR}/version.hpp.in
     ${CMAKE_BINARY_DIR}/version.h)
@@ -178,7 +182,7 @@ endif (${COMPILE_PACKAGING} STREQUAL "1")
 configure_file(tests/valgrind.suppr tests/valgrind.suppr COPYONLY)
 configure_file(tests/helgrind.suppr tests/helgrind.suppr COPYONLY)
 set(VALGRIND valgrind)
-set(VALGRIND_OPTS --track-fds=yes --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --max-threads=1000)
+set(VALGRIND_OPTS --leak-check=full --demangle=yes --gen-suppressions=all --show-reachable=yes --error-exitcode=1 --max-threads=1000)
 set(VALGRIND_OPTS_W_SUPPR ${VALGRIND_OPTS} --suppressions=tests/valgrind.suppr)
 string (REPLACE ";" " " VALGRIND_OPTS_STR "${VALGRIND_OPTS}")
 
diff --git a/catalogue/ArchiveFileRow.cpp b/catalogue/ArchiveFileRow.cpp
index d6bdeb287985a6a8650e7d74a6b82b796d3ac806..d1f0b75f863f22ab1e2d2d590d7358a098ea452a 100644
--- a/catalogue/ArchiveFileRow.cpp
+++ b/catalogue/ArchiveFileRow.cpp
@@ -38,11 +38,10 @@ bool ArchiveFileRow::operator==(const ArchiveFileRow &rhs) const {
     diskInstance == rhs.diskInstance &&
     diskFileId == rhs.diskFileId &&
     diskFilePath == rhs.diskFilePath &&
-    diskFileUser == rhs.diskFileUser &&
-    diskFileGroup == rhs.diskFileGroup &&
+    diskFileOwnerUid == rhs.diskFileOwnerUid &&
+    diskFileGid == rhs.diskFileGid &&
     size == rhs.size &&
-    checksumType == rhs.checksumType &&
-    checksumValue == rhs.checksumValue &&
+    checksumBlob == rhs.checksumBlob &&
     storageClassName == rhs.storageClassName;
 }
 
@@ -56,10 +55,10 @@ std::ostream &operator<<(std::ostream &os, const ArchiveFileRow &obj) {
   "diskInstance=" << obj.diskInstance <<
   "diskFileId=" << obj.diskFileId <<
   "diskFilePath=" << obj.diskFilePath <<
-  "diskFileUser=" << obj.diskFileUser <<
-  "diskFileGroup=" << obj.diskFileGroup <<
+  "diskFileOwnerUid=" << obj.diskFileOwnerUid <<
+  "diskFileGid=" << obj.diskFileGid <<
   "size=" << obj.size <<
-  "checksumType=" << obj.checksumType << "checksumValue=" << obj.checksumValue <<
+  "checksumBlob=" << obj.checksumBlob <<
   "storageClassName=" << obj.storageClassName <<
   "}";
   return os;
diff --git a/catalogue/ArchiveFileRow.hpp b/catalogue/ArchiveFileRow.hpp
index 3e3a92600f90af1ff37f0a51982bf6b3f389dd94..c1ac9af922238a110adb5fbacec125f81bd5c53e 100644
--- a/catalogue/ArchiveFileRow.hpp
+++ b/catalogue/ArchiveFileRow.hpp
@@ -18,7 +18,7 @@
 
 #pragma once
 
-#include "common/checksum/Checksum.hpp"
+#include "common/checksum/ChecksumBlob.hpp"
 
 #include <stdint.h>
 #include <string>
@@ -69,14 +69,14 @@ struct ArchiveFileRow {
   std::string diskFilePath;
 
   /**
-   * The user name of the source disk file within its host disk system.
+   * The user ID of the owner of the source disk file within its host disk system.
    */
-  std::string diskFileUser;
+  uint32_t diskFileOwnerUid;
 
   /**
-   * The group name of the source disk file within its host disk system.
+   * The group ID of the source disk file within its host disk system.
    */
-  std::string diskFileGroup;
+  uint32_t diskFileGid;
 
   /**
    * The uncompressed size of the tape file in bytes.
@@ -84,15 +84,10 @@ struct ArchiveFileRow {
   uint64_t size;
   
   /**
-   * Checksum type of the tape file content
+   * Set of checksum types and values
    */
-  std::string checksumType;
+  checksum::ChecksumBlob checksumBlob;
   
-  /**
-   * Checksum value of the file type content
-   */
-  std::string checksumValue;
-
   /**
    * The name of the file's storage class.
    */
diff --git a/catalogue/CMakeLists.txt b/catalogue/CMakeLists.txt
index 422faf569eb8e77b68dcb34cf97aa80b0d210ce3..58d78b5c4606c1404d09703a4d8910159dc8f885 100644
--- a/catalogue/CMakeLists.txt
+++ b/catalogue/CMakeLists.txt
@@ -15,8 +15,15 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 cmake_minimum_required (VERSION 2.6)
 
+find_package(Protobuf3 REQUIRED)
+
 include_directories (${ORACLE-INSTANTCLIENT_INCLUDE_DIRS})
 
+#
+# Compiled protocol buffers (for ChecksumBlob)
+#
+include_directories(${CMAKE_BINARY_DIR}/eos_cta ${PROTOBUF3_INCLUDE_DIRS})
+
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow")
 
 set (CATALOGUE_LIB_SRC_FILES
@@ -28,10 +35,7 @@ set (CATALOGUE_LIB_SRC_FILES
   CatalogueFactory.cpp
   CatalogueFactoryFactory.cpp
   CatalogueSchema.cpp
-  ChecksumTypeMismatch.cpp
-  ChecksumValueMismatch.cpp
   CmdLineTool.cpp
-  FileSizeMismatch.cpp
   InMemoryCatalogue.cpp
   InMemoryCatalogueFactory.cpp
   MysqlCatalogue.cpp
@@ -52,7 +56,9 @@ set (CATALOGUE_LIB_SRC_FILES
   SqliteCatalogue.cpp
   SqliteCatalogueFactory.cpp
   TapeForWriting.cpp
+  UserSpecifiedANonEmptyLogicalLibrary.cpp
   UserSpecifiedANonEmptyTape.cpp
+  UserSpecifiedANonExistentLogicalLibrary.cpp
   UserSpecifiedANonExistentTape.cpp
   UserSpecifiedANonExistentDiskSystem.cpp
   UserSpecifiedANonEmptyDiskSystemAfterDelete.cpp
@@ -77,11 +83,10 @@ set (CATALOGUE_LIB_SRC_FILES
 
 add_library (ctacatalogue SHARED
    ${CATALOGUE_LIB_SRC_FILES})
+
 set_property(TARGET ctacatalogue PROPERTY SOVERSION "${CTA_SOVERSION}")
 set_property(TARGET ctacatalogue PROPERTY   VERSION "${CTA_LIBVERSION}")
 
-set_property (TARGET ctacatalogue APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
-
 install (TARGETS ctacatalogue DESTINATION usr/${CMAKE_INSTALL_LIBDIR})
 
 target_link_libraries (ctacatalogue
@@ -94,22 +99,26 @@ add_custom_command (OUTPUT sqlite_catalogue_schema.sql mysql_catalogue_schema.sq
     ${CMAKE_CURRENT_SOURCE_DIR}/common_catalogue_schema.sql
     ${CMAKE_CURRENT_SOURCE_DIR}/sqlite_catalogue_schema_trailer.sql
     | sed 's/NUMERIC\([^\)]*\)/INTEGER/g'
+    | sed 's/CHECKSUM_BLOB_TYPE/BLOB\(200\)/g'
     > sqlite_catalogue_schema.sql
   COMMAND cat 
     ${CMAKE_CURRENT_SOURCE_DIR}/mysql_catalogue_schema_header.sql 
     ${CMAKE_CURRENT_SOURCE_DIR}/common_catalogue_schema.sql 
     ${CMAKE_CURRENT_SOURCE_DIR}/mysql_catalogue_schema_trailer.sql
+    | sed 's/CHECKSUM_BLOB_TYPE/VARBINARY\(200\)/g'
     > mysql_catalogue_schema.sql
   COMMAND cat
     ${CMAKE_CURRENT_SOURCE_DIR}/oracle_catalogue_schema_header.sql
     ${CMAKE_CURRENT_SOURCE_DIR}/common_catalogue_schema.sql
     ${CMAKE_CURRENT_SOURCE_DIR}/oracle_catalogue_schema_trailer.sql
     | sed 's/VARCHAR/VARCHAR2/g'
+    | sed 's/CHECKSUM_BLOB_TYPE/RAW\(200\)/g'
     > oracle_catalogue_schema.sql
   COMMAND cat
     ${CMAKE_CURRENT_SOURCE_DIR}/postgres_catalogue_schema_header.sql
     ${CMAKE_CURRENT_SOURCE_DIR}/common_catalogue_schema.sql
     ${CMAKE_CURRENT_SOURCE_DIR}/postgres_catalogue_schema_trailer.sql
+    | sed 's/CHECKSUM_BLOB_TYPE/BYTEA/g'
     > postgres_catalogue_schema.sql
   DEPENDS
     ${CMAKE_CURRENT_SOURCE_DIR}/common_catalogue_schema.sql
@@ -204,10 +213,9 @@ add_executable(cta-catalogue-schema-create
   PostgresCatalogueSchema.cpp
   MysqlCatalogueSchema.cpp)
 
-target_link_libraries (cta-catalogue-schema-create
-  ctacatalogue)
-
-set_property (TARGET cta-catalogue-schema-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+target_link_libraries (cta-catalogue-schema-create ctacatalogue)
+set_property(TARGET cta-catalogue-schema-create APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+set_property(TARGET cta-catalogue-schema-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 
 install (TARGETS cta-catalogue-schema-create DESTINATION /usr/bin)
 install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-schema-create.1cta DESTINATION /usr/share/man/man1)
@@ -217,10 +225,9 @@ add_executable(cta-catalogue-schema-drop
   DropSchemaCmdLineArgs.cpp
   DropSchemaCmdMain.cpp)
 
-target_link_libraries (cta-catalogue-schema-drop
-  ctacatalogue)
-
-set_property (TARGET cta-catalogue-schema-drop APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+target_link_libraries (cta-catalogue-schema-drop ctacatalogue)
+set_property(TARGET cta-catalogue-schema-drop APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+set_property(TARGET cta-catalogue-schema-drop APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 
 install (TARGETS cta-catalogue-schema-drop DESTINATION /usr/bin)
 install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-schema-drop.1cta DESTINATION /usr/share/man/man1)
@@ -230,10 +237,9 @@ add_executable(cta-database-poll
   PollDatabaseCmdLineArgs.cpp
   PollDatabaseCmdMain.cpp)
 
-target_link_libraries (cta-database-poll
-  ctacatalogue)
-
-set_property (TARGET cta-database-poll APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+target_link_libraries (cta-database-poll ctacatalogue)
+set_property(TARGET cta-database-poll APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+set_property(TARGET cta-database-poll APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 
 install (TARGETS cta-database-poll DESTINATION /usr/bin)
 install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-database-poll.1cta DESTINATION /usr/share/man/man1)
@@ -243,13 +249,12 @@ add_executable(cta-catalogue-admin-user-create
   CreateAdminUserCmdLineArgs.cpp
   CreateAdminUserCmdMain.cpp)
 
-target_link_libraries (cta-catalogue-admin-user-create
-  ctacatalogue)
-
-set_property (TARGET cta-catalogue-admin-user-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+target_link_libraries(cta-catalogue-admin-user-create ctacatalogue) 
+set_property(TARGET cta-catalogue-admin-user-create APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+set_property(TARGET cta-catalogue-admin-user-create APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 
-install (TARGETS cta-catalogue-admin-user-create DESTINATION /usr/bin)
-install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-admin-user-create.1cta DESTINATION /usr/share/man/man1)
+install(TARGETS cta-catalogue-admin-user-create DESTINATION /usr/bin)
+install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-admin-user-create.1cta DESTINATION /usr/share/man/man1)
 
 add_executable(cta-catalogue-schema-verify
   VerifySchemaCmd.cpp
@@ -261,10 +266,9 @@ add_executable(cta-catalogue-schema-verify
   PostgresCatalogueSchema.cpp
   MysqlCatalogueSchema.cpp)
 
-target_link_libraries (cta-catalogue-schema-verify
-  ctacatalogue)
-
-set_property (TARGET cta-catalogue-schema-verify APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+target_link_libraries(cta-catalogue-schema-verify ctacatalogue)
+set_property(TARGET cta-catalogue-schema-verify APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+set_property(TARGET cta-catalogue-schema-verify APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
 
 install (TARGETS cta-catalogue-schema-verify DESTINATION /usr/bin)
 install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-catalogue-schema-verify.1cta DESTINATION /usr/share/man/man1)
diff --git a/catalogue/Catalogue.hpp b/catalogue/Catalogue.hpp
index eb728a91c261f00dc385408f265572990c8de998..5424fa3d859879221e0d7a052d2ff07db9ad1cf6 100644
--- a/catalogue/Catalogue.hpp
+++ b/catalogue/Catalogue.hpp
@@ -52,10 +52,12 @@
 #include "common/dataStructures/TapeCopyToPoolMap.hpp"
 #include "common/dataStructures/TapeFile.hpp"
 #include "common/dataStructures/UpdateFileInfoRequest.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 #include "common/dataStructures/VidToTapeMap.hpp"
 #include "common/dataStructures/WriteTestResult.hpp"
 #include "disk/DiskSystem.hpp"
+#include "common/exception/FileSizeMismatch.hpp"
+#include "common/exception/TapeFseqMismatch.hpp"
 #include "common/exception/UserError.hpp"
 #include "common/log/LogContext.hpp"
 #include "common/log/Logger.hpp"
@@ -115,7 +117,7 @@ public:
   virtual uint64_t checkAndGetNextArchiveFileId(
     const std::string &diskInstanceName,
     const std::string &storageClassName,
-    const common::dataStructures::UserIdentity &user) = 0;
+    const common::dataStructures::RequesterIdentity &user) = 0;
 
   /**
    * Returns the information required to queue an archive request.
@@ -134,12 +136,12 @@ public:
   virtual common::dataStructures::ArchiveFileQueueCriteria getArchiveFileQueueCriteria(
     const std::string &diskInstanceName,
     const std::string &storageClassName,
-    const common::dataStructures::UserIdentity &user) = 0;
+    const common::dataStructures::RequesterIdentity &user) = 0;
 
   /**
    * Returns the list of tapes that can be written to by a tape drive in the
    * specified logical library, in other words tapes that are labelled, not
-   * disabled, not full and are in the specified logical library.
+   * disabled, not full, not read-only and are in the specified logical library.
    *
    * @param logicalLibraryName The name of the logical library.
    * @return The list of tapes for writing.
@@ -150,6 +152,8 @@ public:
    * Notifies the catalogue that the specified files have been written to tape.
    *
    * @param events The tape file written events.
+   * @throw TapeFseqMismatch If an unexpected tape file sequence number is encountered.
+   * @throw FileSizeMismatch If an unexpected tape file size is encountered.
    */
   virtual void filesWrittenToTape(const std::set<TapeItemWrittenPointer> &event) = 0;
 
@@ -185,7 +189,7 @@ public:
   virtual common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(
     const std::string &diskInstanceName,
     const uint64_t archiveFileId,
-    const common::dataStructures::UserIdentity &user,
+    const common::dataStructures::RequesterIdentity &user,
     const optional<std::string> & activity,
     log::LogContext &lc) = 0;
 
@@ -285,7 +289,7 @@ public:
 
   /**
    * Creates a tape which is assumed to have logical block protection (LBP)
-   * enabled.
+   * enabled and isFromCastor disabled.
    */
   virtual void createTape(
     const common::dataStructures::SecurityIdentity &admin,
@@ -297,6 +301,7 @@ public:
     const uint64_t capacityInBytes,
     const bool disabled,
     const bool full,
+    const bool readOnly,
     const std::string &comment) = 0;
 
   virtual void deleteTape(const std::string &vid) = 0;
@@ -342,6 +347,23 @@ public:
    * @param vid The volume identifier of the tape to be reclaimed.
    */
   virtual void reclaimTape(const common::dataStructures::SecurityIdentity &admin, const std::string &vid) = 0;
+  
+  /**
+   * Checks the specified tape for the tape label command.
+   *
+   * This method checks if the tape is safe to be labeled and will throw an 
+   * exception if the specified tape is not ready to be labeled.
+   *
+   * @param vid The volume identifier of the tape to be checked.
+   */
+  virtual void checkTapeForLabel(const std::string &vid) = 0;
+  
+  /**
+   * Returns the number of files contained in the tape identified by its vid
+   * @param vid the vid of the tape in which we will count non-superseded files
+   * @return the number of files on the tape
+   */
+  virtual uint64_t getNbFilesOnTape(const std::string &vid) const = 0 ;
 
   virtual void modifyTapeMediaType(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &mediaType) = 0;
   virtual void modifyTapeVendor(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &vendor) = 0;
@@ -361,6 +383,34 @@ public:
    * @param fullValue Set to true if the tape is full.
    */
   virtual void setTapeFull(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool fullValue) = 0;
+  
+  /**
+   * Sets the read-only status of the specified tape.
+   *
+   * Please note that this method is to be called by the CTA front-end in
+   * response to a command from the CTA command-line interface (CLI).
+   *
+   * @param admin The administrator.
+   * @param vid The volume identifier of the tape to be marked as read-only.
+   * @param readOnlyValue Set to true if the tape is read-only.
+   */
+  virtual void setTapeReadOnly(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool readOnlyValue) = 0;
+  
+  /**
+   * This method notifies the CTA catalogue to set the specified tape read-only
+   * in case of a problem.
+   *
+   * @param vid The volume identifier of the tape.
+   */
+  virtual void setTapeReadOnlyOnError(const std::string &vid) = 0;
+  
+  /**
+   * This method notifies the CTA catalogue that the specified tape is from CASTOR.
+   * This method is only for unit tests and MUST never be called in CTA!!!
+   *
+   * @param vid The volume identifier of the tape.
+   */
+  virtual void setTapeIsFromCastorInUnitTests(const std::string &vid) = 0;
 
   virtual void setTapeDisabled(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool disabledValue) = 0;
   virtual void modifyTapeComment(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &comment) = 0;
diff --git a/catalogue/CatalogueRetryWrapper.hpp b/catalogue/CatalogueRetryWrapper.hpp
index 8aa14e09b83eef2c316db9961fce3115d1c8941f..73797d6a3fe197ee565c89febd4d92b3217b45cc 100644
--- a/catalogue/CatalogueRetryWrapper.hpp
+++ b/catalogue/CatalogueRetryWrapper.hpp
@@ -68,12 +68,12 @@ public:
     return retryOnLostConnection(m_log, [&]{return m_catalogue->tapeLabelled(vid, drive);}, m_maxTriesToConnect);
   }
 
-  uint64_t checkAndGetNextArchiveFileId(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override {
+  uint64_t checkAndGetNextArchiveFileId(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->checkAndGetNextArchiveFileId(diskInstanceName, storageClassName, user);}, m_maxTriesToConnect);
   }
 
   common::dataStructures::ArchiveFileQueueCriteria getArchiveFileQueueCriteria(const std::string &diskInstanceName,
-    const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override {
+    const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->getArchiveFileQueueCriteria(diskInstanceName, storageClassName, user);}, m_maxTriesToConnect);
   }
 
@@ -89,7 +89,7 @@ public:
     return retryOnLostConnection(m_log, [&]{return m_catalogue->tapeMountedForArchive(vid, drive);}, m_maxTriesToConnect);
   }
 
-  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override {
+  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::RequesterIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->prepareToRetrieveFile(diskInstanceName, archiveFileId, user, activity, lc);}, m_maxTriesToConnect);
   }
 
@@ -209,8 +209,8 @@ public:
     return retryOnLostConnection(m_log, [&]{return m_catalogue->setLogicalLibraryDisabled(admin, name, disabledValue);}, m_maxTriesToConnect);
   }
 
-  void createTape(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &mediaType, const std::string &vendor, const std::string &logicalLibraryName, const std::string &tapePoolName, const uint64_t capacityInBytes, const bool disabled, const bool full, const std::string &comment) override {
-    return retryOnLostConnection(m_log, [&]{return m_catalogue->createTape(admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes, disabled, full, comment);}, m_maxTriesToConnect);
+  void createTape(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &mediaType, const std::string &vendor, const std::string &logicalLibraryName, const std::string &tapePoolName, const uint64_t capacityInBytes, const bool disabled, const bool full, const bool readOnly, const std::string &comment) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->createTape(admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes, disabled, full, readOnly, comment);}, m_maxTriesToConnect);
   }
 
   void deleteTape(const std::string &vid) override {
@@ -233,6 +233,14 @@ public:
     return retryOnLostConnection(m_log, [&]{return m_catalogue->reclaimTape(admin, vid);}, m_maxTriesToConnect);
   }
 
+  void checkTapeForLabel(const std::string &vid) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->checkTapeForLabel(vid);}, m_maxTriesToConnect);
+  }
+  
+  uint64_t getNbFilesOnTape(const std::string &vid) const override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->getNbFilesOnTape(vid);}, m_maxTriesToConnect);
+  }
+  
   void modifyTapeMediaType(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &mediaType) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->modifyTapeMediaType(admin, vid, mediaType);}, m_maxTriesToConnect);
   }
@@ -260,7 +268,19 @@ public:
   void setTapeFull(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool fullValue) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->setTapeFull(admin, vid, fullValue);}, m_maxTriesToConnect);
   }
+  
+  void setTapeReadOnly(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool readOnlyValue) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->setTapeReadOnly(admin, vid, readOnlyValue);}, m_maxTriesToConnect);
+  }
+  
+  void setTapeReadOnlyOnError(const std::string &vid) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->setTapeReadOnlyOnError(vid);}, m_maxTriesToConnect);
+  }
 
+  void setTapeIsFromCastorInUnitTests(const std::string &vid) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->setTapeIsFromCastorInUnitTests(vid);}, m_maxTriesToConnect);
+  }
+  
   void setTapeDisabled(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool disabledValue) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->setTapeDisabled(admin, vid, disabledValue);}, m_maxTriesToConnect);
   }
diff --git a/catalogue/CatalogueSchema.cpp b/catalogue/CatalogueSchema.cpp
index bd17aff7ba4439eb5842a67cfdf529465c0f1a08..b1418d6d5ef1f473377aac0777e25d81dd65cc85 100644
--- a/catalogue/CatalogueSchema.cpp
+++ b/catalogue/CatalogueSchema.cpp
@@ -48,7 +48,11 @@ std::map<std::string, std::string> CatalogueSchema::getSchemaColumns(const std::
     "INTEGER|"
     "CHAR|"
     "VARCHAR|"
-    "VARCHAR2";
+    "VARCHAR2|"
+    "BLOB|"
+    "BYTEA|"
+    "VARBINARY|"
+    "RAW";
   
   try {
     while(std::string::npos != (findResult = sql.find(';', searchPos))) {
@@ -58,7 +62,7 @@ std::map<std::string, std::string> CatalogueSchema::getSchemaColumns(const std::
       searchPos = findResult + 1;
 
       if(0 < sqlStmt.size()) { // Ignore empty statements
-        const std::string createTableSQL = "CREATE TABLE " + tableName + "[ ]*\\(([a-zA-Z0-9_, '\\)\\(]+)\\)";
+        const std::string createTableSQL = "CREATE[a-zA-Z ]+TABLE " + tableName + "[ ]*\\(([a-zA-Z0-9_, '\\)\\(]+)\\)";
         cta::utils::Regex tableSqlRegex(createTableSQL.c_str());
         auto tableSql = tableSqlRegex.exec(sqlStmt);
         if (2 == tableSql.size()) {
@@ -103,7 +107,7 @@ std::list<std::string> CatalogueSchema::getSchemaTableNames() const {
       searchPos = findResult + 1;
 
       if(0 < sqlStmt.size()) { // Ignore empty statements
-        cta::utils::Regex tableNamesRegex("CREATE TABLE ([a-zA-Z_0-9]+)");
+        cta::utils::Regex tableNamesRegex("CREATE[a-zA-Z ]+TABLE ([a-zA-Z_0-9]+)");
         auto tableName = tableNamesRegex.exec(sqlStmt);
         if (2 == tableName.size()) {
           schemaTables.push_back(tableName[1].c_str());
diff --git a/catalogue/CatalogueTest.cpp b/catalogue/CatalogueTest.cpp
index d742c187972df6cc30db599e7ada198f36a8c6e8..9488ea21c34c5bc54db96755b48386ff6ac6fa55 100644
--- a/catalogue/CatalogueTest.cpp
+++ b/catalogue/CatalogueTest.cpp
@@ -18,10 +18,9 @@
 
 #include "catalogue/ArchiveFileRow.hpp"
 #include "catalogue/CatalogueTest.hpp"
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
-#include "catalogue/FileSizeMismatch.hpp"
+#include "catalogue/UserSpecifiedANonEmptyLogicalLibrary.hpp"
 #include "catalogue/UserSpecifiedANonEmptyTape.hpp"
+#include "catalogue/UserSpecifiedANonExistentLogicalLibrary.hpp"
 #include "catalogue/UserSpecifiedANonExistentTape.hpp"
 #include "catalogue/UserSpecifiedANonExistentDiskSystem.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringDiskSystemName.hpp"
@@ -63,6 +62,13 @@
 
 namespace unitTests {
 
+const uint32_t PUBLIC_DISK_USER = 9751;
+const uint32_t PUBLIC_DISK_GROUP = 9752;
+const uint32_t DISK_FILE_OWNER_UID = 9753;
+const uint32_t DISK_FILE_GID = 9754;
+const uint32_t NON_EXISTENT_DISK_FILE_OWNER_UID = 9755;
+const uint32_t NON_EXISTENT_DISK_FILE_GID = 9756;
+
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
@@ -956,6 +962,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTapePool_notEmpty) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const cta::optional<std::string> supply("value for the supply pool mechanism");
   const std::string comment = "Create tape";
 
@@ -977,7 +984,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTapePool_notEmpty) {
   }
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   ASSERT_TRUE(m_catalogue->tapeExists(vid));
 
@@ -996,6 +1003,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTapePool_notEmpty) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -1597,9 +1606,30 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapePoolSupply_emptyStringSupply) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const std::string modifiedSupply = "";
-  ASSERT_THROW(m_catalogue->modifyTapePoolSupply(m_admin, tapePoolName, modifiedSupply),
-    catalogue::UserSpecifiedAnEmptyStringSupply);
+  const std::string modifiedSupply;
+  m_catalogue->modifyTapePoolSupply(m_admin, tapePoolName, modifiedSupply);
+
+  {
+    const auto pools = m_catalogue->getTapePools();
+      
+    ASSERT_EQ(1, pools.size());
+      
+    const auto &pool = pools.front();
+    ASSERT_EQ(tapePoolName, pool.name);
+    ASSERT_EQ(vo, pool.vo);
+    ASSERT_EQ(nbPartialTapes, pool.nbPartialTapes);
+    ASSERT_EQ(isEncrypted, pool.encryption);
+    ASSERT_FALSE((bool)pool.supply);
+    ASSERT_EQ(0, pool.nbTapes);
+    ASSERT_EQ(0, pool.capacityBytes);
+    ASSERT_EQ(0, pool.dataBytes);
+    ASSERT_EQ(0, pool.nbPhysicalFiles);
+    ASSERT_EQ(comment, pool.comment);
+
+    const common::dataStructures::EntryLog creationLog = pool.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
 }
 
 TEST_P(cta_catalogue_CatalogueTest, modifyTapePoolSupply_nonExistentTapePool) {
@@ -2446,7 +2476,68 @@ TEST_P(cta_catalogue_CatalogueTest, deleteLogicalLibrary_non_existant) {
   using namespace cta;
       
   ASSERT_TRUE(m_catalogue->getLogicalLibraries().empty());
-  ASSERT_THROW(m_catalogue->deleteLogicalLibrary("non_existant_logical_library"), exception::UserError);
+  ASSERT_THROW(m_catalogue->deleteLogicalLibrary("non_existant_logical_library"),
+    catalogue::UserSpecifiedANonExistentLogicalLibrary);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, deleteLogicalLibrary_non_empty) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, 
+    comment);
+
+  const std::list<common::dataStructures::Tape> tapes =
+    m_catalogue->getTapes();
+
+  ASSERT_EQ(1, tapes.size());
+
+  const common::dataStructures::Tape tape = tapes.front();
+  ASSERT_EQ(vid, tape.vid);
+  ASSERT_EQ(mediaType, tape.mediaType);
+  ASSERT_EQ(vendor, tape.vendor);
+  ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+  ASSERT_EQ(tapePoolName, tape.tapePoolName);
+  ASSERT_EQ(vo, tape.vo);
+  ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+  ASSERT_TRUE(disabledValue == tape.disabled);
+  ASSERT_TRUE(fullValue == tape.full);
+  ASSERT_TRUE(readOnlyValue == tape.readOnly);
+  ASSERT_FALSE(tape.isFromCastor);
+  ASSERT_EQ(comment, tape.comment);
+  ASSERT_FALSE(tape.labelLog);
+  ASSERT_FALSE(tape.lastReadLog);
+  ASSERT_FALSE(tape.lastWriteLog);
+
+  const common::dataStructures::EntryLog creationLog = tape.creationLog;
+  ASSERT_EQ(m_admin.username, creationLog.username);
+  ASSERT_EQ(m_admin.host, creationLog.host);
+
+  const common::dataStructures::EntryLog lastModificationLog =
+    tape.lastModificationLog;
+  ASSERT_EQ(creationLog, lastModificationLog);
+
+  ASSERT_THROW(m_catalogue->deleteLogicalLibrary(logicalLibraryName), catalogue::UserSpecifiedANonEmptyLogicalLibrary);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, modifyLogicalLibraryComment) {
@@ -2537,6 +2628,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -2556,7 +2648,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape) {
   }
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   ASSERT_TRUE(m_catalogue->tapeExists(vid));
 
@@ -2575,6 +2667,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -2621,6 +2715,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringVid) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -2640,7 +2735,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringVid) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringVid);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringVid);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringMediaType) {
@@ -2663,6 +2758,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringMediaType) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
@@ -2680,7 +2776,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringMediaType) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringMediaType);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringMediaType);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringVendor) {
@@ -2703,6 +2799,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringVendor) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
+
   const std::string comment = "Create tape";
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
@@ -2720,7 +2818,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringVendor) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringVendor);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringVendor);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringLogicalLibraryName) {
@@ -2743,6 +2841,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringLogicalLibraryName) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
@@ -2760,7 +2859,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringLogicalLibraryName) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringLogicalLibraryName);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringLogicalLibraryName);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringTapePoolName) {
@@ -2781,12 +2880,13 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringTapePoolName) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringTapePoolName);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringTapePoolName);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_zeroCapacity) {
@@ -2810,6 +2910,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_zeroCapacity) {
   const uint64_t capacityInBytes = 0;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -2829,7 +2930,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_zeroCapacity) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAZeroCapacity);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAZeroCapacity);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringComment) {
@@ -2853,6 +2954,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringComment) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -2872,7 +2974,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_emptyStringComment) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), catalogue::UserSpecifiedAnEmptyStringComment);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), catalogue::UserSpecifiedAnEmptyStringComment);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_non_existent_logical_library) {
@@ -2892,11 +2994,12 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_non_existent_logical_library) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), exception::UserError);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), exception::UserError);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_non_existent_tape_pool) {
@@ -2913,11 +3016,12 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_non_existent_tape_pool) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment), exception::UserError);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment), exception::UserError);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, createTape_9_exabytes_capacity) {
@@ -2939,6 +3043,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_9_exabytes_capacity) {
   const uint64_t capacityInBytes = 9L * 1000 * 1000 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -2958,7 +3063,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_9_exabytes_capacity) {
   }
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   const auto tapes = m_catalogue->getTapes();
 
@@ -2975,6 +3080,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_9_exabytes_capacity) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3018,6 +3125,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_same_twice) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -3037,7 +3145,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_same_twice) {
   }
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue, comment);
+    capacityInBytes, disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const auto pools = m_catalogue->getTapePools();
@@ -3053,7 +3161,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_same_twice) {
   }
 
   ASSERT_THROW(m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName,
-    tapePoolName, capacityInBytes, disabledValue, fullValue,
+    tapePoolName, capacityInBytes, disabledValue, fullValue, readOnlyValue,
     comment), exception::UserError);
 
   {
@@ -3085,6 +3193,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_many_tapes) {
   const uint64_t capacityInBytes = (uint64_t) 10 * 1000 * 1000 * 1000 * 1000;
   const bool disabled = true;
   const bool full = false;
+  const bool readOnly = false;
   const std::string comment = "Create tape";
 
   ASSERT_TRUE(m_catalogue->getLogicalLibraries().empty());
@@ -3113,7 +3222,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_many_tapes) {
     vid << "vid" << i;
 
     m_catalogue->createTape(m_admin, vid.str(), mediaType, vendor, logicalLibrary, tapePoolName, capacityInBytes,
-      disabled, full, comment);
+      disabled, full, readOnly, comment);
 
     {
       const auto pools = m_catalogue->getTapePools();
@@ -3152,6 +3261,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_many_tapes) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabled == tape.disabled);
       ASSERT_TRUE(full == tape.full);
+      ASSERT_TRUE(readOnly == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -3348,6 +3459,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_many_tapes) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabled == tape.disabled);
       ASSERT_TRUE(full == tape.full);
+      ASSERT_TRUE(readOnly == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -3389,6 +3502,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -3409,7 +3523,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
 
   {
     m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName,
-      capacityInBytes, disabledValue, fullValue, comment);
+      capacityInBytes, disabledValue, fullValue, readOnlyValue, comment);
     const auto tapes = cta_catalogue_CatalogueTest::tapeListToMap(m_catalogue->getTapes());
     ASSERT_EQ(1, tapes.size());
 
@@ -3426,6 +3540,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3463,16 +3579,14 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
     file1Written.diskInstance         = storageClass.diskInstance;
     file1Written.diskFileId           = "5678";
     file1Written.diskFilePath         = "/public_dir/public_file";
-    file1Written.diskFileUser         = "public_disk_user";
-    file1Written.diskFileGroup        = "public_disk_group";
+    file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+    file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
     file1Written.size                 = fileSize;
-    file1Written.checksumType         = "checksum_type";
-    file1Written.checksumValue        = "checksum_value";
+    file1Written.checksumBlob.insert(checksum::ADLER32, 0x1000); // tests checksum with embedded zeros
     file1Written.storageClassName     = storageClass.name;
     file1Written.vid                  = vid1;
     file1Written.fSeq                 = 1;
     file1Written.blockId              = 4321;
-    file1Written.compressedSize       = fileSize;
     file1Written.copyNb               = 1;
     file1Written.tapeDrive            = "tape_drive";
     m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -3493,7 +3607,7 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
 
   {
     m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName,
-      capacityInBytes, disabledValue, fullValue, comment);
+      capacityInBytes, disabledValue, fullValue, readOnlyValue, comment);
     const auto tapes = cta_catalogue_CatalogueTest::tapeListToMap(m_catalogue->getTapes());
     ASSERT_EQ(2, tapes.size());
 
@@ -3510,6 +3624,8 @@ TEST_P(cta_catalogue_CatalogueTest, createTape_1_tape_with_write_log_1_tape_with
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3556,12 +3672,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTape) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName,
-    capacityInBytes, disabledValue, fullValue,
+    capacityInBytes, disabledValue, fullValue, readOnlyValue,
     comment);
 
   const std::list<common::dataStructures::Tape> tapes =
@@ -3579,6 +3696,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteTape) {
   ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
   ASSERT_TRUE(disabledValue == tape.disabled);
   ASSERT_TRUE(fullValue == tape.full);
+  ASSERT_TRUE(readOnlyValue == tape.readOnly);
+  ASSERT_FALSE(tape.isFromCastor);
   ASSERT_EQ(comment, tape.comment);
   ASSERT_FALSE(tape.labelLog);
   ASSERT_FALSE(tape.lastReadLog);
@@ -3621,12 +3740,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteNonEmptyTape) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes, disabledValue, fullValue,
-    comment);
+    readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -3644,6 +3764,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteNonEmptyTape) {
     ASSERT_EQ(0, tape.dataOnTapeInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3668,16 +3790,14 @@ TEST_P(cta_catalogue_CatalogueTest, deleteNonEmptyTape) {
     file1Written.diskInstance         = storageClass.diskInstance;
     file1Written.diskFileId           = "5678";
     file1Written.diskFilePath         = "/public_dir/public_file";
-    file1Written.diskFileUser         = "public_disk_user";
-    file1Written.diskFileGroup        = "public_disk_group";
+    file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+    file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
     file1Written.size                 = fileSize;
-    file1Written.checksumType         = "checksum_type";
-    file1Written.checksumValue        = "checksum_value";
+    file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
     file1Written.storageClassName     = storageClass.name;
     file1Written.vid                  = vid;
     file1Written.fSeq                 = 1;
     file1Written.blockId              = 4321;
-    file1Written.compressedSize       = fileSize;
     file1Written.copyNb               = 1;
     file1Written.tapeDrive            = "tape_drive";
     m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -3699,6 +3819,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteNonEmptyTape) {
     ASSERT_EQ(fileSize, tape.dataOnTapeInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3743,13 +3865,14 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeMediaType) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -3766,6 +3889,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeMediaType) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3795,6 +3920,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeMediaType) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3825,13 +3952,14 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeVendor) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -3848,6 +3976,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeVendor) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3878,6 +4008,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeVendor) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3908,6 +4040,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeLogicalLibraryName) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -3916,7 +4049,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeLogicalLibraryName) {
 
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -3933,6 +4066,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeLogicalLibraryName) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -3963,6 +4098,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeLogicalLibraryName) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4007,6 +4144,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeTapePoolName) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4015,7 +4153,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeTapePoolName) {
   m_catalogue->createTapePool(m_admin, anotherTapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create another tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4032,6 +4170,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeTapePoolName) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4062,6 +4202,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeTapePoolName) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4111,6 +4253,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeCapacityInBytes) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4118,7 +4261,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeCapacityInBytes) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4135,6 +4278,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeCapacityInBytes) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4166,6 +4311,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeCapacityInBytes) {
     ASSERT_EQ(modifiedCapacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4206,6 +4353,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeEncryptionKey) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4213,7 +4361,7 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeEncryptionKey) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4230,6 +4378,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeEncryptionKey) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4262,6 +4412,98 @@ TEST_P(cta_catalogue_CatalogueTest, modifyTapeEncryptionKey) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+}
+
+TEST_P(cta_catalogue_CatalogueTest, modifyTapeEncryptionKey_emptyStringEncryptionKey) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  const std::string modifiedEncryptionKey;
+  m_catalogue->modifyTapeEncryptionKey(m_admin, vid, modifiedEncryptionKey);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_FALSE((bool)tape.encryptionKey);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4302,6 +4544,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeLabelled) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4309,7 +4552,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeLabelled) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4326,6 +4569,8 @@ TEST_P(cta_catalogue_CatalogueTest, tapeLabelled) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4357,6 +4602,8 @@ TEST_P(cta_catalogue_CatalogueTest, tapeLabelled) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_TRUE((bool)tape.labelLog);
     ASSERT_EQ(labelDrive, tape.labelLog.value().drive);
@@ -4398,6 +4645,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForArchive) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4405,7 +4653,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForArchive) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4422,6 +4670,10 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForArchive) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(0, tape.readMountCount);
+    ASSERT_EQ(0, tape.writeMountCount);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4453,6 +4705,44 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForArchive) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(0, tape.readMountCount);
+    ASSERT_EQ(1, tape.writeMountCount);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_TRUE((bool)tape.lastWriteLog);
+    ASSERT_EQ(modifiedDrive, tape.lastWriteLog.value().drive);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+
+  for(int i=1; i<1024; i++) {
+    m_catalogue->tapeMountedForArchive(vid, modifiedDrive);
+  }
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(0, tape.readMountCount);
+    ASSERT_EQ(1024, tape.writeMountCount);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4494,6 +4784,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForRetrieve) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4501,7 +4792,7 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForRetrieve) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4518,6 +4809,10 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForRetrieve) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(0, tape.readMountCount);
+    ASSERT_EQ(0, tape.writeMountCount);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4549,6 +4844,44 @@ TEST_P(cta_catalogue_CatalogueTest, tapeMountedForRetrieve) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(1, tape.readMountCount);
+    ASSERT_EQ(0, tape.writeMountCount);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_TRUE((bool)tape.lastReadLog);
+    ASSERT_EQ(modifiedDrive, tape.lastReadLog.value().drive);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+
+  for(int i=1; i<1024; i++) {
+    m_catalogue->tapeMountedForRetrieve(vid, modifiedDrive);
+  }
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(1024, tape.readMountCount);
+    ASSERT_EQ(0, tape.writeMountCount);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_TRUE((bool)tape.lastReadLog);
@@ -4590,6 +4923,7 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeFull) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4597,7 +4931,7 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeFull) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4614,6 +4948,8 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeFull) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4644,6 +4980,8 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeFull) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4683,6 +5021,7 @@ TEST_P(cta_catalogue_CatalogueTest, noSpaceLeftOnTape) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4690,7 +5029,7 @@ TEST_P(cta_catalogue_CatalogueTest, noSpaceLeftOnTape) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4707,6 +5046,8 @@ TEST_P(cta_catalogue_CatalogueTest, noSpaceLeftOnTape) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4737,6 +5078,8 @@ TEST_P(cta_catalogue_CatalogueTest, noSpaceLeftOnTape) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4758,7 +5101,7 @@ TEST_P(cta_catalogue_CatalogueTest, noSpaceLeftOnTape_nonExistentTape) {
   ASSERT_THROW(m_catalogue->noSpaceLeftOnTape(vid), exception::Exception);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
+TEST_P(cta_catalogue_CatalogueTest, setTapeReadOnly) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getTapes().empty());
@@ -4774,8 +5117,9 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
   const bool isEncrypted = true;
   const cta::optional<std::string> supply("value for the supply pool mechanism");
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
-  const bool disabledValue = false;
+  const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -4783,7 +5127,7 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4800,6 +5144,8 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4813,7 +5159,7 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  m_catalogue->setTapeDisabled(m_admin, vid, true);
+  m_catalogue->setTapeReadOnly(m_admin, vid, true);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -4828,8 +5174,39 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
     ASSERT_EQ(tapePoolName, tape.tapePoolName);
     ASSERT_EQ(vo, tape.vo);
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
-    ASSERT_TRUE(tape.disabled);
-    ASSERT_FALSE(tape.full);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+
+  m_catalogue->setTapeReadOnly(m_admin, vid, false);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_FALSE(tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -4841,17 +5218,17 @@ TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled_nonExistentTape) {
+TEST_P(cta_catalogue_CatalogueTest, setTapeReadOnly_nonExistentTape) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getTapes().empty());
 
   const std::string vid = "vid";
 
-  ASSERT_THROW(m_catalogue->setTapeDisabled(m_admin, vid, true), exception::UserError);
+  ASSERT_THROW(m_catalogue->setTapeReadOnly(m_admin, vid, true), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting) {
+TEST_P(cta_catalogue_CatalogueTest, setTapeReadOnlyOnError) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getTapes().empty());
@@ -4867,32 +5244,89 @@ TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting) {
   const bool isEncrypted = true;
   const cta::optional<std::string> supply("value for the supply pool mechanism");
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
-  const bool disabledValue = false;
+  const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-   disabledValue, fullValue, comment);
-  m_catalogue->tapeLabelled(vid, "tape_drive");
+    disabledValue, fullValue, readOnlyValue, comment);
 
-  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  ASSERT_EQ(1, tapes.size());
+    ASSERT_EQ(1, tapes.size());
 
-  const catalogue::TapeForWriting tape = tapes.front();
-  ASSERT_EQ(vid, tape.vid);
-  ASSERT_EQ(mediaType, tape.mediaType);
-  ASSERT_EQ(vendor, tape.vendor);
-  ASSERT_EQ(tapePoolName, tape.tapePool);
-  ASSERT_EQ(vo, tape.vo);
-  ASSERT_EQ(0, tape.lastFSeq);
-  ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
-  ASSERT_EQ(0, tape.dataOnTapeInBytes);
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  m_catalogue->setTapeReadOnlyOnError(vid);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, DISABLED_getTapesForWriting_no_labelled_tapes) {
+TEST_P(cta_catalogue_CatalogueTest, setTapeReadOnlyOnError_nonExistentTape) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid = "vid";
+
+  ASSERT_THROW(m_catalogue->setTapeReadOnlyOnError(vid), exception::Exception);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getTapes().empty());
@@ -4910,257 +5344,572 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_getTapesForWriting_no_labelled_tape
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-   disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
-  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  ASSERT_TRUE(tapes.empty());
-}
+    ASSERT_EQ(1, tapes.size());
 
-TEST_P(cta_catalogue_CatalogueTest, createMountPolicy) {
-  using namespace cta;
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
-  const uint64_t minArchiveRequestAge = 2;
-  const uint64_t retrievePriority = 3;
-  const uint64_t minRetrieveRequestAge = 4;
-  const uint64_t maxDrivesAllowed = 5;
-  const std::string &comment = "Create mount policy";
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
 
-  m_catalogue->createMountPolicy(
-    m_admin,
-    name,
-    archivePriority,
-    minArchiveRequestAge,
-    retrievePriority,
-    minRetrieveRequestAge,
-    maxDrivesAllowed,
-    comment);
+  m_catalogue->setTapeDisabled(m_admin, vid, true);
 
-  const std::list<common::dataStructures::MountPolicy> mountPolicies =
-    m_catalogue->getMountPolicies();
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  ASSERT_EQ(1, mountPolicies.size());
+    ASSERT_EQ(1, tapes.size());
 
-  const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(tape.disabled);
+    ASSERT_FALSE(tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
 
-  ASSERT_EQ(name, mountPolicy.name);
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+}
 
-  ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
-  ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+TEST_P(cta_catalogue_CatalogueTest, setTapeIsFromCastorInUnitTests) {
+  using namespace cta;
 
-  ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-  ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
 
-  ASSERT_EQ(comment, mountPolicy.comment);
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
 
-  const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
-  ASSERT_EQ(m_admin.username, creationLog.username);
-  ASSERT_EQ(m_admin.host, creationLog.host);
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
 
-  const common::dataStructures::EntryLog lastModificationLog =
-    mountPolicy.lastModificationLog;
-  ASSERT_EQ(creationLog, lastModificationLog);
-}
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
 
-TEST_P(cta_catalogue_CatalogueTest, createMountPolicy_same_twice) {
-  using namespace cta;
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+    ASSERT_EQ(1, tapes.size());
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
-  const uint64_t minArchiveRequestAge = 4;
-  const uint64_t retrievePriority = 5;
-  const uint64_t minRetrieveRequestAge = 8;
-  const uint64_t maxDrivesAllowed = 9;
-  const std::string &comment = "Create mount policy";
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
 
-  m_catalogue->createMountPolicy(
-    m_admin,
-    name,
-    archivePriority,
-    minArchiveRequestAge,
-    retrievePriority,
-    minRetrieveRequestAge,
-    maxDrivesAllowed,
-    comment);
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
 
-  ASSERT_THROW(m_catalogue->createMountPolicy(
-    m_admin,
-    name,
-    archivePriority,
-    minArchiveRequestAge,
-    retrievePriority,
-    minRetrieveRequestAge,
-    maxDrivesAllowed,
-    comment), exception::UserError);
-}
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
 
-TEST_P(cta_catalogue_CatalogueTest, deleteMountPolicy) {
-  using namespace cta;
+  m_catalogue->setTapeIsFromCastorInUnitTests(vid);
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
-  const uint64_t minArchiveRequestAge = 2;
-  const uint64_t retrievePriority = 3;
-  const uint64_t minRetrieveRequestAge = 4;
-  const uint64_t maxDrivesAllowed = 5;
-  const std::string &comment = "Create mount policy";
+    ASSERT_EQ(1, tapes.size());
 
-  m_catalogue->createMountPolicy(
-    m_admin,
-    name,
-    archivePriority,
-    minArchiveRequestAge,
-    retrievePriority,
-    minRetrieveRequestAge,
-    maxDrivesAllowed,
-    comment);
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_TRUE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
 
-  const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+
+  // do it twice
+  m_catalogue->setTapeIsFromCastorInUnitTests(vid);
 
-  ASSERT_EQ(1, mountPolicies.size());
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
 
-  const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+    ASSERT_EQ(1, tapes.size());
 
-  ASSERT_EQ(name, mountPolicy.name);
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_TRUE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
 
-  ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
-  ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+}
 
-  ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-  ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+TEST_P(cta_catalogue_CatalogueTest, setTapeIsFromCastor_nonExistentTape) {
+  using namespace cta;
 
-  ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  ASSERT_EQ(comment, mountPolicy.comment);
+  const std::string vid = "vid";
 
-  const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
-  ASSERT_EQ(m_admin.username, creationLog.username);
-  ASSERT_EQ(m_admin.host, creationLog.host);
+  ASSERT_THROW(m_catalogue->setTapeIsFromCastorInUnitTests(vid), exception::Exception);
+}
 
-  const common::dataStructures::EntryLog lastModificationLog = mountPolicy.lastModificationLog;
-  ASSERT_EQ(creationLog, lastModificationLog);
+TEST_P(cta_catalogue_CatalogueTest, setTapeDisabled_nonExistentTape) {
+  using namespace cta;
 
-  m_catalogue->deleteMountPolicy(name);
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  const std::string vid = "vid";
+
+  ASSERT_THROW(m_catalogue->setTapeDisabled(m_admin, vid, true), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, deleteMountPolicy_non_existant) {
+TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting) {
   using namespace cta;
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
-  ASSERT_THROW(m_catalogue->deleteMountPolicy("non_existant_mount_policy"), exception::UserError);
-}
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchivePriority) {
-  using namespace cta;
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = false;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  m_catalogue->tapeLabelled(vid, "tape_drive");
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
-  const uint64_t minArchiveRequestAge = 2;
-  const uint64_t retrievePriority = 3;
-  const uint64_t minRetrieveRequestAge = 4;
-  const uint64_t maxDrivesAllowed = 5;
-  const std::string &comment = "Create mount policy";
+  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
 
-  m_catalogue->createMountPolicy(
-    m_admin,
-    name,
-    archivePriority,
-    minArchiveRequestAge,
-    retrievePriority,
-    minRetrieveRequestAge,
-    maxDrivesAllowed,
-    comment);
+  ASSERT_EQ(1, tapes.size());
 
-  {
-    const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
-    ASSERT_EQ(1, mountPolicies.size());
+  const catalogue::TapeForWriting tape = tapes.front();
+  ASSERT_EQ(vid, tape.vid);
+  ASSERT_EQ(mediaType, tape.mediaType);
+  ASSERT_EQ(vendor, tape.vendor);
+  ASSERT_EQ(tapePoolName, tape.tapePool);
+  ASSERT_EQ(vo, tape.vo);
+  ASSERT_EQ(0, tape.lastFSeq);
+  ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+  ASSERT_EQ(0, tape.dataOnTapeInBytes);
+}
 
-    const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting_disabled_tape) {
+  using namespace cta;
 
-    ASSERT_EQ(name, mountPolicy.name);
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-    ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
-    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
 
-    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  m_catalogue->tapeLabelled(vid, "tape_drive");
 
-    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
 
-    ASSERT_EQ(comment, mountPolicy.comment);
+  ASSERT_EQ(0, tapes.size());
+}
 
-    const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
-    ASSERT_EQ(m_admin.username, creationLog.username);
-    ASSERT_EQ(m_admin.host, creationLog.host);
+TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting_full_tape) {
+  using namespace cta;
 
-    const common::dataStructures::EntryLog lastModificationLog = mountPolicy.lastModificationLog;
-    ASSERT_EQ(creationLog, lastModificationLog);
-  }
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  const uint64_t modifiedArchivePriority = archivePriority + 10;
-  m_catalogue->modifyMountPolicyArchivePriority(m_admin, name, modifiedArchivePriority);
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = true;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
 
-  {
-    const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
-    ASSERT_EQ(1, mountPolicies.size());
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  m_catalogue->tapeLabelled(vid, "tape_drive");
 
-    const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
 
-    ASSERT_EQ(name, mountPolicy.name);
+  ASSERT_EQ(0, tapes.size());
+}
 
-    ASSERT_EQ(modifiedArchivePriority, mountPolicy.archivePriority);
-    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting_read_only_tape) {
+  using namespace cta;
 
-    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
 
-    ASSERT_EQ(comment, mountPolicy.comment);
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  m_catalogue->tapeLabelled(vid, "tape_drive");
 
-    const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
-    ASSERT_EQ(m_admin.username, creationLog.username);
-    ASSERT_EQ(m_admin.host, creationLog.host);
-  }
+  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+
+  ASSERT_EQ(0, tapes.size());
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchivePriority_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, getTapesForWriting_is_from_castor_tape) {
   using namespace cta;
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = false;
+  const bool readOnlyValue = false;
+  const std::string comment = "Create tape";
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyArchivePriority(m_admin, name, archivePriority), exception::UserError);
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+  m_catalogue->tapeLabelled(vid, "tape_drive");
+  {
+    const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+    ASSERT_EQ(1, tapes.size());
+  }
+  {
+    m_catalogue->setTapeIsFromCastorInUnitTests(vid);
+    const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+    ASSERT_EQ(0, tapes.size());
+  }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge) {
+TEST_P(cta_catalogue_CatalogueTest, DISABLED_getTapesForWriting_no_labelled_tapes) {
   using namespace cta;
 
-  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
 
-  const std::string name = "mount_policy";
-  const uint64_t archivePriority = 1;
+  const std::string vid = "vid";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled = false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = false;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  const std::list<catalogue::TapeForWriting> tapes = m_catalogue->getTapesForWriting(logicalLibraryName);
+
+  ASSERT_TRUE(tapes.empty());
+}
+
+TEST_P(cta_catalogue_CatalogueTest, createMountPolicy) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const uint64_t archivePriority = 1;
+  const uint64_t minArchiveRequestAge = 2;
+  const uint64_t retrievePriority = 3;
+  const uint64_t minRetrieveRequestAge = 4;
+  const uint64_t maxDrivesAllowed = 5;
+  const std::string &comment = "Create mount policy";
+
+  m_catalogue->createMountPolicy(
+    m_admin,
+    name,
+    archivePriority,
+    minArchiveRequestAge,
+    retrievePriority,
+    minRetrieveRequestAge,
+    maxDrivesAllowed,
+    comment);
+
+  const std::list<common::dataStructures::MountPolicy> mountPolicies =
+    m_catalogue->getMountPolicies();
+
+  ASSERT_EQ(1, mountPolicies.size());
+
+  const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+
+  ASSERT_EQ(name, mountPolicy.name);
+
+  ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
+  ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+
+  ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
+  ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+
+  ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+
+  ASSERT_EQ(comment, mountPolicy.comment);
+
+  const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
+  ASSERT_EQ(m_admin.username, creationLog.username);
+  ASSERT_EQ(m_admin.host, creationLog.host);
+
+  const common::dataStructures::EntryLog lastModificationLog =
+    mountPolicy.lastModificationLog;
+  ASSERT_EQ(creationLog, lastModificationLog);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, createMountPolicy_same_twice) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const uint64_t archivePriority = 1;
+  const uint64_t minArchiveRequestAge = 4;
+  const uint64_t retrievePriority = 5;
+  const uint64_t minRetrieveRequestAge = 8;
+  const uint64_t maxDrivesAllowed = 9;
+  const std::string &comment = "Create mount policy";
+
+  m_catalogue->createMountPolicy(
+    m_admin,
+    name,
+    archivePriority,
+    minArchiveRequestAge,
+    retrievePriority,
+    minRetrieveRequestAge,
+    maxDrivesAllowed,
+    comment);
+
+  ASSERT_THROW(m_catalogue->createMountPolicy(
+    m_admin,
+    name,
+    archivePriority,
+    minArchiveRequestAge,
+    retrievePriority,
+    minRetrieveRequestAge,
+    maxDrivesAllowed,
+    comment), exception::UserError);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, deleteMountPolicy) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const uint64_t archivePriority = 1;
+  const uint64_t minArchiveRequestAge = 2;
+  const uint64_t retrievePriority = 3;
+  const uint64_t minRetrieveRequestAge = 4;
+  const uint64_t maxDrivesAllowed = 5;
+  const std::string &comment = "Create mount policy";
+
+  m_catalogue->createMountPolicy(
+    m_admin,
+    name,
+    archivePriority,
+    minArchiveRequestAge,
+    retrievePriority,
+    minRetrieveRequestAge,
+    maxDrivesAllowed,
+    comment);
+
+  const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
+
+  ASSERT_EQ(1, mountPolicies.size());
+
+  const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+
+  ASSERT_EQ(name, mountPolicy.name);
+
+  ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
+  ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+
+  ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
+  ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+
+  ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+
+  ASSERT_EQ(comment, mountPolicy.comment);
+
+  const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
+  ASSERT_EQ(m_admin.username, creationLog.username);
+  ASSERT_EQ(m_admin.host, creationLog.host);
+
+  const common::dataStructures::EntryLog lastModificationLog = mountPolicy.lastModificationLog;
+  ASSERT_EQ(creationLog, lastModificationLog);
+
+  m_catalogue->deleteMountPolicy(name);
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+}
+
+TEST_P(cta_catalogue_CatalogueTest, deleteMountPolicy_non_existant) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+  ASSERT_THROW(m_catalogue->deleteMountPolicy("non_existant_mount_policy"), exception::UserError);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchivePriority) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const uint64_t archivePriority = 1;
   const uint64_t minArchiveRequestAge = 2;
   const uint64_t retrievePriority = 3;
   const uint64_t minRetrieveRequestAge = 4;
@@ -5203,8 +5952,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const uint64_t modifiedMinArchiveRequestAge = minArchiveRequestAge + 10;
-  m_catalogue->modifyMountPolicyArchiveMinRequestAge(m_admin, name, modifiedMinArchiveRequestAge);
+  const uint64_t modifiedArchivePriority = archivePriority + 10;
+  m_catalogue->modifyMountPolicyArchivePriority(m_admin, name, modifiedArchivePriority);
 
   {
     const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
@@ -5214,8 +5963,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge) {
 
     ASSERT_EQ(name, mountPolicy.name);
 
-    ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
-    ASSERT_EQ(modifiedMinArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+    ASSERT_EQ(modifiedArchivePriority, mountPolicy.archivePriority);
+    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
 
     ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
     ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
@@ -5230,18 +5979,18 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchivePriority_nonExistentMountPolicy) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
 
   const std::string name = "mount_policy";
-  const uint64_t minArchiveRequestAge = 2;
+  const uint64_t archivePriority = 1;
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyArchiveMinRequestAge(m_admin, name, minArchiveRequestAge), exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyMountPolicyArchivePriority(m_admin, name, archivePriority), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetreivePriority) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
@@ -5290,8 +6039,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetreivePriority) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const uint64_t modifiedRetrievePriority = retrievePriority + 10;
-  m_catalogue->modifyMountPolicyRetrievePriority(m_admin, name, modifiedRetrievePriority);
+  const uint64_t modifiedMinArchiveRequestAge = minArchiveRequestAge + 10;
+  m_catalogue->modifyMountPolicyArchiveMinRequestAge(m_admin, name, modifiedMinArchiveRequestAge);
 
   {
     const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
@@ -5302,9 +6051,9 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetreivePriority) {
     ASSERT_EQ(name, mountPolicy.name);
 
     ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
-    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+    ASSERT_EQ(modifiedMinArchiveRequestAge, mountPolicy.archiveMinRequestAge);
 
-    ASSERT_EQ(modifiedRetrievePriority, mountPolicy.retrievePriority);
+    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
     ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
 
     ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
@@ -5317,18 +6066,18 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetreivePriority) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrievePriority_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyArchiveMinRequestAge_nonExistentMountPolicy) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
 
   const std::string name = "mount_policy";
-  const uint64_t retrievePriority = 1;
+  const uint64_t minArchiveRequestAge = 2;
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyRetrievePriority(m_admin, name, retrievePriority), exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyMountPolicyArchiveMinRequestAge(m_admin, name, minArchiveRequestAge), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetreivePriority) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
@@ -5377,8 +6126,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const uint64_t modifiedMinRetrieveRequestAge = minRetrieveRequestAge + 10;
-  m_catalogue->modifyMountPolicyRetrieveMinRequestAge(m_admin, name, modifiedMinRetrieveRequestAge);
+  const uint64_t modifiedRetrievePriority = retrievePriority + 10;
+  m_catalogue->modifyMountPolicyRetrievePriority(m_admin, name, modifiedRetrievePriority);
 
   {
     const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
@@ -5391,8 +6140,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge) {
     ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
     ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
 
-    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-    ASSERT_EQ(modifiedMinRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+    ASSERT_EQ(modifiedRetrievePriority, mountPolicy.retrievePriority);
+    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
 
     ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
 
@@ -5404,18 +6153,18 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrievePriority_nonExistentMountPolicy) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
 
   const std::string name = "mount_policy";
-  const uint64_t minRetrieveRequestAge = 2;
+  const uint64_t retrievePriority = 1;
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyRetrieveMinRequestAge(m_admin, name, minRetrieveRequestAge), exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyMountPolicyRetrievePriority(m_admin, name, retrievePriority), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
@@ -5464,8 +6213,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const uint64_t modifiedMaxDrivesAllowed = maxDrivesAllowed + 10;
-  m_catalogue->modifyMountPolicyMaxDrivesAllowed(m_admin, name, modifiedMaxDrivesAllowed);
+  const uint64_t modifiedMinRetrieveRequestAge = minRetrieveRequestAge + 10;
+  m_catalogue->modifyMountPolicyRetrieveMinRequestAge(m_admin, name, modifiedMinRetrieveRequestAge);
 
   {
     const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
@@ -5479,9 +6228,9 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed) {
     ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
 
     ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
-    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+    ASSERT_EQ(modifiedMinRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
 
-    ASSERT_EQ(modifiedMaxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
 
     ASSERT_EQ(comment, mountPolicy.comment);
 
@@ -5491,18 +6240,18 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyRetrieveMinRequestAge_nonExistentMountPolicy) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
 
   const std::string name = "mount_policy";
-  const uint64_t maxDrivesAllowed = 2;
+  const uint64_t minRetrieveRequestAge = 2;
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyMaxDrivesAllowed(m_admin, name, maxDrivesAllowed), exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyMountPolicyRetrieveMinRequestAge(m_admin, name, minRetrieveRequestAge), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
@@ -5551,8 +6300,8 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment) {
     ASSERT_EQ(creationLog, lastModificationLog);
   }
 
-  const std::string modifiedComment = "Modified comment";
-  m_catalogue->modifyMountPolicyComment(m_admin, name, modifiedComment);
+  const uint64_t modifiedMaxDrivesAllowed = maxDrivesAllowed + 10;
+  m_catalogue->modifyMountPolicyMaxDrivesAllowed(m_admin, name, modifiedMaxDrivesAllowed);
 
   {
     const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
@@ -5568,9 +6317,9 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment) {
     ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
     ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
 
-    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+    ASSERT_EQ(modifiedMaxDrivesAllowed, mountPolicy.maxDrivesAllowed);
 
-    ASSERT_EQ(modifiedComment, mountPolicy.comment);
+    ASSERT_EQ(comment, mountPolicy.comment);
 
     const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
     ASSERT_EQ(m_admin.username, creationLog.username);
@@ -5578,18 +6327,105 @@ TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment) {
   }
 }
 
-TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment_nonExistentMountPolicy) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyMaxDrivesAllowed_nonExistentMountPolicy) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
 
   const std::string name = "mount_policy";
-  const std::string comment = "Comment";
+  const uint64_t maxDrivesAllowed = 2;
 
-  ASSERT_THROW(m_catalogue->modifyMountPolicyComment(m_admin, name, comment), exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyMountPolicyMaxDrivesAllowed(m_admin, name, maxDrivesAllowed), exception::UserError);
 }
 
-TEST_P(cta_catalogue_CatalogueTest, createRequesterMountRule) {
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const uint64_t archivePriority = 1;
+  const uint64_t minArchiveRequestAge = 2;
+  const uint64_t retrievePriority = 3;
+  const uint64_t minRetrieveRequestAge = 4;
+  const uint64_t maxDrivesAllowed = 5;
+  const std::string comment = "Create mount policy";
+
+  m_catalogue->createMountPolicy(
+    m_admin,
+    name,
+    archivePriority,
+    minArchiveRequestAge,
+    retrievePriority,
+    minRetrieveRequestAge,
+    maxDrivesAllowed,
+    comment);
+
+  {
+    const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
+    ASSERT_EQ(1, mountPolicies.size());
+
+    const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+
+    ASSERT_EQ(name, mountPolicy.name);
+
+    ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
+    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+
+    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
+    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+
+    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+
+    ASSERT_EQ(comment, mountPolicy.comment);
+
+    const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = mountPolicy.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  const std::string modifiedComment = "Modified comment";
+  m_catalogue->modifyMountPolicyComment(m_admin, name, modifiedComment);
+
+  {
+    const std::list<common::dataStructures::MountPolicy> mountPolicies = m_catalogue->getMountPolicies();
+    ASSERT_EQ(1, mountPolicies.size());
+
+    const common::dataStructures::MountPolicy mountPolicy = mountPolicies.front();
+
+    ASSERT_EQ(name, mountPolicy.name);
+
+    ASSERT_EQ(archivePriority, mountPolicy.archivePriority);
+    ASSERT_EQ(minArchiveRequestAge, mountPolicy.archiveMinRequestAge);
+
+    ASSERT_EQ(retrievePriority, mountPolicy.retrievePriority);
+    ASSERT_EQ(minRetrieveRequestAge, mountPolicy.retrieveMinRequestAge);
+
+    ASSERT_EQ(maxDrivesAllowed, mountPolicy.maxDrivesAllowed);
+
+    ASSERT_EQ(modifiedComment, mountPolicy.comment);
+
+    const common::dataStructures::EntryLog creationLog = mountPolicy.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+  }
+}
+
+TEST_P(cta_catalogue_CatalogueTest, modifyMountPolicyComment_nonExistentMountPolicy) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getMountPolicies().empty());
+
+  const std::string name = "mount_policy";
+  const std::string comment = "Comment";
+
+  ASSERT_THROW(m_catalogue->modifyMountPolicyComment(m_admin, name, comment), exception::UserError);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, createRequesterMountRule) {
   using namespace cta;
 
   ASSERT_TRUE(m_catalogue->getRequesterMountRules().empty());
@@ -6248,11 +7084,11 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_no_archive_rout
   storageClass.comment = "Create storage class";
   m_catalogue->createStorageClass(m_admin, storageClass);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
-  ASSERT_THROW(m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, userIdentity),
+  ASSERT_THROW(m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, requesterIdentity),
     exception::UserError);
 }
 
@@ -6264,7 +7100,6 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_no_mount_rules)
   ASSERT_TRUE(m_catalogue->getArchiveRoutes().empty());
 
   const std::string diskInstanceName = "disk_instance_name";
-  common::dataStructures::UserIdentity userIdentity;
 
   common::dataStructures::StorageClass storageClass;
   storageClass.diskInstance = diskInstanceName;
@@ -6303,10 +7138,11 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_no_mount_rules)
   ASSERT_EQ(creationLog, lastModificationLog);
 
   const std::string requesterName = "requester_name";
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
-  ASSERT_THROW(m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, userIdentity),
+  ASSERT_THROW(m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, requesterIdentity),
     exception::UserError);
 }
 
@@ -6388,14 +7224,14 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_requester_mount
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
   std::set<uint64_t> archiveFileIds;
   for(uint64_t i = 0; i<10; i++) {
     const uint64_t archiveFileId =
-      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, userIdentity);
+      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, requesterIdentity);
 
     const bool archiveFileIdIsNew = archiveFileIds.end() == archiveFileIds.find(archiveFileId);
     ASSERT_TRUE(archiveFileIdIsNew);
@@ -6479,14 +7315,14 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_requester_group
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = "username";
-  userIdentity.group = requesterGroupName;
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = "username";
+  requesterIdentity.group = requesterGroupName;
 
   std::set<uint64_t> archiveFileIds;
   for(uint64_t i = 0; i<10; i++) {
     const uint64_t archiveFileId =
-      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, userIdentity);
+      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, requesterIdentity);
 
     const bool archiveFileIdIsNew = archiveFileIds.end() == archiveFileIds.find(archiveFileId);
     ASSERT_TRUE(archiveFileIdIsNew);
@@ -6589,14 +7425,14 @@ TEST_P(cta_catalogue_CatalogueTest, checkAndGetNextArchiveFileId_requester_mount
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
   std::set<uint64_t> archiveFileIds;
   for(uint64_t i = 0; i<10; i++) {
     const uint64_t archiveFileId =
-      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, userIdentity);
+      m_catalogue->checkAndGetNextArchiveFileId(storageClass.diskInstance, storageClass.name, requesterIdentity);
 
     const bool archiveFileIdIsNew = archiveFileIds.end() == archiveFileIds.find(archiveFileId);
     ASSERT_TRUE(archiveFileIdIsNew);
@@ -6653,11 +7489,11 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_no_archive_route
   storageClass.comment = "Create storage class";
   m_catalogue->createStorageClass(m_admin, storageClass);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
-  ASSERT_THROW(m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, userIdentity),
+  ASSERT_THROW(m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, requesterIdentity),
     exception::UserError);
 }
 
@@ -6739,10 +7575,10 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_requester_mount_
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
-  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, userIdentity);
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
+  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, requesterIdentity);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_requester_group_mount_rule) {
@@ -6822,10 +7658,10 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_requester_group_
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = "username";
-  userIdentity.group = requesterGroupName;
-  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, userIdentity);
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = "username";
+  requesterIdentity.group = requesterGroupName;
+  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, requesterIdentity);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_requester_mount_rule_overide) {
@@ -6924,10 +7760,10 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFileQueueCriteria_requester_mount_
   const common::dataStructures::EntryLog lastModificationLog = route.lastModificationLog;
   ASSERT_EQ(creationLog, lastModificationLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
-  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, userIdentity);
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
+  m_catalogue->getArchiveFileQueueCriteria(storageClass.diskInstance, storageClass.name, requesterIdentity);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
@@ -6952,14 +7788,15 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string createTapeComment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
 
   const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
   const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
@@ -6975,6 +7812,8 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -7000,6 +7839,8 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -7029,8 +7870,6 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
 
   const uint64_t archiveFileSize = 1;
   const std::string tapeDrive = "tape_drive";
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file1Written = *file1WrittenUP;
@@ -7039,16 +7878,14 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -7059,14 +7896,13 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -7075,9 +7911,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -7089,16 +7923,14 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -7109,14 +7941,13 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -7126,9 +7957,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
 
     auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
     ASSERT_FALSE(copyNbToTapeFile2Itor == archiveFile.tapeFiles.end());
@@ -7136,9 +7965,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
   }
 
   const std::string mountPolicyName = "mount_policy";
@@ -7177,11 +8004,11 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
 
   log::LogContext dummyLc(m_dummyLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
   const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-    m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
+    m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, requesterIdentity, cta::nullopt, dummyLc);
 
   ASSERT_EQ(2, queueCriteria.archiveFile.tapeFiles.size());
   ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
@@ -7189,7 +8016,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   ASSERT_EQ(maxDrivesAllowed, queueCriteria.mountPolicy.maxDrivesAllowed);
 
   // Check that the diskInstanceName mismatch detection works
-  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName2, archiveFileId, userIdentity, cta::nullopt, dummyLc),
+  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName2, archiveFileId, requesterIdentity, cta::nullopt, dummyLc),
     exception::UserError);
 }
 
@@ -7214,14 +8041,15 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string createTapeComment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
 
   const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
   const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
@@ -7237,6 +8065,8 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -7262,6 +8092,8 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -7290,26 +8122,23 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
 
   const uint64_t archiveFileSize = 1;
   const std::string tapeDrive = "tape_drive";
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file1Written = *file1WrittenUP;
-    std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;    file1WrittenSet.insert(file1WrittenUP.release());
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
   file1Written.archiveFileId        = archiveFileId;
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -7320,14 +8149,13 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -7336,9 +8164,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -7350,16 +8176,14 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -7370,14 +8194,13 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -7387,9 +8210,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
 
     const auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
     ASSERT_FALSE(copyNbToTapeFile2Itor == archiveFile.tapeFiles.end());
@@ -7397,9 +8218,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
   }
 
   const std::string mountPolicyName = "mount_policy";
@@ -7438,13 +8257,13 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
 
   log::LogContext dummyLc(m_dummyLog);
 
-  common::dataStructures::UserIdentity userIdentity;
-  userIdentity.name = requesterName;
-  userIdentity.group = "group";
+  common::dataStructures::RequesterIdentity requesterIdentity;
+  requesterIdentity.name = requesterName;
+  requesterIdentity.group = "group";
 
   {
     const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
+      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, requesterIdentity, cta::nullopt, dummyLc);
 
     ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
     ASSERT_EQ(minArchiveRequestAge, queueCriteria.mountPolicy.archiveMinRequestAge);
@@ -7458,9 +8277,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
     const auto copyNbToTapeFile2Itor = queueCriteria.archiveFile.tapeFiles.find(2);
@@ -7469,16 +8286,14 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
   }
 
   m_catalogue->setTapeDisabled(m_admin, vid1, true);
 
   {
     const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
+      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, requesterIdentity, cta::nullopt, dummyLc);
 
     ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
     ASSERT_EQ(minArchiveRequestAge, queueCriteria.mountPolicy.archiveMinRequestAge);
@@ -7492,14 +8307,12 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
   }
 
   m_catalogue->setTapeDisabled(m_admin, vid2, true);
 
-  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc),
+  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, requesterIdentity, cta::nullopt, dummyLc),
     exception::UserError);
 }
 
@@ -7520,7 +8333,7 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFiles_disk_file_group_without_inst
   ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
 
   catalogue::TapeFileSearchCriteria searchCriteria;
-  searchCriteria.diskFileGroup = "disk_file_group";
+  searchCriteria.diskFileGid = DISK_FILE_GID;
 
   ASSERT_THROW(m_catalogue->getArchiveFilesItor(searchCriteria), exception::UserError);
 }
@@ -7531,7 +8344,7 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFiles_non_existant_disk_file_group
 
   catalogue::TapeFileSearchCriteria searchCriteria;
   searchCriteria.diskInstance = "non_existant_disk_instance";
-  searchCriteria.diskFileGroup = "non_existant_disk_file_group";
+  searchCriteria.diskFileGid = NON_EXISTENT_DISK_FILE_GID;
 
   ASSERT_THROW(m_catalogue->getArchiveFilesItor(searchCriteria), exception::UserError);
 }
@@ -7586,7 +8399,7 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFiles_disk_file_user_without_insta
   ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
 
   catalogue::TapeFileSearchCriteria searchCriteria;
-  searchCriteria.diskFileUser = "disk_file_user";
+  searchCriteria.diskFileOwnerUid = DISK_FILE_OWNER_UID;
 
   ASSERT_THROW(m_catalogue->getArchiveFilesItor(searchCriteria), exception::UserError);
 }
@@ -7597,7 +8410,7 @@ TEST_P(cta_catalogue_CatalogueTest, getArchiveFiles_non_existant_disk_file_user)
 
   catalogue::TapeFileSearchCriteria searchCriteria;
   searchCriteria.diskInstance = "non_existant_disk_instance";
-  searchCriteria.diskFileUser = "non_existant_disk_file_user";
+  searchCriteria.diskFileOwnerUid = NON_EXISTENT_DISK_FILE_OWNER_UID;
 
   ASSERT_THROW(m_catalogue->getArchiveFilesItor(searchCriteria), exception::UserError);
 }
@@ -7694,6 +8507,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -7733,7 +8547,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
   }
 
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName1, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   {
     const auto pools = m_catalogue->getTapePools();
     ASSERT_EQ(2, pools.size());
@@ -7751,7 +8565,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
   }
 
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName2, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   {
     const auto pools = m_catalogue->getTapePools();
     ASSERT_EQ(2, pools.size());
@@ -7787,6 +8601,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -7813,6 +8631,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -7834,14 +8654,11 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
   storageClass.comment = "Create storage class";
   m_catalogue->createStorageClass(m_admin, storageClass);
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
   const uint64_t nbArchiveFiles = 10; // Must be a multiple of 2 for this test
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
 
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
   for(uint64_t i = 1; i <= nbArchiveFiles; i++) {
@@ -7857,16 +8674,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     fileWritten.diskInstance = storageClass.diskInstance;
     fileWritten.diskFileId = diskFileId.str();
     fileWritten.diskFilePath = diskFilePath.str();
-    fileWritten.diskFileUser = "public_disk_user";
-    fileWritten.diskFileGroup = "public_disk_group";
+    fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+    fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
     fileWritten.size = archiveFileSize;
-    fileWritten.checksumType = checksumType;
-    fileWritten.checksumValue = checksumValue;
+    fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
     fileWritten.storageClassName = storageClass.name;
     fileWritten.vid = vid1;
     fileWritten.fSeq = i;
     fileWritten.blockId = i * 100;
-    fileWritten.compressedSize = compressedFileSize;
     fileWritten.copyNb = 1;
     fileWritten.tapeDrive = tapeDrive;
     tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -7884,7 +8699,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     ASSERT_EQ(tapePoolName1, pool.name);
     ASSERT_EQ(1, pool.nbTapes);
     ASSERT_EQ(capacityInBytes, pool.capacityBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, pool.dataBytes);
+    ASSERT_EQ(nbArchiveFiles * archiveFileSize, pool.dataBytes);
     ASSERT_EQ(nbArchiveFiles, pool.nbPhysicalFiles);
   }
 
@@ -7920,16 +8735,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     fileWritten.diskInstance = storageClass.diskInstance;
     fileWritten.diskFileId = diskFileId.str();
     fileWritten.diskFilePath = diskFilePath.str();
-    fileWritten.diskFileUser = "public_disk_user";
-    fileWritten.diskFileGroup = "public_disk_group";
+    fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+    fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
     fileWritten.size = archiveFileSize;
-    fileWritten.checksumType = checksumType;
-    fileWritten.checksumValue = checksumValue;
+    fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
     fileWritten.storageClassName = storageClass.name;
     fileWritten.vid = vid2;
     fileWritten.fSeq = i;
     fileWritten.blockId = i * 100;
-    fileWritten.compressedSize = compressedFileSize;
     fileWritten.copyNb = 2;
     fileWritten.tapeDrive = tapeDrive;
     tapeFilesWrittenCopy2.emplace(fileWrittenUP.release());
@@ -7947,7 +8760,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     ASSERT_EQ(tapePoolName2, pool.name);
     ASSERT_EQ(1, pool.nbTapes);
     ASSERT_EQ(capacityInBytes, pool.capacityBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, pool.dataBytes);
+    ASSERT_EQ(nbArchiveFiles * archiveFileSize, pool.dataBytes);
     ASSERT_EQ(nbArchiveFiles, pool.nbPhysicalFiles);
   }
 
@@ -7975,8 +8788,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     searchCriteria.diskInstance = storageClass.diskInstance;
     searchCriteria.diskFileId = std::to_string(12345678);
     searchCriteria.diskFilePath = "/public_dir/public_file_1";
-    searchCriteria.diskFileUser = "public_disk_user";
-    searchCriteria.diskFileGroup = "public_disk_group";
+    searchCriteria.diskFileOwnerUid = PUBLIC_DISK_USER;
+    searchCriteria.diskFileGid = PUBLIC_DISK_GROUP;
     searchCriteria.storageClass = storageClass.name;
     searchCriteria.vid = vid1;
     searchCriteria.tapeFileCopyNb = 1;
@@ -7993,8 +8806,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
     ASSERT_EQ(searchCriteria.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(searchCriteria.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(searchCriteria.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(searchCriteria.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(searchCriteria.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(searchCriteria.diskFileOwnerUid, static_cast<uint64_t>(archiveFile.diskFileInfo.owner_uid));
+    ASSERT_EQ(searchCriteria.diskFileGid, static_cast<uint64_t>(archiveFile.diskFileInfo.gid));
     ASSERT_EQ(searchCriteria.storageClass, archiveFile.storageClass);
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     ASSERT_EQ(searchCriteria.vid, archiveFile.tapeFiles.begin()->vid);
@@ -8016,16 +8829,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       fileWritten1.diskInstance = storageClass.diskInstance;
       fileWritten1.diskFileId = diskFileId.str();
       fileWritten1.diskFilePath = diskFilePath.str();
-      fileWritten1.diskFileUser = "public_disk_user";
-      fileWritten1.diskFileGroup = "public_disk_group";
+      fileWritten1.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten1.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten1.size = archiveFileSize;
-      fileWritten1.checksumType = checksumType;
-      fileWritten1.checksumValue = checksumValue;
+      fileWritten1.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten1.storageClassName = storageClass.name;
       fileWritten1.vid = vid1;
       fileWritten1.fSeq = i;
       fileWritten1.blockId = i * 100;
-      fileWritten1.compressedSize = compressedFileSize;
       fileWritten1.copyNb = 1;
 
       catalogue::TapeFileWritten fileWritten2 = fileWritten1;
@@ -8039,11 +8850,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(fileWritten1.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten1.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten1.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten1.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten1.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten1.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten1.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten1.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten1.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten1.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten1.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten1.storageClassName, archiveFile.storageClass);
       ASSERT_EQ(storageClass.nbCopies, archiveFile.tapeFiles.size());
 
@@ -8054,9 +8864,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten1.vid, it->vid);
         ASSERT_EQ(fileWritten1.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten1.blockId, it->blockId);
-        ASSERT_EQ(fileWritten1.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten1.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten1.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten1.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten1.copyNb, it->copyNb);
       }
 
@@ -8067,9 +8875,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten2.vid, it->vid);
         ASSERT_EQ(fileWritten2.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten2.blockId, it->blockId);
-        ASSERT_EQ(fileWritten2.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten2.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten2.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten2.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten2.copyNb, it->copyNb);
       }
     }
@@ -8092,16 +8898,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       fileWritten1.diskInstance = storageClass.diskInstance;
       fileWritten1.diskFileId = diskFileId.str();
       fileWritten1.diskFilePath = diskFilePath.str();
-      fileWritten1.diskFileUser = "public_disk_user";
-      fileWritten1.diskFileGroup = "public_disk_group";
+      fileWritten1.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten1.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten1.size = archiveFileSize;
-      fileWritten1.checksumType = checksumType;
-      fileWritten1.checksumValue = checksumValue;
+      fileWritten1.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten1.storageClassName = storageClass.name;
       fileWritten1.vid = vid1;
       fileWritten1.fSeq = i;
       fileWritten1.blockId = i * 100;
-      fileWritten1.compressedSize = compressedFileSize;
       fileWritten1.copyNb = 1;
 
       catalogue::TapeFileWritten fileWritten2 = fileWritten1;
@@ -8115,11 +8919,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(fileWritten1.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten1.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten1.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten1.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten1.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten1.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten1.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten1.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten1.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten1.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten1.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten1.storageClassName, archiveFile.storageClass);
       ASSERT_EQ(storageClass.nbCopies, archiveFile.tapeFiles.size());
 
@@ -8130,9 +8933,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten1.vid, it->vid);
         ASSERT_EQ(fileWritten1.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten1.blockId, it->blockId);
-        ASSERT_EQ(fileWritten1.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten1.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten1.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten1.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten1.copyNb, it->copyNb);
       }
 
@@ -8143,9 +8944,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten2.vid, it->vid);
         ASSERT_EQ(fileWritten2.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten2.blockId, it->blockId);
-        ASSERT_EQ(fileWritten2.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten2.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten2.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten2.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten2.copyNb, it->copyNb);
       }
     }
@@ -8170,16 +8969,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = i;
       fileWritten.blockId = i * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -8189,11 +8986,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -8206,9 +9002,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -8233,16 +9027,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = i;
       fileWritten.blockId = i * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -8252,11 +9044,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -8269,9 +9060,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -8296,16 +9085,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = i;
       fileWritten.blockId = i * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -8315,11 +9102,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -8332,9 +9118,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -8351,7 +9135,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8364,7 +9147,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8379,7 +9161,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8394,35 +9175,32 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
   {
     catalogue::TapeFileSearchCriteria searchCriteria;
     searchCriteria.diskInstance = storageClass.diskInstance;
-    searchCriteria.diskFileUser = "public_disk_user";
+    searchCriteria.diskFileOwnerUid = PUBLIC_DISK_USER;
     auto archiveFileItor = m_catalogue->getArchiveFilesItor(searchCriteria);
     const auto m = archiveFileItorToMap(archiveFileItor);
     ASSERT_EQ(nbArchiveFiles, m.size());
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
   {
     catalogue::TapeFileSearchCriteria searchCriteria;
     searchCriteria.diskInstance = storageClass.diskInstance;
-    searchCriteria.diskFileGroup = "public_disk_group";
+    searchCriteria.diskFileGid = PUBLIC_DISK_GROUP;
     auto archiveFileItor = m_catalogue->getArchiveFilesItor(searchCriteria);
     const auto m = archiveFileItorToMap(archiveFileItor);
     ASSERT_EQ(nbArchiveFiles, m.size());
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8436,7 +9214,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8448,7 +9225,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -8461,7 +9237,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -8474,7 +9249,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -8487,7 +9261,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -8500,7 +9273,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -8511,7 +9283,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_many_archive_files) {
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(0, summary.totalBytes);
-    ASSERT_EQ(0, summary.totalCompressedBytes);
     ASSERT_EQ(0, summary.totalFiles);
   }
 }
@@ -8573,14 +9344,12 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         const uint64_t batchSize,
         const common::dataStructures::StorageClass &storageClass,
         const uint64_t &archiveFileSize,
-        const std::string &checksumType,
-        const std::string &checksumValue,
+        const checksum::ChecksumBlob &checksumBlob,
         const std::string &vid,
-        const uint64_t &compressedFileSize,
         const uint64_t &copyNb,
         const std::string &tapeDrive) :
           m_cat(cat), m_barrier(barrier), m_nbArchiveFiles(nbArchiveFiles), m_batchSize(batchSize), m_storageClass(storageClass), m_archiveFileSize(archiveFileSize),
-          m_checksumType(checksumType), m_checksumValue(checksumValue), m_vid(vid), m_compressedFileSize(compressedFileSize), m_copyNb(copyNb), m_tapeDrive(tapeDrive) { }
+          m_checksumBlob(checksumBlob), m_vid(vid), m_copyNb(copyNb), m_tapeDrive(tapeDrive) { }
 
     void run() override {
       for(uint64_t batch=0;batch< 1 + (m_nbArchiveFiles-1)/m_batchSize;++batch) {
@@ -8605,16 +9374,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
           fileWritten.diskInstance = m_storageClass.diskInstance;
           fileWritten.diskFileId = diskFileId.str();
           fileWritten.diskFilePath = diskFilePath.str();
-          fileWritten.diskFileUser = "public_disk_user";
-          fileWritten.diskFileGroup = "public_disk_group";
+          fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+          fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
           fileWritten.size = m_archiveFileSize;
-          fileWritten.checksumType = m_checksumType;
-          fileWritten.checksumValue = m_checksumValue;
+          fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
           fileWritten.storageClassName = m_storageClass.name;
           fileWritten.vid = m_vid;
           fileWritten.fSeq = fn_seq;
           fileWritten.blockId = fn_seq * 100;
-          fileWritten.compressedSize = m_compressedFileSize;
           fileWritten.copyNb = m_copyNb;
           fileWritten.tapeDrive = m_tapeDrive;
           tapeFilesWritten.emplace(fileWrittenUP.release());
@@ -8640,10 +9407,8 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
     const uint64_t m_batchSize;
     const common::dataStructures::StorageClass m_storageClass;
     const uint64_t m_archiveFileSize;
-    const std::string m_checksumType;
-    const std::string m_checksumValue;
+    const checksum::ChecksumBlob m_checksumBlob;
     const std::string m_vid;
-    const uint64_t m_compressedFileSize;
     const uint64_t m_copyNb;
     const std::string m_tapeDrive;
   };
@@ -8683,6 +9448,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
@@ -8722,7 +9488,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
   }
 
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName1, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   {
     const auto pools = m_catalogue->getTapePools();
     ASSERT_EQ(2, pools.size());
@@ -8740,7 +9506,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
   }
 
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName2, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   {
     const auto pools = m_catalogue->getTapePools();
     ASSERT_EQ(2, pools.size());
@@ -8776,6 +9542,8 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -8802,6 +9570,8 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -8823,22 +9593,22 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
   storageClass.comment = "Create storage class";
   m_catalogue->createStorageClass(m_admin, storageClass);
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive1 = "tape_drive1";
   const std::string tapeDrive2 = "tape_drive2";
 
   ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
   const uint64_t nbArchiveFiles = 200; // Must be a multiple of batchsize for this test
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
 
   const uint64_t batchsize = 20;
 
+  checksum::ChecksumBlob checksumBlob;
+  checksumBlob.insert(checksum::ADLER32, "9876");
+
   {
     Barrier barrier(2);
-    filesWrittenThread a(m_catalogue.get(), barrier, nbArchiveFiles, batchsize, storageClass, archiveFileSize, checksumType, checksumValue, vid1, compressedFileSize, 1, tapeDrive1);
-    filesWrittenThread b(catalogue2.get(), barrier, nbArchiveFiles, batchsize, storageClass, archiveFileSize, checksumType, checksumValue, vid2, compressedFileSize, 2, tapeDrive2);
+    filesWrittenThread a(m_catalogue.get(), barrier, nbArchiveFiles, batchsize, storageClass, archiveFileSize, checksumBlob, vid1, 1, tapeDrive1);
+    filesWrittenThread b(catalogue2.get(), barrier, nbArchiveFiles, batchsize, storageClass, archiveFileSize, checksumBlob, vid2, 2, tapeDrive2);
 
     filesWrittenRunner r1(a);
     filesWrittenRunner r2(b);
@@ -8858,7 +9628,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
     ASSERT_EQ(tapePoolName1, pool.name);
     ASSERT_EQ(1, pool.nbTapes);
     ASSERT_EQ(capacityInBytes, pool.capacityBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, pool.dataBytes);
+    ASSERT_EQ(nbArchiveFiles * archiveFileSize, pool.dataBytes);
     ASSERT_EQ(nbArchiveFiles, pool.nbPhysicalFiles);
   }
 
@@ -8892,7 +9662,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
     ASSERT_EQ(tapePoolName2, pool.name);
     ASSERT_EQ(1, pool.nbTapes);
     ASSERT_EQ(capacityInBytes, pool.capacityBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, pool.dataBytes);
+    ASSERT_EQ(nbArchiveFiles * archiveFileSize, pool.dataBytes);
     ASSERT_EQ(nbArchiveFiles, pool.nbPhysicalFiles);
   }
 
@@ -8920,8 +9690,8 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
     searchCriteria.diskInstance = storageClass.diskInstance;
     searchCriteria.diskFileId = std::to_string(12345678);
     searchCriteria.diskFilePath = "/public_dir/public_file_1";
-    searchCriteria.diskFileUser = "public_disk_user";
-    searchCriteria.diskFileGroup = "public_disk_group";
+    searchCriteria.diskFileOwnerUid = PUBLIC_DISK_USER;
+    searchCriteria.diskFileGid = PUBLIC_DISK_GROUP;
     searchCriteria.storageClass = storageClass.name;
     searchCriteria.vid = vid1;
     searchCriteria.tapeFileCopyNb = 1;
@@ -8938,8 +9708,8 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
     ASSERT_EQ(searchCriteria.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(searchCriteria.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(searchCriteria.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(searchCriteria.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(searchCriteria.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(searchCriteria.diskFileOwnerUid, static_cast<uint64_t>(archiveFile.diskFileInfo.owner_uid));
+    ASSERT_EQ(searchCriteria.diskFileGid, static_cast<uint64_t>(archiveFile.diskFileInfo.gid));
     ASSERT_EQ(searchCriteria.storageClass, archiveFile.storageClass);
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     ASSERT_EQ(searchCriteria.vid, archiveFile.tapeFiles.begin()->vid);
@@ -8975,16 +9745,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       fileWritten1.diskInstance = storageClass.diskInstance;
       fileWritten1.diskFileId = diskFileId.str();
       fileWritten1.diskFilePath = diskFilePath.str();
-      fileWritten1.diskFileUser = "public_disk_user";
-      fileWritten1.diskFileGroup = "public_disk_group";
+      fileWritten1.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten1.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten1.size = archiveFileSize;
-      fileWritten1.checksumType = checksumType;
-      fileWritten1.checksumValue = checksumValue;
+      fileWritten1.checksumBlob.insert(checksum::ADLER32, "2468");
       fileWritten1.storageClassName = storageClass.name;
       fileWritten1.vid = vid1;
       fileWritten1.fSeq = seq1;
       fileWritten1.blockId = seq1 * 100;
-      fileWritten1.compressedSize = compressedFileSize;
       fileWritten1.copyNb = 1;
 
       catalogue::TapeFileWritten fileWritten2 = fileWritten1;
@@ -9000,11 +9768,10 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(fileWritten1.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten1.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten1.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten1.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten1.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten1.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten1.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten1.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten1.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten1.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten1.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten1.storageClassName, archiveFile.storageClass);
       ASSERT_EQ(storageClass.nbCopies, archiveFile.tapeFiles.size());
 
@@ -9015,9 +9782,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten1.vid, it->vid);
         ASSERT_EQ(fileWritten1.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten1.blockId, it->blockId);
-        ASSERT_EQ(fileWritten1.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten1.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten1.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten1.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten1.copyNb, it->copyNb);
       }
 
@@ -9028,9 +9793,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten2.vid, it->vid);
         ASSERT_EQ(fileWritten2.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten2.blockId, it->blockId);
-        ASSERT_EQ(fileWritten2.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten2.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten2.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten2.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten2.copyNb, it->copyNb);
       }
     }
@@ -9055,16 +9818,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       fileWritten1.diskInstance = storageClass.diskInstance;
       fileWritten1.diskFileId = diskFileId.str();
       fileWritten1.diskFilePath = diskFilePath.str();
-      fileWritten1.diskFileUser = "public_disk_user";
-      fileWritten1.diskFileGroup = "public_disk_group";
+      fileWritten1.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten1.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten1.size = archiveFileSize;
-      fileWritten1.checksumType = checksumType;
-      fileWritten1.checksumValue = checksumValue;
+      fileWritten1.checksumBlob.insert(checksum::ADLER32, "2468");
       fileWritten1.storageClassName = storageClass.name;
       fileWritten1.vid = vid1;
       fileWritten1.fSeq = seq1;
       fileWritten1.blockId = seq1 * 100;
-      fileWritten1.compressedSize = compressedFileSize;
       fileWritten1.copyNb = 1;
 
       catalogue::TapeFileWritten fileWritten2 = fileWritten1;
@@ -9080,11 +9841,10 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(fileWritten1.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten1.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten1.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten1.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten1.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten1.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten1.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten1.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten1.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten1.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten1.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten1.storageClassName, archiveFile.storageClass);
       ASSERT_EQ(storageClass.nbCopies, archiveFile.tapeFiles.size());
 
@@ -9095,9 +9855,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten1.vid, it->vid);
         ASSERT_EQ(fileWritten1.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten1.blockId, it->blockId);
-        ASSERT_EQ(fileWritten1.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten1.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten1.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten1.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten1.copyNb, it->copyNb);
       }
 
@@ -9108,9 +9866,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten2.vid, it->vid);
         ASSERT_EQ(fileWritten2.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten2.blockId, it->blockId);
-        ASSERT_EQ(fileWritten2.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten2.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten2.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten2.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten2.copyNb, it->copyNb);
       }
     }
@@ -9138,16 +9894,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = seq;
       fileWritten.blockId = seq * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -9157,11 +9911,10 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -9174,9 +9927,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -9204,16 +9955,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = seq;
       fileWritten.blockId = seq * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -9223,11 +9972,10 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -9240,9 +9988,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -9270,16 +10016,14 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = "public_disk_user";
-      fileWritten.diskFileGroup = "public_disk_group";
+      fileWritten.diskFileOwnerUid = PUBLIC_DISK_USER;
+      fileWritten.diskFileGid = PUBLIC_DISK_GROUP;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(checksum::ADLER32, "1357");
       fileWritten.storageClassName = storageClass.name;
       fileWritten.vid = vid;
       fileWritten.fSeq = seq;
       fileWritten.blockId = seq * 100;
-      fileWritten.compressedSize = compressedFileSize;
       fileWritten.copyNb = copyNb;
 
       const auto idAndFile = m.find(i);
@@ -9289,11 +10033,10 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
       ASSERT_EQ(fileWritten.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(fileWritten.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(fileWritten.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(fileWritten.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(fileWritten.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(fileWritten.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(fileWritten.diskFileGid, archiveFile.diskFileInfo.gid);
       ASSERT_EQ(fileWritten.size, archiveFile.fileSize);
-      ASSERT_EQ(fileWritten.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(fileWritten.storageClassName, archiveFile.storageClass);
 
       // There is only one tape copy because repack only want the tape file on a
@@ -9306,9 +10049,7 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
         ASSERT_EQ(fileWritten.vid, it->vid);
         ASSERT_EQ(fileWritten.fSeq, it->fSeq);
         ASSERT_EQ(fileWritten.blockId, it->blockId);
-        ASSERT_EQ(fileWritten.compressedSize, it->compressedSize);
-        ASSERT_EQ(fileWritten.checksumType, it->checksumType);
-        ASSERT_EQ(fileWritten.checksumValue, it->checksumValue);
+        ASSERT_EQ(fileWritten.checksumBlob, it->checksumBlob);
         ASSERT_EQ(fileWritten.copyNb, it->copyNb);
       }
     }
@@ -9325,7 +10066,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9338,7 +10078,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9353,7 +10092,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9368,35 +10106,32 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(storageClass.nbCopies, summary.totalFiles);
   }
 
   {
     catalogue::TapeFileSearchCriteria searchCriteria;
     searchCriteria.diskInstance = storageClass.diskInstance;
-    searchCriteria.diskFileUser = "public_disk_user";
+    searchCriteria.diskFileOwnerUid     = PUBLIC_DISK_USER;
     auto archiveFileItor = m_catalogue->getArchiveFilesItor(searchCriteria);
     const auto m = archiveFileItorToMap(archiveFileItor);
     ASSERT_EQ(nbArchiveFiles, m.size());
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
   {
     catalogue::TapeFileSearchCriteria searchCriteria;
     searchCriteria.diskInstance = storageClass.diskInstance;
-    searchCriteria.diskFileGroup = "public_disk_group";
+    searchCriteria.diskFileGid = PUBLIC_DISK_GROUP;
     auto archiveFileItor = m_catalogue->getArchiveFilesItor(searchCriteria);
     const auto m = archiveFileItorToMap(archiveFileItor);
     ASSERT_EQ(nbArchiveFiles, m.size());
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9410,7 +10145,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9422,7 +10156,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles * storageClass.nbCopies, summary.totalFiles);
   }
 
@@ -9435,7 +10168,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -9448,7 +10180,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -9461,7 +10192,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -9474,7 +10204,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(nbArchiveFiles * archiveFileSize, summary.totalBytes);
-    ASSERT_EQ(nbArchiveFiles * compressedFileSize, summary.totalCompressedBytes);
     ASSERT_EQ(nbArchiveFiles, summary.totalFiles);
   }
 
@@ -9485,7 +10214,6 @@ TEST_P(cta_catalogue_CatalogueTest, DISABLED_concurrent_filesWrittenToTape_many_
 
     const common::dataStructures::ArchiveFileSummary summary = m_catalogue->getTapeFileSummary(searchCriteria);
     ASSERT_EQ(0, summary.totalBytes);
-    ASSERT_EQ(0, summary.totalCompressedBytes);
     ASSERT_EQ(0, summary.totalFiles);
   }
 }
@@ -9507,14 +10235,15 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -9534,6 +10263,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -9559,6 +10292,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -9587,8 +10324,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -9599,16 +10334,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -9628,14 +10361,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -9644,9 +10376,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -9658,16 +10388,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -9688,14 +10416,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -9705,9 +10432,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
     auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
@@ -9716,9 +10441,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
     ASSERT_EQ(file2Written.copyNb, tapeFile2.copyNb);
   }
 }
@@ -9740,14 +10463,15 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -9767,6 +10491,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -9792,6 +10518,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -9820,8 +10548,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -9832,16 +10558,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -9861,14 +10585,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -9877,9 +10600,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -9891,16 +10612,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 1;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -9921,14 +10640,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     // If there are two or more tape copies with the same copy number then
     // only one of them will be active (<=> supersededByVid.empty()).
@@ -9945,9 +10663,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(fileWritten.vid, tapeFile.vid);
       ASSERT_EQ(fileWritten.fSeq, tapeFile.fSeq);
       ASSERT_EQ(fileWritten.blockId, tapeFile.blockId);
-      ASSERT_EQ(fileWritten.compressedSize, tapeFile.compressedSize);
-      ASSERT_EQ(fileWritten.checksumType, tapeFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, tapeFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, tapeFile.checksumBlob);
       ASSERT_EQ(fileWritten.copyNb, tapeFile.copyNb);
     }
   }
@@ -9969,12 +10685,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -9994,6 +10711,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10022,8 +10743,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -10034,16 +10753,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -10063,14 +10780,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10079,9 +10795,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -10093,16 +10807,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid1;
   file2Written.fSeq                 = 2;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 1;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -10123,14 +10835,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     // If there are two or more tape copies with the same copy number then
     // only one of them will be active (<=> supersededByVid.empty()).
@@ -10147,14 +10858,163 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(fileWritten.vid, tapeFile.vid);
       ASSERT_EQ(fileWritten.fSeq, tapeFile.fSeq);
       ASSERT_EQ(fileWritten.blockId, tapeFile.blockId);
-      ASSERT_EQ(fileWritten.compressedSize, tapeFile.compressedSize);
-      ASSERT_EQ(fileWritten.checksumType, tapeFile.checksumType);
-      ASSERT_EQ(fileWritten.checksumValue, tapeFile.checksumValue);
+      ASSERT_EQ(fileWritten.checksumBlob, tapeFile.checksumBlob);
       ASSERT_EQ(fileWritten.copyNb, tapeFile.copyNb);
     }
   }
 }
 
+TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_copies_same_fseq_same_tape) {
+  using namespace cta;
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
+    {
+      auto it = vidToTape.find(vid1);
+      const common::dataStructures::Tape &tape = it->second;
+      ASSERT_EQ(vid1, tape.vid);
+      ASSERT_EQ(mediaType, tape.mediaType);
+      ASSERT_EQ(vendor, tape.vendor);
+      ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+      ASSERT_EQ(tapePoolName, tape.tapePoolName);
+      ASSERT_EQ(vo, tape.vo);
+      ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+      ASSERT_TRUE(disabledValue == tape.disabled);
+      ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
+      ASSERT_EQ(comment, tape.comment);
+      ASSERT_FALSE(tape.labelLog);
+      ASSERT_FALSE(tape.lastReadLog);
+      ASSERT_FALSE(tape.lastWriteLog);
+
+      const common::dataStructures::EntryLog creationLog = tape.creationLog;
+      ASSERT_EQ(m_admin.username, creationLog.username);
+      ASSERT_EQ(m_admin.host, creationLog.host);
+
+      const common::dataStructures::EntryLog lastModificationLog =
+        tape.lastModificationLog;
+      ASSERT_EQ(creationLog, lastModificationLog);
+    }
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = "disk_instance";
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 2;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet);
+
+  {
+    catalogue::TapeSearchCriteria searchCriteria;
+    searchCriteria.vid = file1Written.vid;
+    std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes(searchCriteria);
+    ASSERT_EQ(1, tapes.size());
+    const common::dataStructures::Tape &tape = tapes.front();
+    ASSERT_EQ(1, tape.lastFSeq);
+  }
+
+  {
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  auto file2WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file2Written = *file2WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file2WrittenSet;
+  file2WrittenSet.insert(file2WrittenUP.release());
+  file2Written.archiveFileId        = file1Written.archiveFileId;
+  file2Written.diskInstance         = file1Written.diskInstance;
+  file2Written.diskFileId           = file1Written.diskFileId;
+  file2Written.diskFilePath         = file1Written.diskFilePath;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
+  file2Written.size                 = archiveFileSize;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
+  file2Written.storageClassName     = storageClass.name;
+  file2Written.vid                  = vid1;
+  file2Written.fSeq                 = file1Written.fSeq;
+  file2Written.blockId              = 4331;
+  file2Written.copyNb               = 2;
+  file2Written.tapeDrive            = tapeDrive;
+  ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet), exception::TapeFseqMismatch);
+}
+
 TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_copies_different_sizes) {
   using namespace cta;
 
@@ -10172,14 +11032,15 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -10199,6 +11060,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10224,6 +11089,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10252,8 +11119,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize1 = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -10264,16 +11129,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize1;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -10293,14 +11156,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10309,9 +11171,7 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -10325,16 +11185,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize2;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet),
@@ -10358,14 +11216,15 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -10385,6 +11244,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10410,6 +11273,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10438,8 +11305,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType1 = "checksum_type_1";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -10450,16 +11315,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType1;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -10479,14 +11342,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10495,14 +11357,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
-  const std::string checksumType2 = "checksum_type_2";
-
   auto file2WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file2Written = *file2WrittenUP;
   std::set<cta::catalogue::TapeItemWrittenPointer> file2WrittenSet;
@@ -10511,20 +11369,17 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType2;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob.insert(checksum::CRC32, "1234");
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
-  ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet),
-    catalogue::ChecksumTypeMismatch);
+  ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet), exception::ChecksumTypeMismatch);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_copies_different_checksum_values) {
@@ -10544,14 +11399,15 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -10571,6 +11427,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10596,6 +11454,8 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10624,8 +11484,6 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue1 = "checksum_value_1";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -10636,16 +11494,14 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue1;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -10665,14 +11521,13 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10681,13 +11536,10 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
-  const std::string checksumValue2 = "checksum_value_2";
 
   auto file2WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file2Written = *file2WrittenUP;
@@ -10697,20 +11549,17 @@ TEST_P(cta_catalogue_CatalogueTest, filesWrittenToTape_1_archive_file_2_tape_cop
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue2;
+  file2Written.checksumBlob.insert(checksum::ADLER32, "5678");
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
-  ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet),
-    catalogue::ChecksumValueMismatch);
+  ASSERT_THROW(m_catalogue->filesWrittenToTape(file2WrittenSet), exception::ChecksumValueMismatch);
 }
 
 TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
@@ -10730,14 +11579,15 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -10757,6 +11607,10 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
+      ASSERT_EQ(0, tape.readMountCount);
+      ASSERT_EQ(0, tape.writeMountCount);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10782,6 +11636,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -10810,8 +11666,6 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -10822,16 +11676,14 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -10857,14 +11709,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10873,9 +11724,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -10885,14 +11734,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -10901,9 +11749,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -10915,16 +11761,14 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob         = file1Written.checksumBlob;
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -10953,14 +11797,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
       ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
       ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-      ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
       ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
       ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -10970,9 +11813,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
       ASSERT_EQ(file1Written.vid, tapeFile1.vid);
       ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
       ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-      ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-      ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-      ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+      ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
       ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
       auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
@@ -10981,9 +11822,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
       ASSERT_EQ(file2Written.vid, tapeFile2.vid);
       ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
       ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-      ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-      ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-      ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+      ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
       ASSERT_EQ(file2Written.copyNb, tapeFile2.copyNb);
     }
   }
@@ -10994,14 +11833,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -11011,9 +11849,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
     auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
@@ -11022,9 +11858,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile) {
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
     ASSERT_EQ(file2Written.copyNb, tapeFile2.copyNb);
   }
 
@@ -11051,14 +11885,15 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -11078,6 +11913,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -11103,6 +11940,8 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
       ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
       ASSERT_TRUE(disabledValue == tape.disabled);
       ASSERT_TRUE(fullValue == tape.full);
+      ASSERT_TRUE(readOnlyValue == tape.readOnly);
+      ASSERT_FALSE(tape.isFromCastor);
       ASSERT_EQ(comment, tape.comment);
       ASSERT_FALSE(tape.labelLog);
       ASSERT_FALSE(tape.lastReadLog);
@@ -11131,8 +11970,6 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
   m_catalogue->createStorageClass(m_admin, storageClass);
 
   const uint64_t archiveFileSize = 1;
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
@@ -11143,16 +11980,14 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -11178,14 +12013,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -11194,9 +12028,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -11206,14 +12038,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -11222,9 +12053,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -11236,16 +12065,14 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
   file2Written.diskInstance         = file1Written.diskInstance;
   file2Written.diskFileId           = file1Written.diskFileId;
   file2Written.diskFilePath         = file1Written.diskFilePath;
-  file2Written.diskFileUser         = file1Written.diskFileUser;
-  file2Written.diskFileGroup        = file1Written.diskFileGroup;
+  file2Written.diskFileOwnerUid     = file1Written.diskFileOwnerUid;
+  file2Written.diskFileGid          = file1Written.diskFileGid;
   file2Written.size                 = archiveFileSize;
-  file2Written.checksumType         = checksumType;
-  file2Written.checksumValue        = checksumValue;
+  file2Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file2Written.storageClassName     = storageClass.name;
   file2Written.vid                  = vid2;
   file2Written.fSeq                 = 1;
   file2Written.blockId              = 4331;
-  file2Written.compressedSize       = 1;
   file2Written.copyNb               = 2;
   file2Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file2WrittenSet);
@@ -11274,14 +12101,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
       ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
       ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
       ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-      ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-      ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+      ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
       ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
       ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
       ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-      ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-      ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+      ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+      ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
       ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -11291,9 +12117,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
       ASSERT_EQ(file1Written.vid, tapeFile1.vid);
       ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
       ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-      ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-      ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-      ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+      ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
       ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
       auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
@@ -11302,9 +12126,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
       ASSERT_EQ(file2Written.vid, tapeFile2.vid);
       ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
       ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-      ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-      ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-      ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+      ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
       ASSERT_EQ(file2Written.copyNb, tapeFile2.copyNb);
     }
   }
@@ -11315,14 +12137,13 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file2Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file2Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file2Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file2Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file2Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file2Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file2Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file2Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file2Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file2Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file2Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(2, archiveFile.tapeFiles.size());
 
@@ -11332,9 +12153,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
 
     auto copyNbToTapeFile2Itor = archiveFile.tapeFiles.find(2);
@@ -11343,9 +12162,7 @@ TEST_P(cta_catalogue_CatalogueTest, deleteArchiveFile_by_archive_file_id_of_anot
     ASSERT_EQ(file2Written.vid, tapeFile2.vid);
     ASSERT_EQ(file2Written.fSeq, tapeFile2.fSeq);
     ASSERT_EQ(file2Written.blockId, tapeFile2.blockId);
-    ASSERT_EQ(file2Written.compressedSize, tapeFile2.compressedSize);
-    ASSERT_EQ(file2Written.checksumType, tapeFile2.checksumType);
-    ASSERT_EQ(file2Written.checksumValue, tapeFile2.checksumValue);
+    ASSERT_EQ(file2Written.checksumBlob, tapeFile2.checksumBlob);
     ASSERT_EQ(file2Written.copyNb, tapeFile2.copyNb);
   }
 
@@ -11400,6 +12217,7 @@ TEST_P(cta_catalogue_CatalogueTest, getAllTapes_many_tapes) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
@@ -11411,7 +12229,7 @@ TEST_P(cta_catalogue_CatalogueTest, getAllTapes_many_tapes) {
     vid << "V" << std::setfill('0') << std::setw(5) << i;
     const std::string tapeComment = "Create tape " + vid.str();
     m_catalogue->createTape(m_admin, vid.str(), mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-      disabledValue, fullValue, tapeComment);
+      disabledValue, fullValue, readOnlyValue, tapeComment);
   }
 
   const auto vidToTapeMap = m_catalogue->getAllTapes();
@@ -11434,6 +12252,10 @@ TEST_P(cta_catalogue_CatalogueTest, getAllTapes_many_tapes) {
     ASSERT_EQ(capacityInBytes, tapeItor->second.capacityInBytes);
     ASSERT_EQ(disabledValue, tapeItor->second.disabled);
     ASSERT_EQ(fullValue, tapeItor->second.full);
+    ASSERT_EQ(readOnlyValue, tapeItor->second.readOnly);
+    ASSERT_FALSE(tapeItor->second.isFromCastor);
+    ASSERT_EQ(0, tapeItor->second.readMountCount);
+    ASSERT_EQ(0, tapeItor->second.writeMountCount);
     ASSERT_EQ(tapeComment, tapeItor->second.comment);
   }
 }
@@ -12357,6 +13179,722 @@ TEST_P(cta_catalogue_CatalogueTest, modifyDiskSystemCommentL_emptyStringComment)
     catalogue::UserSpecifiedAnEmptyStringComment);
 }
 
+TEST_P(cta_catalogue_CatalogueTest, getNbFilesOnTape_no_tape_files) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string vid = "vid";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  ASSERT_EQ(0, m_catalogue->getNbFilesOnTape(vid));
+}
+
+TEST_P(cta_catalogue_CatalogueTest, getNbFilesOnTape_one_tape_file) {
+  using namespace cta;
+
+  const std::string diskInstanceName1 = "disk_instance_1";
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string createTapeComment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid1, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(createTapeComment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = diskInstanceName1;
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet);
+
+  {
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  ASSERT_EQ(1, m_catalogue->getNbFilesOnTape(vid1));
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_no_tape_files) {
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string vid = "vid";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string comment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, comment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(comment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  ASSERT_NO_THROW(m_catalogue->checkTapeForLabel(vid));
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_one_tape_file) {
+  using namespace cta;
+
+  const std::string diskInstanceName1 = "disk_instance_1";
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string createTapeComment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
+
+  {
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid1, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(createTapeComment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = diskInstanceName1;
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release());
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet);
+
+  {
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  ASSERT_THROW(m_catalogue->checkTapeForLabel(vid1), exception::UserError);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_one_tape_file_reclaimed_tape) { // label refused while a file exists; allowed again after delete + reclaim
+  using namespace cta;
+
+  const std::string diskInstanceName1 = "disk_instance_1";
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid1 = "VID123";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string createTapeComment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
+
+  { // sanity-check the freshly created, empty tape
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+
+    ASSERT_EQ(1, tapes.size());
+
+    const common::dataStructures::Tape tape = tapes.front();
+    ASSERT_EQ(vid1, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(createTapeComment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog = tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = diskInstanceName1;
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release()); // set takes ownership of the event
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet); // record one tape file on vid1
+
+  { // verify the archive file round-trips through the catalogue
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  ASSERT_THROW(m_catalogue->checkTapeForLabel(vid1), exception::UserError); // tape still holds a file: labelling refused
+
+  log::LogContext dummyLc(m_dummyLog);
+  m_catalogue->deleteArchiveFile(diskInstanceName1, archiveFileId, dummyLc); // remove the only file on the tape
+
+  m_catalogue->setTapeFull(m_admin, vid1, true); // a tape must be full before it can be reclaimed
+  m_catalogue->reclaimTape(m_admin, vid1);
+
+  ASSERT_NO_THROW(m_catalogue->checkTapeForLabel(vid1)); // reclaimed tape is empty: labelling allowed
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_one_tape_file_superseded) { // a tape whose only file copy was superseded (repacked to vid2) still refuses labelling
+  using namespace cta;
+
+  const std::string diskInstanceName1 = "disk_instance_1";
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid1 = "VID123";
+  const std::string vid2 = "VID234";
+  const std::string mediaType = "media_type";
+  const std::string vendor = "vendor";
+  const std::string logicalLibraryName = "logical_library_name";
+  const bool logicalLibraryIsDisabled= false;
+  const std::string tapePoolName = "tape_pool_name";
+  const std::string vo = "vo";
+  const uint64_t nbPartialTapes = 2;
+  const bool isEncrypted = true;
+  const cta::optional<std::string> supply("value for the supply pool mechanism");
+  const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
+  const bool disabledValue = true;
+  const bool fullValue = false;
+  const bool readOnlyValue = true;
+  const std::string createTapeComment = "Create tape";
+
+  m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
+  m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
+  m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
+  m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
+
+  { // sanity-check the freshly created, empty vid1
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+    const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
+    ASSERT_EQ(2, vidToTape.size());
+
+    auto it = vidToTape.find(vid1);
+    const common::dataStructures::Tape &tape = it->second;
+    ASSERT_EQ(vid1, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(0, tape.dataOnTapeInBytes);
+    ASSERT_EQ(0, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(createTapeComment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_FALSE(tape.lastWriteLog);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog =
+      tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  // Record initial tape file
+
+  const uint64_t archiveFileId = 1234;
+
+  ASSERT_FALSE(m_catalogue->getArchiveFilesItor().hasMore());
+  ASSERT_THROW(m_catalogue->getArchiveFileById(archiveFileId), exception::Exception);
+
+  common::dataStructures::StorageClass storageClass;
+  storageClass.diskInstance = diskInstanceName1;
+  storageClass.name = "storage_class";
+  storageClass.nbCopies = 1;
+  storageClass.comment = "Create storage class";
+  m_catalogue->createStorageClass(m_admin, storageClass);
+
+  const uint64_t archiveFileSize = 1;
+  const std::string tapeDrive = "tape_drive";
+
+  auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1Written = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenSet;
+  file1WrittenSet.insert(file1WrittenUP.release()); // set takes ownership of the event
+  file1Written.archiveFileId        = archiveFileId;
+  file1Written.diskInstance         = storageClass.diskInstance;
+  file1Written.diskFileId           = "5678";
+  file1Written.diskFilePath         = "/public_dir/public_file";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1Written.size                 = archiveFileSize;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1Written.storageClassName     = storageClass.name;
+  file1Written.vid                  = vid1;
+  file1Written.fSeq                 = 1;
+  file1Written.blockId              = 4321;
+  // size already set to archiveFileSize above (removed redundant duplicate assignment)
+  file1Written.copyNb               = 1;
+  file1Written.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenSet); // record the initial copy on vid1
+
+  { // verify the archive file round-trips through the catalogue
+    const common::dataStructures::ArchiveFile archiveFile = m_catalogue->getArchiveFileById(archiveFileId);
+
+    ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
+    ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
+    ASSERT_EQ(file1Written.size, archiveFile.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
+    ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
+
+    ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
+    ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
+
+    ASSERT_EQ(1, archiveFile.tapeFiles.size());
+    auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
+    ASSERT_FALSE(copyNbToTapeFile1Itor == archiveFile.tapeFiles.end());
+    const common::dataStructures::TapeFile &tapeFile1 = *copyNbToTapeFile1Itor;
+    ASSERT_EQ(file1Written.vid, tapeFile1.vid);
+    ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
+    ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
+    ASSERT_EQ(file1Written.size, tapeFile1.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
+    ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
+  }
+
+  { // verify vid1 now reports the written data and a last-write log
+    const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
+    const std::map<std::string, common::dataStructures::Tape> vidToTape = tapeListToMap(tapes);
+    ASSERT_EQ(2, vidToTape.size());
+
+    auto it = vidToTape.find(vid1);
+    const common::dataStructures::Tape &tape = it->second;
+    ASSERT_EQ(vid1, tape.vid);
+    ASSERT_EQ(mediaType, tape.mediaType);
+    ASSERT_EQ(vendor, tape.vendor);
+    ASSERT_EQ(file1Written.size, tape.dataOnTapeInBytes);
+    ASSERT_EQ(1, tape.lastFSeq);
+    ASSERT_EQ(logicalLibraryName, tape.logicalLibraryName);
+    ASSERT_EQ(tapePoolName, tape.tapePoolName);
+    ASSERT_EQ(vo, tape.vo);
+    ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
+    ASSERT_TRUE(disabledValue == tape.disabled);
+    ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
+    ASSERT_EQ(createTapeComment, tape.comment);
+    ASSERT_FALSE(tape.labelLog);
+    ASSERT_FALSE(tape.lastReadLog);
+    ASSERT_TRUE((bool)tape.lastWriteLog);
+    ASSERT_EQ(tapeDrive, tape.lastWriteLog.value().drive);
+
+    const common::dataStructures::EntryLog creationLog = tape.creationLog;
+    ASSERT_EQ(m_admin.username, creationLog.username);
+    ASSERT_EQ(m_admin.host, creationLog.host);
+
+    const common::dataStructures::EntryLog lastModificationLog =
+      tape.lastModificationLog;
+    ASSERT_EQ(creationLog, lastModificationLog);
+  }
+
+  m_catalogue->setTapeFull(m_admin, vid1, true);
+
+  // Record superseding tape file
+
+  file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
+  auto & file1WrittenAgain = *file1WrittenUP;
+  std::set<cta::catalogue::TapeItemWrittenPointer> file1WrittenAgainSet;
+  file1WrittenAgainSet.insert(file1WrittenUP.release()); // set takes ownership of the event
+  file1WrittenAgain.archiveFileId        = archiveFileId;
+  file1WrittenAgain.diskInstance         = storageClass.diskInstance;
+  file1WrittenAgain.diskFileId           = "5678";
+  file1WrittenAgain.diskFilePath         = "/public_dir/public_file";
+  file1WrittenAgain.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1WrittenAgain.diskFileGid          = PUBLIC_DISK_GROUP;
+  file1WrittenAgain.size                 = archiveFileSize;
+  file1WrittenAgain.checksumBlob.insert(checksum::ADLER32, "1234");
+  file1WrittenAgain.storageClassName     = storageClass.name;
+  file1WrittenAgain.vid                  = vid2;
+  file1WrittenAgain.fSeq                 = 1;
+  file1WrittenAgain.blockId              = 4321;
+  // size already set to archiveFileSize above (removed redundant duplicate assignment)
+  file1WrittenAgain.copyNb               = 1;
+  file1WrittenAgain.tapeDrive            = tapeDrive;
+  m_catalogue->filesWrittenToTape(file1WrittenAgainSet); // same copyNb on vid2 supersedes the copy on vid1
+
+  common::dataStructures::ArchiveFile repackedFile = m_catalogue->getArchiveFileById(archiveFileId);
+  ASSERT_EQ(2, repackedFile.tapeFiles.size());
+  for (auto &tf: repackedFile.tapeFiles) {
+    if (tf.vid == vid1)
+      ASSERT_EQ(vid2, tf.supersededByVid);
+    else
+      ASSERT_EQ("", tf.supersededByVid);
+  }
+
+  ASSERT_THROW(m_catalogue->checkTapeForLabel(vid1), exception::UserError); // superseded file still counts: labelling refused
+
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_not_in_the_catalogue) { // unknown vid must be rejected with a UserError
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid = "vid";
+
+  ASSERT_THROW(m_catalogue->checkTapeForLabel(vid), exception::UserError);
+}
+
+TEST_P(cta_catalogue_CatalogueTest, checkTapeForLabel_empty_vid) { // empty vid must be rejected with a UserError
+  using namespace cta;
+
+  ASSERT_TRUE(m_catalogue->getTapes().empty());
+
+  const std::string vid = "";
+
+  ASSERT_THROW(m_catalogue->checkTapeForLabel(vid), exception::UserError);
+}
+
 TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_0_no_tape_files) {
   using namespace cta;
 
@@ -12375,12 +13913,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_0_no_tape_files) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -12398,6 +13937,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_0_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12431,6 +13972,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_0_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_FALSE(tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12460,12 +14003,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_not_full_lastFSeq_0_no_tape_file
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string comment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -12484,6 +14028,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_not_full_lastFSeq_0_no_tape_file
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(comment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12520,12 +14066,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string createTapeComment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -12545,6 +14092,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12573,8 +14122,6 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
 
   const uint64_t archiveFileSize = 1;
   const std::string tapeDrive = "tape_drive";
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file1Written = *file1WrittenUP;
@@ -12584,16 +14131,14 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -12604,14 +14149,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -12620,9 +14164,7 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -12643,6 +14185,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12680,6 +14224,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12715,6 +14261,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_no_tape_files) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12747,12 +14295,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string createTapeComment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -12772,6 +14321,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12800,8 +14351,6 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
 
   const uint64_t archiveFileSize = 1;
   const std::string tapeDrive = "tape_drive";
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file1Written = *file1WrittenUP;
@@ -12811,16 +14360,14 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -12831,14 +14378,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -12847,9 +14393,7 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -12871,6 +14415,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file) {
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12911,14 +14457,15 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = true;
   const bool fullValue = false;
+  const bool readOnlyValue = true;
   const std::string createTapeComment = "Create tape";
 
   m_catalogue->createLogicalLibrary(m_admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
   m_catalogue->createTapePool(m_admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
   m_catalogue->createTape(m_admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
   m_catalogue->createTape(m_admin, vid2, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-    disabledValue, fullValue, createTapeComment);
+    disabledValue, fullValue, readOnlyValue, createTapeComment);
 
   {
     const std::list<common::dataStructures::Tape> tapes = m_catalogue->getTapes();
@@ -12938,6 +14485,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -12968,8 +14517,6 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
 
   const uint64_t archiveFileSize = 1;
   const std::string tapeDrive = "tape_drive";
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
 
   auto file1WrittenUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
   auto & file1Written = *file1WrittenUP;
@@ -12979,16 +14526,15 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
   file1Written.diskInstance         = storageClass.diskInstance;
   file1Written.diskFileId           = "5678";
   file1Written.diskFilePath         = "/public_dir/public_file";
-  file1Written.diskFileUser         = "public_disk_user";
-  file1Written.diskFileGroup        = "public_disk_group";
+  file1Written.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1Written.diskFileGid          = PUBLIC_DISK_GROUP;
   file1Written.size                 = archiveFileSize;
-  file1Written.checksumType         = checksumType;
-  file1Written.checksumValue        = checksumValue;
+  file1Written.checksumBlob.insert(checksum::ADLER32, "1234");
   file1Written.storageClassName     = storageClass.name;
   file1Written.vid                  = vid1;
   file1Written.fSeq                 = 1;
   file1Written.blockId              = 4321;
-  file1Written.compressedSize       = 1;
+  file1Written.size                 = 1;
   file1Written.copyNb               = 1;
   file1Written.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenSet);
@@ -12999,14 +14545,13 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
     ASSERT_EQ(file1Written.archiveFileId, archiveFile.archiveFileID);
     ASSERT_EQ(file1Written.diskFileId, archiveFile.diskFileId);
     ASSERT_EQ(file1Written.size, archiveFile.fileSize);
-    ASSERT_EQ(file1Written.checksumType, archiveFile.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, archiveFile.checksumValue);
+    ASSERT_EQ(file1Written.checksumBlob, archiveFile.checksumBlob);
     ASSERT_EQ(file1Written.storageClassName, archiveFile.storageClass);
 
     ASSERT_EQ(file1Written.diskInstance, archiveFile.diskInstance);
     ASSERT_EQ(file1Written.diskFilePath, archiveFile.diskFileInfo.path);
-    ASSERT_EQ(file1Written.diskFileUser, archiveFile.diskFileInfo.owner);
-    ASSERT_EQ(file1Written.diskFileGroup, archiveFile.diskFileInfo.group);
+    ASSERT_EQ(file1Written.diskFileOwnerUid, archiveFile.diskFileInfo.owner_uid);
+    ASSERT_EQ(file1Written.diskFileGid, archiveFile.diskFileInfo.gid);
 
     ASSERT_EQ(1, archiveFile.tapeFiles.size());
     auto copyNbToTapeFile1Itor = archiveFile.tapeFiles.find(1);
@@ -13015,9 +14560,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
     ASSERT_EQ(file1Written.vid, tapeFile1.vid);
     ASSERT_EQ(file1Written.fSeq, tapeFile1.fSeq);
     ASSERT_EQ(file1Written.blockId, tapeFile1.blockId);
-    ASSERT_EQ(file1Written.compressedSize, tapeFile1.compressedSize);
-    ASSERT_EQ(file1Written.checksumType, tapeFile1.checksumType);
-    ASSERT_EQ(file1Written.checksumValue, tapeFile1.checksumValue);
+    ASSERT_EQ(file1Written.size, tapeFile1.fileSize);
+    ASSERT_EQ(file1Written.checksumBlob, tapeFile1.checksumBlob);
     ASSERT_EQ(file1Written.copyNb, tapeFile1.copyNb);
   }
 
@@ -13039,6 +14583,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(disabledValue == tape.disabled);
     ASSERT_TRUE(fullValue == tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
@@ -13066,16 +14612,15 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
   file1WrittenAgain.diskInstance         = storageClass.diskInstance;
   file1WrittenAgain.diskFileId           = "5678";
   file1WrittenAgain.diskFilePath         = "/public_dir/public_file";
-  file1WrittenAgain.diskFileUser         = "public_disk_user";
-  file1WrittenAgain.diskFileGroup        = "public_disk_group";
+  file1WrittenAgain.diskFileOwnerUid     = PUBLIC_DISK_USER;
+  file1WrittenAgain.diskFileGid          = PUBLIC_DISK_GROUP;
   file1WrittenAgain.size                 = archiveFileSize;
-  file1WrittenAgain.checksumType         = checksumType;
-  file1WrittenAgain.checksumValue        = checksumValue;
+  file1WrittenAgain.checksumBlob.insert(checksum::ADLER32, "1234");
   file1WrittenAgain.storageClassName     = storageClass.name;
   file1WrittenAgain.vid                  = vid2;
   file1WrittenAgain.fSeq                 = 1;
   file1WrittenAgain.blockId              = 4321;
-  file1WrittenAgain.compressedSize       = 1;
+  file1WrittenAgain.size                 = 1;
   file1WrittenAgain.copyNb               = 1;
   file1WrittenAgain.tapeDrive            = tapeDrive;
   m_catalogue->filesWrittenToTape(file1WrittenAgainSet);
@@ -13105,6 +14650,8 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
     ASSERT_EQ(capacityInBytes, tape.capacityInBytes);
     ASSERT_TRUE(tape.disabled);
     ASSERT_FALSE(tape.full);
+    ASSERT_TRUE(readOnlyValue == tape.readOnly);
+    ASSERT_FALSE(tape.isFromCastor);
     ASSERT_EQ(createTapeComment, tape.comment);
     ASSERT_FALSE(tape.labelLog);
     ASSERT_FALSE(tape.lastReadLog);
diff --git a/catalogue/DummyCatalogue.hpp b/catalogue/DummyCatalogue.hpp
index c2f06ef66a216ae30f3039fdcb0bf6370da0a666..4faf50c85f9a151d491128c8dfa6083006659c88 100644
--- a/catalogue/DummyCatalogue.hpp
+++ b/catalogue/DummyCatalogue.hpp
@@ -42,7 +42,7 @@ public:
   void createRequesterGroupMountRule(const common::dataStructures::SecurityIdentity& admin, const std::string& mountPolicyName, const std::string& diskInstanceName, const std::string& requesterGroupName, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createRequesterMountRule(const common::dataStructures::SecurityIdentity& admin, const std::string& mountPolicyName, const std::string& diskInstance, const std::string& requesterName, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createStorageClass(const common::dataStructures::SecurityIdentity& admin, const common::dataStructures::StorageClass& storageClass) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
-  void createTape(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const std::string &mediaType, const std::string &vendor, const std::string& logicalLibraryName, const std::string& tapePoolName, const uint64_t capacityInBytes, const bool disabled, const bool full, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void createTape(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const std::string &mediaType, const std::string &vendor, const std::string& logicalLibraryName, const std::string& tapePoolName, const uint64_t capacityInBytes, const bool disabled, const bool full, const bool readOnly, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createTapePool(const common::dataStructures::SecurityIdentity& admin, const std::string& name, const std::string & vo, const uint64_t nbPartialTapes, const bool encryptionValue, const cta::optional<std::string> &supply, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& acttivity) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteAdminUser(const std::string& username) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
@@ -116,13 +116,18 @@ public:
   void noSpaceLeftOnTape(const std::string& vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void ping() override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   std::map<std::string, uint64_t> getSchemaVersion() const override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
-  uint64_t checkAndGetNextArchiveFileId(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  uint64_t checkAndGetNextArchiveFileId(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   common::dataStructures::ArchiveFileQueueCriteria getArchiveFileQueueCriteria(const std::string &diskInstanceName,
-    const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
-  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+    const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::RequesterIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void reclaimTape(const common::dataStructures::SecurityIdentity& admin, const std::string& vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void checkTapeForLabel(const std::string& vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  uint64_t getNbFilesOnTape(const std::string& vid) const  override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void setTapeDisabled(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const bool disabledValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void setTapeFull(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const bool fullValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void setTapeReadOnly(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool readOnlyValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void setTapeReadOnlyOnError(const std::string &vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void setTapeIsFromCastorInUnitTests(const std::string &vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void setTapePoolEncryption(const common::dataStructures::SecurityIdentity& admin, const std::string& name, const bool encryptionValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   bool tapeExists(const std::string& vid) const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   bool diskSystemExists(const std::string& name) const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
diff --git a/catalogue/MysqlCatalogue.cpp b/catalogue/MysqlCatalogue.cpp
index b27d7095290dcb78e7e0c7ac23d9d7d2096dd417..5f4f01837b6e31c55c8734fac45a491538442477 100644
--- a/catalogue/MysqlCatalogue.cpp
+++ b/catalogue/MysqlCatalogue.cpp
@@ -17,9 +17,6 @@
  */
 
 #include "catalogue/ArchiveFileRow.hpp"
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
-#include "catalogue/FileSizeMismatch.hpp"
 #include "catalogue/MysqlCatalogueSchema.hpp"
 #include "catalogue/MysqlCatalogue.hpp"
 #include "common/exception/DatabaseConstraintError.hpp"
@@ -156,6 +153,8 @@ common::dataStructures::Tape MysqlCatalogue::selectTapeForUpdate(rdbms::Conn &co
       "LAST_FSEQ AS LAST_FSEQ,"
       "IS_DISABLED AS IS_DISABLED,"
       "IS_FULL AS IS_FULL,"
+      "IS_READ_ONLY AS IS_READ_ONLY,"
+      "IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
       "LABEL_DRIVE AS LABEL_DRIVE,"
       "LABEL_TIME AS LABEL_TIME,"
@@ -198,6 +197,8 @@ common::dataStructures::Tape MysqlCatalogue::selectTapeForUpdate(rdbms::Conn &co
     tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
     tape.disabled = rset.columnBool("IS_DISABLED");
     tape.full = rset.columnBool("IS_FULL");
+    tape.readOnly = rset.columnBool("IS_READ_ONLY");
+    tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
     tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
     tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
@@ -205,8 +206,7 @@ common::dataStructures::Tape MysqlCatalogue::selectTapeForUpdate(rdbms::Conn &co
 
     tape.comment = rset.columnString("USER_COMMENT");
 
-    common::dataStructures::UserIdentity creatorUI;
-    creatorUI.name = rset.columnString("CREATION_LOG_USER_NAME");
+    // std::string creatorUIname = rset.columnString("CREATION_LOG_USER_NAME");
 
     common::dataStructures::EntryLog creationLog;
     creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -215,8 +215,7 @@ common::dataStructures::Tape MysqlCatalogue::selectTapeForUpdate(rdbms::Conn &co
 
     tape.creationLog = creationLog;
 
-    common::dataStructures::UserIdentity updaterUI;
-    updaterUI.name = rset.columnString("LAST_UPDATE_USER_NAME");
+    // std::string updaterUIname = rset.columnString("LAST_UPDATE_USER_NAME");
 
     common::dataStructures::EntryLog updateLog;
     updateLog.username = rset.columnString("LAST_UPDATE_USER_NAME");
@@ -254,7 +253,7 @@ void MysqlCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer> &
 
     const auto tape = selectTapeForUpdate(conn, firstEvent.vid);
     uint64_t expectedFSeq = tape.lastFSeq + 1;
-    uint64_t totalCompressedBytesWritten = 0;
+    uint64_t totalLogicalBytesWritten = 0;
 
     for(const auto &eventP: events) {
       const auto & event = *eventP;
@@ -265,7 +264,7 @@ void MysqlCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer> &
       }
 
       if(expectedFSeq != event.fSeq) {
-        exception::Exception ex;
+        exception::TapeFseqMismatch ex;
         ex.getMessage() << "FSeq mismatch for tape " << firstEvent.vid << ": expected=" << expectedFSeq << " actual=" <<
           firstEvent.fSeq;
         throw ex;
@@ -276,14 +275,14 @@ void MysqlCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer> &
       try {
         // If this is a file (as opposed to a placeholder), do the full processing.
         const auto &fileEvent=dynamic_cast<const TapeFileWritten &>(event); 
-        totalCompressedBytesWritten += fileEvent.compressedSize;
+        totalLogicalBytesWritten += fileEvent.size;
       } catch (std::bad_cast&) {}
     }
 
     auto lastEventItor = events.cend();
     lastEventItor--;
     const TapeItemWritten &lastEvent = **lastEventItor;
-    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, lastEvent.tapeDrive);
+    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalLogicalBytesWritten, lastEvent.tapeDrive);
 
     for(const auto &event : events) {
       try {
@@ -316,17 +315,14 @@ void MysqlCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
       row.diskFileId = event.diskFileId;
       row.diskInstance = event.diskInstance;
       row.size = event.size;
-      row.checksumType = event.checksumType;
-      row.checksumValue = event.checksumValue;
+      row.checksumBlob = event.checksumBlob;
       row.storageClassName = event.storageClassName;
       row.diskFilePath = event.diskFilePath;
-      row.diskFileUser = event.diskFileUser;
-      row.diskFileGroup = event.diskFileGroup;
+      row.diskFileOwnerUid = event.diskFileOwnerUid;
+      row.diskFileGid = event.diskFileGid;
       insertArchiveFile(conn, row);
     } catch(exception::DatabasePrimaryKeyError &) {
       // Ignore this error
-    } catch(...) {
-      throw;
     }
 
     const time_t now = time(nullptr);
@@ -350,26 +346,14 @@ void MysqlCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
       throw ex;
     }
 
-    if(archiveFile->checksumType != event.checksumType) {
-      catalogue::ChecksumTypeMismatch ex;
-      ex.getMessage() << "Checksum type mismatch: expected=" << archiveFile->checksumType << ", actual=" <<
-        event.checksumType << ": " << fileContext.str();
-      throw ex;
-    }
-
-    if(archiveFile->checksumValue != event.checksumValue) {
-      catalogue::ChecksumValueMismatch ex;
-      ex.getMessage() << "Checksum value mismatch: expected=" << archiveFile->checksumValue << ", actual=" <<
-        event.checksumValue << ": " << fileContext.str();
-      throw ex;
-    }
+    archiveFile->checksumBlob.validate(event.checksumBlob);
 
     // Insert the tape file
     common::dataStructures::TapeFile tapeFile;
     tapeFile.vid            = event.vid;
     tapeFile.fSeq           = event.fSeq;
     tapeFile.blockId        = event.blockId;
-    tapeFile.compressedSize = event.compressedSize;
+    tapeFile.fileSize       = event.size;
     tapeFile.copyNb         = event.copyNb;
     tapeFile.creationTime   = now;
     insertTapeFile(conn, tapeFile, event.archiveFileId);
@@ -393,18 +377,18 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -437,11 +421,10 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
         archiveFile->diskInstance = selectRset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = selectRset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = selectRset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = selectRset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = selectRset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = selectRset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = selectRset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = selectRset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = selectRset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = selectRset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(selectRset.columnBlob("CHECKSUM_BLOB"), selectRset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = selectRset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = selectRset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = selectRset.columnUint64("RECONCILIATION_TIME");
@@ -454,15 +437,14 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
         tapeFile.vid = selectRset.columnString("VID");
         tapeFile.fSeq = selectRset.columnUint64("FSEQ");
         tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = selectRset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
         tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
         if (!selectRset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = selectRset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = selectRset.columnUint64("SSBY_FSEQ");
         }
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
 
         archiveFile->tapeFiles.push_back(tapeFile);
       }
@@ -482,11 +464,10 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
          .add("requestDiskInstance", diskInstanceName)
          .add("diskFileId", archiveFile->diskFileId)
          .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-         .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-         .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+         .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+         .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
          .add("fileSize", std::to_string(archiveFile->fileSize))
-         .add("checksumType", archiveFile->checksumType)
-         .add("checksumValue", archiveFile->checksumValue)
+         .add("checksumBlob", archiveFile->checksumBlob)
          .add("creationTime", std::to_string(archiveFile->creationTime))
          .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
          .add("storageClass", archiveFile->storageClass)
@@ -500,9 +481,8 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
           << " fSeq: " << it->fSeq
           << " blockId: " << it->blockId
           << " creationTime: " << it->creationTime
-          << " compressedSize: " << it->compressedSize
-          << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-          << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+          << " fileSize: " << it->fileSize
+          << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
           << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
           << " supersededByVid: " << it->supersededByVid
           << " supersededByFSeq: " << it->supersededByFSeq;
@@ -547,11 +527,10 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
        .add("diskInstance", archiveFile->diskInstance)
        .add("diskFileId", archiveFile->diskFileId)
        .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-       .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-       .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+       .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+       .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
        .add("fileSize", std::to_string(archiveFile->fileSize))
-       .add("checksumType", archiveFile->checksumType)
-       .add("checksumValue", archiveFile->checksumValue)
+       .add("checksumBlob", archiveFile->checksumBlob)
        .add("creationTime", std::to_string(archiveFile->creationTime))
        .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
        .add("storageClass", archiveFile->storageClass)
@@ -568,9 +547,8 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
         << " fSeq: " << it->fSeq
         << " blockId: " << it->blockId
         << " creationTime: " << it->creationTime
-        << " compressedSize: " << it->compressedSize
-        << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-        << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+        << " fileSize: " << it->fileSize
+        << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
         << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
         << " supersededByVid: " << it->supersededByVid
         << " supersededByFSeq: " << it->supersededByFSeq;
diff --git a/catalogue/OracleCatalogue.cpp b/catalogue/OracleCatalogue.cpp
index defbd1aa982cb502d9b02b95c029dbe46c0993f4..df420ca72fa928ea69c60fe1e8460719178dd2d2 100644
--- a/catalogue/OracleCatalogue.cpp
+++ b/catalogue/OracleCatalogue.cpp
@@ -17,13 +17,11 @@
  */
 
 #include "catalogue/ArchiveFileRow.hpp"
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
-#include "catalogue/FileSizeMismatch.hpp"
 #include "catalogue/OracleCatalogue.hpp"
 #include "catalogue/retryOnLostConnection.hpp"
 #include "common/exception/Exception.hpp"
 #include "common/exception/LostDatabaseConnection.hpp"
+#include "common/exception/TapeFseqMismatch.hpp"
 #include "common/exception/UserError.hpp"
 #include "common/make_unique.hpp"
 #include "common/threading/MutexLocker.hpp"
@@ -48,7 +46,7 @@ namespace {
     rdbms::wrapper::OcciColumn vid;
     rdbms::wrapper::OcciColumn fSeq;
     rdbms::wrapper::OcciColumn blockId;
-    rdbms::wrapper::OcciColumn compressedSize;
+    rdbms::wrapper::OcciColumn fileSize;
     rdbms::wrapper::OcciColumn copyNb;
     rdbms::wrapper::OcciColumn creationTime;
     rdbms::wrapper::OcciColumn archiveFileId;
@@ -63,7 +61,7 @@ namespace {
       vid("VID", nbRows),
       fSeq("FSEQ", nbRows),
       blockId("BLOCK_ID", nbRows),
-      compressedSize("COMPRESSED_SIZE_IN_BYTES", nbRows),
+      fileSize("LOGICAL_SIZE_IN_BYTES", nbRows),
       copyNb("COPY_NB", nbRows),
       creationTime("CREATION_TIME", nbRows),
       archiveFileId("ARCHIVE_FILE_ID", nbRows) {
@@ -83,8 +81,8 @@ namespace {
     rdbms::wrapper::OcciColumn diskFileUser;
     rdbms::wrapper::OcciColumn diskFileGroup;
     rdbms::wrapper::OcciColumn size;
-    rdbms::wrapper::OcciColumn checksumType;
-    rdbms::wrapper::OcciColumn checksumValue;
+    rdbms::wrapper::OcciColumn checksumBlob;
+    rdbms::wrapper::OcciColumn checksumAdler32;
     rdbms::wrapper::OcciColumn storageClassName;
     rdbms::wrapper::OcciColumn creationTime;
     rdbms::wrapper::OcciColumn reconciliationTime;
@@ -100,11 +98,11 @@ namespace {
       diskInstance("DISK_INSTANCE_NAME", nbRows),
       diskFileId("DISK_FILE_ID", nbRows),
       diskFilePath("DISK_FILE_PATH", nbRows),
-      diskFileUser("DISK_FILE_USER", nbRows),
-      diskFileGroup("DISK_FILE_GROUP", nbRows),
+      diskFileUser("DISK_FILE_UID", nbRows),
+      diskFileGroup("DISK_FILE_GID", nbRows),
       size("SIZE_IN_BYTES", nbRows),
-      checksumType("CHECKSUM_TYPE", nbRows),
-      checksumValue("CHECKSUM_VALUE", nbRows),
+      checksumBlob("CHECKSUM_BLOB", nbRows),
+      checksumAdler32("CHECKSUM_ADLER32", nbRows),
       storageClassName("STORAGE_CLASS_NAME", nbRows),
       creationTime("CREATION_TIME", nbRows),
       reconciliationTime("RECONCILIATION_TIME", nbRows) {
@@ -220,6 +218,8 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::Conn &c
         "LAST_FSEQ AS LAST_FSEQ,"
         "IS_DISABLED AS IS_DISABLED,"
         "IS_FULL AS IS_FULL,"
+        "IS_READ_ONLY AS IS_READ_ONLY,"
+        "IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "LABEL_DRIVE AS LABEL_DRIVE,"
         "LABEL_TIME AS LABEL_TIME,"
@@ -262,6 +262,8 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::Conn &c
     tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
     tape.disabled = rset.columnBool("IS_DISABLED");
     tape.full = rset.columnBool("IS_FULL");
+    tape.readOnly = rset.columnBool("IS_READ_ONLY");
+    tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
     tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
     tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
@@ -269,8 +271,7 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::Conn &c
 
     tape.comment = rset.columnString("USER_COMMENT");
 
-    common::dataStructures::UserIdentity creatorUI;
-    creatorUI.name = rset.columnString("CREATION_LOG_USER_NAME");
+    //std::string creatorUIname = rset.columnString("CREATION_LOG_USER_NAME");
 
     common::dataStructures::EntryLog creationLog;
     creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -279,8 +280,7 @@ common::dataStructures::Tape OracleCatalogue::selectTapeForUpdate(rdbms::Conn &c
 
     tape.creationLog = creationLog;
 
-    common::dataStructures::UserIdentity updaterUI;
-    updaterUI.name = rset.columnString("LAST_UPDATE_USER_NAME");
+    //std::string updaterUIname = rset.columnString("LAST_UPDATE_USER_NAME");
 
     common::dataStructures::EntryLog updateLog;
     updateLog.username = rset.columnString("LAST_UPDATE_USER_NAME");
@@ -319,7 +319,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
 
     const auto tape = selectTapeForUpdate(conn, firstEvent.vid);
     uint64_t expectedFSeq = tape.lastFSeq + 1;
-    uint64_t totalCompressedBytesWritten = 0;
+    uint64_t totalLogicalBytesWritten = 0;
 
     uint32_t i = 0;
     // We have a mix of files and items. Only files will be recorded, but items
@@ -341,7 +341,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       }
       
       if (expectedFSeq != event.fSeq) {
-        exception::Exception ex;
+        exception::TapeFseqMismatch ex;
         ex.getMessage() << "FSeq mismatch for tape " << firstEvent.vid << ": expected=" << expectedFSeq << " actual=" <<
           event.fSeq;
         throw ex;
@@ -354,14 +354,14 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
 
         checkTapeFileWrittenFieldsAreSet(__FUNCTION__, fileEvent);
         
-        totalCompressedBytesWritten += fileEvent.compressedSize;
+        totalLogicalBytesWritten += fileEvent.size;
 
         // Store the length of each field and implicitly calculate the maximum field
         // length of each column 
         tapeFileBatch.vid.setFieldLenToValueLen(i, fileEvent.vid);
         tapeFileBatch.fSeq.setFieldLenToValueLen(i, fileEvent.fSeq);
         tapeFileBatch.blockId.setFieldLenToValueLen(i, fileEvent.blockId);
-        tapeFileBatch.compressedSize.setFieldLenToValueLen(i, fileEvent.compressedSize);
+        tapeFileBatch.fileSize.setFieldLenToValueLen(i, fileEvent.size);
         tapeFileBatch.copyNb.setFieldLenToValueLen(i, fileEvent.copyNb);
         tapeFileBatch.creationTime.setFieldLenToValueLen(i, now);
         tapeFileBatch.archiveFileId.setFieldLenToValueLen(i, fileEvent.archiveFileId);
@@ -375,7 +375,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
     auto lastEventItor = events.cend();
     lastEventItor--;
     const TapeItemWritten &lastEvent = **lastEventItor;
-    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, lastEvent.tapeDrive);
+    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalLogicalBytesWritten, lastEvent.tapeDrive);
 
     // If we had only placeholders and no file recorded, we are done (but we still commit the update of the tape's fSeq).
     if (fileEvents.empty()) {
@@ -414,19 +414,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
         throw ex;
       }
 
-      if(fileSizeAndChecksum.checksumType != event.checksumType) {
-        catalogue::ChecksumTypeMismatch ex;
-        ex.getMessage() << __FUNCTION__ << ": Checksum type mismatch: expected=" << fileSizeAndChecksum.checksumType <<
-          ", actual=" << event.checksumType << ": " << fileContext.str();
-        throw ex;
-      }
-
-      if(fileSizeAndChecksum.checksumValue != event.checksumValue) {
-        catalogue::ChecksumValueMismatch ex;
-        ex.getMessage() << __FUNCTION__ << ": Checksum value mismatch: expected=" << fileSizeAndChecksum.checksumValue
-          << ", actual=" << event.checksumValue << ": " << fileContext.str();
-        throw ex;
-      }
+      fileSizeAndChecksum.checksumBlob.validate(event.checksumBlob);
     }
 
     // Store the value of each field
@@ -435,7 +423,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       tapeFileBatch.vid.setFieldValue(i, event.vid);
       tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
       tapeFileBatch.blockId.setFieldValue(i, event.blockId);
-      tapeFileBatch.compressedSize.setFieldValue(i, event.compressedSize);
+      tapeFileBatch.fileSize.setFieldValue(i, event.size);
       tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
       tapeFileBatch.creationTime.setFieldValue(i, now);
       tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
@@ -448,7 +436,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
         "VID,"                                                                          "\n"
         "FSEQ,"                                                                         "\n"
         "BLOCK_ID,"                                                                     "\n"
-        "COMPRESSED_SIZE_IN_BYTES,"                                                     "\n"
+        "LOGICAL_SIZE_IN_BYTES,"                                                        "\n"
         "COPY_NB,"                                                                      "\n"
         "CREATION_TIME,"                                                                "\n"
         "ARCHIVE_FILE_ID)"                                                              "\n"
@@ -456,13 +444,13 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
         ":VID,"                                                                         "\n"
         ":FSEQ,"                                                                        "\n"
         ":BLOCK_ID,"                                                                    "\n"
-        ":COMPRESSED_SIZE_IN_BYTES,"                                                    "\n"
+        ":LOGICAL_SIZE_IN_BYTES,"                                                       "\n"
         ":COPY_NB,"                                                                     "\n"
         ":CREATION_TIME,"                                                               "\n"
         ":ARCHIVE_FILE_ID);"                                                            "\n"
-      "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, COMPRESSED_SIZE_IN_BYTES,"           "\n"
+      "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"              "\n"
          "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID)"                                     "\n"
-      "SELECT VID, FSEQ, BLOCK_ID, COMPRESSED_SIZE_IN_BYTES,"                           "\n"
+      "SELECT VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"                              "\n"
          "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID FROM TEMP_TAPE_FILE_INSERTION_BATCH;" "\n"
       "FOR TF IN (SELECT * FROM TEMP_TAPE_FILE_INSERTION_BATCH)"                        "\n"
       "LOOP"                                                                            "\n"
@@ -481,7 +469,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
     occiStmt.setColumn(tapeFileBatch.vid);
     occiStmt.setColumn(tapeFileBatch.fSeq);
     occiStmt.setColumn(tapeFileBatch.blockId);
-    occiStmt.setColumn(tapeFileBatch.compressedSize);
+    occiStmt.setColumn(tapeFileBatch.fileSize);
     occiStmt.setColumn(tapeFileBatch.copyNb);
     occiStmt.setColumn(tapeFileBatch.creationTime);
     occiStmt.setColumn(tapeFileBatch.archiveFileId);
@@ -528,20 +516,29 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const
   try {
     ArchiveFileBatch archiveFileBatch(events.size());
     const time_t now = time(nullptr);
+    std::vector<uint32_t> adler32(events.size());
 
-    // Store the length of each field and implicitly calculate the maximum field
-    // length of each column 
+    // Store the length of each field and implicitly calculate the maximum field length of each column 
     uint32_t i = 0;
     for (const auto &event: events) {
+      // Keep transition ADLER32 checksum column up-to-date with the ChecksumBlob
+      try {
+        std::string adler32hex = checksum::ChecksumBlob::ByteArrayToHex(event.checksumBlob.at(checksum::ADLER32));
+        adler32[i] = strtoul(adler32hex.c_str(), 0, 16);
+      } catch(exception::ChecksumTypeMismatch &ex) {
+        // No ADLER32 checksum exists in the checksumBlob
+        adler32[i] = 0;
+      }
+
       archiveFileBatch.archiveFileId.setFieldLenToValueLen(i, event.archiveFileId);
       archiveFileBatch.diskInstance.setFieldLenToValueLen(i, event.diskInstance);
       archiveFileBatch.diskFileId.setFieldLenToValueLen(i, event.diskFileId);
       archiveFileBatch.diskFilePath.setFieldLenToValueLen(i, event.diskFilePath);
-      archiveFileBatch.diskFileUser.setFieldLenToValueLen(i, event.diskFileUser);
-      archiveFileBatch.diskFileGroup.setFieldLenToValueLen(i, event.diskFileGroup);
+      archiveFileBatch.diskFileUser.setFieldLenToValueLen(i, event.diskFileOwnerUid);
+      archiveFileBatch.diskFileGroup.setFieldLenToValueLen(i, event.diskFileGid);
       archiveFileBatch.size.setFieldLenToValueLen(i, event.size);
-      archiveFileBatch.checksumType.setFieldLenToValueLen(i, event.checksumType);
-      archiveFileBatch.checksumValue.setFieldLenToValueLen(i, event.checksumValue);
+      archiveFileBatch.checksumBlob.setFieldLen(i, 2 + event.checksumBlob.length());
+      archiveFileBatch.checksumAdler32.setFieldLenToValueLen(i, adler32[i]);
       archiveFileBatch.storageClassName.setFieldLenToValueLen(i, event.storageClassName);
       archiveFileBatch.creationTime.setFieldLenToValueLen(i, now);
       archiveFileBatch.reconciliationTime.setFieldLenToValueLen(i, now);
@@ -555,11 +552,11 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const
       archiveFileBatch.diskInstance.setFieldValue(i, event.diskInstance);
       archiveFileBatch.diskFileId.setFieldValue(i, event.diskFileId);
       archiveFileBatch.diskFilePath.setFieldValue(i, event.diskFilePath);
-      archiveFileBatch.diskFileUser.setFieldValue(i, event.diskFileUser);
-      archiveFileBatch.diskFileGroup.setFieldValue(i, event.diskFileGroup);
+      archiveFileBatch.diskFileUser.setFieldValue(i, event.diskFileOwnerUid);
+      archiveFileBatch.diskFileGroup.setFieldValue(i, event.diskFileGid);
       archiveFileBatch.size.setFieldValue(i, event.size);
-      archiveFileBatch.checksumType.setFieldValue(i, event.checksumType);
-      archiveFileBatch.checksumValue.setFieldValue(i, event.checksumValue);
+      archiveFileBatch.checksumBlob.setFieldValueToRaw(i, event.checksumBlob.serialize());
+      archiveFileBatch.checksumAdler32.setFieldValue(i, adler32[i]);
       archiveFileBatch.storageClassName.setFieldValue(i, event.storageClassName);
       archiveFileBatch.creationTime.setFieldValue(i, now);
       archiveFileBatch.reconciliationTime.setFieldValue(i, now);
@@ -572,11 +569,11 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const
         "DISK_INSTANCE_NAME,"
         "DISK_FILE_ID,"
         "DISK_FILE_PATH,"
-        "DISK_FILE_USER,"
-        "DISK_FILE_GROUP,"
+        "DISK_FILE_UID,"
+        "DISK_FILE_GID,"
         "SIZE_IN_BYTES,"
-        "CHECKSUM_TYPE,"
-        "CHECKSUM_VALUE,"
+        "CHECKSUM_BLOB,"
+        "CHECKSUM_ADLER32,"
         "STORAGE_CLASS_ID,"
         "CREATION_TIME,"
         "RECONCILIATION_TIME)"
@@ -585,11 +582,11 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const
         "DISK_INSTANCE_NAME,"
         ":DISK_FILE_ID,"
         ":DISK_FILE_PATH,"
-        ":DISK_FILE_USER,"
-        ":DISK_FILE_GROUP,"
+        ":DISK_FILE_UID,"
+        ":DISK_FILE_GID,"
         ":SIZE_IN_BYTES,"
-        ":CHECKSUM_TYPE,"
-        ":CHECKSUM_VALUE,"
+        ":CHECKSUM_BLOB,"
+        ":CHECKSUM_ADLER32,"
         "STORAGE_CLASS_ID,"
         ":CREATION_TIME,"
         ":RECONCILIATION_TIME "
@@ -609,8 +606,8 @@ void OracleCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn, const
     occiStmt.setColumn(archiveFileBatch.diskFileUser);
     occiStmt.setColumn(archiveFileBatch.diskFileGroup);
     occiStmt.setColumn(archiveFileBatch.size);
-    occiStmt.setColumn(archiveFileBatch.checksumType);
-    occiStmt.setColumn(archiveFileBatch.checksumValue);
+    occiStmt.setColumn(archiveFileBatch.checksumBlob, oracle::occi::OCCI_SQLT_VBI);
+    occiStmt.setColumn(archiveFileBatch.checksumAdler32);
     occiStmt.setColumn(archiveFileBatch.storageClassName);
     occiStmt.setColumn(archiveFileBatch.creationTime);
     occiStmt.setColumn(archiveFileBatch.reconciliationTime);
@@ -685,8 +682,8 @@ std::map<uint64_t, OracleCatalogue::FileSizeAndChecksum> OracleCatalogue::select
       "SELECT "
         "ARCHIVE_FILE.ARCHIVE_FILE_ID AS ARCHIVE_FILE_ID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE "
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32 "
       "FROM "
         "ARCHIVE_FILE "
       "INNER JOIN TEMP_TAPE_FILE_BATCH ON "
@@ -705,12 +702,9 @@ std::map<uint64_t, OracleCatalogue::FileSizeAndChecksum> OracleCatalogue::select
           "Found duplicate archive file identifier in batch of files written to tape: archiveFileId=" << archiveFileId;
         throw ex;
       }
-
       FileSizeAndChecksum fileSizeAndChecksum;
       fileSizeAndChecksum.fileSize = rset.columnUint64("SIZE_IN_BYTES");
-      fileSizeAndChecksum.checksumType = rset.columnString("CHECKSUM_TYPE");
-      fileSizeAndChecksum.checksumValue = rset.columnString("CHECKSUM_VALUE");
-
+      fileSizeAndChecksum.checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
       fileSizesAndChecksums[archiveFileId] = fileSizeAndChecksum;
     }
 
@@ -777,18 +771,18 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -825,11 +819,10 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
         archiveFile->diskInstance = selectRset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = selectRset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = selectRset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = selectRset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = selectRset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = selectRset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = selectRset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = selectRset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = selectRset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = selectRset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(selectRset.columnBlob("CHECKSUM_BLOB"), selectRset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = selectRset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = selectRset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = selectRset.columnUint64("RECONCILIATION_TIME");
@@ -842,11 +835,10 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
         tapeFile.vid = selectRset.columnString("VID");
         tapeFile.fSeq = selectRset.columnUint64("FSEQ");
         tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = selectRset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
         tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!selectRset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = selectRset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = selectRset.columnUint64("SSBY_FSEQ");
@@ -870,11 +862,10 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
          .add("requestDiskInstance", diskInstanceName)
          .add("diskFileId", archiveFile->diskFileId)
          .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-         .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-         .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+         .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+         .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
          .add("fileSize", std::to_string(archiveFile->fileSize))
-         .add("checksumType", archiveFile->checksumType)
-         .add("checksumValue", archiveFile->checksumValue)
+         .add("checksumBlob", archiveFile->checksumBlob)
          .add("creationTime", std::to_string(archiveFile->creationTime))
          .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
          .add("storageClass", archiveFile->storageClass)
@@ -888,9 +879,8 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
           << " fSeq: " << it->fSeq
           << " blockId: " << it->blockId
           << " creationTime: " << it->creationTime
-          << " compressedSize: " << it->compressedSize
-          << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-          << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+          << " fileSize: " << it->fileSize
+          << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
           << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
           << " supersededByVid: " << it->supersededByVid
           << " supersededByFSeq: " << it->supersededByFSeq;
@@ -932,11 +922,10 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
        .add("diskInstance", archiveFile->diskInstance)
        .add("diskFileId", archiveFile->diskFileId)
        .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-       .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-       .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+       .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+       .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
        .add("fileSize", std::to_string(archiveFile->fileSize))
-       .add("checksumType", archiveFile->checksumType)
-       .add("checksumValue", archiveFile->checksumValue)
+       .add("checksumBlob", archiveFile->checksumBlob)
        .add("creationTime", std::to_string(archiveFile->creationTime))
        .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
        .add("storageClass", archiveFile->storageClass)
@@ -953,9 +942,8 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
         << " fSeq: " << it->fSeq
         << " blockId: " << it->blockId
         << " creationTime: " << it->creationTime
-        << " compressedSize: " << it->compressedSize
-        << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-        << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+        << " fileSize: " << it->fileSize
+        << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
         << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
         << " supersededByVid: " << it->supersededByVid
         << " supersededByFSeq: " << it->supersededByFSeq;
diff --git a/catalogue/OracleCatalogue.hpp b/catalogue/OracleCatalogue.hpp
index 49262e3a85562ad0e946596bc1d60b9c78eb4080..fe6fd8bc2e6745d51a54110df1f300637fd297b1 100644
--- a/catalogue/OracleCatalogue.hpp
+++ b/catalogue/OracleCatalogue.hpp
@@ -146,8 +146,7 @@ private:
    */
   struct FileSizeAndChecksum {
     uint64_t fileSize;
-    std::string checksumType;
-    std::string checksumValue;
+    checksum::ChecksumBlob checksumBlob;
   };
 
   /**
diff --git a/catalogue/PostgresCatalogue.cpp b/catalogue/PostgresCatalogue.cpp
index 73d834871bcdcdcd751a47dd855ab79126123740..8fcf1dc7a0dfec2ad3a62385026c10f300323553 100644
--- a/catalogue/PostgresCatalogue.cpp
+++ b/catalogue/PostgresCatalogue.cpp
@@ -17,9 +17,6 @@
  */
 
 #include "catalogue/ArchiveFileRow.hpp"
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
-#include "catalogue/FileSizeMismatch.hpp"
 #include "catalogue/PostgresCatalogue.hpp"
 #include "catalogue/retryOnLostConnection.hpp"
 #include "common/exception/Exception.hpp"
@@ -47,7 +44,7 @@ namespace {
     rdbms::wrapper::PostgresColumn vid;
     rdbms::wrapper::PostgresColumn fSeq;
     rdbms::wrapper::PostgresColumn blockId;
-    rdbms::wrapper::PostgresColumn compressedSize;
+    rdbms::wrapper::PostgresColumn fileSize;
     rdbms::wrapper::PostgresColumn copyNb;
     rdbms::wrapper::PostgresColumn creationTime;
     rdbms::wrapper::PostgresColumn archiveFileId;
@@ -62,7 +59,7 @@ namespace {
       vid("VID", nbRows),
       fSeq("FSEQ", nbRows),
       blockId("BLOCK_ID", nbRows),
-      compressedSize("COMPRESSED_SIZE_IN_BYTES", nbRows),
+      fileSize("LOGICAL_SIZE_IN_BYTES", nbRows),
       copyNb("COPY_NB", nbRows),
       creationTime("CREATION_TIME", nbRows),
       archiveFileId("ARCHIVE_FILE_ID", nbRows) {
@@ -82,8 +79,8 @@ namespace {
     rdbms::wrapper::PostgresColumn diskFileUser;
     rdbms::wrapper::PostgresColumn diskFileGroup;
     rdbms::wrapper::PostgresColumn size;
-    rdbms::wrapper::PostgresColumn checksumType;
-    rdbms::wrapper::PostgresColumn checksumValue;
+    rdbms::wrapper::PostgresColumn checksumBlob;
+    rdbms::wrapper::PostgresColumn checksumAdler32;
     rdbms::wrapper::PostgresColumn storageClassName;
     rdbms::wrapper::PostgresColumn creationTime;
     rdbms::wrapper::PostgresColumn reconciliationTime;
@@ -99,11 +96,11 @@ namespace {
       diskInstance("DISK_INSTANCE_NAME", nbRows),
       diskFileId("DISK_FILE_ID", nbRows),
       diskFilePath("DISK_FILE_PATH", nbRows),
-      diskFileUser("DISK_FILE_USER", nbRows),
-      diskFileGroup("DISK_FILE_GROUP", nbRows),
+      diskFileUser("DISK_FILE_UID", nbRows),
+      diskFileGroup("DISK_FILE_GID", nbRows),
       size("SIZE_IN_BYTES", nbRows),
-      checksumType("CHECKSUM_TYPE", nbRows),
-      checksumValue("CHECKSUM_VALUE", nbRows),
+      checksumBlob("CHECKSUM_BLOB", nbRows),
+      checksumAdler32("CHECKSUM_ADLER32", nbRows),
       storageClassName("STORAGE_CLASS_NAME", nbRows),
       creationTime("CREATION_TIME", nbRows),
       reconciliationTime("RECONCILIATION_TIME", nbRows) {
@@ -211,6 +208,8 @@ common::dataStructures::Tape PostgresCatalogue::selectTapeForUpdate(rdbms::Conn
         "LAST_FSEQ AS LAST_FSEQ,"
         "IS_DISABLED AS IS_DISABLED,"
         "IS_FULL AS IS_FULL,"
+        "IS_READ_ONLY AS IS_READ_ONLY,"
+        "IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "LABEL_DRIVE AS LABEL_DRIVE,"
         "LABEL_TIME AS LABEL_TIME,"
@@ -253,6 +252,8 @@ common::dataStructures::Tape PostgresCatalogue::selectTapeForUpdate(rdbms::Conn
     tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
     tape.disabled = rset.columnBool("IS_DISABLED");
     tape.full = rset.columnBool("IS_FULL");
+    tape.readOnly = rset.columnBool("IS_READ_ONLY");
+    tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
     tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
     tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
@@ -260,8 +261,7 @@ common::dataStructures::Tape PostgresCatalogue::selectTapeForUpdate(rdbms::Conn
 
     tape.comment = rset.columnString("USER_COMMENT");
 
-    common::dataStructures::UserIdentity creatorUI;
-    creatorUI.name = rset.columnString("CREATION_LOG_USER_NAME");
+    //std::string creatorUIname = rset.columnString("CREATION_LOG_USER_NAME");
 
     common::dataStructures::EntryLog creationLog;
     creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -270,8 +270,7 @@ common::dataStructures::Tape PostgresCatalogue::selectTapeForUpdate(rdbms::Conn
 
     tape.creationLog = creationLog;
 
-    common::dataStructures::UserIdentity updaterUI;
-    updaterUI.name = rset.columnString("LAST_UPDATE_USER_NAME");
+    //std::string updaterUIname = rset.columnString("LAST_UPDATE_USER_NAME");
 
     common::dataStructures::EntryLog updateLog;
     updateLog.username = rset.columnString("LAST_UPDATE_USER_NAME");
@@ -313,7 +312,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
 
     const auto tape = selectTapeForUpdate(conn, firstEvent.vid);
     uint64_t expectedFSeq = tape.lastFSeq + 1;
-    uint64_t totalCompressedBytesWritten = 0;
+    uint64_t totalLogicalBytesWritten = 0;
 
     // We have a mix of files and items. Only files will be recorded, but items
     // allow checking fSeq coherency.
@@ -334,7 +333,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
       }
       
       if (expectedFSeq != event.fSeq) {
-        exception::Exception ex;
+        exception::TapeFseqMismatch ex;
         ex.getMessage() << "FSeq mismatch for tape " << firstEvent.vid << ": expected=" << expectedFSeq << " actual=" <<
           event.fSeq;
         throw ex;
@@ -347,7 +346,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
 
         checkTapeFileWrittenFieldsAreSet(__FUNCTION__, fileEvent);
         
-        totalCompressedBytesWritten += fileEvent.compressedSize;
+        totalLogicalBytesWritten += fileEvent.size;
         
         fileEvents.insert(fileEvent);
       } catch (std::bad_cast&) {}
@@ -357,7 +356,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
     auto lastEventItor = events.cend();
     lastEventItor--;
     const TapeItemWritten &lastEvent = **lastEventItor;
-    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten,
+    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalLogicalBytesWritten,
       lastEvent.tapeDrive);
 
     // If we had only placeholders and no file recorded, we are done (but we still commit the update of the tape's fSeq).
@@ -401,19 +400,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
         throw ex;
       }
 
-      if(fileSizeAndChecksum.checksumType != event.checksumType) {
-        catalogue::ChecksumTypeMismatch ex;
-        ex.getMessage() << __FUNCTION__ << ": Checksum type mismatch: expected=" << fileSizeAndChecksum.checksumType <<
-          ", actual=" << event.checksumType << ": " << fileContext.str();
-        throw ex;
-      }
-
-      if(fileSizeAndChecksum.checksumValue != event.checksumValue) {
-        catalogue::ChecksumValueMismatch ex;
-        ex.getMessage() << __FUNCTION__ << ": Checksum value mismatch: expected=" << fileSizeAndChecksum.checksumValue
-          << ", actual=" << event.checksumValue << ": " << fileContext.str();
-        throw ex;
-      }
+      fileSizeAndChecksum.checksumBlob.validate(event.checksumBlob);
     }
 
     // Store the value of each field
@@ -422,7 +409,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
       tapeFileBatch.vid.setFieldValue(i, event.vid);
       tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
       tapeFileBatch.blockId.setFieldValue(i, event.blockId);
-      tapeFileBatch.compressedSize.setFieldValue(i, event.compressedSize);
+      tapeFileBatch.fileSize.setFieldValue(i, event.size);
       tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
       tapeFileBatch.creationTime.setFieldValue(i, now);
       tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
@@ -437,7 +424,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
       "VID,"                                                                         "\n"
       "FSEQ,"                                                                        "\n"
       "BLOCK_ID,"                                                                    "\n"
-      "COMPRESSED_SIZE_IN_BYTES,"                                                    "\n"
+      "LOGICAL_SIZE_IN_BYTES,"                                                       "\n"
       "COPY_NB,"                                                                     "\n"
       "CREATION_TIME,"                                                               "\n"
       "ARCHIVE_FILE_ID) "                                                            "\n"
@@ -445,13 +432,13 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
       "-- :VID,"                                                                     "\n"
       "-- :FSEQ,"                                                                    "\n"
       "-- :BLOCK_ID,"                                                                "\n"
-      "-- :COMPRESSED_SIZE_IN_BYTES,"                                                "\n"
+      "-- :LOGICAL_SIZE_IN_BYTES,"                                                   "\n"
       "-- :COPY_NB,"                                                                 "\n"
       "-- :CREATION_TIME,"                                                           "\n"
       "-- :ARCHIVE_FILE_ID"                                                          "\n"
-    "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, COMPRESSED_SIZE_IN_BYTES,"          "\n"
+    "INSERT INTO TAPE_FILE (VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"             "\n"
       "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID)"                                     "\n"
-    "SELECT VID, FSEQ, BLOCK_ID, COMPRESSED_SIZE_IN_BYTES,"                          "\n"
+    "SELECT VID, FSEQ, BLOCK_ID, LOGICAL_SIZE_IN_BYTES,"                             "\n"
       "COPY_NB, CREATION_TIME, ARCHIVE_FILE_ID FROM TEMP_TAPE_FILE_INSERTION_BATCH;" "\n"
     "DO $$ "                                                                         "\n"
       "DECLARE"                                                                      "\n"
@@ -474,7 +461,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
     postgresStmt.setColumn(tapeFileBatch.vid);
     postgresStmt.setColumn(tapeFileBatch.fSeq);
     postgresStmt.setColumn(tapeFileBatch.blockId);
-    postgresStmt.setColumn(tapeFileBatch.compressedSize);
+    postgresStmt.setColumn(tapeFileBatch.fileSize);
     postgresStmt.setColumn(tapeFileBatch.copyNb);
     postgresStmt.setColumn(tapeFileBatch.creationTime);
     postgresStmt.setColumn(tapeFileBatch.archiveFileId);
@@ -505,11 +492,20 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
       archiveFileBatch.diskInstance.setFieldValue(i, event.diskInstance);
       archiveFileBatch.diskFileId.setFieldValue(i, event.diskFileId);
       archiveFileBatch.diskFilePath.setFieldValue(i, event.diskFilePath);
-      archiveFileBatch.diskFileUser.setFieldValue(i, event.diskFileUser);
-      archiveFileBatch.diskFileGroup.setFieldValue(i, event.diskFileGroup);
+      archiveFileBatch.diskFileUser.setFieldValue(i, event.diskFileOwnerUid);
+      archiveFileBatch.diskFileGroup.setFieldValue(i, event.diskFileGid);
       archiveFileBatch.size.setFieldValue(i, event.size);
-      archiveFileBatch.checksumType.setFieldValue(i, event.checksumType);
-      archiveFileBatch.checksumValue.setFieldValue(i, event.checksumValue);
+      archiveFileBatch.checksumBlob.setFieldByteA(conn, i, event.checksumBlob.serialize());
+      // Keep transition ADLER32 checksum up-to-date if it exists
+      std::string adler32str;
+      try {
+        std::string adler32hex = checksum::ChecksumBlob::ByteArrayToHex(event.checksumBlob.at(checksum::ADLER32));
+        uint32_t adler32 = strtoul(adler32hex.c_str(), 0, 16);
+        adler32str = std::to_string(adler32);
+      } catch(exception::ChecksumTypeMismatch &ex) {
+        adler32str = "0";
+      }
+      archiveFileBatch.checksumAdler32.setFieldValue(i, adler32str);
       archiveFileBatch.storageClassName.setFieldValue(i, event.storageClassName);
       archiveFileBatch.creationTime.setFieldValue(i, now);
       archiveFileBatch.reconciliationTime.setFieldValue(i, now);
@@ -522,11 +518,11 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
         "DISK_INSTANCE_NAME,"
         "DISK_FILE_ID,"
         "DISK_FILE_PATH,"
-        "DISK_FILE_USER,"
-        "DISK_FILE_GROUP,"
+        "DISK_FILE_UID,"
+        "DISK_FILE_GID,"
         "SIZE_IN_BYTES,"
-        "CHECKSUM_TYPE,"
-        "CHECKSUM_VALUE,"
+        "CHECKSUM_BLOB,"
+        "CHECKSUM_ADLER32,"
         "STORAGE_CLASS_NAME,"
         "CREATION_TIME,"
         "RECONCILIATION_TIME) "
@@ -535,11 +531,11 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
         ":DISK_INSTANCE_NAME,"
         ":DISK_FILE_ID,"
         ":DISK_FILE_PATH,"
-        ":DISK_FILE_USER,"
-        ":DISK_FILE_GROUP,"
+        ":DISK_FILE_UID,"
+        ":DISK_FILE_GID,"
         ":SIZE_IN_BYTES,"
-        ":CHECKSUM_TYPE,"
-        ":CHECKSUM_VALUE,"
+        ":CHECKSUM_BLOB,"
+        ":CHECKSUM_ADLER32,"
         ":STORAGE_CLASS_NAME,"
         ":CREATION_TIME,"
         ":RECONCILIATION_TIME";
@@ -554,8 +550,8 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
     postgresStmt.setColumn(archiveFileBatch.diskFileUser);
     postgresStmt.setColumn(archiveFileBatch.diskFileGroup);
     postgresStmt.setColumn(archiveFileBatch.size);
-    postgresStmt.setColumn(archiveFileBatch.checksumType);
-    postgresStmt.setColumn(archiveFileBatch.checksumValue);
+    postgresStmt.setColumn(archiveFileBatch.checksumBlob);
+    postgresStmt.setColumn(archiveFileBatch.checksumAdler32);
     postgresStmt.setColumn(archiveFileBatch.storageClassName);
     postgresStmt.setColumn(archiveFileBatch.creationTime);
     postgresStmt.setColumn(archiveFileBatch.reconciliationTime);
@@ -568,11 +564,11 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
   	"DISK_INSTANCE_NAME,"
         "DISK_FILE_ID,"
         "DISK_FILE_PATH,"
-        "DISK_FILE_USER,"
-        "DISK_FILE_GROUP,"
+        "DISK_FILE_UID,"
+        "DISK_FILE_GID,"
         "SIZE_IN_BYTES,"
-        "CHECKSUM_TYPE,"
-        "CHECKSUM_VALUE,"
+        "CHECKSUM_BLOB,"
+        "CHECKSUM_ADLER32,"
         "STORAGE_CLASS_ID,"
         "CREATION_TIME,"
         "RECONCILIATION_TIME) "
@@ -581,11 +577,11 @@ void PostgresCatalogue::idempotentBatchInsertArchiveFiles(rdbms::Conn &conn,
         "A.DISK_INSTANCE_NAME,"
         "A.DISK_FILE_ID,"
         "A.DISK_FILE_PATH,"
-        "A.DISK_FILE_USER,"
-        "A.DISK_FILE_GROUP,"
+        "A.DISK_FILE_UID,"
+        "A.DISK_FILE_GID,"
         "A.SIZE_IN_BYTES,"
-        "A.CHECKSUM_TYPE," 
-  	    "A.CHECKSUM_VALUE,"
+        "A.CHECKSUM_BLOB,"
+        "A.CHECKSUM_ADLER32,"
         "S.STORAGE_CLASS_ID,"
         "A.CREATION_TIME,"
         "A.RECONCILIATION_TIME "
@@ -625,8 +621,8 @@ std::map<uint64_t, PostgresCatalogue::FileSizeAndChecksum> PostgresCatalogue::se
       "SELECT "
         "ARCHIVE_FILE.ARCHIVE_FILE_ID AS ARCHIVE_FILE_ID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE "
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32 "
       "FROM "
         "ARCHIVE_FILE "
       "INNER JOIN TEMP_TAPE_FILE_BATCH ON "
@@ -648,9 +644,7 @@ std::map<uint64_t, PostgresCatalogue::FileSizeAndChecksum> PostgresCatalogue::se
 
       FileSizeAndChecksum fileSizeAndChecksum;
       fileSizeAndChecksum.fileSize = rset.columnUint64("SIZE_IN_BYTES");
-      fileSizeAndChecksum.checksumType = rset.columnString("CHECKSUM_TYPE");
-      fileSizeAndChecksum.checksumValue = rset.columnString("CHECKSUM_VALUE");
-
+      fileSizeAndChecksum.checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
       fileSizesAndChecksums[archiveFileId] = fileSizeAndChecksum;
     }
 
@@ -709,18 +703,18 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -755,11 +749,10 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
         archiveFile->diskInstance = selectRset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = selectRset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = selectRset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = selectRset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = selectRset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = selectRset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = selectRset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = selectRset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = selectRset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = selectRset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(selectRset.columnBlob("CHECKSUM_BLOB"), selectRset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = selectRset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = selectRset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = selectRset.columnUint64("RECONCILIATION_TIME");
@@ -772,11 +765,10 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
         tapeFile.vid = selectRset.columnString("VID");
         tapeFile.fSeq = selectRset.columnUint64("FSEQ");
         tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = selectRset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
         tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!selectRset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = selectRset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = selectRset.columnUint64("SSBY_FSEQ");
@@ -800,11 +792,10 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
          .add("requestDiskInstance", diskInstanceName)
          .add("diskFileId", archiveFile->diskFileId)
          .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-         .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-         .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+         .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+         .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
          .add("fileSize", std::to_string(archiveFile->fileSize))
-         .add("checksumType", archiveFile->checksumType)
-         .add("checksumValue", archiveFile->checksumValue)
+         .add("checksumBlob", archiveFile->checksumBlob)
          .add("creationTime", std::to_string(archiveFile->creationTime))
          .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
          .add("storageClass", archiveFile->storageClass)
@@ -818,9 +809,8 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
           << " fSeq: " << it->fSeq
           << " blockId: " << it->blockId
           << " creationTime: " << it->creationTime
-          << " compressedSize: " << it->compressedSize
-          << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-          << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+          << " fileSize: " << it->fileSize
+          << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
           << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
           << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
           << " supersededByVid: " << it->supersededByVid
@@ -864,11 +854,10 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
        .add("diskInstance", archiveFile->diskInstance)
        .add("diskFileId", archiveFile->diskFileId)
        .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-       .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-       .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+       .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+       .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
        .add("fileSize", std::to_string(archiveFile->fileSize))
-       .add("checksumType", archiveFile->checksumType)
-       .add("checksumValue", archiveFile->checksumValue)
+       .add("checksumBlob", archiveFile->checksumBlob)
        .add("creationTime", std::to_string(archiveFile->creationTime))
        .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
        .add("storageClass", archiveFile->storageClass)
@@ -885,9 +874,8 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
         << " fSeq: " << it->fSeq
         << " blockId: " << it->blockId
         << " creationTime: " << it->creationTime
-        << " compressedSize: " << it->compressedSize
-        << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-        << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+        << " fileSize: " << it->fileSize
+        << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
         << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
         << " supersededByVid: " << it->supersededByVid
         << " supersededByFSeq: " << it->supersededByFSeq;
diff --git a/catalogue/PostgresCatalogue.hpp b/catalogue/PostgresCatalogue.hpp
index b5daec4f5a557ad8125810beae4ecdaa7e87051c..7c1961c73d2833b456c6b99fcc9ce5fd573b0e56 100644
--- a/catalogue/PostgresCatalogue.hpp
+++ b/catalogue/PostgresCatalogue.hpp
@@ -155,8 +155,7 @@ private:
    */
   struct FileSizeAndChecksum {
     uint64_t fileSize;
-    std::string checksumType;
-    std::string checksumValue;
+    checksum::ChecksumBlob checksumBlob;
   };
 
   /**
diff --git a/catalogue/RdbmsCatalogue.cpp b/catalogue/RdbmsCatalogue.cpp
index e53a64ce05801a1cc6c9553354eb43f5ad4a9db7..e9d97e68d667be749b333336f7cac241650bb42f 100644
--- a/catalogue/RdbmsCatalogue.cpp
+++ b/catalogue/RdbmsCatalogue.cpp
@@ -24,7 +24,9 @@
 #include "catalogue/SqliteCatalogueSchema.hpp"
 #include "catalogue/UserSpecifiedANonExistentDiskSystem.hpp"
 #include "catalogue/UserSpecifiedANonEmptyDiskSystemAfterDelete.hpp"
+#include "catalogue/UserSpecifiedANonEmptyLogicalLibrary.hpp"
 #include "catalogue/UserSpecifiedANonEmptyTape.hpp"
+#include "catalogue/UserSpecifiedANonExistentLogicalLibrary.hpp"
 #include "catalogue/UserSpecifiedANonExistentTape.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringComment.hpp"
 #include "catalogue/UserSpecifiedAnEmptyStringDiskSystemName.hpp"
@@ -781,20 +783,20 @@ bool RdbmsCatalogue::diskFilePathExists(rdbms::Conn &conn, const std::string &di
 // diskFileUserExists
 //------------------------------------------------------------------------------
 bool RdbmsCatalogue::diskFileUserExists(rdbms::Conn &conn, const std::string &diskInstanceName,
-  const std::string &diskFileUser) const {
+  uint32_t diskFileOwnerUid) const {
   try {
     const char *const sql =
       "SELECT "
         "DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME, "
-        "DISK_FILE_USER AS DISK_FILE_USER "
+        "DISK_FILE_UID AS DISK_FILE_UID "
       "FROM "
         "ARCHIVE_FILE "
       "WHERE "
         "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND "
-        "DISK_FILE_USER = :DISK_FILE_USER";
+        "DISK_FILE_UID = :DISK_FILE_UID";
     auto stmt = conn.createStmt(sql);
     stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
-    stmt.bindString(":DISK_FILE_USER", diskFileUser);
+    stmt.bindUint64(":DISK_FILE_UID", diskFileOwnerUid);
     auto rset = stmt.executeQuery();
     return rset.next();
   } catch(exception::UserError &) {
@@ -809,20 +811,20 @@ bool RdbmsCatalogue::diskFileUserExists(rdbms::Conn &conn, const std::string &di
 // diskFileGroupExists
 //------------------------------------------------------------------------------
 bool RdbmsCatalogue::diskFileGroupExists(rdbms::Conn &conn, const std::string &diskInstanceName,
-  const std::string &diskFileGroup) const {
+  uint32_t diskFileGid) const {
   try {
     const char *const sql =
       "SELECT "
         "DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME, "
-        "DISK_FILE_GROUP AS DISK_FILE_GROUP "
+        "DISK_FILE_GID AS DISK_FILE_GID "
       "FROM "
         "ARCHIVE_FILE "
       "WHERE "
         "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND "
-        "DISK_FILE_GROUP = :DISK_FILE_GROUP";
+        "DISK_FILE_GID = :DISK_FILE_GID";
     auto stmt = conn.createStmt(sql);
     stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
-    stmt.bindString(":DISK_FILE_GROUP", diskFileGroup);
+    stmt.bindUint64(":DISK_FILE_GID", diskFileGid);
     auto rset = stmt.executeQuery();
     return rset.next();
   } catch(exception::UserError &) {
@@ -1150,9 +1152,9 @@ void RdbmsCatalogue::modifyTapePoolSupply(const common::dataStructures::Security
         " string");
     }
 
-    if(supply.empty()) {
-      throw UserSpecifiedAnEmptyStringSupply("Cannot modify tape pool because the new supply value is an empty"
-        " string");
+    optional<std::string> optionalSupply;
+    if(!supply.empty()) {
+      optionalSupply = supply;
     }
 
     const time_t now = time(nullptr);
@@ -1166,7 +1168,7 @@ void RdbmsCatalogue::modifyTapePoolSupply(const common::dataStructures::Security
         "TAPE_POOL_NAME = :TAPE_POOL_NAME";
     auto conn = m_connPool.getConn();
     auto stmt = conn.createStmt(sql);
-    stmt.bindString(":SUPPLY", supply);
+    stmt.bindOptionalString(":SUPPLY", optionalSupply);
     stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username);
     stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host);
     stmt.bindUint64(":LAST_UPDATE_TIME", now);
@@ -1587,14 +1589,28 @@ bool RdbmsCatalogue::logicalLibraryExists(rdbms::Conn &conn, const std::string &
 //------------------------------------------------------------------------------
 void RdbmsCatalogue::deleteLogicalLibrary(const std::string &name) {
   try {
-    const char *const sql = "DELETE FROM LOGICAL_LIBRARY WHERE LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME";
+    const char *const sql =
+      "DELETE "
+      "FROM LOGICAL_LIBRARY "
+      "WHERE "
+        "LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME_1 AND "
+        "NOT EXISTS (SELECT LOGICAL_LIBRARY_NAME FROM TAPE WHERE LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME_2)";
     auto conn = m_connPool.getConn();
     auto stmt = conn.createStmt(sql);
-    stmt.bindString(":LOGICAL_LIBRARY_NAME", name);
+    stmt.bindString(":LOGICAL_LIBRARY_NAME_1", name);
+    stmt.bindString(":LOGICAL_LIBRARY_NAME_2", name);
     stmt.executeNonQuery();
 
+    // The delete statement will effect no rows and will not raise an error if
+    // either the logical library does not exist or if it still contains tapes
     if(0 == stmt.getNbAffectedRows()) {
-      throw exception::UserError(std::string("Cannot delete logical-library ") + name + " because it does not exist");
+      if(logicalLibraryExists(conn, name)) {
+        throw UserSpecifiedANonEmptyLogicalLibrary(std::string("Cannot delete logical library ") + name +
+          " because it contains one or more tapes");
+      } else {
+        throw UserSpecifiedANonExistentLogicalLibrary(std::string("Cannot delete logical library ") + name +
+          " because it does not exist");
+      }
     }
   } catch(exception::UserError &) {
     throw;
@@ -1739,7 +1755,10 @@ void RdbmsCatalogue::createTape(
   const uint64_t capacityInBytes,
   const bool disabled,
   const bool full,
+  const bool readOnly,
   const std::string &comment) {
+  // CTA hard-codes this field to FALSE
+  const bool isFromCastor = false;
   try {
     if(vid.empty()) {
       throw UserSpecifiedAnEmptyStringVid("Cannot create tape because the VID is an empty string");
@@ -1796,6 +1815,8 @@ void RdbmsCatalogue::createTape(
         "LAST_FSEQ,"
         "IS_DISABLED,"
         "IS_FULL,"
+        "IS_READ_ONLY,"
+        "IS_FROM_CASTOR,"
 
         "USER_COMMENT,"
 
@@ -1817,6 +1838,8 @@ void RdbmsCatalogue::createTape(
         ":LAST_FSEQ,"
         ":IS_DISABLED,"
         ":IS_FULL,"
+        ":IS_READ_ONLY,"
+        ":IS_FROM_CASTOR,"
 
         ":USER_COMMENT,"
 
@@ -1839,6 +1862,8 @@ void RdbmsCatalogue::createTape(
     stmt.bindUint64(":LAST_FSEQ", 0);
     stmt.bindBool(":IS_DISABLED", disabled);
     stmt.bindBool(":IS_FULL", full);
+    stmt.bindBool(":IS_READ_ONLY", readOnly);
+    stmt.bindBool(":IS_FROM_CASTOR", isFromCastor);
 
     stmt.bindString(":USER_COMMENT", comment);
 
@@ -2015,6 +2040,8 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
         "TAPE.LAST_FSEQ AS LAST_FSEQ,"
         "TAPE.IS_DISABLED AS IS_DISABLED,"
         "TAPE.IS_FULL AS IS_FULL,"
+        "TAPE.IS_READ_ONLY AS IS_READ_ONLY,"
+        "TAPE.IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "TAPE.LABEL_DRIVE AS LABEL_DRIVE,"
         "TAPE.LABEL_TIME AS LABEL_TIME,"
@@ -2024,6 +2051,9 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
 
         "TAPE.LAST_WRITE_DRIVE AS LAST_WRITE_DRIVE,"
         "TAPE.LAST_WRITE_TIME AS LAST_WRITE_TIME,"
+
+        "TAPE.READ_MOUNT_COUNT AS READ_MOUNT_COUNT,"
+        "TAPE.WRITE_MOUNT_COUNT AS WRITE_MOUNT_COUNT,"
 
         "TAPE.USER_COMMENT AS USER_COMMENT,"
 
@@ -2047,7 +2077,8 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
        searchCriteria.vo ||
        searchCriteria.capacityInBytes ||
        searchCriteria.disabled ||
-       searchCriteria.full) {
+       searchCriteria.full ||
+       searchCriteria.readOnly) {
       sql += " WHERE ";
     }
 
@@ -2097,6 +2128,11 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
       sql += " TAPE.IS_FULL = :IS_FULL";
       addedAWhereConstraint = true;
     }
+    if(searchCriteria.readOnly) {
+      if(addedAWhereConstraint) sql += " AND ";
+      sql += " TAPE.IS_READ_ONLY = :IS_READ_ONLY";
+      addedAWhereConstraint = true;
+    }
 
     sql += " ORDER BY TAPE.VID";
 
@@ -2111,6 +2147,7 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
     if(searchCriteria.capacityInBytes) stmt.bindUint64(":CAPACITY_IN_BYTES", searchCriteria.capacityInBytes.value());
     if(searchCriteria.disabled) stmt.bindBool(":IS_DISABLED", searchCriteria.disabled.value());
     if(searchCriteria.full) stmt.bindBool(":IS_FULL", searchCriteria.full.value());
+    if(searchCriteria.readOnly) stmt.bindBool(":IS_READ_ONLY", searchCriteria.readOnly.value());
 
     auto rset = stmt.executeQuery();
     while (rset.next()) {
@@ -2128,10 +2165,15 @@ std::list<common::dataStructures::Tape> RdbmsCatalogue::getTapes(rdbms::Conn &co
       tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
       tape.disabled = rset.columnBool("IS_DISABLED");
       tape.full = rset.columnBool("IS_FULL");
-
+      tape.readOnly = rset.columnBool("IS_READ_ONLY");
+      tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
+
       tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
       tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
       tape.lastWriteLog = getTapeLogFromRset(rset, "LAST_WRITE_DRIVE", "LAST_WRITE_TIME");
+
+      tape.readMountCount = rset.columnUint64("READ_MOUNT_COUNT");
+      tape.writeMountCount = rset.columnUint64("WRITE_MOUNT_COUNT");
 
       tape.comment = rset.columnString("USER_COMMENT");
       tape.creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -2173,6 +2215,8 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getTapesByVid(const std::se
         "TAPE.LAST_FSEQ AS LAST_FSEQ,"
         "TAPE.IS_DISABLED AS IS_DISABLED,"
         "TAPE.IS_FULL AS IS_FULL,"
+        "TAPE.IS_READ_ONLY AS IS_READ_ONLY,"
+        "TAPE.IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "TAPE.LABEL_DRIVE AS LABEL_DRIVE,"
         "TAPE.LABEL_TIME AS LABEL_TIME,"
@@ -2182,6 +2226,9 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getTapesByVid(const std::se
 
         "TAPE.LAST_WRITE_DRIVE AS LAST_WRITE_DRIVE,"
         "TAPE.LAST_WRITE_TIME AS LAST_WRITE_TIME,"
+
+        "TAPE.READ_MOUNT_COUNT AS READ_MOUNT_COUNT,"
+        "TAPE.WRITE_MOUNT_COUNT AS WRITE_MOUNT_COUNT,"
 
         "TAPE.USER_COMMENT AS USER_COMMENT,"
 
@@ -2240,10 +2287,15 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getTapesByVid(const std::se
       tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
       tape.disabled = rset.columnBool("IS_DISABLED");
       tape.full = rset.columnBool("IS_FULL");
+      tape.readOnly = rset.columnBool("IS_READ_ONLY");
+      tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
       tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
       tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
       tape.lastWriteLog = getTapeLogFromRset(rset, "LAST_WRITE_DRIVE", "LAST_WRITE_TIME");
+
+      tape.readMountCount = rset.columnUint64("READ_MOUNT_COUNT");
+      tape.writeMountCount = rset.columnUint64("WRITE_MOUNT_COUNT");
 
       tape.comment = rset.columnString("USER_COMMENT");
       tape.creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -2289,6 +2341,8 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getAllTapes() const {
         "TAPE.LAST_FSEQ AS LAST_FSEQ,"
         "TAPE.IS_DISABLED AS IS_DISABLED,"
         "TAPE.IS_FULL AS IS_FULL,"
+        "TAPE.IS_READ_ONLY AS IS_READ_ONLY,"
+        "TAPE.IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "TAPE.LABEL_DRIVE AS LABEL_DRIVE,"
         "TAPE.LABEL_TIME AS LABEL_TIME,"
@@ -2298,6 +2352,9 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getAllTapes() const {
 
         "TAPE.LAST_WRITE_DRIVE AS LAST_WRITE_DRIVE,"
         "TAPE.LAST_WRITE_TIME AS LAST_WRITE_TIME,"
+
+        "TAPE.READ_MOUNT_COUNT AS READ_MOUNT_COUNT,"
+        "TAPE.WRITE_MOUNT_COUNT AS WRITE_MOUNT_COUNT,"
 
         "TAPE.USER_COMMENT AS USER_COMMENT,"
 
@@ -2332,10 +2389,15 @@ common::dataStructures::VidToTapeMap RdbmsCatalogue::getAllTapes() const {
       tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
       tape.disabled = rset.columnBool("IS_DISABLED");
       tape.full = rset.columnBool("IS_FULL");
+      tape.readOnly = rset.columnBool("IS_READ_ONLY");
+      tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
       tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
       tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
       tape.lastWriteLog = getTapeLogFromRset(rset, "LAST_WRITE_DRIVE", "LAST_WRITE_TIME");
+
+      tape.readMountCount = rset.columnUint64("READ_MOUNT_COUNT");
+      tape.writeMountCount = rset.columnUint64("WRITE_MOUNT_COUNT");
 
       tape.comment = rset.columnString("USER_COMMENT");
       tape.creationLog.username = rset.columnString("CREATION_LOG_USER_NAME");
@@ -2379,6 +2441,42 @@ uint64_t RdbmsCatalogue::getNbNonSupersededFilesOnTape(rdbms::Conn& conn, const
 }
 
 
+//------------------------------------------------------------------------------
+// getNbFilesOnTape
+//------------------------------------------------------------------------------
+uint64_t RdbmsCatalogue::getNbFilesOnTape(const std::string& vid) const {
+  try {
+    auto conn = m_connPool.getConn();
+    return getNbFilesOnTape(conn, vid);
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// getNbFilesOnTape
+//------------------------------------------------------------------------------
+uint64_t RdbmsCatalogue::getNbFilesOnTape(rdbms::Conn& conn, const std::string& vid) const {
+  try {
+    const char *const sql =
+    "SELECT COUNT(*) AS NB_FILES FROM TAPE_FILE "
+    "WHERE VID = :VID ";
+
+    auto stmt = conn.createStmt(sql);
+
+    stmt.bindString(":VID", vid);
+    auto rset = stmt.executeQuery();
+    rset.next();
+    return rset.columnUint64("NB_FILES");
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 void RdbmsCatalogue::deleteTapeFiles(rdbms::Conn& conn, const std::string& vid) const {
   try {
     const char * const sql = 
@@ -2455,6 +2553,38 @@ void RdbmsCatalogue::reclaimTape(const common::dataStructures::SecurityIdentity
   }
 }
 
+//------------------------------------------------------------------------------
+// checkTapeForLabel
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::checkTapeForLabel(const std::string &vid) {
+  try {
+    auto conn = m_connPool.getConn();
+
+    TapeSearchCriteria searchCriteria;
+    searchCriteria.vid = vid;
+    const auto tapes = getTapes(conn, searchCriteria);
+
+    if(tapes.empty()) {
+      throw exception::UserError(std::string("Cannot label tape ") + vid +
+                                             " because it does not exist");
+    }
+    // The tape exists; check whether it still contains any files
+    const uint64_t nbFilesOnTape = getNbFilesOnTape(conn, vid);
+    if(0 != nbFilesOnTape) {
+      throw exception::UserError(std::string("Cannot label tape ") + vid +
+                                             " because it has " +
+                                             std::to_string(nbFilesOnTape) +
+                                             " file(s)");
+    }
+  } catch (exception::UserError &) {
+    throw;
+  }
+  catch (exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 //------------------------------------------------------------------------------
 // getTapeLogFromRset
 //------------------------------------------------------------------------------
@@ -2672,6 +2802,11 @@ void RdbmsCatalogue::modifyTapeCapacityInBytes(const common::dataStructures::Sec
 void RdbmsCatalogue::modifyTapeEncryptionKey(const common::dataStructures::SecurityIdentity &admin,
   const std::string &vid, const std::string &encryptionKey) {
   try {
+    optional<std::string> optionalEncryptionKey;
+    if(!encryptionKey.empty()) {
+      optionalEncryptionKey = encryptionKey;
+    }
+
     const time_t now = time(nullptr);
     const char *const sql =
       "UPDATE TAPE SET "
@@ -2683,7 +2818,7 @@ void RdbmsCatalogue::modifyTapeEncryptionKey(const common::dataStructures::Secur
         "VID = :VID";
     auto conn = m_connPool.getConn();
     auto stmt = conn.createStmt(sql);
-    stmt.bindString(":ENCRYPTION_KEY", encryptionKey);
+    stmt.bindOptionalString(":ENCRYPTION_KEY", optionalEncryptionKey);
     stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username);
     stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host);
     stmt.bindUint64(":LAST_UPDATE_TIME", now);
@@ -2705,12 +2840,13 @@ void RdbmsCatalogue::modifyTapeEncryptionKey(const common::dataStructures::Secur
 // tapeMountedForArchive
 //------------------------------------------------------------------------------
 void RdbmsCatalogue::tapeMountedForArchive(const std::string &vid, const std::string &drive) {
-  try {
+  try {
     const time_t now = time(nullptr);
     const char *const sql =
       "UPDATE TAPE SET "
         "LAST_WRITE_DRIVE = :LAST_WRITE_DRIVE,"
-        "LAST_WRITE_TIME = :LAST_WRITE_TIME "
+        "LAST_WRITE_TIME = :LAST_WRITE_TIME, "
+        "WRITE_MOUNT_COUNT = WRITE_MOUNT_COUNT + 1 "
       "WHERE "
         "VID = :VID";
     auto conn = m_connPool.getConn();
@@ -2740,7 +2876,8 @@ void RdbmsCatalogue::tapeMountedForRetrieve(const std::string &vid, const std::s
     const char *const sql =
       "UPDATE TAPE SET "
         "LAST_READ_DRIVE = :LAST_READ_DRIVE,"
-        "LAST_READ_TIME = :LAST_READ_TIME "
+        "LAST_READ_TIME = :LAST_READ_TIME, "
+        "READ_MOUNT_COUNT = READ_MOUNT_COUNT + 1 "
       "WHERE "
         "VID = :VID";
     auto conn = m_connPool.getConn();
@@ -2822,6 +2959,93 @@ void RdbmsCatalogue::noSpaceLeftOnTape(const std::string &vid) {
   }
 }
 
+//------------------------------------------------------------------------------
+// setTapeReadOnly
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::setTapeReadOnly(const common::dataStructures::SecurityIdentity &admin, const std::string &vid,
+  const bool readOnlyValue) {
+  try {
+    const time_t now = time(nullptr);
+    const char *const sql =
+      "UPDATE TAPE SET "
+        "IS_READ_ONLY = :IS_READ_ONLY,"
+        "LAST_UPDATE_USER_NAME = :LAST_UPDATE_USER_NAME,"
+        "LAST_UPDATE_HOST_NAME = :LAST_UPDATE_HOST_NAME,"
+        "LAST_UPDATE_TIME = :LAST_UPDATE_TIME "
+      "WHERE "
+        "VID = :VID";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindBool(":IS_READ_ONLY", readOnlyValue);
+    stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username);
+    stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host);
+    stmt.bindUint64(":LAST_UPDATE_TIME", now);
+    stmt.bindString(":VID", vid);
+    stmt.executeNonQuery();
+
+    if(0 == stmt.getNbAffectedRows()) {
+      throw exception::UserError(std::string("Cannot modify tape ") + vid + " because it does not exist");
+    }
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// setTapeReadOnlyOnError
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::setTapeReadOnlyOnError(const std::string &vid) {
+  try {
+    const char *const sql =
+      "UPDATE TAPE SET "
+        "IS_READ_ONLY = '1' "
+      "WHERE "
+        "VID = :VID";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":VID", vid);
+    stmt.executeNonQuery();
+
+    if (0 == stmt.getNbAffectedRows()) {
+      throw exception::Exception(std::string("Tape ") + vid + " does not exist");
+    }
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// setTapeIsFromCastorInUnitTests
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::setTapeIsFromCastorInUnitTests(const std::string &vid) {
+  try {
+    const char *const sql =
+      "UPDATE TAPE SET "
+        "IS_FROM_CASTOR = '1' "
+      "WHERE "
+        "VID = :VID";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":VID", vid);
+    stmt.executeNonQuery();
+
+    if (0 == stmt.getNbAffectedRows()) {
+      throw exception::Exception(std::string("Tape ") + vid + " does not exist");
+    }
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 //------------------------------------------------------------------------------
 // setTapeDisabled
 //------------------------------------------------------------------------------
@@ -4054,7 +4278,7 @@ void RdbmsCatalogue::createActivitiesFairShareWeight(const common::dataStructure
     
     const time_t now = time(nullptr);
     const char *const sql =
-      "INSERT INTO ACTIVITIES_WEIGHTS ("
+      "INSERT INTO ACTIVITIES_WEIGHTS("
         "DISK_INSTANCE_NAME,"
         "ACTIVITY,"
         "WEIGHT,"
@@ -4705,11 +4929,11 @@ void RdbmsCatalogue::insertArchiveFile(rdbms::Conn &conn, const ArchiveFileRow &
         "DISK_INSTANCE_NAME,"
         "DISK_FILE_ID,"
         "DISK_FILE_PATH,"
-        "DISK_FILE_USER,"
-        "DISK_FILE_GROUP,"
+        "DISK_FILE_UID,"
+        "DISK_FILE_GID,"
         "SIZE_IN_BYTES,"
-        "CHECKSUM_TYPE,"
-        "CHECKSUM_VALUE,"
+        "CHECKSUM_BLOB,"
+        "CHECKSUM_ADLER32,"
         "STORAGE_CLASS_ID,"
         "CREATION_TIME,"
         "RECONCILIATION_TIME)"
@@ -4718,11 +4942,11 @@ void RdbmsCatalogue::insertArchiveFile(rdbms::Conn &conn, const ArchiveFileRow &
         "DISK_INSTANCE_NAME,"
         ":DISK_FILE_ID,"
         ":DISK_FILE_PATH,"
-        ":DISK_FILE_USER,"
-        ":DISK_FILE_GROUP,"
+        ":DISK_FILE_UID,"
+        ":DISK_FILE_GID,"
         ":SIZE_IN_BYTES,"
-        ":CHECKSUM_TYPE,"
-        ":CHECKSUM_VALUE,"
+        ":CHECKSUM_BLOB,"
+        ":CHECKSUM_ADLER32,"
         "STORAGE_CLASS_ID,"
         ":CREATION_TIME,"
         ":RECONCILIATION_TIME "
@@ -4737,11 +4961,19 @@ void RdbmsCatalogue::insertArchiveFile(rdbms::Conn &conn, const ArchiveFileRow &
     stmt.bindString(":DISK_INSTANCE_NAME", row.diskInstance);
     stmt.bindString(":DISK_FILE_ID", row.diskFileId);
     stmt.bindString(":DISK_FILE_PATH", row.diskFilePath);
-    stmt.bindString(":DISK_FILE_USER", row.diskFileUser);
-    stmt.bindString(":DISK_FILE_GROUP", row.diskFileGroup);
+    stmt.bindUint64(":DISK_FILE_UID", row.diskFileOwnerUid);
+    stmt.bindUint64(":DISK_FILE_GID", row.diskFileGid);
     stmt.bindUint64(":SIZE_IN_BYTES", row.size);
-    stmt.bindString(":CHECKSUM_TYPE", row.checksumType);
-    stmt.bindString(":CHECKSUM_VALUE", row.checksumValue);
+    stmt.bindBlob  (":CHECKSUM_BLOB", row.checksumBlob.serialize());
+    // Keep transition ADLER32 checksum up-to-date if it exists
+    uint32_t adler32;
+    try {
+      std::string adler32hex = checksum::ChecksumBlob::ByteArrayToHex(row.checksumBlob.at(checksum::ADLER32));
+      adler32 = strtoul(adler32hex.c_str(), nullptr, 16);
+    } catch(exception::ChecksumTypeMismatch &ex) {
+      adler32 = 0;
+    }
+    stmt.bindUint64(":CHECKSUM_ADLER32", adler32);
     stmt.bindString(":STORAGE_CLASS_NAME", row.storageClassName);
     stmt.bindUint64(":CREATION_TIME", now);
     stmt.bindUint64(":RECONCILIATION_TIME", now);
@@ -4769,15 +5001,15 @@ void RdbmsCatalogue::checkTapeFileSearchCriteria(const TapeFileSearchCriteria &s
     }
   }
 
-  if(searchCriteria.diskFileGroup && !searchCriteria.diskInstance) {
-    throw exception::UserError(std::string("Disk file group ") + searchCriteria.diskFileGroup.value() + " is ambiguous "
-      "without disk instance name");
+  if(searchCriteria.diskFileGid && !searchCriteria.diskInstance) {
+    throw exception::UserError(std::string("Disk file group ") + std::to_string(searchCriteria.diskFileGid.value()) +
+      " is ambiguous without disk instance name");
   }
 
-  if(searchCriteria.diskInstance && searchCriteria.diskFileGroup) {
-    if(!diskFileGroupExists(conn, searchCriteria.diskInstance.value(), searchCriteria.diskFileGroup.value())) {
+  if(searchCriteria.diskInstance && searchCriteria.diskFileGid) {
+    if(!diskFileGroupExists(conn, searchCriteria.diskInstance.value(), searchCriteria.diskFileGid.value())) {
       throw exception::UserError(std::string("Disk file group ") + searchCriteria.diskInstance.value() + "::" +
-        searchCriteria.diskFileGroup.value() + " does not exist");
+        std::to_string(searchCriteria.diskFileGid.value()) + " does not exist");
     }
   }
 
@@ -4805,15 +5037,15 @@ void RdbmsCatalogue::checkTapeFileSearchCriteria(const TapeFileSearchCriteria &s
     }
   }
 
-  if(searchCriteria.diskFileUser && !searchCriteria.diskInstance) {
-    throw exception::UserError(std::string("Disk file user ") + searchCriteria.diskFileUser.value() + " is ambiguous "
-      "without disk instance name");
+  if(searchCriteria.diskFileOwnerUid && !searchCriteria.diskInstance) {
+    throw exception::UserError(std::string("Disk file user ") + std::to_string(searchCriteria.diskFileOwnerUid.value()) +
+      " is ambiguous without disk instance name");
   }
 
-  if(searchCriteria.diskInstance && searchCriteria.diskFileUser) {
-    if(!diskFileUserExists(conn, searchCriteria.diskInstance.value(), searchCriteria.diskFileUser.value())) {
+  if(searchCriteria.diskInstance && searchCriteria.diskFileOwnerUid) {
+    if(!diskFileUserExists(conn, searchCriteria.diskInstance.value(), searchCriteria.diskFileOwnerUid.value())) {
       throw exception::UserError(std::string("Disk file user ") + searchCriteria.diskInstance.value() + "::" +
-        searchCriteria.diskFileUser.value() + " does not exist");
+        std::to_string(searchCriteria.diskFileOwnerUid.value()) + " does not exist");
     }
   }
 
@@ -4874,18 +5106,18 @@ std::list<common::dataStructures::ArchiveFile> RdbmsCatalogue::getFilesForRepack
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -4919,11 +5151,10 @@ std::list<common::dataStructures::ArchiveFile> RdbmsCatalogue::getFilesForRepack
       archiveFile.diskInstance = rset.columnString("DISK_INSTANCE_NAME");
       archiveFile.diskFileId = rset.columnString("DISK_FILE_ID");
       archiveFile.diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-      archiveFile.diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-      archiveFile.diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+      archiveFile.diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+      archiveFile.diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
       archiveFile.fileSize = rset.columnUint64("SIZE_IN_BYTES");
-      archiveFile.checksumType = rset.columnString("CHECKSUM_TYPE");
-      archiveFile.checksumValue = rset.columnString("CHECKSUM_VALUE");
+      archiveFile.checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
       archiveFile.storageClass = rset.columnString("STORAGE_CLASS_NAME");
       archiveFile.creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
       archiveFile.reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -4932,11 +5163,10 @@ std::list<common::dataStructures::ArchiveFile> RdbmsCatalogue::getFilesForRepack
       tapeFile.vid = rset.columnString("VID");
       tapeFile.fSeq = rset.columnUint64("FSEQ");
       tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-      tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+      tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
       tapeFile.copyNb = rset.columnUint64("COPY_NB");
       tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-      tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
-      tapeFile.checksumValue = archiveFile.checksumValue; // Duplicated for convenience
+      tapeFile.checksumBlob = archiveFile.checksumBlob; // Duplicated for convenience
       if (!rset.columnIsNull("SSBY_VID")) {
         tapeFile.supersededByVid = rset.columnString("SSBY_VID");
-        tapeFile.supersededByFSeq = rset.columnUint64("SSBY_VID");
+        tapeFile.supersededByFSeq = rset.columnUint64("SSBY_FSEQ");
@@ -4981,7 +5211,6 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary(
     std::string sql =
       "SELECT "
         "COALESCE(SUM(ARCHIVE_FILE.SIZE_IN_BYTES), 0) AS TOTAL_BYTES,"
-        "COALESCE(SUM(TAPE_FILE.COMPRESSED_SIZE_IN_BYTES), 0) AS TOTAL_COMPRESSED_BYTES,"
         "COUNT(ARCHIVE_FILE.ARCHIVE_FILE_ID) AS TOTAL_FILES "
       "FROM "
         "ARCHIVE_FILE "
@@ -4997,8 +5226,8 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary(
       searchCriteria.diskInstance   ||
       searchCriteria.diskFileId     ||
       searchCriteria.diskFilePath   ||
-      searchCriteria.diskFileUser   ||
-      searchCriteria.diskFileGroup  ||
+      searchCriteria.diskFileOwnerUid   ||
+      searchCriteria.diskFileGid  ||
       searchCriteria.storageClass   ||
       searchCriteria.vid            ||
       searchCriteria.tapeFileCopyNb ||
@@ -5027,14 +5256,14 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary(
       sql += "ARCHIVE_FILE.DISK_FILE_PATH = :DISK_FILE_PATH";
       addedAWhereConstraint = true;
     }
-    if(searchCriteria.diskFileUser) {
+    if(searchCriteria.diskFileOwnerUid) {
       if(addedAWhereConstraint) sql += " AND ";
-      sql += "ARCHIVE_FILE.DISK_FILE_USER = :DISK_FILE_USER";
+      sql += "ARCHIVE_FILE.DISK_FILE_UID = :DISK_FILE_UID";
       addedAWhereConstraint = true;
     }
-    if(searchCriteria.diskFileGroup) {
+    if(searchCriteria.diskFileGid) {
       if(addedAWhereConstraint) sql += " AND ";
-      sql += "ARCHIVE_FILE.DISK_FILE_GROUP = :DISK_FILE_GROUP";
+      sql += "ARCHIVE_FILE.DISK_FILE_GID = :DISK_FILE_GID";
       addedAWhereConstraint = true;
     }
     if(searchCriteria.storageClass) {
@@ -5071,11 +5300,11 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary(
     if(searchCriteria.diskFilePath) {
       stmt.bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value());
     }
-    if(searchCriteria.diskFileUser) {
-      stmt.bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value());
+    if(searchCriteria.diskFileOwnerUid) {
+      stmt.bindUint64(":DISK_FILE_UID", searchCriteria.diskFileOwnerUid.value());
     }
-    if(searchCriteria.diskFileGroup) {
-      stmt.bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value());
+    if(searchCriteria.diskFileGid) {
+      stmt.bindUint64(":DISK_FILE_GID", searchCriteria.diskFileGid.value());
     }
     if(searchCriteria.storageClass) {
       stmt.bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value());
@@ -5096,7 +5325,6 @@ common::dataStructures::ArchiveFileSummary RdbmsCatalogue::getTapeFileSummary(
 
     common::dataStructures::ArchiveFileSummary summary;
     summary.totalBytes = rset.columnUint64("TOTAL_BYTES");
-    summary.totalCompressedBytes = rset.columnUint64("TOTAL_COMPRESSED_BYTES");
     summary.totalFiles = rset.columnUint64("TOTAL_FILES");
     return summary;
   } catch(exception::UserError &) {
@@ -5165,7 +5393,7 @@ void RdbmsCatalogue::tapeLabelled(const std::string &vid, const std::string &dri
 // checkAndGetNextArchiveFileId
 //------------------------------------------------------------------------------
 uint64_t RdbmsCatalogue::checkAndGetNextArchiveFileId(const std::string &diskInstanceName,
-  const std::string &storageClassName, const common::dataStructures::UserIdentity &user) {
+  const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) {
   try {
     const auto storageClass = StorageClass(diskInstanceName, storageClassName);
     const auto copyToPoolMap = getCachedTapeCopyToPoolMap(storageClass);
@@ -5218,7 +5446,7 @@ uint64_t RdbmsCatalogue::checkAndGetNextArchiveFileId(const std::string &diskIns
 //------------------------------------------------------------------------------
 common::dataStructures::ArchiveFileQueueCriteria RdbmsCatalogue::getArchiveFileQueueCriteria(
   const std::string &diskInstanceName,
-  const std::string &storageClassName, const common::dataStructures::UserIdentity &user) {
+  const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user) {
   try {
     const StorageClass storageClass = StorageClass(diskInstanceName, storageClassName);
     const common::dataStructures::TapeCopyToPoolMap copyToPoolMap = getCachedTapeCopyToPoolMap(storageClass);
@@ -5407,7 +5635,7 @@ void RdbmsCatalogue::updateTape(
 common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetrieveFile(
   const std::string &diskInstanceName,
   const uint64_t archiveFileId,
-  const common::dataStructures::UserIdentity &user,
+  const common::dataStructures::RequesterIdentity &user,
   const optional<std::string>& activity,
   log::LogContext &lc) {
   try {
@@ -5658,6 +5886,8 @@ std::list<TapeForWriting> RdbmsCatalogue::getTapesForWriting(const std::string &
 //      "LABEL_TIME IS NOT NULL AND "  // Set when the tape has been labelled
         "IS_DISABLED = '0' AND "
         "IS_FULL = '0' AND "
+        "IS_READ_ONLY = '0' AND "
+        "IS_FROM_CASTOR = '0' AND "
         "LOGICAL_LIBRARY_NAME = :LOGICAL_LIBRARY_NAME";
 
     auto conn = m_connPool.getConn();
@@ -5702,7 +5932,7 @@ void RdbmsCatalogue::insertTapeFile(
           "VID,"
           "FSEQ,"
           "BLOCK_ID,"
-          "COMPRESSED_SIZE_IN_BYTES,"
+          "LOGICAL_SIZE_IN_BYTES,"
           "COPY_NB,"
           "CREATION_TIME,"
           "ARCHIVE_FILE_ID)"
@@ -5710,7 +5940,7 @@ void RdbmsCatalogue::insertTapeFile(
           ":VID,"
           ":FSEQ,"
           ":BLOCK_ID,"
-          ":COMPRESSED_SIZE_IN_BYTES,"
+          ":LOGICAL_SIZE_IN_BYTES,"
           ":COPY_NB,"
           ":CREATION_TIME,"
           ":ARCHIVE_FILE_ID)";
@@ -5719,7 +5949,7 @@ void RdbmsCatalogue::insertTapeFile(
       stmt.bindString(":VID", tapeFile.vid);
       stmt.bindUint64(":FSEQ", tapeFile.fSeq);
       stmt.bindUint64(":BLOCK_ID", tapeFile.blockId);
-      stmt.bindUint64(":COMPRESSED_SIZE_IN_BYTES", tapeFile.compressedSize);
+      stmt.bindUint64(":LOGICAL_SIZE_IN_BYTES", tapeFile.fileSize);
       stmt.bindUint64(":COPY_NB", tapeFile.copyNb);
       stmt.bindUint64(":CREATION_TIME", now);
       stmt.bindUint64(":ARCHIVE_FILE_ID", archiveFileId);
@@ -5825,18 +6055,18 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -5863,11 +6093,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         archiveFile->diskInstance = rset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = rset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = rset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = rset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = rset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = rset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -5880,11 +6109,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         tapeFile.vid = rset.columnString("VID");
         tapeFile.fSeq = rset.columnUint64("FSEQ");
         tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = rset.columnUint64("COPY_NB");
         tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!rset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = rset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = rset.columnUint64("SSBY_FSEQ");
@@ -5915,18 +6143,18 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -5956,11 +6184,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         archiveFile->diskInstance = rset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = rset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = rset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = rset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = rset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = rset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -5973,11 +6200,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         tapeFile.vid = rset.columnString("VID");
         tapeFile.fSeq = rset.columnUint64("FSEQ");
         tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = rset.columnUint64("COPY_NB");
         tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!rset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = rset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = rset.columnUint64("SSBY_FSEQ");
@@ -6062,18 +6288,18 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -6102,11 +6328,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         archiveFile->diskInstance = rset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = rset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = rset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = rset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = rset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = rset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -6119,11 +6344,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         tapeFile.vid = rset.columnString("VID");
         tapeFile.fSeq = rset.columnUint64("FSEQ");
         tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = rset.columnUint64("COPY_NB");
         tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!rset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = rset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = rset.columnUint64("SSBY_FSEQ");
@@ -6156,18 +6380,18 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME,"
         "TAPE_FILE.SUPERSEDED_BY_VID AS SSBY_VID,"
@@ -6199,11 +6423,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         archiveFile->diskInstance = rset.columnString("DISK_INSTANCE_NAME");
         archiveFile->diskFileId = rset.columnString("DISK_FILE_ID");
         archiveFile->diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-        archiveFile->diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-        archiveFile->diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+        archiveFile->diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+        archiveFile->diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
         archiveFile->fileSize = rset.columnUint64("SIZE_IN_BYTES");
-        archiveFile->checksumType = rset.columnString("CHECKSUM_TYPE");
-        archiveFile->checksumValue = rset.columnString("CHECKSUM_VALUE");
+        archiveFile->checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
         archiveFile->storageClass = rset.columnString("STORAGE_CLASS_NAME");
         archiveFile->creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
         archiveFile->reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -6216,11 +6439,10 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
         tapeFile.vid = rset.columnString("VID");
         tapeFile.fSeq = rset.columnUint64("FSEQ");
         tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-        tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+        tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
         tapeFile.copyNb = rset.columnUint64("COPY_NB");
         tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-        tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
-        tapeFile.checksumValue = archiveFile->checksumValue; // Duplicated for convenience
+        tapeFile.checksumBlob = archiveFile->checksumBlob; // Duplicated for convenience
         if (!rset.columnIsNull("SSBY_VID")) {
           tapeFile.supersededByVid = rset.columnString("SSBY_VID");
           tapeFile.supersededByFSeq = rset.columnUint64("SSBY_FSEQ");
@@ -6305,16 +6527,15 @@ void RdbmsCatalogue::checkTapeFileWrittenFieldsAreSet(const std::string &calling
     if(event.diskInstance.empty()) throw exception::Exception("diskInstance is an empty string");
     if(event.diskFileId.empty()) throw exception::Exception("diskFileId is an empty string");
     if(event.diskFilePath.empty()) throw exception::Exception("diskFilePath is an empty string");
-    if(event.diskFileUser.empty()) throw exception::Exception("diskFileUser is an empty string");
-    if(event.diskFileGroup.empty()) throw exception::Exception("diskFileGroup is an empty string");
+    if(0 == event.diskFileOwnerUid) throw exception::Exception("diskFileOwnerUid is 0");
+    if(0 == event.diskFileGid) throw exception::Exception("diskFileGid is 0");
     if(0 == event.size) throw exception::Exception("size is 0");
-    if(event.checksumType.empty()) throw exception::Exception("checksumType is an empty string");
-    if(event.checksumValue.empty()) throw exception::Exception("checksumValue is an empty string");
+    if(event.checksumBlob.length() == 0) throw exception::Exception("checksumBlob is an empty string");
     if(event.storageClassName.empty()) throw exception::Exception("storageClassName is an empty string");
     if(event.vid.empty()) throw exception::Exception("vid is an empty string");
     if(0 == event.fSeq) throw exception::Exception("fSeq is 0");
     if(0 == event.blockId && event.fSeq != 1) throw exception::Exception("blockId is 0 and fSeq is not 1");
-    if(0 == event.compressedSize) throw exception::Exception("compressedSize is 0");
+    if(0 == event.size) throw exception::Exception("size is 0");
     if(0 == event.copyNb) throw exception::Exception("copyNb is 0");
     if(event.tapeDrive.empty()) throw exception::Exception("tapeDrive is an empty string");
   } catch (exception::Exception &ex) {
diff --git a/catalogue/RdbmsCatalogue.hpp b/catalogue/RdbmsCatalogue.hpp
index 0248740652af4121b5c9c523fc8776ae6dc029eb..beb9a57fbf5c1acff8b580fd787daba3f8b3a9c1 100644
--- a/catalogue/RdbmsCatalogue.hpp
+++ b/catalogue/RdbmsCatalogue.hpp
@@ -116,7 +116,7 @@ public:
   uint64_t checkAndGetNextArchiveFileId(
     const std::string &diskInstanceName,
     const std::string &storageClassName,
-    const common::dataStructures::UserIdentity &user) override;
+    const common::dataStructures::RequesterIdentity &user) override;
 
   /**
    * Returns the information required to queue an archive request.
@@ -135,12 +135,12 @@ public:
   common::dataStructures::ArchiveFileQueueCriteria getArchiveFileQueueCriteria(
     const std::string &diskInstanceName,
     const std::string &storageClassName,
-    const common::dataStructures::UserIdentity &user) override;
+    const common::dataStructures::RequesterIdentity &user) override;
 
   /**
    * Returns the list of tapes that can be written to by a tape drive in the
    * specified logical library, in other words tapes that are labelled, not
-   * disabled, not full and are in the specified logical library.
+   * disabled, not full, not read-only and are in the specified logical library.
    *
    * @param logicalLibraryName The name of the logical library.
    * @return The list of tapes for writing.
@@ -179,7 +179,7 @@ public:
   common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(
     const std::string &diskInstanceName,
     const uint64_t archiveFileId,
-    const common::dataStructures::UserIdentity &user,
+    const common::dataStructures::RequesterIdentity &user,
     const optional<std::string> & activity,
     log::LogContext &lc) override;
 
@@ -291,6 +291,7 @@ public:
     const uint64_t capacityInBytes,
     const bool disabled,
     const bool full,
+    const bool readOnly,
     const std::string &comment) override;
 
   void deleteTape(const std::string &vid) override;
@@ -335,13 +336,40 @@ public:
    * @param vid The volume identifier of the tape to be reclaimed.
    */
   void reclaimTape(const common::dataStructures::SecurityIdentity &admin, const std::string &vid) override;
-   /**
+  
+  /**
+   * Checks the specified tape for the tape label command.
+   *
+   * This method checks if the tape is safe to be labeled and will throw an
+   * exception if the specified tape is not ready to be labeled.
+   *
+   * @param vid The volume identifier of the tape to be checked.
+   */
+  void checkTapeForLabel(const std::string &vid) override;
+  
+  /**
    * Returns the number of non superseded files contained in the tape identified by its vid
    * @param conn the database connection
    * @param vid the vid in which we will count non superseded files
    * @return the number of non superseded files on the vid
    */
   uint64_t getNbNonSupersededFilesOnTape(rdbms::Conn &conn, const std::string &vid) const;
+  
+  /**
+   * Returns the total number of files contained in the tape identified by its vid
+   * @param vid the vid in which we will count the files (superseded or not)
+   * @return the number of files on the tape
+   */
+  uint64_t getNbFilesOnTape(const std::string &vid) const override;
+  
+  /**
+   * Returns the total number of files contained in the tape identified by its vid
+   * @param conn the database connection
+   * @param vid the vid in which we will count the files (superseded or not)
+   * @return the number of files on the tape
+   */
+  uint64_t getNbFilesOnTape(rdbms::Conn &conn, const std::string &vid) const;
+  
   /**
    * Delete all the tape files of the VID passed in parameter
    * @param conn the database connection
@@ -374,7 +402,35 @@ public:
    * @param fullValue Set to true if the tape is full.
    */
   void setTapeFull(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool fullValue) override;
-
+  
+  /**
+   * Sets the read-only status of the specified tape.
+   *
+   * Please note that this method is to be called by the CTA front-end in
+   * response to a command from the CTA command-line interface (CLI).
+   *
+   * @param admin The administrator.
+   * @param vid The volume identifier of the tape to be marked as read-only.
+   * @param readOnlyValue Set to true if the tape is read-only.
+   */
+  void setTapeReadOnly(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool readOnlyValue) override;
+  
+  /**
+   * This method notifies the CTA catalogue to set the specified tape read-only
+   * in case of a problem.
+   *
+   * @param vid The volume identifier of the tape.
+   */
+  void setTapeReadOnlyOnError(const std::string &vid) override;
+  
+  /**
+   * This method notifies the CTA catalogue to mark the specified tape as
+   * originating from CASTOR. This method is for unit tests ONLY and MUST never be called in production CTA code!
+   *
+   * @param vid The volume identifier of the tape.
+   */
+  void setTapeIsFromCastorInUnitTests(const std::string &vid) override;
+  
   void setTapeDisabled(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const bool disabledValue) override;
   void modifyTapeComment(const common::dataStructures::SecurityIdentity &admin, const std::string &vid, const std::string &comment) override;
 
@@ -757,26 +813,22 @@ protected:
   /**
    * Returns true if the specified disk file user exists.
    *
-   * @param conn The database connection.
-   * @param diskInstanceName The name of the disk instance to which the disk
-   * file user belongs.
-   * @param diskFileUSer The name of the disk file user.
-   * @return True if the disk file user exists.
+   * @param conn              The database connection.
+   * @param diskInstanceName  The name of the disk instance to which the disk file user belongs.
+   * @param diskFileOwnerUid  The user ID of the disk file owner.
+   * @return                  True if the disk file user exists.
    */
-  bool diskFileUserExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileUser)
-    const;
+  bool diskFileUserExists(rdbms::Conn &conn, const std::string &diskInstanceName, uint32_t diskFileOwnerUid) const;
 
   /**
    * Returns true if the specified disk file group exists.
    *
-   * @param conn The database connection.
-   * @param diskInstanceName The name of the disk instance to which the disk
-   * file group belongs.
-   * @param diskFileGroup The name of the disk file group.
-   * @return True if the disk file group exists.
+   * @param conn              The database connection.
+   * @param diskInstanceName  The name of the disk instance to which the disk file group belongs.
+   * @param diskFileGid       The group ID of the disk file.
+   * @return                  True if the disk file group exists.
    */
-  bool diskFileGroupExists(rdbms::Conn &conn, const std::string &diskInstanceName, const std::string &diskFileGroup)
-    const;
+  bool diskFileGroupExists(rdbms::Conn &conn, const std::string &diskInstanceName, uint32_t diskFileGid) const;
 
   /**
    * Returns true if the specified archive route exists.
diff --git a/catalogue/RdbmsCatalogueGetArchiveFilesForRepackItor.cpp b/catalogue/RdbmsCatalogueGetArchiveFilesForRepackItor.cpp
index c7c0e920a5a0a33f54f4bdc0aa58515356f5f7d1..5857695308921701d8d93b6ab39a5e7382573176 100644
--- a/catalogue/RdbmsCatalogueGetArchiveFilesForRepackItor.cpp
+++ b/catalogue/RdbmsCatalogueGetArchiveFilesForRepackItor.cpp
@@ -44,11 +44,10 @@ namespace {
     archiveFile.diskInstance = rset.columnString("DISK_INSTANCE_NAME");
     archiveFile.diskFileId = rset.columnString("DISK_FILE_ID");
     archiveFile.diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-    archiveFile.diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-    archiveFile.diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+    archiveFile.diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+    archiveFile.diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
     archiveFile.fileSize = rset.columnUint64("SIZE_IN_BYTES");
-    archiveFile.checksumType = rset.columnString("CHECKSUM_TYPE");
-    archiveFile.checksumValue = rset.columnString("CHECKSUM_VALUE");
+    archiveFile.checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
     archiveFile.storageClass = rset.columnString("STORAGE_CLASS_NAME");
     archiveFile.creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
     archiveFile.reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -59,12 +58,18 @@ namespace {
       tapeFile.vid = rset.columnString("VID");
       tapeFile.fSeq = rset.columnUint64("FSEQ");
       tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-      tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+      tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
       tapeFile.copyNb = rset.columnUint64("COPY_NB");
       tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-      tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
-      tapeFile.checksumValue = archiveFile.checksumValue; // Duplicated for convenience
-
+      tapeFile.checksumBlob = archiveFile.checksumBlob; // Duplicated for convenience
+      cta::optional<std::string> supersededByVid = rset.columnOptionalString("SUPERSEDED_BY_VID");
+      if(supersededByVid){
+        tapeFile.supersededByVid = supersededByVid.value();
+      }
+      cta::optional<uint64_t> supersededByFSeq = rset.columnOptionalUint64("SUPERSEDED_BY_FSEQ");
+      if(supersededByFSeq){
+        tapeFile.supersededByFSeq = supersededByFSeq.value();
+      }
       archiveFile.tapeFiles.push_back(tapeFile);
     }
 
@@ -92,20 +97,22 @@ RdbmsCatalogueGetArchiveFilesForRepackItor::RdbmsCatalogueGetArchiveFilesForRepa
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_COPY.VID AS VID,"
         "TAPE_COPY.FSEQ AS FSEQ,"
         "TAPE_COPY.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_COPY.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_COPY.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_COPY.COPY_NB AS COPY_NB,"
         "TAPE_COPY.CREATION_TIME AS TAPE_FILE_CREATION_TIME, "
+        "TAPE_COPY.SUPERSEDED_BY_VID AS SUPERSEDED_BY_VID, "
+        "TAPE_COPY.SUPERSEDED_BY_FSEQ AS SUPERSEDED_BY_FSEQ, "
         "TAPE.TAPE_POOL_NAME AS TAPE_POOL_NAME "
       "FROM "
         "TAPE_FILE REPACK_TAPE "
@@ -121,6 +128,10 @@ RdbmsCatalogueGetArchiveFilesForRepackItor::RdbmsCatalogueGetArchiveFilesForRepa
         "REPACK_TAPE.VID = :VID "
       "AND "
         "REPACK_TAPE.FSEQ >= :START_FSEQ "
+     "AND "
+        "REPACK_TAPE.SUPERSEDED_BY_VID IS NULL "
+      "AND "
+        "REPACK_TAPE.SUPERSEDED_BY_FSEQ IS NULL "
       "ORDER BY REPACK_TAPE.FSEQ";
 
     m_conn = connPool.getConn();
diff --git a/catalogue/RdbmsCatalogueGetArchiveFilesItor.cpp b/catalogue/RdbmsCatalogueGetArchiveFilesItor.cpp
index 9b80ab1c03d5597f78e275eaaba2607669878eec..acaa6c8023ed19f6f1ac213e6224eec15b99ae63 100644
--- a/catalogue/RdbmsCatalogueGetArchiveFilesItor.cpp
+++ b/catalogue/RdbmsCatalogueGetArchiveFilesItor.cpp
@@ -44,11 +44,10 @@ namespace {
     archiveFile.diskInstance = rset.columnString("DISK_INSTANCE_NAME");
     archiveFile.diskFileId = rset.columnString("DISK_FILE_ID");
     archiveFile.diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
-    archiveFile.diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
-    archiveFile.diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+    archiveFile.diskFileInfo.owner_uid = rset.columnUint64("DISK_FILE_UID");
+    archiveFile.diskFileInfo.gid = rset.columnUint64("DISK_FILE_GID");
     archiveFile.fileSize = rset.columnUint64("SIZE_IN_BYTES");
-    archiveFile.checksumType = rset.columnString("CHECKSUM_TYPE");
-    archiveFile.checksumValue = rset.columnString("CHECKSUM_VALUE");
+    archiveFile.checksumBlob.deserializeOrSetAdler32(rset.columnBlob("CHECKSUM_BLOB"), rset.columnUint64("CHECKSUM_ADLER32"));
     archiveFile.storageClass = rset.columnString("STORAGE_CLASS_NAME");
     archiveFile.creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
     archiveFile.reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
@@ -59,11 +58,10 @@ namespace {
       tapeFile.vid = rset.columnString("VID");
       tapeFile.fSeq = rset.columnUint64("FSEQ");
       tapeFile.blockId = rset.columnUint64("BLOCK_ID");
-      tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+      tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
       tapeFile.copyNb = rset.columnUint64("COPY_NB");
       tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
-      tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
-      tapeFile.checksumValue = archiveFile.checksumValue; // Duplicated for convenience
+      tapeFile.checksumBlob = archiveFile.checksumBlob; // Duplicated for convenience
       if(!rset.columnIsNull("SUPERSEDED_BY_VID") && !rset.columnIsNull("SUPERSEDED_BY_FSEQ")){
         tapeFile.supersededByVid = rset.columnString("SUPERSEDED_BY_VID");
         tapeFile.supersededByFSeq = rset.columnUint64("SUPERSEDED_BY_FSEQ");
@@ -95,18 +93,18 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor(
         "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
         "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
         "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
-        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
-        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_UID AS DISK_FILE_UID,"
+        "ARCHIVE_FILE.DISK_FILE_GID AS DISK_FILE_GID,"
         "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
-        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
-        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.CHECKSUM_BLOB AS CHECKSUM_BLOB,"
+        "ARCHIVE_FILE.CHECKSUM_ADLER32 AS CHECKSUM_ADLER32,"
         "STORAGE_CLASS.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
         "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
         "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
         "TAPE_FILE.VID AS VID,"
         "TAPE_FILE.FSEQ AS FSEQ,"
         "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
-        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.LOGICAL_SIZE_IN_BYTES AS LOGICAL_SIZE_IN_BYTES,"
         "TAPE_FILE.COPY_NB AS COPY_NB,"
         "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME, "
         "TAPE_FILE.SUPERSEDED_BY_VID AS SUPERSEDED_BY_VID, "
@@ -126,8 +124,8 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor(
       searchCriteria.diskInstance   ||
       searchCriteria.diskFileId     ||
       searchCriteria.diskFilePath   ||
-      searchCriteria.diskFileUser   ||
-      searchCriteria.diskFileGroup  ||
+      searchCriteria.diskFileOwnerUid   ||
+      searchCriteria.diskFileGid  ||
       searchCriteria.storageClass   ||
       searchCriteria.vid            ||
       searchCriteria.tapeFileCopyNb ||
@@ -158,14 +156,14 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor(
       sql += "ARCHIVE_FILE.DISK_FILE_PATH = :DISK_FILE_PATH";
       addedAWhereConstraint = true;
     }
-    if(searchCriteria.diskFileUser) {
+    if(searchCriteria.diskFileOwnerUid) {
       if(addedAWhereConstraint) sql += " AND ";
-      sql += "ARCHIVE_FILE.DISK_FILE_USER = :DISK_FILE_USER";
+      sql += "ARCHIVE_FILE.DISK_FILE_UID = :DISK_FILE_UID";
       addedAWhereConstraint = true;
     }
-    if(searchCriteria.diskFileGroup) {
+    if(searchCriteria.diskFileGid) {
       if(addedAWhereConstraint) sql += " AND ";
-      sql += "ARCHIVE_FILE.DISK_FILE_GROUP = :DISK_FILE_GROUP";
+      sql += "ARCHIVE_FILE.DISK_FILE_GID = :DISK_FILE_GID";
       addedAWhereConstraint = true;
     }
     if(searchCriteria.storageClass) {
@@ -210,11 +208,11 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor(
     if(searchCriteria.diskFilePath) {
       m_stmt.bindString(":DISK_FILE_PATH", searchCriteria.diskFilePath.value());
     }
-    if(searchCriteria.diskFileUser) {
-      m_stmt.bindString(":DISK_FILE_USER", searchCriteria.diskFileUser.value());
+    if(searchCriteria.diskFileOwnerUid) {
+      m_stmt.bindUint64(":DISK_FILE_UID", searchCriteria.diskFileOwnerUid.value());
     }
-    if(searchCriteria.diskFileGroup) {
-      m_stmt.bindString(":DISK_FILE_GROUP", searchCriteria.diskFileGroup.value());
+    if(searchCriteria.diskFileGid) {
+      m_stmt.bindUint64(":DISK_FILE_GID", searchCriteria.diskFileGid.value());
     }
     if(searchCriteria.storageClass) {
       m_stmt.bindString(":STORAGE_CLASS_NAME", searchCriteria.storageClass.value());
diff --git a/catalogue/SqliteCatalogue.cpp b/catalogue/SqliteCatalogue.cpp
index 022143ee3ba2e4a66b74b495c3e352c659ddf55e..541329ba7dea0585671d523d5457ed6fa9b1d236 100644
--- a/catalogue/SqliteCatalogue.cpp
+++ b/catalogue/SqliteCatalogue.cpp
@@ -17,9 +17,6 @@
  */
 
 #include "catalogue/ArchiveFileRow.hpp"
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
-#include "catalogue/FileSizeMismatch.hpp"
 #include "catalogue/SqliteCatalogueSchema.hpp"
 #include "catalogue/SqliteCatalogue.hpp"
 #include "common/exception/DatabaseConstraintError.hpp"
@@ -84,11 +81,10 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
          .add("requestDiskInstance", diskInstanceName)
          .add("diskFileId", archiveFile->diskFileId)
          .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-         .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-         .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+         .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+         .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
          .add("fileSize", std::to_string(archiveFile->fileSize))
-         .add("checksumType", archiveFile->checksumType)
-         .add("checksumValue", archiveFile->checksumValue)
+         .add("checksumBlob", archiveFile->checksumBlob)
          .add("creationTime", std::to_string(archiveFile->creationTime))
          .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
          .add("storageClass", archiveFile->storageClass)
@@ -101,9 +97,8 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
           << " fSeq: " << it->fSeq
           << " blockId: " << it->blockId
           << " creationTime: " << it->creationTime
-          << " compressedSize: " << it->compressedSize
-          << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-          << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+          << " fileSize: " << it->fileSize
+          << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
           << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
           << " supersededByVid: " << it->supersededByVid
           << " supersededByFSeq: " << it->supersededByFSeq;
@@ -150,11 +145,10 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
        .add("diskInstance", archiveFile->diskInstance)
        .add("diskFileId", archiveFile->diskFileId)
        .add("diskFileInfo.path", archiveFile->diskFileInfo.path)
-       .add("diskFileInfo.owner", archiveFile->diskFileInfo.owner)
-       .add("diskFileInfo.group", archiveFile->diskFileInfo.group)
+       .add("diskFileInfo.owner_uid", archiveFile->diskFileInfo.owner_uid)
+       .add("diskFileInfo.gid", archiveFile->diskFileInfo.gid)
        .add("fileSize", std::to_string(archiveFile->fileSize))
-       .add("checksumType", archiveFile->checksumType)
-       .add("checksumValue", archiveFile->checksumValue)
+       .add("checksumBlob", archiveFile->checksumBlob)
        .add("creationTime", std::to_string(archiveFile->creationTime))
        .add("reconciliationTime", std::to_string(archiveFile->reconciliationTime))
        .add("storageClass", archiveFile->storageClass)
@@ -170,9 +164,8 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
         << " fSeq: " << it->fSeq
         << " blockId: " << it->blockId
         << " creationTime: " << it->creationTime
-        << " compressedSize: " << it->compressedSize
-        << " checksumType: " << it->checksumType //this shouldn't be here: repeated field
-        << " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
+        << " fileSize: " << it->fileSize
+        << " checksumBlob: " << it->checksumBlob //this shouldn't be here: repeated field
         << " copyNb: " << it->copyNb //this shouldn't be here: repeated field
         << " supersededByVid: " << it->supersededByVid
         << " supersededByFSeq: " << it->supersededByFSeq;
@@ -263,6 +256,8 @@ common::dataStructures::Tape SqliteCatalogue::selectTape(rdbms::Conn &conn, cons
         "LAST_FSEQ AS LAST_FSEQ,"
         "IS_DISABLED AS IS_DISABLED,"
         "IS_FULL AS IS_FULL,"
+        "IS_READ_ONLY AS IS_READ_ONLY,"
+        "IS_FROM_CASTOR AS IS_FROM_CASTOR,"
 
         "LABEL_DRIVE AS LABEL_DRIVE,"
         "LABEL_TIME AS LABEL_TIME,"
@@ -305,6 +300,8 @@ common::dataStructures::Tape SqliteCatalogue::selectTape(rdbms::Conn &conn, cons
     tape.lastFSeq = rset.columnUint64("LAST_FSEQ");
     tape.disabled = rset.columnBool("IS_DISABLED");
     tape.full = rset.columnBool("IS_FULL");
+    tape.readOnly = rset.columnBool("IS_READ_ONLY");
+    tape.isFromCastor = rset.columnBool("IS_FROM_CASTOR");
 
     tape.labelLog = getTapeLogFromRset(rset, "LABEL_DRIVE", "LABEL_TIME");
     tape.lastReadLog = getTapeLogFromRset(rset, "LAST_READ_DRIVE", "LAST_READ_TIME");
@@ -312,7 +309,7 @@ common::dataStructures::Tape SqliteCatalogue::selectTape(rdbms::Conn &conn, cons
 
     tape.comment = rset.columnString("USER_COMMENT");
 
-    common::dataStructures::UserIdentity creatorUI;
+    common::dataStructures::RequesterIdentity creatorUI;
     creatorUI.name = rset.columnString("CREATION_LOG_USER_NAME");
 
     common::dataStructures::EntryLog creationLog;
@@ -322,7 +319,7 @@ common::dataStructures::Tape SqliteCatalogue::selectTape(rdbms::Conn &conn, cons
 
     tape.creationLog = creationLog;
 
-    common::dataStructures::UserIdentity updaterUI;
+    common::dataStructures::RequesterIdentity updaterUI;
     updaterUI.name = rset.columnString("LAST_UPDATE_USER_NAME");
 
     common::dataStructures::EntryLog updateLog;
@@ -365,7 +362,7 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
 
     const auto tape = selectTape(conn, firstEvent.vid);
     uint64_t expectedFSeq = tape.lastFSeq + 1;
-    uint64_t totalCompressedBytesWritten = 0;
+    uint64_t totalLogicalBytesWritten = 0;
 
     for(const auto &eventP: events) {
       const auto & event = *eventP;
@@ -376,7 +373,7 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       }
 
       if(expectedFSeq != event.fSeq) {
-        exception::Exception ex;
+        exception::TapeFseqMismatch ex;
         ex.getMessage() << "FSeq mismatch for tape " << firstEvent.vid << ": expected=" << expectedFSeq << " actual=" <<
           firstEvent.fSeq;
         throw ex;
@@ -387,14 +384,14 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
       try {
         // If this is a file (as opposed to a placeholder), do the full processing.
         const auto &fileEvent=dynamic_cast<const TapeFileWritten &>(event); 
-        totalCompressedBytesWritten += fileEvent.compressedSize;
+        totalLogicalBytesWritten += fileEvent.size;
       } catch (std::bad_cast&) {}
     }
 
     auto lastEventItor = events.cend();
     lastEventItor--;
     const TapeItemWritten &lastEvent = **lastEventItor;
-    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalCompressedBytesWritten, lastEvent.tapeDrive);
+    updateTape(conn, lastEvent.vid, lastEvent.fSeq, totalLogicalBytesWritten, lastEvent.tapeDrive);
 
     for(const auto &event : events) {
       try {
@@ -426,12 +423,11 @@ void SqliteCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
       row.diskFileId = event.diskFileId;
       row.diskInstance = event.diskInstance;
       row.size = event.size;
-      row.checksumType = event.checksumType;
-      row.checksumValue = event.checksumValue;
+      row.checksumBlob = event.checksumBlob;
       row.storageClassName = event.storageClassName;
       row.diskFilePath = event.diskFilePath;
-      row.diskFileUser = event.diskFileUser;
-      row.diskFileGroup = event.diskFileGroup;
+      row.diskFileOwnerUid = event.diskFileOwnerUid;
+      row.diskFileGid = event.diskFileGid;
       insertArchiveFile(conn, row);
     } catch(exception::DatabasePrimaryKeyError &) {
       // Ignore this error
@@ -460,26 +456,14 @@ void SqliteCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
       throw ex;
     }
 
-    if(archiveFile->checksumType != event.checksumType) {
-      catalogue::ChecksumTypeMismatch ex;
-      ex.getMessage() << "Checksum type mismatch: expected=" << archiveFile->checksumType << ", actual=" <<
-        event.checksumType << ": " << fileContext.str();
-      throw ex;
-    }
-
-    if(archiveFile->checksumValue != event.checksumValue) {
-      catalogue::ChecksumValueMismatch ex;
-      ex.getMessage() << "Checksum value mismatch: expected=" << archiveFile->checksumValue << ", actual=" <<
-        event.checksumValue << ": " << fileContext.str();
-      throw ex;
-    }
+    archiveFile->checksumBlob.validate(event.checksumBlob);
 
     // Insert the tape file
     common::dataStructures::TapeFile tapeFile;
     tapeFile.vid            = event.vid;
     tapeFile.fSeq           = event.fSeq;
     tapeFile.blockId        = event.blockId;
-    tapeFile.compressedSize = event.compressedSize;
+    tapeFile.fileSize       = event.size;
     tapeFile.copyNb         = event.copyNb;
     tapeFile.creationTime   = now;
     insertTapeFile(conn, tapeFile, event.archiveFileId);
diff --git a/catalogue/TapeFileSearchCriteria.hpp b/catalogue/TapeFileSearchCriteria.hpp
index b85eb9da03436ae5e12ead7033acc809c7ef6dcf..4ac9f622247f589e7907f3274cc66dfef47bfb9f 100644
--- a/catalogue/TapeFileSearchCriteria.hpp
+++ b/catalogue/TapeFileSearchCriteria.hpp
@@ -62,12 +62,12 @@ struct TapeFileSearchCriteria {
   /**
    * The owner of a file within its disk instance.
    */
-  optional<std::string> diskFileUser;
+  optional<uint64_t> diskFileOwnerUid;
 
   /**
    * The group of a file within its disk instance.
    */
-  optional<std::string> diskFileGroup;
+  optional<uint64_t> diskFileGid;
 
   /**
    * The storage class name of the file.
diff --git a/catalogue/TapeFileWritten.cpp b/catalogue/TapeFileWritten.cpp
index 086073095bce61ec77b6ef3ec63da78cfab8944e..3ff44a373114a964f245751df59064cb0a8dc04b 100644
--- a/catalogue/TapeFileWritten.cpp
+++ b/catalogue/TapeFileWritten.cpp
@@ -26,9 +26,10 @@ namespace catalogue {
 //------------------------------------------------------------------------------
 TapeFileWritten::TapeFileWritten() :
   archiveFileId(0),
+  diskFileOwnerUid(0),
+  diskFileGid(0),
   size(0),
   blockId(0),
-  compressedSize(0),
   copyNb(0) {
 }
 
@@ -42,14 +43,12 @@ bool TapeFileWritten::operator==(const TapeFileWritten &rhs) const {
     diskInstance == rhs.diskInstance &&
     diskFileId == rhs.diskFileId &&
     diskFilePath == rhs.diskFilePath &&
-    diskFileUser == rhs.diskFileUser &&
-    diskFileGroup == rhs.diskFileGroup &&
+    diskFileOwnerUid == rhs.diskFileOwnerUid &&
+    diskFileGid == rhs.diskFileGid &&
     size == rhs.size &&
-    checksumType == rhs.checksumType &&
-    checksumValue == rhs.checksumValue &&
+    checksumBlob == rhs.checksumBlob &&
     storageClassName == rhs.storageClassName &&
     blockId == rhs.blockId &&
-    compressedSize == rhs.compressedSize &&
     copyNb == rhs.copyNb &&
     tapeDrive == rhs.tapeDrive;
 }
@@ -64,15 +63,14 @@ std::ostream &operator<<(std::ostream &os, const TapeFileWritten &obj) {
   "diskInstance=" << obj.diskInstance << ","
   "diskFileId=" << obj.diskFileId << ","
   "diskFilePath=" << obj.diskFilePath << ","
-  "diskFileUser=" << obj.diskFileUser << ","
-  "diskFileGroup=" << obj.diskFileGroup << ","
+  "diskFileOwnerUid=" << obj.diskFileOwnerUid << ","
+  "diskFileGid=" << obj.diskFileGid << ","
   "size=" << obj.size << ","
-  "checksumType=" << obj.checksumType << "checksumValue=" << obj.checksumValue << ","
+  "checksumBlob=" << obj.checksumBlob << ","
   "storageClassName=" << obj.storageClassName << ","
   "vid=" << obj.vid << ","
   "fSeq=" << obj.fSeq << ","
   "blockId=" << obj.blockId << ","
-  "compressedSize=" << obj.compressedSize << ","
   "copyNb=" << obj.copyNb << ","
   "tapeDrive=" << obj.tapeDrive <<
   "}";
diff --git a/catalogue/TapeFileWritten.hpp b/catalogue/TapeFileWritten.hpp
index 93f3c5379e868f30e6e52f88c97410bba242dbcd..c4402280660134aac77cf0eb5428d82cf2bb60e5 100644
--- a/catalogue/TapeFileWritten.hpp
+++ b/catalogue/TapeFileWritten.hpp
@@ -18,7 +18,7 @@
 
 #pragma once
 
-#include "common/checksum/Checksum.hpp"
+#include "common/checksum/ChecksumBlob.hpp"
 #include "TapeItemWritten.hpp"
 
 #include <string>
@@ -71,12 +71,12 @@ struct TapeFileWritten: public TapeItemWritten {
   /**
    * The user name of the source disk file within its host disk system.
    */
-  std::string diskFileUser;
+  uint32_t diskFileOwnerUid;
 
   /**
    * The group name of the source disk file within its host disk system.
    */
-  std::string diskFileGroup;
+  uint32_t diskFileGid;
 
   /**
    * The uncompressed size of the tape file in bytes.
@@ -84,15 +84,10 @@ struct TapeFileWritten: public TapeItemWritten {
   uint64_t size;
 
   /**
-   * Checksum type for the tape file contents.
+   * Set of checksum types and values
    */
-  std::string checksumType;
+  checksum::ChecksumBlob checksumBlob;
 
-  /**
-   * Checksum value for the tape file contents.
-   */
-  std::string checksumValue;
-  
   /**
    * The name of the file's storage class.
    */
@@ -104,12 +99,6 @@ struct TapeFileWritten: public TapeItemWritten {
    */
   uint64_t blockId;
 
-  /**
-   * The compressed size of the tape file in bytes.  In other words the actual
-   * number of bytes it occupies on tape.
-   */
-  uint64_t compressedSize;
-
   /**
    * The copy number of the tape file.
    */
diff --git a/catalogue/TapeSearchCriteria.hpp b/catalogue/TapeSearchCriteria.hpp
index 898ddbeffc29a5929bb8a94c9f169113d2b4aeaf..066eebb92f6c0e6cf3992992b81e0ad3203a1a24 100644
--- a/catalogue/TapeSearchCriteria.hpp
+++ b/catalogue/TapeSearchCriteria.hpp
@@ -80,6 +80,11 @@ struct TapeSearchCriteria {
    * Set to true if searching for full tapes.
    */
   optional<bool> full;
+  
+  /**
+   * Set to true if searching for read-only tapes.
+   */
+  optional<bool> readOnly;
 
 }; // struct TapeSearchCriteria
 
diff --git a/catalogue/FileSizeMismatch.cpp b/catalogue/UserSpecifiedANonEmptyLogicalLibrary.cpp
similarity index 70%
rename from catalogue/FileSizeMismatch.cpp
rename to catalogue/UserSpecifiedANonEmptyLogicalLibrary.cpp
index d30afbfb92ed22a57810b97f481b7e14d23a7ade..5564c844b0b03df0c016a7f383fd68e251fcbc23 100644
--- a/catalogue/FileSizeMismatch.cpp
+++ b/catalogue/UserSpecifiedANonEmptyLogicalLibrary.cpp
@@ -16,23 +16,17 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "catalogue/FileSizeMismatch.hpp"
+#include "UserSpecifiedANonEmptyLogicalLibrary.hpp"
 
 namespace cta {
 namespace catalogue {
 
-
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-FileSizeMismatch::FileSizeMismatch(const std::string &context, const bool embedBacktrace):
-  cta::exception::Exception(context, embedBacktrace) {
-}
-
-//------------------------------------------------------------------------------
-// destructor
-//------------------------------------------------------------------------------
-FileSizeMismatch::~FileSizeMismatch() {
+UserSpecifiedANonEmptyLogicalLibrary::UserSpecifiedANonEmptyLogicalLibrary(const std::string &context,
+  const bool embedBacktrace):
+  UserError(context, embedBacktrace) {
 }
 
 } // namespace catalogue
diff --git a/catalogue/UserSpecifiedANonEmptyLogicalLibrary.hpp b/catalogue/UserSpecifiedANonEmptyLogicalLibrary.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..acaf7dcb6938ac567df11381d0c24ac16c03d1d3
--- /dev/null
+++ b/catalogue/UserSpecifiedANonEmptyLogicalLibrary.hpp
@@ -0,0 +1,46 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/UserError.hpp"
+
+namespace cta {
+namespace catalogue {
+
+/**
+ * User error thrown when the logical library specified by the user contains
+ * one or more tapes when it should be empty.
+ */
+class UserSpecifiedANonEmptyLogicalLibrary: public exception::UserError {
+public:
+
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was thrown in the message
+   */
+  UserSpecifiedANonEmptyLogicalLibrary(const std::string &context = "", const bool embedBacktrace = true);
+
+}; // class UserSpecifiedANonEmptyLogicalLibrary
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/ChecksumTypeMismatch.cpp b/catalogue/UserSpecifiedANonExistentLogicalLibrary.cpp
similarity index 69%
rename from catalogue/ChecksumTypeMismatch.cpp
rename to catalogue/UserSpecifiedANonExistentLogicalLibrary.cpp
index ffc4d1915fbf00de2f22c35486c925865dd4178e..741216c2f3d013620b642a6d2fe33b8032341203 100644
--- a/catalogue/ChecksumTypeMismatch.cpp
+++ b/catalogue/UserSpecifiedANonExistentLogicalLibrary.cpp
@@ -16,23 +16,17 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "catalogue/ChecksumTypeMismatch.hpp"
+#include "UserSpecifiedANonExistentLogicalLibrary.hpp"
 
 namespace cta {
 namespace catalogue {
 
-
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-ChecksumTypeMismatch::ChecksumTypeMismatch(const std::string &context, const bool embedBacktrace):
-  cta::exception::Exception(context, embedBacktrace) {
-}
-
-//------------------------------------------------------------------------------
-// destructor
-//------------------------------------------------------------------------------
-ChecksumTypeMismatch::~ChecksumTypeMismatch() {
+UserSpecifiedANonExistentLogicalLibrary::UserSpecifiedANonExistentLogicalLibrary(const std::string &context,
+  const bool embedBacktrace):
+  UserError(context, embedBacktrace) {
 }
 
 } // namespace catalogue
diff --git a/catalogue/UserSpecifiedANonExistentLogicalLibrary.hpp b/catalogue/UserSpecifiedANonExistentLogicalLibrary.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..27d1970a98a9b5496223a26930747e62f0e8ab1a
--- /dev/null
+++ b/catalogue/UserSpecifiedANonExistentLogicalLibrary.hpp
@@ -0,0 +1,46 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/UserError.hpp"
+
+namespace cta {
+namespace catalogue {
+
+/**
+ * User error thrown when the logical library specified by the user does not
+ * exist when it should.
+ */
+class UserSpecifiedANonExistentLogicalLibrary: public exception::UserError {
+public:
+
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was thrown in the message
+   */
+  UserSpecifiedANonExistentLogicalLibrary(const std::string &context = "", const bool embedBacktrace = true);
+
+}; // class UserSpecifiedANonExistentLogicalLibrary
+
+} // namespace catalogue
+} // namespace cta
diff --git a/catalogue/catalogue_schema.pdf b/catalogue/catalogue_schema.pdf
index f91a9c188847cff6105dd7d6895224feebe8a6f0..38026d31d5f3955b46c2ed5da9a6522019c56da6 100644
Binary files a/catalogue/catalogue_schema.pdf and b/catalogue/catalogue_schema.pdf differ
diff --git a/catalogue/common_catalogue_schema.sql b/catalogue/common_catalogue_schema.sql
index 9ce6d376bcaf78e05741e8705fd56d4602d9d576..b848cea07c2f03e3f5cf244987dda0e5b8b615f0 100644
--- a/catalogue/common_catalogue_schema.sql
+++ b/catalogue/common_catalogue_schema.sql
@@ -102,6 +102,8 @@ CREATE TABLE TAPE(
   LAST_FSEQ               NUMERIC(20, 0)  CONSTRAINT TAPE_LF_NN   NOT NULL,
   IS_DISABLED             CHAR(1)         CONSTRAINT TAPE_ID_NN   NOT NULL,
   IS_FULL                 CHAR(1)         CONSTRAINT TAPE_IF_NN   NOT NULL,
+  IS_READ_ONLY            CHAR(1)         CONSTRAINT TAPE_IRO_NN  NOT NULL,
+  IS_FROM_CASTOR          CHAR(1)         CONSTRAINT TAPE_IFC_NN  NOT NULL,
   LABEL_DRIVE             VARCHAR(100),
   LABEL_TIME              NUMERIC(20, 0),
   LAST_READ_DRIVE         VARCHAR(100),
@@ -123,7 +125,9 @@ CREATE TABLE TAPE(
   CONSTRAINT TAPE_TAPE_POOL_FK FOREIGN KEY(TAPE_POOL_NAME)
     REFERENCES TAPE_POOL(TAPE_POOL_NAME),
   CONSTRAINT TAPE_IS_DISABLED_BOOL_CK CHECK(IS_DISABLED IN ('0', '1')),
-  CONSTRAINT TAPE_IS_FULL_BOOL_CK CHECK(IS_FULL IN ('0', '1'))
+  CONSTRAINT TAPE_IS_FULL_BOOL_CK CHECK(IS_FULL IN ('0', '1')),
+  CONSTRAINT TAPE_IS_READ_ONLY_BOOL_CK CHECK(IS_READ_ONLY IN ('0', '1')),
+  CONSTRAINT TAPE_IS_FROM_CASTOR_BOOL_CK CHECK(IS_FROM_CASTOR IN ('0', '1'))
 );
 CREATE INDEX TAPE_TAPE_POOL_NAME_IDX ON TAPE(TAPE_POOL_NAME);
 CREATE TABLE MOUNT_POLICY(
@@ -177,11 +181,11 @@ CREATE TABLE ARCHIVE_FILE(
   DISK_INSTANCE_NAME      VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_DIN_NN  NOT NULL,
   DISK_FILE_ID            VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_DFI_NN  NOT NULL,
   DISK_FILE_PATH          VARCHAR(2000)   CONSTRAINT ARCHIVE_FILE_DFP_NN  NOT NULL,
-  DISK_FILE_USER          VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_DFU_NN  NOT NULL,
-  DISK_FILE_GROUP         VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_DFG_NN  NOT NULL,
+  DISK_FILE_UID           NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_DFUID_NN  NOT NULL,
+  DISK_FILE_GID           NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_DFGID_NN  NOT NULL,
   SIZE_IN_BYTES           NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_SIB_NN  NOT NULL,
-  CHECKSUM_TYPE           VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_CT1_NN  NOT NULL,
-  CHECKSUM_VALUE          VARCHAR(100)    CONSTRAINT ARCHIVE_FILE_CV_NN   NOT NULL,
+  CHECKSUM_BLOB           CHECKSUM_BLOB_TYPE,
+  CHECKSUM_ADLER32        NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_CB2_NN  NOT NULL,
   STORAGE_CLASS_ID        NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_SCI_NN  NOT NULL,
   CREATION_TIME           NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_CT2_NN  NOT NULL,
   RECONCILIATION_TIME     NUMERIC(20, 0)  CONSTRAINT ARCHIVE_FILE_RT_NN   NOT NULL,
@@ -194,7 +198,7 @@ CREATE TABLE TAPE_FILE(
   VID                      VARCHAR(100)   CONSTRAINT TAPE_FILE_V_NN    NOT NULL,
   FSEQ                     NUMERIC(20, 0) CONSTRAINT TAPE_FILE_F_NN    NOT NULL,
   BLOCK_ID                 NUMERIC(20, 0) CONSTRAINT TAPE_FILE_BI_NN   NOT NULL,
-  COMPRESSED_SIZE_IN_BYTES NUMERIC(20, 0) CONSTRAINT TAPE_FILE_CSIB_NN NOT NULL,
+  LOGICAL_SIZE_IN_BYTES    NUMERIC(20, 0) CONSTRAINT TAPE_FILE_CSIB_NN NOT NULL,
   COPY_NB                  NUMERIC(20, 0) CONSTRAINT TAPE_FILE_CN_NN   NOT NULL,
   CREATION_TIME            NUMERIC(20, 0) CONSTRAINT TAPE_FILE_CT_NN   NOT NULL,
   ARCHIVE_FILE_ID          NUMERIC(20, 0) CONSTRAINT TAPE_FILE_AFI_NN  NOT NULL,
diff --git a/catalogue/mysql_catalogue_schema_trigger.sql b/catalogue/mysql_catalogue_schema_trigger.sql
index 16ac8b0277bac899c3bf6f0ebf86dec36d43753a..d383d500f636aa77133e359c902f069e2c7be00e 100644
--- a/catalogue/mysql_catalogue_schema_trigger.sql
+++ b/catalogue/mysql_catalogue_schema_trigger.sql
@@ -45,6 +45,14 @@ CREATE TRIGGER `CHECK_TAPE_BEFORE_INSERT` BEFORE INSERT ON `TAPE`
       SIGNAL SQLSTATE '45000'
       SET MESSAGE_TEXT = 'TAPE.IS_FULL should be 0 or 1';       
     END IF;
+    IF new.IS_READ_ONLY not in ('0','1') THEN
+      SIGNAL SQLSTATE '45000'
+      SET MESSAGE_TEXT = 'TAPE.IS_READ_ONLY should be 0 or 1';
+    END IF;
+    IF new.IS_FROM_CASTOR not in ('0','1') THEN
+      SIGNAL SQLSTATE '45000'
+      SET MESSAGE_TEXT = 'TAPE.IS_FROM_CASTOR should be 0 or 1';
+    END IF;
   END;
 
 CREATE TRIGGER `CHECK_TAPE_BEFORE_UPDATE` BEFORE UPDATE ON `TAPE`
@@ -58,6 +66,14 @@ CREATE TRIGGER `CHECK_TAPE_BEFORE_UPDATE` BEFORE UPDATE ON `TAPE`
       SIGNAL SQLSTATE '45000'
       SET MESSAGE_TEXT = 'TAPE.IS_FULL should be 0 or 1';       
     END IF;
+    IF new.IS_READ_ONLY not in ('0','1') THEN
+      SIGNAL SQLSTATE '45000'
+      SET MESSAGE_TEXT = 'TAPE.IS_READ_ONLY should be 0 or 1';
+    END IF;
+    IF new.IS_FROM_CASTOR not in ('0','1') THEN
+      SIGNAL SQLSTATE '45000'
+      SET MESSAGE_TEXT = 'TAPE.IS_FROM_CASTOR should be 0 or 1';
+    END IF;
   END;
 
 CREATE TRIGGER `TAPE_FILE_COPY_NB_GT_ZERO_BEFORE_INSERT` BEFORE INSERT ON `TAPE_FILE`
diff --git a/catalogue/oracle_catalogue_schema_header.sql b/catalogue/oracle_catalogue_schema_header.sql
index 710db2d3d46c57396789f34e2844d7cebdba8c60..4235531d59e2061793d31e303e06681bc4fb56dd 100644
--- a/catalogue/oracle_catalogue_schema_header.sql
+++ b/catalogue/oracle_catalogue_schema_header.sql
@@ -1,6 +1,6 @@
 CREATE SEQUENCE ARCHIVE_FILE_ID_SEQ
   INCREMENT BY 1
-  START WITH 1
+  START WITH 4294967296
   NOMAXVALUE
   MINVALUE 1
   NOCYCLE
@@ -8,7 +8,7 @@ CREATE SEQUENCE ARCHIVE_FILE_ID_SEQ
   NOORDER;
 CREATE SEQUENCE STORAGE_CLASS_ID_SEQ
   INCREMENT BY 1
-  START WITH 1
+  START WITH 4294967296
   NOMAXVALUE
   MINVALUE 1
   NOCYCLE
@@ -20,13 +20,13 @@ CREATE GLOBAL TEMPORARY TABLE TEMP_TAPE_FILE_BATCH(
 ON COMMIT DELETE ROWS;
 CREATE INDEX TEMP_T_F_B_ARCHIVE_FILE_ID_I ON TEMP_TAPE_FILE_BATCH(ARCHIVE_FILE_ID);
 CREATE GLOBAL TEMPORARY TABLE TEMP_TAPE_FILE_INSERTION_BATCH(
-  VID                      VARCHAR(100),
-  FSEQ                     NUMERIC(20, 0),
-  BLOCK_ID                 NUMERIC(20, 0),
-  COMPRESSED_SIZE_IN_BYTES NUMERIC(20, 0),
-  COPY_NB                  NUMERIC(20, 0),
-  CREATION_TIME            NUMERIC(20, 0),
-  ARCHIVE_FILE_ID          NUMERIC(20, 0)
+  VID                   VARCHAR(100),
+  FSEQ                  NUMERIC(20, 0),
+  BLOCK_ID              NUMERIC(20, 0),
+  LOGICAL_SIZE_IN_BYTES NUMERIC(20, 0),
+  COPY_NB               NUMERIC(20, 0),
+  CREATION_TIME         NUMERIC(20, 0),
+  ARCHIVE_FILE_ID       NUMERIC(20, 0)
 )
 ON COMMIT DELETE ROWS;
 CREATE INDEX TEMP_T_F_I_B_ARCHIVE_FILE_ID_I ON TEMP_TAPE_FILE_INSERTION_BATCH(ARCHIVE_FILE_ID);
diff --git a/catalogue/retryOnLostConnection.hpp b/catalogue/retryOnLostConnection.hpp
index 246de0a7d84e112f2e01f1db879f6b119f6869d5..d433f019f03413b7112148eb6dd8c2495de92a64 100644
--- a/catalogue/retryOnLostConnection.hpp
+++ b/catalogue/retryOnLostConnection.hpp
@@ -18,10 +18,10 @@
 
 #pragma once
 
-#include "catalogue/ChecksumTypeMismatch.hpp"
-#include "catalogue/ChecksumValueMismatch.hpp"
 #include "common/exception/Exception.hpp"
 #include "common/exception/LostDatabaseConnection.hpp"
+//#include "common/checksum/ChecksumTypeMismatch.hpp"
+//#include "common/checksum/ChecksumValueMismatch.hpp"
 #include "common/log/Logger.hpp"
 
 #include <list>
diff --git a/cmake/FindGRPC.cmake b/cmake/FindGRPC.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..d48a692d36c5b7618336d77e92a0b0d8e2a6efb6
--- /dev/null
+++ b/cmake/FindGRPC.cmake
@@ -0,0 +1,126 @@
+#
+# Locate and configure the gRPC library
+#
+# Adds the following targets:
+#
+#  gRPC::grpc - gRPC library
+#  gRPC::grpc++ - gRPC C++ library
+#  gRPC::grpc++_reflection - gRPC C++ reflection library
+#  gRPC::grpc_cpp_plugin - C++ generator plugin for Protocol Buffers
+#
+
+#
+# Generates C++ sources from the .proto files
+#
+# grpc_generate_cpp (<SRCS> <HDRS> <DEST> [<ARGN>...])
+#
+#  SRCS - variable to define with autogenerated source files
+#  HDRS - variable to define with autogenerated header files
+#  DEST - directory where the source files will be created
+#  ARGN - .proto files
+#
+function(GRPC_GENERATE_CPP SRCS HDRS DEST)
+  if(NOT ARGN)
+    message(SEND_ERROR "Error: GRPC_GENERATE_CPP() called without any proto files")
+    return()
+  endif()
+
+  if(GRPC_GENERATE_CPP_APPEND_PATH)
+    # Create an include path for each file specified
+    foreach(FIL ${ARGN})
+      get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+      get_filename_component(ABS_PATH ${ABS_FIL} PATH)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+          list(APPEND _protobuf_include_path -I ${ABS_PATH})
+      endif()
+    endforeach()
+  else()
+    set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
+  endif()
+
+  if(DEFINED PROTOBUF3_IMPORT_DIRS)
+    foreach(DIR ${PROTOBUF3_IMPORT_DIRS})
+      get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+          list(APPEND _protobuf_include_path -I ${ABS_PATH})
+      endif()
+    endforeach()
+  endif()
+
+  set(${SRCS})
+  set(${HDRS})
+  foreach(FIL ${ARGN})
+    get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+    get_filename_component(FIL_WE ${FIL} NAME_WE)
+
+    list(APPEND ${SRCS} "${DEST}/${FIL_WE}.grpc.pb.cc")
+    list(APPEND ${HDRS} "${DEST}/${FIL_WE}.grpc.pb.h")
+
+    add_custom_command(
+      OUTPUT "${DEST}/${FIL_WE}.grpc.pb.cc"
+             "${DEST}/${FIL_WE}.grpc.pb.h"
+      COMMAND ${PROTOBUF3_PROTOC3_EXECUTABLE}
+      ARGS --grpc_out ${DEST} ${_protobuf_include_path} --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} ${ABS_FIL}
+      DEPENDS ${ABS_FIL} ${PROTOBUF3_PROTOC3_EXECUTABLE} gRPC::grpc_cpp_plugin
+      COMMENT "Running C++ gRPC compiler on ${FIL}"
+      VERBATIM )
+  endforeach()
+
+  set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
+  set(${SRCS} ${${SRCS}} PARENT_SCOPE)
+  set(${HDRS} ${${HDRS}} PARENT_SCOPE)
+endfunction()
+
+# By default have GRPC_GENERATE_CPP macro pass -I to protoc
+# for each directory where a proto file is referenced.
+if(NOT DEFINED GRPC_GENERATE_CPP_APPEND_PATH)
+  set(GRPC_GENERATE_CPP_APPEND_PATH TRUE)
+endif()
+
+# Find gRPC include directory
+find_path(GRPC_INCLUDE_DIR grpc/grpc.h)
+mark_as_advanced(GRPC_INCLUDE_DIR)
+
+# Find gRPC library
+find_library(GRPC_LIBRARY NAMES grpc)
+mark_as_advanced(GRPC_LIBRARY)
+add_library(gRPC::grpc UNKNOWN IMPORTED)
+set_target_properties(gRPC::grpc PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
+    INTERFACE_LINK_LIBRARIES "-lpthread;-ldl"
+    IMPORTED_LOCATION ${GRPC_LIBRARY}
+)
+
+# Find gRPC C++ library
+find_library(GRPC_GRPC++_LIBRARY NAMES grpc++)
+mark_as_advanced(GRPC_GRPC++_LIBRARY)
+add_library(gRPC::grpc++ UNKNOWN IMPORTED)
+set_target_properties(gRPC::grpc++ PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
+    INTERFACE_LINK_LIBRARIES gRPC::grpc
+    IMPORTED_LOCATION ${GRPC_GRPC++_LIBRARY}
+)
+
+# Find gRPC C++ reflection library
+find_library(GRPC_GRPC++_REFLECTION_LIBRARY NAMES grpc++_reflection)
+mark_as_advanced(GRPC_GRPC++_REFLECTION_LIBRARY)
+add_library(gRPC::grpc++_reflection UNKNOWN IMPORTED)
+set_target_properties(gRPC::grpc++_reflection PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
+    INTERFACE_LINK_LIBRARIES gRPC::grpc++
+    IMPORTED_LOCATION ${GRPC_GRPC++_REFLECTION_LIBRARY}
+)
+
+# Find gRPC CPP generator
+find_program(GRPC_CPP_PLUGIN NAMES grpc_cpp_plugin)
+mark_as_advanced(GRPC_CPP_PLUGIN)
+add_executable(gRPC::grpc_cpp_plugin IMPORTED)
+set_target_properties(gRPC::grpc_cpp_plugin PROPERTIES
+    IMPORTED_LOCATION ${GRPC_CPP_PLUGIN}
+)
+
+include(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(gRPC DEFAULT_MSG
+    GRPC_LIBRARY GRPC_INCLUDE_DIR GRPC_GRPC++_REFLECTION_LIBRARY GRPC_CPP_PLUGIN)
diff --git a/cmake/FindProtobuf3.cmake b/cmake/FindProtobuf3.cmake
index 2f794ebd04ec81b8f449d76bb6796b7aa68149ab..bbb25ecc722c21bc21ac47eeafd26261378d01a8 100644
--- a/cmake/FindProtobuf3.cmake
+++ b/cmake/FindProtobuf3.cmake
@@ -15,20 +15,23 @@
 # (To distribute this file outside of CMake, substitute the full
 #  License text for the above reference.)
 
-set(PROTOBUF3_RPATH /usr/lib64/protobuf3)
+set(PROTOBUF3_ROOT /usr)
+#set(PROTOBUF3_ROOT /opt/eos)
+
+set(PROTOBUF3_RPATH ${PROTOBUF3_ROOT}/lib64/protobuf3)
 message(STATUS "PROTOBUF3_RPATH=${PROTOBUF3_RPATH}")
 
 set(PROTOBUF3_INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR})
 
 find_program(PROTOBUF3_PROTOC3_EXECUTABLE
-    NAMES protoc3
+    NAMES ${PROTOBUF3_ROOT}/bin/protoc3
     DOC "Version 3 of The Google Protocol Buffers Compiler"
 )
 message(STATUS "protoc3 is at ${PROTOBUF3_PROTOC3_EXECUTABLE} ")
 
 find_path(PROTOBUF3_INCLUDE_DIRS
   google/protobuf/message.h
-  PATHS /usr/include/protobuf3
+  PATHS ${PROTOBUF3_ROOT}/include/protobuf3
   NO_DEFAULT_PATH)
 message(STATUS "PROTOBUF3_INCLUDE_DIRS=${PROTOBUF3_INCLUDE_DIRS}")
 
diff --git a/cmdline/CMakeLists.txt b/cmdline/CMakeLists.txt
index 9098c080de5c08551c43c311c821febcf6f42739..3938037431ff395bd734925a2b9d4b0e533068d0 100644
--- a/cmdline/CMakeLists.txt
+++ b/cmdline/CMakeLists.txt
@@ -32,13 +32,8 @@ include_directories(${CMAKE_BINARY_DIR}/eos_cta ${PROTOBUF3_INCLUDE_DIRS})
 #
 # cta-admin <admin_command> is the SSI version of "cta <admin_command>"
 #
-add_executable(cta-admin
-               CtaAdminCmd.cpp
-               CtaAdminCmdParse.cpp
-               CtaAdminTextFormatter.cpp
-               ../common/dataStructures/DriveStatus.cpp
-               ../common/dataStructures/MountType.cpp)
-target_link_libraries(cta-admin XrdSsiPbEosCta XrdSsi-4 XrdSsiLib XrdUtils)
+add_executable(cta-admin CtaAdminCmd.cpp CtaAdminCmdParse.cpp CtaAdminTextFormatter.cpp)
+target_link_libraries(cta-admin XrdSsiPbEosCta XrdSsi-4 XrdSsiLib XrdUtils ctacommon)
 set_property (TARGET cta-admin APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
 
 #
diff --git a/cmdline/CtaAdminCmdParse.hpp b/cmdline/CtaAdminCmdParse.hpp
index 26c6a7377e300c1b6ea7e7782b31804fba411294..6ff0ea7b1db0d1b1f722cf97a2e5202681ff3658 100644
--- a/cmdline/CtaAdminCmdParse.hpp
+++ b/cmdline/CtaAdminCmdParse.hpp
@@ -246,6 +246,7 @@ const std::map<std::string, OptionBoolean::Key> boolOptions = {
    { "--encrypted",             OptionBoolean::ENCRYPTED },
    { "--force",                 OptionBoolean::FORCE },
    { "--full",                  OptionBoolean::FULL },
+   { "--readonly",              OptionBoolean::READ_ONLY },
 
    // hasOption options
    { "--checkchecksum",         OptionBoolean::CHECK_CHECKSUM },
@@ -268,6 +269,7 @@ const std::map<std::string, OptionUInt64::Key> uint64Options = {
    { "--capacity",              OptionUInt64::CAPACITY },
    { "--copynb",                OptionUInt64::COPY_NUMBER },
    { "--firstfseq",             OptionUInt64::FIRST_FSEQ },
+   { "--gid",                   OptionUInt64::GID },
    { "--id",                    OptionUInt64::ARCHIVE_FILE_ID },
    { "--lastfseq",              OptionUInt64::LAST_FSEQ },
    { "--maxdrivesallowed",      OptionUInt64::MAX_DRIVES_ALLOWED },
@@ -280,7 +282,8 @@ const std::map<std::string, OptionUInt64::Key> uint64Options = {
    { "--size",                  OptionUInt64::FILE_SIZE },
    { "--refreshinterval",       OptionUInt64::REFRESH_INTERVAL },
    { "--targetedfreespace",     OptionUInt64::TARGETED_FREE_SPACE },
-   { "--sleeptime",             OptionUInt64::SLEEP_TIME }
+   { "--sleeptime",             OptionUInt64::SLEEP_TIME },
+   { "--uid",                   OptionUInt64::OWNER_UID }
 };
 
 
@@ -295,7 +298,6 @@ const std::map<std::string, OptionString::Key> strOptions = {
    { "--drive",                 OptionString::DRIVE },
    { "--encryptionkey",         OptionString::ENCRYPTION_KEY },
    { "--file",                  OptionString::FILENAME },
-   { "--group",                 OptionString::GROUP },
    { "--hostname",              OptionString::HOSTNAME },
    { "--input",                 OptionString::INPUT },
    { "--instance",              OptionString::INSTANCE },
@@ -303,7 +305,6 @@ const std::map<std::string, OptionString::Key> strOptions = {
    { "--mediatype",             OptionString::MEDIA_TYPE },
    { "--mountpolicy",           OptionString::MOUNT_POLICY },
    { "--output",                OptionString::OUTPUT },
-   { "--owner",                 OptionString::OWNER },
    { "--path",                  OptionString::PATH },
    { "--storageclass",          OptionString::STORAGE_CLASS },
    { "--supply",                OptionString::SUPPLY },
@@ -347,7 +348,19 @@ const std::map<AdminCmd::Cmd, CmdHelp> cmdHelp = {
    { AdminCmd::CMD_LISTPENDINGRETRIEVES, { "listpendingretrieves", "lpr", { } }},
    { AdminCmd::CMD_LOGICALLIBRARY,       { "logicallibrary",       "ll",  { "add", "ch", "rm", "ls" } }},
    { AdminCmd::CMD_MOUNTPOLICY,          { "mountpolicy",          "mp",  { "add", "ch", "rm", "ls" } }},
-   { AdminCmd::CMD_REPACK,               { "repack",               "re",  { "add", "rm", "ls", "err" } }},
+   { AdminCmd::CMD_REPACK,               { "repack",               "re",  { "add", "rm", "ls", "err" },
+			 "\n  This command allows to manage repack requests.\n\n"
+			   "  Submit a repack request by using the \"add\" subcommand :\n"
+			   "   * Specify the vid (--vid option) or all the vids to repack by giving a file path to the --vidfile option.\n"
+			   "   * If the --bufferURL option is set, it will overwrite the default one. It should respect the following format : root://eosinstance//path/to/repack/buffer.\n"
+			   "     The default bufferURL is set in the CTA frontend configuration file.\n"
+			   "   * If the --justmove option is set, the files located on the tape to repack will be migrated on one or multiple tapes.\n"
+			   "     If the --justaddcopies option is set, new (or missing) copies (as defined by the storage class) of the files located on the tape to repack will be created and migrated.\n"
+			   "     By default, CTA will migrate AND add new (or missing) copies (as defined by the storage class) of the files located on the tape to repack.\n"
+                           "   * The --mountpolicy option allows to give a specific mount policy that will be applied to the repack subrequests (retrieve and archive requests).\n"
+			   "     By default, a hardcoded mount policy is applied (every request priorities and minimum request ages = 1)."
+					"\n\n" 
+					 }},
    { AdminCmd::CMD_REQUESTERMOUNTRULE,   { "requestermountrule",   "rmr", { "add", "ch", "rm", "ls" } }},
    { AdminCmd::CMD_SHOWQUEUES,           { "showqueues",           "sq",  { } }},
    { AdminCmd::CMD_STORAGECLASS,         { "storageclass",         "sc",  { "add", "ch", "rm", "ls" } }},
@@ -380,7 +393,7 @@ const Option opt_filename             { Option::OPT_STR,  "--file",
 const Option opt_firstfseq            { Option::OPT_UINT, "--firstfseq",             "-f",   " <first_fseq>" };
 const Option opt_force                { Option::OPT_BOOL, "--force",                 "-f",   " <\"true\" or \"false\">" };
 const Option opt_force_flag           { Option::OPT_FLAG, "--force",                 "-f",   "" };
-const Option opt_group                { Option::OPT_STR,  "--group",                 "-g",   " <group>" };
+const Option opt_gid                  { Option::OPT_UINT, "--gid",                   "-g",   " <group_id>" };
 const Option opt_hostname_alias       { Option::OPT_STR,  "--name",                  "-n",   " <host_name>",
                                         "--hostname" };
 const Option opt_input                { Option::OPT_STR,  "--input",                 "-i",   " <\"zero\" or \"urandom\">" };
@@ -405,7 +418,7 @@ const Option opt_number_of_files      { Option::OPT_UINT, "--nbfiles",
 const Option opt_number_of_files_alias{ Option::OPT_UINT, "--number",                "-n",   " <number_of_files>",
                                         "--nbfiles" };
 const Option opt_output               { Option::OPT_STR,  "--output",                "-o",   " <\"null\" or output_dir>" };
-const Option opt_owner                { Option::OPT_STR,  "--owner",                 "-o",   " <owner>" };
+const Option opt_owner_uid            { Option::OPT_UINT, "--uid",                   "-u",   " <owner_uid>" };
 const Option opt_partialfiles         { Option::OPT_UINT, "--partial",               "-p",   " <number_of_files_per_tape>" };
 const Option opt_partialtapes         { Option::OPT_UINT, "--partialtapesnumber",    "-p",   " <number_of_partial_tapes>" };
 const Option opt_path                 { Option::OPT_STR,  "--path",                  "-p",   " <full_path>" };
@@ -427,6 +440,7 @@ const Option opt_vid                  { Option::OPT_STR,  "--vid",
 const Option opt_vo                   { Option::OPT_STR,  "--vo",                    "--vo", " <vo>" };
 const Option opt_vidfile              { Option::OPT_STR_LIST, "--vidfile",           "-f",   " <filename>" };
 const Option opt_full                 { Option::OPT_BOOL, "--full",                  "-f",   " <\"true\" or \"false\">" };
+const Option opt_readonly             { Option::OPT_BOOL, "--readonly",              "-r",   " <\"true\" or \"false\">" };
 
 const Option opt_disksystem           { Option::OPT_STR,  "--disksystem",            "-n", " <disk_system_name>" };
 const Option opt_file_regexp          { Option::OPT_STR,  "--fileregexp",            "-r", " <file_regexp>" };
@@ -446,7 +460,7 @@ const std::map<cmd_key_t, cmd_val_t> cmdOptions = {
    /*----------------------------------------------------------------------------------------------------*/
    {{ AdminCmd::CMD_ARCHIVEFILE,          AdminCmd::SUBCMD_LS    },
       { opt_archivefileid.optional(), opt_diskid.optional(), opt_copynb.optional(),
-        opt_vid.optional(), opt_tapepool.optional(), opt_owner.optional(), opt_group.optional(),
+        opt_vid.optional(), opt_tapepool.optional(), opt_owner_uid.optional(), opt_gid.optional(),
         opt_storageclass.optional(), opt_path.optional(), opt_instance.optional(), opt_all.optional(),
         opt_summary.optional() }},
    /*----------------------------------------------------------------------------------------------------*/
@@ -500,7 +514,7 @@ const std::map<cmd_key_t, cmd_val_t> cmdOptions = {
    {{ AdminCmd::CMD_MOUNTPOLICY,          AdminCmd::SUBCMD_LS    }, { }},
    /*----------------------------------------------------------------------------------------------------*/
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_ADD   },
-      { opt_vid.optional(), opt_vidfile.optional(), opt_bufferurl, opt_justmove.optional(), opt_justaddcopies.optional() }},
+      { opt_vid.optional(), opt_vidfile.optional(), opt_bufferurl.optional(), opt_justmove.optional(), opt_justaddcopies.optional(), opt_mountpolicy.optional() }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_RM    }, { opt_vid }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_LS    }, { opt_vid.optional() }},
    {{ AdminCmd::CMD_REPACK,               AdminCmd::SUBCMD_ERR   }, { opt_vid }},
@@ -522,18 +536,18 @@ const std::map<cmd_key_t, cmd_val_t> cmdOptions = {
    {{ AdminCmd::CMD_STORAGECLASS,         AdminCmd::SUBCMD_LS    }, { }},
    /*----------------------------------------------------------------------------------------------------*/
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_ADD   },
-      { opt_vid, opt_mediatype, opt_vendor, opt_logicallibrary, opt_tapepool, opt_capacity, opt_disabled, opt_full,
+      { opt_vid, opt_mediatype, opt_vendor, opt_logicallibrary, opt_tapepool, opt_capacity, opt_disabled, opt_full, opt_readonly,
         opt_comment.optional() }},
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_CH    },
       { opt_vid, opt_mediatype.optional(), opt_vendor.optional(), opt_logicallibrary.optional(),
         opt_tapepool.optional(), opt_capacity.optional(), opt_encryptionkey.optional(), opt_disabled.optional(),
-        opt_full.optional(), opt_comment.optional() }},
+        opt_full.optional(), opt_readonly.optional(), opt_comment.optional() }},
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_RM    }, { opt_vid }},
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_RECLAIM }, { opt_vid }},
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_LS    },
       { opt_vid.optional(), opt_mediatype.optional(), opt_vendor.optional(),
         opt_logicallibrary.optional(), opt_tapepool.optional(), opt_vo.optional(), opt_capacity.optional(),
-        opt_disabled.optional(), opt_full.optional(), opt_all.optional() }},
+        opt_disabled.optional(), opt_full.optional(), opt_readonly.optional(), opt_all.optional() }},
    {{ AdminCmd::CMD_TAPE,                 AdminCmd::SUBCMD_LABEL },
       { opt_vid, opt_force.optional() }},
    /*----------------------------------------------------------------------------------------------------*/
diff --git a/cmdline/CtaAdminTextFormatter.cpp b/cmdline/CtaAdminTextFormatter.cpp
index 09ed468a2254814a989b9faaef35c2851b432fb9..683b3c61fa1afae5bd8b3efaa2277b4fdd4a47a4 100644
--- a/cmdline/CtaAdminTextFormatter.cpp
+++ b/cmdline/CtaAdminTextFormatter.cpp
@@ -20,6 +20,7 @@
 #include <iostream>
 #include <iomanip>
 #include <cmdline/CtaAdminTextFormatter.hpp>
+#include <common/checksum/ChecksumBlobSerDeser.hpp>
 #include <common/dataStructures/DriveStatusSerDeser.hpp>
 #include <common/dataStructures/MountTypeSerDeser.hpp>
 
@@ -162,6 +163,22 @@ void TextFormatter::printArchiveFileLsHeader() {
 }
 
 void TextFormatter::print(const ArchiveFileLsItem &afls_item) {
+  using namespace cta::checksum;
+
+  std::string checksumType("NONE");
+  std::string checksumValue;
+
+  ChecksumBlob csb;
+  ProtobufToChecksumBlob(afls_item.af().csb(), csb);
+
+  // Files can have multiple checksums of different types. Display only the first checksum here. All
+  // checksums will be listed in JSON.
+  if(!csb.empty()) {
+    auto cs_it = csb.getMap().begin();
+    checksumType = ChecksumTypeName.at(cs_it->first);
+    checksumValue = "0x" + ChecksumBlob::ByteArrayToHex(cs_it->second);
+  }
+
   push_back(
     afls_item.af().archive_id(),
     afls_item.copy_nb(),
@@ -171,11 +188,11 @@ void TextFormatter::print(const ArchiveFileLsItem &afls_item) {
     afls_item.af().disk_instance(),
     afls_item.af().disk_id(),
     dataSizeToStr(afls_item.af().size()),
-    afls_item.af().cs().type(),
-    afls_item.af().cs().value(),
+    checksumType,
+    checksumValue,
     afls_item.af().storage_class(),
-    afls_item.af().df().owner(),
-    afls_item.af().df().group(),
+    afls_item.af().df().owner_id().uid(),
+    afls_item.af().df().owner_id().gid(),
     timeToStr(afls_item.af().creation_time()),
     afls_item.tf().superseded_by_vid(),
     afls_item.tf().superseded_by_f_seq(),
@@ -421,6 +438,22 @@ void TextFormatter::printListPendingArchivesHeader() {
 }
 
 void TextFormatter::print(const ListPendingArchivesItem &lpa_item) {
+  using namespace cta::checksum;
+
+  std::string checksumType("NONE");
+  std::string checksumValue;
+
+  ChecksumBlob csb;
+  ProtobufToChecksumBlob(lpa_item.af().csb(), csb);
+
+  // Files can have multiple checksums of different types. Display only the first checksum here. All
+  // checksums will be listed in JSON.
+  if(!csb.empty()) {
+    auto cs_it = csb.getMap().begin();
+    checksumType = ChecksumTypeName.at(cs_it->first);
+    checksumValue = "0x" + ChecksumBlob::ByteArrayToHex(cs_it->second);
+  }
+
   push_back(
     lpa_item.tapepool(),
     lpa_item.af().archive_id(),
@@ -428,11 +461,11 @@ void TextFormatter::print(const ListPendingArchivesItem &lpa_item) {
     lpa_item.copy_nb(),
     lpa_item.af().disk_id(),
     lpa_item.af().disk_instance(),
-    lpa_item.af().cs().type(),
-    lpa_item.af().cs().value(),
+    checksumType,
+    checksumValue,
     dataSizeToStr(lpa_item.af().size()),
-    lpa_item.af().df().owner(),
-    lpa_item.af().df().group(),
+    lpa_item.af().df().owner_id().uid(),
+    lpa_item.af().df().owner_id().gid(),
     lpa_item.af().df().path()
   );
 }
@@ -477,8 +510,8 @@ void TextFormatter::print(const ListPendingRetrievesItem &lpr_item) {
     lpr_item.tf().f_seq(),
     lpr_item.tf().block_id(),
     dataSizeToStr(lpr_item.af().size()),
-    lpr_item.af().df().owner(),
-    lpr_item.af().df().group(),
+    lpr_item.af().df().owner_id().uid(),
+    lpr_item.af().df().owner_id().gid(),
     lpr_item.af().df().path()
   );
 }
@@ -601,7 +634,7 @@ void TextFormatter::print(const RepackLsItem &rels_item) {
    rels_item.failed_to_retrieve_files(),
    dataSizeToStr(rels_item.failed_to_retrieve_bytes()),
    rels_item.failed_to_archive_files(),
-   dataSizeToStr(rels_item.failed_to_retrieve_bytes()),
+   dataSizeToStr(rels_item.failed_to_archive_bytes()),
    rels_item.last_expanded_fseq(),
    rels_item.status()
   );
@@ -662,6 +695,7 @@ void TextFormatter::printShowQueuesHeader() {
     "full tapes",
     "empty tapes",
     "disabled tapes",
+    "rdonly tapes",
     "writable tapes"
   );
 }
@@ -671,7 +705,7 @@ void TextFormatter::print(const ShowQueuesItem &sq_item) {
   std::string minAge;
   std::string maxDrivesAllowed;
 
-  if(sq_item.mount_type() == ARCHIVE_FOR_USER ||
+  if(sq_item.mount_type() == ARCHIVE_FOR_USER || sq_item.mount_type() == ARCHIVE_FOR_REPACK || 
      sq_item.mount_type() == RETRIEVE) {
     priority         = std::to_string(sq_item.priority());
     minAge           = std::to_string(sq_item.min_age());
@@ -700,6 +734,7 @@ void TextFormatter::print(const ShowQueuesItem &sq_item) {
     sq_item.full_tapes(),
     sq_item.empty_tapes(),
     sq_item.disabled_tapes(),
+    sq_item.rdonly_tapes(),
     sq_item.writable_tapes()
   );
 }
@@ -750,18 +785,23 @@ void TextFormatter::printTapeLsHeader() {
     "last fseq",
     "full",
     "disabled",
+    "rdonly",
+    "from castor",
     "label drive",
     "label time",
     "last w drive",
     "last w time",
+    "w mounts",
     "last r drive",
     "last r time",
+    "r mounts",
     "c.user",
     "c.host",
     "c.time",
     "m.user",
     "m.host",
-    "m.time"
+    "m.time",
+    "comment"
   );
 }
 
@@ -779,18 +819,23 @@ void TextFormatter::print(const TapeLsItem &tals_item) {
     tals_item.last_fseq(),
     tals_item.full(),
     tals_item.disabled(),
+    tals_item.rdonly(),
+    tals_item.from_castor(),
     tals_item.has_label_log()        ? tals_item.label_log().drive()                  : "",
     tals_item.has_label_log()        ? timeToStr(tals_item.label_log().time())        : "",
     tals_item.has_last_written_log() ? tals_item.last_written_log().drive()           : "",
     tals_item.has_last_written_log() ? timeToStr(tals_item.last_written_log().time()) : "",
+    tals_item.write_mount_count(),
     tals_item.has_last_read_log()    ? tals_item.last_read_log().drive()              : "",
     tals_item.has_last_read_log()    ? timeToStr(tals_item.last_read_log().time())    : "",
+    tals_item.read_mount_count(),
     tals_item.creation_log().username(),
     tals_item.creation_log().host(),
     timeToStr(tals_item.creation_log().time()),
     tals_item.last_modification_log().username(),
     tals_item.last_modification_log().host(),
-    timeToStr(tals_item.last_modification_log().time())
+    timeToStr(tals_item.last_modification_log().time()),
+    tals_item.comment()
   );
 }
 
diff --git a/cmdline/EosCtaStub.cpp b/cmdline/EosCtaStub.cpp
index e53d4c16ca997085c820f47f55090f3c568a7379..19b79091112e86d8ffd9a9f8fab79a09d161b7b5 100644
--- a/cmdline/EosCtaStub.cpp
+++ b/cmdline/EosCtaStub.cpp
@@ -24,7 +24,8 @@
 
 #include <XrdSsiPbLog.hpp>
 
-#include "common/dataStructures/FrontendReturnCode.hpp"
+#include <common/dataStructures/FrontendReturnCode.hpp>
+#include <common/checksum/ChecksumBlobSerDeser.hpp>
 #include "CtaFrontendApi.hpp"
 
 
@@ -109,8 +110,15 @@ void base64Decode(cta::eos::Notification &notification, const std::string &argva
          notification.mutable_file()->mutable_mtime()->set_nsec(stoi(val.substr(pt_pos+1)));
       }
       else if(key == "size") notification.mutable_file()->set_size(stoi(val));
-      else if(key == "xstype") notification.mutable_file()->mutable_cks()->set_type(val);
-      else if(key == "xs") notification.mutable_file()->mutable_cks()->set_value(val);
+      else if(key == "xs")
+      {
+         // In principle it's possible to set the full checksum blob with multiple checksums of different
+         // types, but this is not currently supported in eos_wfe_stub. It's only possible to set one
+         // checksum, which is assumed to be of type ADLER32.
+         auto cs = notification.mutable_file()->mutable_csb()->add_cs();
+         cs->set_type(cta::common::ChecksumBlob::Checksum::ADLER32);
+         cs->set_value(cta::checksum::ChecksumBlob::HexToByteArray(val));
+      }
       else if(key == "mode") notification.mutable_file()->set_mode(stoi(val));
       else if(key == "file") notification.mutable_file()->set_lpath(val);
       else {
@@ -217,11 +225,18 @@ void fillNotification(cta::eos::Notification &notification, int argc, const char
       else if(argstr == "--dsturl")              notification.mutable_transport()->set_dst_url(argval); // for retrieve WF
 
       else if(argstr == "--diskid")              notification.mutable_file()->set_fid(std::stoi(argval));
-      else if(argstr == "--diskfileowner")       notification.mutable_file()->mutable_owner()->set_username(argval);
-      else if(argstr == "--diskfilegroup")       notification.mutable_file()->mutable_owner()->set_groupname(argval);
+      else if(argstr == "--diskfileowner")       notification.mutable_file()->mutable_owner()->set_uid(std::stoi(argval));
+      else if(argstr == "--diskfilegroup")       notification.mutable_file()->mutable_owner()->set_gid(std::stoi(argval));
       else if(argstr == "--size")                notification.mutable_file()->set_size(std::stoi(argval));
-      else if(argstr == "--checksumtype")        notification.mutable_file()->mutable_cks()->set_type(argval);
-      else if(argstr == "--checksumvalue")       notification.mutable_file()->mutable_cks()->set_value(argval);
+      else if(argstr == "--checksumvalue")
+      {
+         // In principle it's possible to set the full checksum blob with multiple checksums of different
+         // types, but this is not currently supported in eos_wfe_stub. It's only possible to set one
+         // checksum, which is assumed to be of type ADLER32.
+         auto cs = notification.mutable_file()->mutable_csb()->add_cs();
+         cs->set_type(cta::common::ChecksumBlob::Checksum::ADLER32);
+         cs->set_value(cta::checksum::ChecksumBlob::HexToByteArray(argval));
+      }
       else if(argstr == "--diskfilepath")        notification.mutable_file()->set_lpath(argval);
       else if(argstr == "--storageclass")        {
          google::protobuf::MapPair<std::string,std::string> sc("CTA_StorageClass", argval);
@@ -231,13 +246,6 @@ void fillNotification(cta::eos::Notification &notification, int argc, const char
          google::protobuf::MapPair<std::string,std::string> id("CTA_ArchiveFileId", argval);
          notification.mutable_file()->mutable_xattr()->insert(id);
       }
-      else if(argstr == "--diskpool")            {} // = default?
-      else if(argstr == "--throughput")          {} // = 10000?
-      else if(argstr == "--recoveryblob:base64") try {
-         base64Decode(notification, argval);
-      } catch(...) {
-         throw std::runtime_error("Invalid recovery blob: " + argval);
-      }
       else throw std::runtime_error("Unrecognised key " + argstr);
    }
 }
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 5c0965ce33438e166a913155209838590cc863b8..59cc3fd04f02961ce15e446cf7364e682806c7b7 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -25,6 +25,11 @@ add_subdirectory (exception)
 
 include_directories (${XROOTD_INCLUDE_DIR})
 
+#
+# Compiled protocol buffers (used for ChecksumBlob serialization)
+#
+include_directories(${CMAKE_BINARY_DIR}/eos_cta ${PROTOBUF3_INCLUDE_DIRS})
+
 set_source_files_properties(CRC.cpp PROPERTIES COMPILE_FLAGS -O2)
 
 set (COMMON_LIB_SRC_FILES
@@ -50,10 +55,12 @@ set (COMMON_LIB_SRC_FILES
   dataStructures/LogicalLibrary.cpp
   dataStructures/MountType.cpp
   dataStructures/MountPolicy.cpp
+  dataStructures/OwnerIdentity.cpp
   dataStructures/QueueAndMountSummary.cpp
   dataStructures/ReadTestResult.cpp
   dataStructures/RepackInfo.cpp
   dataStructures/RequesterGroupMountRule.cpp
+  dataStructures/RequesterIdentity.cpp
   dataStructures/RequesterMountRule.cpp
   dataStructures/RetrieveFileQueueCriteria.cpp
   dataStructures/RetrieveJob.cpp
@@ -66,10 +73,9 @@ set (COMMON_LIB_SRC_FILES
   dataStructures/TestSourceType.cpp
   dataStructures/UpdateFileInfoRequest.cpp
   dataStructures/UpdateFileStorageClassRequest.cpp
-  dataStructures/UserIdentity.cpp
   dataStructures/WriteTestResult.cpp
   dataStructures/utils.cpp
-  checksum/Checksum.cpp
+  checksum/ChecksumBlob.cpp
   exception/AcceptConnectionInterrupted.cpp
   exception/AcsQueryVolumeCmd.cpp
   exception/Backtrace.cpp
@@ -143,7 +149,6 @@ set (COMMON_LIB_SRC_FILES
   ConfigurationFile.cpp
   SourcedParameter.cpp   
   Timer.cpp
-  UserIdentity.cpp
   optional.cpp)
 
 add_library (ctacommon SHARED
@@ -154,16 +159,16 @@ set_property(TARGET ctacommon PROPERTY   VERSION "${CTA_LIBVERSION}")
 install (TARGETS ctacommon DESTINATION usr/${CMAKE_INSTALL_LIBDIR})
 
 target_link_libraries (ctacommon
+  XrdSsiPbEosCta
   pthread
   uuid
   z
   cap
   XrdCl
-  ${XROOTD_XRDCLIENT_LIB}
 )
 
 set (COMMON_UNIT_TESTS_LIB_SRC_FILES
-  checksum/ChecksumTest.cpp
+  checksum/ChecksumBlobTest.cpp
   ConfigurationFileTests.cpp
   SourcedParameterTests.cpp
   dataStructures/ArchiveFileTest.cpp
@@ -193,7 +198,6 @@ set (COMMON_UNIT_TESTS_LIB_SRC_FILES
   utils/GetOptThreadSafeTest.cpp
   utils/RegexTest.cpp
   utils/UtilsTest.cpp
-  UserIdentityTest.cpp
   optionalTest.cpp
   rangeTest.cpp)
 
diff --git a/common/Constants.hpp b/common/Constants.hpp
index 3943b9697c4b319c65439e4492cd5b7255932463..dd8b09d876611f19e70fa21c4c2221bc5df5d49b 100644
--- a/common/Constants.hpp
+++ b/common/Constants.hpp
@@ -29,6 +29,7 @@ namespace cta {
 const int CA_MAXVIDLEN = 6; // maximum length for a VID
 const int CTA_SCHEMA_VERSION_MAJOR = 0;
 const int CTA_SCHEMA_VERSION_MINOR = 0;
+const int TAPE_LABEL_UNITREADY_TIMEOUT = 300;
 
 } // namespace cta
 
diff --git a/common/CreationLog.cpp b/common/CreationLog.cpp
index e905ad54f292455475903ad327f675cfb7f08df2..823e1788835a341990b0deb4376ac4d9032fc024 100644
--- a/common/CreationLog.cpp
+++ b/common/CreationLog.cpp
@@ -30,6 +30,6 @@ cta::CreationLog::CreationLog():
 // constructor
 //------------------------------------------------------------------------------
 
-cta::CreationLog::CreationLog(const UserIdentity& user, 
+cta::CreationLog::CreationLog(const cta::common::dataStructures::OwnerIdentity& user, 
     const std::string& host, const time_t time, const std::string& comment):
   user(user), host(host), time(time), comment(comment) {}
diff --git a/common/CreationLog.hpp b/common/CreationLog.hpp
index 3f4c5e94c397ae6afb1c2d777aa3e88acedb14f9..f2ce28999e9ca7d677d6257c14f16113bb22d830 100644
--- a/common/CreationLog.hpp
+++ b/common/CreationLog.hpp
@@ -18,10 +18,10 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
-
 #include <string>
 
+#include "common/dataStructures/OwnerIdentity.hpp"
+
 namespace cta {
 
 /**
@@ -38,13 +38,13 @@ struct CreationLog {
   /**
    * Constructor.
    */
-  CreationLog(const UserIdentity &user, const std::string &host,
+  CreationLog(const common::dataStructures::OwnerIdentity &user, const std::string &host,
     const time_t time,  const std::string & comment = "");
 
   /**
    * The identity of the creator.
    */
-  UserIdentity user;
+  common::dataStructures::OwnerIdentity user;
 
   /**
    * The network name of the host from which they are submitting a request.
diff --git a/common/UserIdentity.hpp b/common/UserIdentity.hpp
deleted file mode 100644
index fae4e09de3a46dd72128827202349f4251c7c9ce..0000000000000000000000000000000000000000
--- a/common/UserIdentity.hpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#pragma once
-
-#include <ostream>
-#include <stdint.h>
-#include <string>
-
-namespace cta {
-
-/**
- * Class representing the identity of a user.
- */
-struct UserIdentity {
-
-  /**
-   * Constructor.
-   *
-   * Initialises all integer member-variables to non-valid values (max int).
-   */
-  UserIdentity() throw();
-
-  /**
-   * Constructor.
-   *
-   * @param uid The user ID of the user.
-   * @param gid The group ID of the user.
-   */
-  UserIdentity(const uint32_t uid, const uint32_t gid) throw();
-
-  /** 
-   * Returns true if the specified right-hand side is equal to this object.
-   *
-   * @param rhs The object on the right-hand side of the == operator.
-   * @return True if the specified right-hand side is equal to this object.
-   */
-  bool operator==(const UserIdentity &rhs) const;
-
-  /**
-   * Returns true if the specified right-hand side is not euqal to this object.
-   *
-   * @param rhs The object on the right-hand side of the != operator.
-   * @return True if the specified right-hand side is not equal to this object.
-   */
-  bool operator!=(const UserIdentity &rhs) const;
-
-  /**
-   * The user ID of the user.
-   */
-  uint32_t uid;
-
-  /**
-   * The group ID of the user.
-   */
-  uint32_t gid;
-
-
-}; // class UserIdentity
-
-} // namespace cta
-
-/**
- * Output stream operator for the cta::UserIdentity class.
- */
-std::ostream &operator<<(std::ostream &os, const cta::UserIdentity &obj);
diff --git a/common/UserIdentityTest.cpp b/common/UserIdentityTest.cpp
deleted file mode 100644
index a7dec0a0740896b28083eefbf391f08e9c15d8c9..0000000000000000000000000000000000000000
--- a/common/UserIdentityTest.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "common/UserIdentity.hpp"
-
-#include <gtest/gtest.h>
-
-namespace unitTests {
-
-class cta_UserIdentityTest: public ::testing::Test {
-protected:
-
-  virtual void SetUp() {
-  }
-
-  virtual void TearDown() {
-  }
-};
-
-TEST_F(cta_UserIdentityTest, equals_operator) {
-  using namespace cta;
-
-  const uint16_t uid1 = 1111;
-  const uint16_t uid2 = 2222;
-  const uint16_t gid1 = 3333;
-  const uint16_t gid2 = 4444;
-
-  const UserIdentity                        user(uid1, gid1);
-  const UserIdentity            sameUserAndGroup(uid1, gid1);
-  const UserIdentity     sameUserDifferrentGroup(uid1, gid2);
-  const UserIdentity      differentUserSameGroup(uid2, gid1);
-  const UserIdentity differentUserDifferentGroup(uid2, gid2);
-
-  ASSERT_TRUE(user == user);
-  ASSERT_TRUE(user == sameUserAndGroup);
-  ASSERT_TRUE(user == sameUserDifferrentGroup);
-  ASSERT_FALSE(user == differentUserSameGroup);
-  ASSERT_FALSE(user == differentUserDifferentGroup);
-}
-
-TEST_F(cta_UserIdentityTest, not_equals_operator) {
-  using namespace cta;
-  
-  const uint16_t uid1 = 1111;
-  const uint16_t uid2 = 2222;
-  const uint16_t gid1 = 3333;
-  const uint16_t gid2 = 4444;
-
-  const UserIdentity                        user(uid1, gid1);
-  const UserIdentity            sameUserAndGroup(uid1, gid1);
-  const UserIdentity     sameUserDifferrentGroup(uid1, gid2);
-  const UserIdentity      differentUserSameGroup(uid2, gid1);
-  const UserIdentity differentUserDifferentGroup(uid2, gid2);
-
-  ASSERT_FALSE(user != user);
-  ASSERT_FALSE(user != sameUserAndGroup);
-  ASSERT_FALSE(user != sameUserDifferrentGroup);
-  ASSERT_TRUE(user != differentUserSameGroup);
-  ASSERT_TRUE(user != differentUserDifferentGroup);
-}
-
-} // namespace unitTests
diff --git a/common/checksum/Checksum.cpp b/common/checksum/Checksum.cpp
deleted file mode 100644
index cf8322bd5eddf7c7ba013ebde5fd4a5d0fc3def3..0000000000000000000000000000000000000000
--- a/common/checksum/Checksum.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "common/checksum/Checksum.hpp"
-#include "common/utils/Regex.hpp"
-#include <sstream>
-
-//------------------------------------------------------------------------------
-// checksumTypeToStr
-//------------------------------------------------------------------------------
-const char *cta::Checksum::checksumTypeToStr(const ChecksumType enumValue)
-  throw() {
-  switch(enumValue) {
-  case CHECKSUMTYPE_NONE   : return "NONE";
-  case CHECKSUMTYPE_ADLER32: return "ADLER32";
-  default                  : return "UNKNOWN";
-  }
-}
-
-//------------------------------------------------------------------------------
-// constructor
-//------------------------------------------------------------------------------
-cta::Checksum::Checksum(): m_type(CHECKSUMTYPE_NONE) { }
-
-
-cta::Checksum::Checksum(const std::string& url): m_type(CHECKSUMTYPE_NONE) {
-  if (url.empty() || url == "-") {
-    return;
-  }
-  utils::Regex re("^adler32:0[Xx]([[:xdigit:]]+)$");
-  auto result = re.exec(url);
-  if (result.size()) {
-    m_type = CHECKSUMTYPE_ADLER32;
-    std::stringstream valStr(result.at(1));
-    uint32_t val;
-    valStr >> std::hex >> val;
-    setNumeric(val);
-  }
-}
-
-
-//------------------------------------------------------------------------------
-// operator==
-//------------------------------------------------------------------------------
-bool cta::Checksum::operator==(const Checksum &rhs) const {
-  return m_type == rhs.m_type && m_byteArray == rhs.m_byteArray;
-}
-
-//------------------------------------------------------------------------------
-// getType
-//------------------------------------------------------------------------------
-cta::Checksum::ChecksumType cta::Checksum::getType() const throw() {
-  return m_type;
-}
-
-//------------------------------------------------------------------------------
-// getByteArray
-//------------------------------------------------------------------------------
-const std::string &cta::Checksum::getByteArray() const throw() {
-  return m_byteArray;
-}
-
-//------------------------------------------------------------------------------
-// str
-//------------------------------------------------------------------------------
-std::string cta::Checksum::str() const {
-  std::ostringstream oss;
-
-  switch(m_type) {
-    case CHECKSUMTYPE_ADLER32:
-      oss << "adler32:" << std::hex << std::showbase << getNumeric<uint32_t>();
-      break;
-    case CHECKSUMTYPE_NONE:
-      oss << "-";
-      break;
-    default:;
-  }
-  return oss.str();
-}
-
-//------------------------------------------------------------------------------
-// operator<<
-//------------------------------------------------------------------------------
-std::ostream &cta::operator<<(std::ostream &os, const Checksum &checksum) {
-  os << checksum.str();
-  return os;
-}
diff --git a/common/checksum/Checksum.hpp b/common/checksum/Checksum.hpp
deleted file mode 100644
index d881a2b588af120797cf6b695511137a156e51a7..0000000000000000000000000000000000000000
--- a/common/checksum/Checksum.hpp
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#pragma once
-
-#include "common/exception/Exception.hpp"
-
-#include <ostream>
-#include <sstream>
-#include <typeinfo>
-
-namespace cta {
-
-/**
- * A checksum
- */
-class Checksum {
-public:
-
-  /**
-   * Enumeration of the supported checksum types.
-   */
-  enum ChecksumType {
-    CHECKSUMTYPE_NONE,
-    CHECKSUMTYPE_ADLER32};
-
-  /**
-   * Thread safe method that returns the string representation of the specified
-   * checksum type.
-   *
-   * @param enumValue The integer value of the type.
-   * @return The string representation.
-   */
-  static const char *checksumTypeToStr(const ChecksumType enumValue) throw();
-
-  /**
-   * Constructor.
-   *
-   * Creates an empty checksum.
-   */
-  Checksum();
-
-  /**
-   * Constructor.
-   *
-   * @param type The type of the checksum.
-   * @param val A numeric value to store in the byte array.
-   */
-  template <typename t>
-  Checksum(const ChecksumType &type, t val): m_type(type) { 
-    switch (m_type) {
-      case CHECKSUMTYPE_ADLER32:
-        if (sizeof(t) != 4) {
-          std::stringstream err;
-          err << "In Checksum::Checksum(type,value): unexpected value size="
-                  << sizeof(t) << " expected=4";
-          throw cta::exception::Exception(err.str());
-        }
-        break;
-      default:
-        throw cta::exception::Exception("In Checksum::Checksum(type,value): unsupported type for any value");
-    }
-    setNumeric(val);
-  }
-  
-  /**
-   * String based constructor.
-   * 
-   * @param url A string describing the type of the checksum
-   */
-  Checksum(const std::string & url);
-
-  /**
-   * Equality operator.
-   *
-   * @param ths The right hand side of the operator.
-   */
-  bool operator==(const Checksum &rhs) const;
-
-  /**
-   * Returns the type of the checksum.
-   *
-   * @return The type of the checksum.
-   */
-  ChecksumType getType() const throw();
-
-  /**
-   * Returns the checksum as a byte array that can be used for storing in a
-   * database.
-   *
-   * The bytes of the bytes array are in little-endian order.
-   *
-   * @return The checksum as a byte array that can be used for storing in a
-   * database.
-   */
-  const std::string &getByteArray() const throw();
-
-  /**
-   * Returns a human-readable string representation of the checksum.
-   */
-  std::string str() const;
-  
-  template <typename t>
-  t getNumeric() const {
-    if (m_byteArray.size() != sizeof(t)) {
-      std::stringstream err;
-      err << "In Checksum::getNumeric<"
-              << typeid(t).name() << ">(): wrong size of byte array="
-              << m_byteArray.size() << " expected=" << sizeof(t);
-      throw cta::exception::Exception(err.str());
-    }
-    return (*((t*)m_byteArray.data()));
-  }
-  
-  template <typename t>
-  void setNumeric(t val) {
-    m_byteArray.replace(m_byteArray.begin(), m_byteArray.end(), (char *)&val, sizeof(t));
-  }
-
-private:
-
-  /**
-   * The type of the checksum.
-   */
-  ChecksumType m_type;
-
-  /**
-   * The checksum as a byte array that can be used to store the checksum in a
-   * database alongside its type.
-   */
-  std::string m_byteArray;
-
-}; // class Checksum
-
-/**
- * Writes the specified Checksum object to the specified ooutput stream.
- *
- * @param os The output stream.
- * @param checksum The checksum.
- */
-std::ostream &operator<<(std::ostream &os, const Checksum &checksum);
-
-} // namespace cta
diff --git a/common/checksum/ChecksumBlob.cpp b/common/checksum/ChecksumBlob.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..507243fefc348d60ab17fe6764f066a551e0f790
--- /dev/null
+++ b/common/checksum/ChecksumBlob.cpp
@@ -0,0 +1,166 @@
+/*!
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <iomanip>
+
+#include "ChecksumBlob.hpp"
+#include "ChecksumBlobSerDeser.hpp"
+
+namespace cta {
+namespace checksum {
+
+void ChecksumBlob::insert(ChecksumType type, const std::string &value) {
+  // Validate the length of the checksum
+  size_t expectedLength;
+  switch(type) {
+    case NONE:       expectedLength = 0;  break;
+    case ADLER32:
+    case CRC32:
+    case CRC32C:     expectedLength = 4;  break;
+    case MD5:        expectedLength = 16; break;
+    case SHA1:       expectedLength = 20; break;
+  }
+  if(value.length() > expectedLength) throw exception::ChecksumValueMismatch(
+    "Checksum length type=" + ChecksumTypeName.at(type) +
+               " expected=" + std::to_string(expectedLength) +
+                 " actual=" + std::to_string(value.length()));
+  // Pad bytearray to expected length with trailing zeros
+  m_cs[type] = value + std::string(expectedLength-value.length(), 0);
+}
+
+void ChecksumBlob::insert(ChecksumType type, uint32_t value) {
+  // This method is only valid for 32-bit checksums
+  std::string cs;
+  switch(type) {
+    case ADLER32:
+    case CRC32:
+    case CRC32C:
+      for(int i = 0; i < 4; ++i) {
+        cs.push_back(static_cast<unsigned char>(value & 0xFF));
+        value >>= 8;
+      }
+      m_cs[type] = cs;
+      break;
+    default:
+      throw exception::ChecksumTypeMismatch(ChecksumTypeName.at(type) + " is not a 32-bit checksum");
+  }
+}
+
+void ChecksumBlob::validate(ChecksumType type, const std::string &value) const {
+  auto cs = m_cs.find(type);
+  if(cs == m_cs.end()) throw exception::ChecksumTypeMismatch(
+      "Checksum type " + ChecksumTypeName.at(type) + " not found");
+  if(cs->second != value) throw exception::ChecksumValueMismatch(
+      "Checksum value expected=0x" + ByteArrayToHex(value) +
+                      " actual=0x" + ByteArrayToHex(cs->second));
+}
+
+void ChecksumBlob::validate(const ChecksumBlob &blob) const {
+  if(m_cs.size() != blob.m_cs.size()) {
+    throw exception::ChecksumBlobSizeMismatch("Checksum blob size does not match. expected=" +
+      std::to_string(m_cs.size()) + " actual=" + std::to_string(blob.m_cs.size()));
+  }
+
+  auto it1 = m_cs.begin();
+  auto it2 = blob.m_cs.begin();
+  for( ; it1 != m_cs.end(); ++it1, ++it2) {
+    if(it1->first != it2->first) throw exception::ChecksumTypeMismatch(
+      "Checksum type expected=" + ChecksumTypeName.at(it1->first) +
+                     " actual=" + ChecksumTypeName.at(it2->first));
+    if(it1->second != it2->second) throw exception::ChecksumValueMismatch(
+      "Checksum value expected=0x" + ByteArrayToHex(it1->second) +
+                      " actual=0x" + ByteArrayToHex(it2->second));
+  }
+}
+
+std::string ChecksumBlob::serialize() const {
+  common::ChecksumBlob p_csb;
+  ChecksumBlobToProtobuf(*this, p_csb);
+
+  std::string bytearray;
+  p_csb.SerializeToString(&bytearray);
+  return bytearray;
+}
+
+size_t ChecksumBlob::length() const {
+  common::ChecksumBlob p_csb;
+  ChecksumBlobToProtobuf(*this, p_csb);
+  return p_csb.ByteSizeLong();
+}
+
+void ChecksumBlob::deserialize(const std::string &bytearray) {
+  common::ChecksumBlob p_csb;
+  if(!p_csb.ParseFromString(bytearray)) {
+    throw exception::Exception("ChecksumBlob: deserialization failed");
+  }
+  ProtobufToChecksumBlob(p_csb, *this);
+}
+
+void ChecksumBlob::deserializeOrSetAdler32(const std::string &bytearray, uint32_t adler32) {
+  common::ChecksumBlob p_csb;
+  // A NULL value in the CHECKSUM_BLOB column will return an empty bytearray. If the bytearray is empty
+  // or otherwise invalid, default to using the contents of the CHECKSUM_ADLER32 column.
+  if(!bytearray.empty() && p_csb.ParseFromString(bytearray)) {
+    ProtobufToChecksumBlob(p_csb, *this);
+  } else {
+    insert(ADLER32, adler32);
+  }
+}
+
+std::string ChecksumBlob::HexToByteArray(std::string hexString) {
+  std::string bytearray;
+
+  if(hexString.substr(0,2) == "0x" || hexString.substr(0,2) == "0X") {
+    hexString.erase(0,2);
+  }
+  // ensure we have an even number of hex digits
+  if(hexString.length() % 2 == 1) hexString.insert(0, "0");
+
+  for(unsigned int i = 0; i < hexString.length(); i += 2) {
+    uint8_t byte = strtol(hexString.substr(i,2).c_str(), nullptr, 16);
+    bytearray.insert(0,1,byte);
+  }
+
+  return bytearray;
+}
+
+std::string ChecksumBlob::ByteArrayToHex(const std::string &bytearray) {
+  if(bytearray.empty()) return "0";
+
+  std::stringstream value;
+  value << std::hex << std::setfill('0');
+  for(auto c = bytearray.rbegin(); c != bytearray.rend(); ++c) {
+    value << std::setw(2) << (static_cast<uint8_t>(*c) & 0xFF);
+  }
+  return value.str();
+}
+
+std::ostream &operator<<(std::ostream &os, const ChecksumBlob &csb) {
+  os << "[ ";
+  auto num_els = csb.m_cs.size();
+  for(auto &cs : csb.m_cs) {
+    bool is_last_el = --num_els > 0;
+    os << "{ \"" << ChecksumTypeName.at(cs.first) << "\",0x"  << ChecksumBlob::ByteArrayToHex(cs.second)
+       << (is_last_el ? " }," : " }");
+  }
+  os << " ]";
+
+  return os;
+}
+
+}} // namespace cta::checksum
diff --git a/common/checksum/ChecksumBlob.hpp b/common/checksum/ChecksumBlob.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..11d9c6f17b3181d53d733e604fa8f59898f3cb62
--- /dev/null
+++ b/common/checksum/ChecksumBlob.hpp
@@ -0,0 +1,214 @@
+/*!
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <map>
+
+#include <common/exception/ChecksumBlobSizeMismatch.hpp>
+#include <common/exception/ChecksumTypeMismatch.hpp>
+#include <common/exception/ChecksumValueMismatch.hpp>
+
+namespace cta {
+namespace checksum {
+
+/*!
+ * Enumeration of the supported checksum types
+ *
+ * We allow all checksum types supported by EOS
+ */
+enum ChecksumType {
+  NONE,       //!< No checksum specified
+  ADLER32,    //!< Adler-32 checksum
+  CRC32,      //!< CRC-32 checksum
+  CRC32C,     //!< CRC-32C checksum
+  MD5,        //!< MD5 128-bit hash
+  SHA1        //!< SHA-1 160-bit hash
+};
+
+/*!
+ * String representations of the checksum types
+ */
+const std::map<ChecksumType, std::string> ChecksumTypeName = {
+  { NONE,    "NONE" },
+  { ADLER32, "ADLER32" },
+  { CRC32,   "CRC32" }, 
+  { CRC32C,  "CRC32C" }, 
+  { MD5,     "MD5" }, 
+  { SHA1,    "SHA1" }
+};
+
+/*!
+ * A class to store one or more checksums
+ */
+class ChecksumBlob {
+public:
+  /*!
+   * Default constructor
+   */
+  ChecksumBlob() {}
+
+  /*!
+   * Construct and insert one checksum
+   */
+  ChecksumBlob(ChecksumType type, const std::string &value) {
+    insert(type, value);
+  }
+
+  /*!
+   * Construct and insert one 32-bit checksum
+   */
+  ChecksumBlob(ChecksumType type, uint32_t value) {
+    insert(type, value);
+  }
+
+  /*!
+   * Clear all of the checksums in this blob
+   */
+  void clear() { m_cs.clear(); }
+
+  /*!
+   * Insert a new checksum into the blob
+   *
+   * @param[in] value    Little-endian byte array containing the checksum
+   */
+  void insert(ChecksumType type, const std::string &value);
+
+  /*!
+   * Insert a new 32-bit checksum into the blob
+   *
+   * Only valid for Adler-32, CRC-32 and CRC-32c checksums. Throws an
+   * exception for other checksum types.
+   *
+   * @param[in] value    32-bit unsigned integer containing the checksum
+   */
+  void insert(ChecksumType type, uint32_t value);
+
+  /*!
+   * Deserialize from a byte array
+   */
+  void deserialize(const std::string &bytearray);
+
+  /*!
+   * Deserialize from a byte array. In case of an invalid byte array, use the supplied Adler32 value instead.
+   */
+  void deserializeOrSetAdler32(const std::string &bytearray, uint32_t adler32);
+
+  /*!
+   * Serialise to a byte array
+   */
+  std::string serialize() const;
+
+  /*!
+   * Length of the serialized byte array
+   */
+  size_t length() const;
+
+  /*!
+   * True if there are no checksums in the blob
+   */
+  bool empty() const { return m_cs.empty(); }
+
+  /*!
+   * Returns the number of checksums in the blob
+   */
+  size_t size() const { return m_cs.size(); }
+
+  /*!
+   * Get a const reference to the implementation (for conversion to protobuf)
+   */
+  const std::map<ChecksumType,std::string> &getMap() const {
+    return m_cs;
+  }
+
+  /*!
+   * Return the checksum for the specified key
+   */
+  std::string at(ChecksumType type) const {
+    try {
+      return m_cs.at(type);
+    } catch(std::out_of_range) {
+      std::stringstream ss;
+      ss << ChecksumTypeName.at(type) << " checksum not found. Checksum blob contents: "
+         << *this;
+      throw exception::ChecksumTypeMismatch(ss.str());
+    }
+  }
+
+  /*!
+   * Check that a single checksum is in the blob and that it has the expected value, throw an exception if not
+   */
+  void validate(ChecksumType type, const std::string &value) const;
+
+  /*!
+   * Check all the checksums in the blob match, throw an exception if they don't
+   */
+  void validate(const ChecksumBlob &blob) const;
+
+  /*!
+   * Returns true if the checksum is in the blob and has the expected value
+   */
+  bool contains(ChecksumType type, const std::string &value) const {
+    try {
+      validate(type, value);
+    } catch(exception::ChecksumTypeMismatch &ex) {
+      return false;
+    } catch(exception::ChecksumValueMismatch &ex) {
+      return false;
+    }
+    return true;
+  }
+
+  /*!
+   * Returns true if all the checksums in the blob match
+   */
+  bool operator==(const ChecksumBlob &blob) const {
+    try {
+      validate(blob);
+    } catch(exception::ChecksumBlobSizeMismatch &ex) {
+      return false;
+    } catch(exception::ChecksumTypeMismatch &ex) {
+      return false;
+    } catch(exception::ChecksumValueMismatch &ex) {
+      return false;
+    }
+    return true;
+  }
+
+  /*!
+   * Returns true if the two blobs differ in size, checksum types or checksum values
+   */
+  bool operator!=(const ChecksumBlob &blob) const { return !(*this == blob); }
+
+  /*!
+   * Convert hexadecimal string to little-endian byte array
+   */
+  static std::string HexToByteArray(std::string hexString);
+
+  /*!
+   * Convert little-endian byte array to hexadecimal string
+   */
+  static std::string ByteArrayToHex(const std::string &bytearray);
+
+private:
+  friend std::ostream &operator<<(std::ostream &os, const ChecksumBlob &csb);
+
+  std::map<ChecksumType,std::string> m_cs;
+};
+
+}} // namespace cta::checksum
diff --git a/common/checksum/ChecksumBlobSerDeser.hpp b/common/checksum/ChecksumBlobSerDeser.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a3af1be40581328e0d85e389faf926dd669e6b8b
--- /dev/null
+++ b/common/checksum/ChecksumBlobSerDeser.hpp
@@ -0,0 +1,62 @@
+/*!
+ * @project        The CERN Tape Archive (CTA)
+ * @brief          Convert checksum::ChecksumBlob to/from common::ChecksumBlob (EOS-CTA protobuf)
+ * @copyright      Copyright 2019 CERN
+ * @license        This program is free software: you can redistribute it and/or modify
+ *                 it under the terms of the GNU General Public License as published by
+ *                 the Free Software Foundation, either version 3 of the License, or
+ *                 (at your option) any later version.
+ *
+ *                 This program is distributed in the hope that it will be useful,
+ *                 but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *                 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *                 GNU General Public License for more details.
+ *
+ *                 You should have received a copy of the GNU General Public License
+ *                 along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/checksum/ChecksumBlob.hpp"
+#include "cta_common.pb.h"
+
+namespace cta {
+namespace checksum {
+
+void ProtobufToChecksumBlob(const common::ChecksumBlob &p_csb, checksum::ChecksumBlob &csb) {
+  csb.clear();
+  for(auto &cs : p_csb.cs()) {
+    checksum::ChecksumType type;
+    switch(cs.type()) {
+      case common::ChecksumBlob::Checksum::ADLER32: type = ADLER32; break;
+      case common::ChecksumBlob::Checksum::CRC32:   type = CRC32;   break;
+      case common::ChecksumBlob::Checksum::CRC32C:  type = CRC32C;  break;
+      case common::ChecksumBlob::Checksum::MD5:     type = MD5;     break;
+      case common::ChecksumBlob::Checksum::SHA1:    type = SHA1;    break;
+      case common::ChecksumBlob::Checksum::NONE:
+      default:                                      type = NONE;    break;
+    }
+    csb.insert(type, cs.value());
+  }
+}
+
+void ChecksumBlobToProtobuf(const checksum::ChecksumBlob &csb, common::ChecksumBlob &p_csb) {
+  for(auto &cs : csb.getMap()) {
+    common::ChecksumBlob::Checksum::Type type;
+    switch(cs.first) {
+      case ADLER32: type = common::ChecksumBlob::Checksum::ADLER32; break;
+      case CRC32:   type = common::ChecksumBlob::Checksum::CRC32;   break;
+      case CRC32C:  type = common::ChecksumBlob::Checksum::CRC32C;  break;
+      case MD5:     type = common::ChecksumBlob::Checksum::MD5;     break;
+      case SHA1:    type = common::ChecksumBlob::Checksum::SHA1;    break;
+      case NONE:
+      default:      type = common::ChecksumBlob::Checksum::NONE;    break;
+    }
+    auto cs_ptr = p_csb.add_cs();
+    cs_ptr->set_type(type);
+    cs_ptr->set_value(cs.second);
+  }
+}
+
+}} // namespace cta::checksum
diff --git a/common/checksum/ChecksumBlobTest.cpp b/common/checksum/ChecksumBlobTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1a9bd3768b2fbd9e8f7a25e6f0d567229d127193
--- /dev/null
+++ b/common/checksum/ChecksumBlobTest.cpp
@@ -0,0 +1,180 @@
+/*!
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "common/checksum/ChecksumBlob.hpp"
+#include <gtest/gtest.h>
+
+namespace unitTests {
+
+class cta_ChecksumBlobTest : public ::testing::Test {
+protected:
+  virtual void SetUp() {
+  }
+
+  virtual void TearDown() {
+  }
+};
+
+TEST_F(cta_ChecksumBlobTest, checksum_types) {
+  using namespace cta::checksum;
+  using namespace cta::exception;
+
+  ChecksumBlob checksumBlob;
+  ASSERT_EQ(checksumBlob.empty(), true);
+  ASSERT_EQ(checksumBlob.size(), 0);
+
+  // Checksum type not in blob
+  ASSERT_THROW(checksumBlob.at(NONE),    ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.at(ADLER32), ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.at(CRC32),   ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.at(CRC32C),  ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.at(MD5),     ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.at(SHA1),    ChecksumTypeMismatch);
+
+  // valid insertions
+  checksumBlob.insert(NONE, "");                      // 0 bits
+  ASSERT_EQ(checksumBlob.size(), 1);
+  ASSERT_EQ(checksumBlob.empty(), false);
+  checksumBlob.insert(ADLER32, "1234");               // 32 bits
+  ASSERT_EQ(checksumBlob.size(), 2);
+  checksumBlob.insert(CRC32, "1234");                 // 32 bits
+  ASSERT_EQ(checksumBlob.size(), 3);
+  checksumBlob.insert(CRC32C, "1234");                // 32 bits
+  ASSERT_EQ(checksumBlob.size(), 4);
+  checksumBlob.insert(MD5, "1234567890123456");       // 128 bits
+  ASSERT_EQ(checksumBlob.size(), 5);
+  checksumBlob.insert(SHA1, "12345678901234567890");  // 160 bits
+  ASSERT_EQ(checksumBlob.size(), 6);
+
+  // check each of the checksums in turn
+  ASSERT_EQ(checksumBlob.contains(NONE, ""), true);
+  ASSERT_EQ(checksumBlob.contains(ADLER32, "1234"), true);
+  ASSERT_EQ(checksumBlob.contains(CRC32, "1234"), true);
+  ASSERT_EQ(checksumBlob.contains(CRC32C, "1234"), true);
+  ASSERT_EQ(checksumBlob.contains(MD5, "1234567890123456"), true);
+  ASSERT_EQ(checksumBlob.contains(SHA1, "12345678901234567890"), true);
+
+  // invalid insertions
+  ASSERT_THROW(checksumBlob.insert(NONE, "0"),                     ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(ADLER32, "12345"),              ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(CRC32, "12345"),                ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(CRC32C, "12345"),               ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(MD5, "12345678901234567"),      ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(SHA1, "123456789012345678901"), ChecksumValueMismatch);
+  ASSERT_THROW(checksumBlob.insert(MD5, 0x12345678),               ChecksumTypeMismatch);
+  ASSERT_THROW(checksumBlob.insert(SHA1, 0x12345678),              ChecksumTypeMismatch);
+
+  // Blob types are different
+  ChecksumBlob checksumBlob2, checksumBlob3;
+  checksumBlob2.insert(NONE, "");
+  checksumBlob3.insert(ADLER32, "1234");
+  ASSERT_THROW(checksumBlob2.validate(checksumBlob3), ChecksumTypeMismatch);
+  ASSERT_NE(checksumBlob2,checksumBlob3);
+
+  // Blob sizes are different
+  checksumBlob3.insert(NONE, "");
+  ASSERT_THROW(checksumBlob2.validate(checksumBlob3), ChecksumBlobSizeMismatch);
+  ASSERT_NE(checksumBlob2,checksumBlob3);
+
+  // Blob values are different
+  checksumBlob2 = checksumBlob;
+  checksumBlob2.insert(ADLER32, 0x0a0b0c0d);
+  ASSERT_THROW(checksumBlob.validate(checksumBlob2), ChecksumValueMismatch);
+  ASSERT_NE(checksumBlob,checksumBlob2);
+
+  // Clear the blob
+  checksumBlob.clear();
+  ASSERT_EQ(checksumBlob.empty(), true);
+  ASSERT_EQ(checksumBlob.size(), 0);
+}
+
+TEST_F(cta_ChecksumBlobTest, hex_to_byte_array) {
+  using namespace cta::checksum;
+
+  // Check some limiting cases
+  ASSERT_EQ(ChecksumBlob::ByteArrayToHex(ChecksumBlob::HexToByteArray("0")), "00");
+  ASSERT_EQ(ChecksumBlob::ByteArrayToHex(ChecksumBlob::HexToByteArray("0xFFFFFFFF")), "ffffffff");
+  ASSERT_EQ(ChecksumBlob::ByteArrayToHex(ChecksumBlob::HexToByteArray("0X10a0BFC")), "010a0bfc");
+  ASSERT_EQ(ChecksumBlob::ByteArrayToHex(ChecksumBlob::HexToByteArray("000a000b000c000d000e000f000abcdef1234567890")),
+                                                                     "0000a000b000c000d000e000f000abcdef1234567890");
+}
+
+TEST_F(cta_ChecksumBlobTest, adler32) {
+  using namespace cta::checksum;
+  using namespace cta::exception;
+
+  ChecksumBlob checksumBlob;
+  ASSERT_THROW(checksumBlob.validate(ADLER32, "invalid"), ChecksumTypeMismatch);
+  ASSERT_EQ(checksumBlob.contains(ADLER32, "invalid"), false);
+
+  checksumBlob.insert(ADLER32, 0x0A141E28);
+  ASSERT_EQ(checksumBlob.size(), 1);
+  ASSERT_THROW(checksumBlob.validate(ADLER32, "invalid"), ChecksumValueMismatch);
+  ASSERT_EQ(checksumBlob.contains(ADLER32, "invalid"), false);
+
+  // Check internal representation
+  std::string bytearray = checksumBlob.at(ADLER32);
+  ASSERT_EQ(4, bytearray.length());
+  ASSERT_EQ(static_cast<uint8_t>(0x28), bytearray[0]);
+  ASSERT_EQ(static_cast<uint8_t>(0x1E), bytearray[1]);
+  ASSERT_EQ(static_cast<uint8_t>(0x14), bytearray[2]);
+  ASSERT_EQ(static_cast<uint8_t>(0x0A), bytearray[3]);
+
+  // Check we can convert back to the original hex value
+  ASSERT_EQ(ChecksumBlob::ByteArrayToHex(bytearray), "0a141e28");
+
+  // Check construction by bytearray yields the same result
+  ChecksumBlob checksumBlob2;
+  checksumBlob2.insert(ADLER32, bytearray);
+  ASSERT_EQ(checksumBlob2.size(), 1);
+  ASSERT_EQ(checksumBlob, checksumBlob2);
+
+  // Check construction by hex string yields the same result
+  ChecksumBlob checksumBlob3;
+  checksumBlob3.insert(ADLER32, ChecksumBlob::HexToByteArray("0x0A141E28"));
+  ASSERT_EQ(checksumBlob3.size(), 1);
+
+  // Check alternate hex string yields the same result
+  ChecksumBlob checksumBlob4;
+  checksumBlob4.insert(ADLER32, ChecksumBlob::HexToByteArray("a141e28"));
+  ASSERT_EQ(checksumBlob4.size(), 1);
+  ASSERT_EQ(checksumBlob, checksumBlob4);
+}
+
+TEST_F(cta_ChecksumBlobTest, serialize_deserialize) {
+  using namespace cta::checksum;
+
+  ChecksumBlob checksumBlob1;
+
+  checksumBlob1.insert(NONE, "");                      // 0 bits
+  checksumBlob1.insert(ADLER32, 0x3e80001);            // 32 bits
+  checksumBlob1.insert(CRC32, "0");                    // 32 bits
+  checksumBlob1.insert(CRC32C, "FFFF");                // 32 bits
+  checksumBlob1.insert(MD5, "1234567890123456");       // 128 bits
+  checksumBlob1.insert(SHA1, "12345678901234567890");  // 160 bits
+
+  auto len = checksumBlob1.length();
+  auto bytearray = checksumBlob1.serialize();
+  ASSERT_EQ(len,bytearray.length());
+
+  ChecksumBlob checksumBlob2;
+  checksumBlob2.deserialize(bytearray);
+  ASSERT_EQ(checksumBlob1, checksumBlob2);
+}
+
+} // namespace unitTests
diff --git a/common/checksum/ChecksumTest.cpp b/common/checksum/ChecksumTest.cpp
deleted file mode 100644
index 5cc21fae76fae02335ab8302b594c10fee36477c..0000000000000000000000000000000000000000
--- a/common/checksum/ChecksumTest.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "common/checksum/Checksum.hpp"
-#include <gtest/gtest.h>
-#include <arpa/inet.h>
-
-namespace unitTests {
-
-class cta_ChecksumTest : public ::testing::Test {
-protected:
-  virtual void SetUp() {
-  }
-
-  virtual void TearDown() {
-  }
-};
-
-TEST_F(cta_ChecksumTest, default_constructor) {
-  using namespace cta;
-
-  const Checksum checksum;
-
-  ASSERT_EQ(Checksum::CHECKSUMTYPE_NONE, checksum.getType());
-  ASSERT_EQ("-", checksum.str());
-
-  ASSERT_EQ((uint32_t)0, checksum.getByteArray().size());
-}
-
-TEST_F(cta_ChecksumTest, two_param_constructor) {
-  using namespace cta;
-
-  const Checksum::ChecksumType checksumType = Checksum::CHECKSUMTYPE_ADLER32;
-  const uint32_t val = ntohl(0x0A141E28);
-  const Checksum checksum(checksumType, val);
-
-  ASSERT_EQ(Checksum::CHECKSUMTYPE_ADLER32, checksum.getType());
-  ASSERT_EQ((uint32_t)4, checksum.getByteArray().size());
-  ASSERT_EQ((uint8_t)10, checksum.getByteArray()[0]);
-  ASSERT_EQ((uint8_t)20, checksum.getByteArray()[1]);
-  ASSERT_EQ((uint8_t)30, checksum.getByteArray()[2]);
-  ASSERT_EQ((uint8_t)40, checksum.getByteArray()[3]);
-}
-
-TEST_F(cta_ChecksumTest, url_constructor) {
-  using namespace cta;
-  
-  const Checksum checksum("adler32:0x12345678");
-  
-  ASSERT_EQ(Checksum::CHECKSUMTYPE_ADLER32, checksum.getType());
-  ASSERT_EQ(0x12345678, checksum.getNumeric<uint32_t>());
-}
-
-} // namespace unitTests
diff --git a/common/dataStructures/ArchiveFile.cpp b/common/dataStructures/ArchiveFile.cpp
index a822c8d9a9a4920ef1152cd6e099178bf729f1aa..46b16ee4775250e593f3ee87e28e3fa7bed3df98 100644
--- a/common/dataStructures/ArchiveFile.cpp
+++ b/common/dataStructures/ArchiveFile.cpp
@@ -44,8 +44,7 @@ bool ArchiveFile::operator==(const ArchiveFile &rhs) const {
       && diskFileId    == rhs.diskFileId
       && diskInstance  == rhs.diskInstance
       && fileSize      == rhs.fileSize
-      && checksumType  == rhs.checksumType
-      && checksumValue == rhs.checksumValue
+      && checksumBlob  == rhs.checksumBlob
       && storageClass  == rhs.storageClass
       && diskFileInfo  == rhs.diskFileInfo
       && tapeFiles    == rhs.tapeFiles;
@@ -104,8 +103,7 @@ std::ostream &operator<<(std::ostream &os, const ArchiveFile &obj) {
     "diskFileID="         << obj.diskFileId         << ","
     "diskInstance="       << obj.diskInstance       << ","
     "fileSize="           << obj.fileSize           << ","
-    "checksumType="       << obj.checksumType       << ","
-    "checksumValue="      << obj.checksumValue      << ","
+    "checksumBlob="       << obj.checksumBlob       << ","
     "storageClass="       << obj.storageClass       << ","
     "diskFileInfo="       << obj.diskFileInfo       << ","
     "tapeFiles="          << obj.tapeFiles          << ","
diff --git a/common/dataStructures/ArchiveFile.hpp b/common/dataStructures/ArchiveFile.hpp
index 9929eb50ed3570acba5f31fc7094ab7f05ec7a3d..76e56c274a41ff38228ab73bda71ce1f91e4eb1c 100644
--- a/common/dataStructures/ArchiveFile.hpp
+++ b/common/dataStructures/ArchiveFile.hpp
@@ -1,6 +1,6 @@
-/*
+/*!
  * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
+ * Copyright (C) 2019 CERN
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,11 +20,11 @@
 
 #include <list>
 #include <map>
-#include <stdint.h>
 #include <string>
 
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/TapeFile.hpp"
+#include "common/checksum/ChecksumBlob.hpp"
 
 namespace cta {
 namespace common {
@@ -53,14 +53,7 @@ struct ArchiveFile {
   std::string diskFileId;
   std::string diskInstance;
   uint64_t fileSize;
-  /**
-   * The human readable checksum type. Ex: ADLER32 
-   */
-  std::string checksumType;
-  /**
-   * The human readable checksum value. Ex: 0X1292AB12 
-   */
-  std::string checksumValue;
+  checksum::ChecksumBlob checksumBlob;
   std::string storageClass;
   DiskFileInfo diskFileInfo;
   /**
diff --git a/common/dataStructures/ArchiveFileSummary.cpp b/common/dataStructures/ArchiveFileSummary.cpp
index 751ad3d9cce701c7b2ead8f899f1116e3e488eb4..cc9ec8b86a7c83c536056b79558193d8702dfc71 100644
--- a/common/dataStructures/ArchiveFileSummary.cpp
+++ b/common/dataStructures/ArchiveFileSummary.cpp
@@ -29,7 +29,6 @@ namespace dataStructures {
 //------------------------------------------------------------------------------
 ArchiveFileSummary::ArchiveFileSummary():
   totalBytes(0),
-  totalCompressedBytes(0),
   totalFiles(0) {}
 
 //------------------------------------------------------------------------------
@@ -37,7 +36,6 @@ ArchiveFileSummary::ArchiveFileSummary():
 //------------------------------------------------------------------------------
 bool ArchiveFileSummary::operator==(const ArchiveFileSummary &rhs) const {
   return totalBytes==rhs.totalBytes
-      && totalCompressedBytes==rhs.totalCompressedBytes
       && totalFiles==rhs.totalFiles;
 }
 
@@ -53,7 +51,6 @@ bool ArchiveFileSummary::operator!=(const ArchiveFileSummary &rhs) const {
 //------------------------------------------------------------------------------
 std::ostream &operator<<(std::ostream &os, const ArchiveFileSummary &obj) {
   os << "(totalBytes=" << obj.totalBytes
-     << " totalCompressedBytes=" << obj.totalCompressedBytes
      << " totalFiles=" << obj.totalFiles << ")";
   return os;
 }
diff --git a/common/dataStructures/ArchiveFileSummary.hpp b/common/dataStructures/ArchiveFileSummary.hpp
index f4b0e465820f04b0122edf56169fcf960f41389a..99ce39c511b0eaa7f0b85dadacf7a7e694851177 100644
--- a/common/dataStructures/ArchiveFileSummary.hpp
+++ b/common/dataStructures/ArchiveFileSummary.hpp
@@ -40,7 +40,6 @@ struct ArchiveFileSummary {
   bool operator!=(const ArchiveFileSummary &rhs) const;
 
   uint64_t totalBytes;
-  uint64_t totalCompressedBytes;
   uint64_t totalFiles;
 
 }; // struct ArchiveFileSummary
diff --git a/common/dataStructures/ArchiveFileTest.cpp b/common/dataStructures/ArchiveFileTest.cpp
index 8522d1f345a08a27b17129283fc2bb5de11cacc4..1f2296882fe9951d0ad7c8c9e3f1d1d6bd37065f 100644
--- a/common/dataStructures/ArchiveFileTest.cpp
+++ b/common/dataStructures/ArchiveFileTest.cpp
@@ -23,6 +23,9 @@
 
 namespace unitTests {
 
+const uint32_t RECOVERY_OWNER_UID = 9751;
+const uint32_t RECOVERY_GID       = 9752;
+
 class cta_common_dataStructures_ArchiveFileTest : public ::testing::Test {
 protected:
 
@@ -41,20 +44,19 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
   archiveFile1.archiveFileID = 1234;
   archiveFile1.diskFileId = "EOS_file_ID";
   archiveFile1.fileSize = 1;
-  archiveFile1.checksumType = "checksum_type";
-  archiveFile1.checksumValue = "cheskum_value";
+  archiveFile1.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   archiveFile1.storageClass = "storage_class";
 
   archiveFile1.diskInstance = "recovery_instance";
   archiveFile1.diskFileInfo.path = "recovery_path";
-  archiveFile1.diskFileInfo.owner = "recovery_owner";
-  archiveFile1.diskFileInfo.group = "recovery_group";
+  archiveFile1.diskFileInfo.owner_uid = RECOVERY_OWNER_UID;
+  archiveFile1.diskFileInfo.gid = RECOVERY_GID;
 
   TapeFile tapeFile1;
   tapeFile1.vid = "VID1";
   tapeFile1.fSeq = 5678;
   tapeFile1.blockId = 9012;
-  tapeFile1.compressedSize = 5;
+  tapeFile1.fileSize = 5;
   tapeFile1.copyNb = 1;
 
   archiveFile1.tapeFiles.push_back(tapeFile1);
@@ -64,7 +66,7 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
   tapeFile2.vid = "VID2";
   tapeFile2.fSeq = 3456;
   tapeFile2.blockId = 7890;
-  tapeFile2.compressedSize = 6;
+  tapeFile2.fileSize = 6;
   tapeFile2.copyNb = 2;
 
   archiveFile1.tapeFiles.push_back(tapeFile2);
@@ -77,14 +79,13 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
   ASSERT_EQ(archiveFile1.archiveFileID, archiveFile2.archiveFileID);
   ASSERT_EQ(archiveFile1.diskFileId, archiveFile2.diskFileId);
   ASSERT_EQ(archiveFile1.fileSize, archiveFile2.fileSize);
-  ASSERT_EQ(archiveFile1.checksumType, archiveFile2.checksumType);
-  ASSERT_EQ(archiveFile1.checksumValue, archiveFile2.checksumValue);
+  ASSERT_EQ(archiveFile1.checksumBlob, archiveFile2.checksumBlob);
   ASSERT_EQ(archiveFile1.storageClass, archiveFile2.storageClass);
 
   ASSERT_EQ(archiveFile1.diskInstance, archiveFile2.diskInstance);
   ASSERT_EQ(archiveFile1.diskFileInfo.path, archiveFile2.diskFileInfo.path);
-  ASSERT_EQ(archiveFile1.diskFileInfo.owner, archiveFile2.diskFileInfo.owner);
-  ASSERT_EQ(archiveFile1.diskFileInfo.group, archiveFile2.diskFileInfo.group);
+  ASSERT_EQ(archiveFile1.diskFileInfo.owner_uid, archiveFile2.diskFileInfo.owner_uid);
+  ASSERT_EQ(archiveFile1.diskFileInfo.gid, archiveFile2.diskFileInfo.gid);
 
   ASSERT_EQ(2, archiveFile2.tapeFiles.size());
 
@@ -95,7 +96,7 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
     ASSERT_EQ(tapeFile1.vid, copyNbToTapeFileItor->vid);
     ASSERT_EQ(tapeFile1.fSeq, copyNbToTapeFileItor->fSeq);
     ASSERT_EQ(tapeFile1.blockId, copyNbToTapeFileItor->blockId);
-    ASSERT_EQ(tapeFile1.compressedSize, copyNbToTapeFileItor->compressedSize);
+    ASSERT_EQ(tapeFile1.fileSize, copyNbToTapeFileItor->fileSize);
     ASSERT_EQ(tapeFile1.copyNb, copyNbToTapeFileItor->copyNb);
   }
 
@@ -106,7 +107,7 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
     ASSERT_EQ(tapeFile2.vid, copyNbToTapeFileItor->vid);
     ASSERT_EQ(tapeFile2.fSeq, copyNbToTapeFileItor->fSeq);
     ASSERT_EQ(tapeFile2.blockId, copyNbToTapeFileItor->blockId);
-    ASSERT_EQ(tapeFile2.compressedSize, copyNbToTapeFileItor->compressedSize);
+    ASSERT_EQ(tapeFile2.fileSize, copyNbToTapeFileItor->fileSize);
     ASSERT_EQ(tapeFile2.copyNb, copyNbToTapeFileItor->copyNb);
   }
 }
diff --git a/common/dataStructures/ArchiveRequest.cpp b/common/dataStructures/ArchiveRequest.cpp
index ff6139903366b002d98ad8eb11341d587101ed4d..26fdd970d671e74b8546d55e31aae7e1699c92ab 100644
--- a/common/dataStructures/ArchiveRequest.cpp
+++ b/common/dataStructures/ArchiveRequest.cpp
@@ -37,8 +37,7 @@ bool ArchiveRequest::operator==(const ArchiveRequest &rhs) const {
       && diskFileID==rhs.diskFileID
       && srcURL==rhs.srcURL
       && fileSize==rhs.fileSize
-      && checksumType==rhs.checksumType
-      && checksumValue==rhs.checksumValue
+      && checksumBlob==rhs.checksumBlob
       && storageClass==rhs.storageClass
       && diskFileInfo==rhs.diskFileInfo
       && archiveReportURL==rhs.archiveReportURL
@@ -61,8 +60,7 @@ std::ostream &operator<<(std::ostream &os, const ArchiveRequest &obj) {
      << " diskFileID=" << obj.diskFileID
      << " srcURL=" << obj.srcURL
      << " fileSize=" << obj.fileSize
-     << " checksumType=" << obj.checksumType
-     << " checksumValue=" << obj.checksumValue
+     << " checksumBlob=" << obj.checksumBlob
      << " storageClass=" << obj.storageClass
      << " diskFileInfo=" << obj.diskFileInfo
      << " archiveReportURL=" << obj.archiveReportURL
diff --git a/common/dataStructures/ArchiveRequest.hpp b/common/dataStructures/ArchiveRequest.hpp
index a2dbadb73e43c0ac07807a7fffb68004fd49a29f..baca15394e0ed1f9b2208214e6a611190177b8c0 100644
--- a/common/dataStructures/ArchiveRequest.hpp
+++ b/common/dataStructures/ArchiveRequest.hpp
@@ -20,7 +20,8 @@
 
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/EntryLog.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
+#include "common/checksum/ChecksumBlob.hpp"
 
 #include <list>
 #include <map>
@@ -42,19 +43,12 @@ struct ArchiveRequest {
 
   bool operator!=(const ArchiveRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
   std::string diskFileID;
 
   std::string srcURL;
   uint64_t fileSize;
-  /**
-   * The human readable checksum type. Ex: ADLER32 
-   */
-  std::string checksumType;
-  /**
-   * The human readable checksum value. Ex: 0X1292AB12 
-   */
-  std::string checksumValue;
+  checksum::ChecksumBlob checksumBlob;
   std::string storageClass;
   DiskFileInfo diskFileInfo;
   std::string archiveReportURL;
diff --git a/common/dataStructures/CancelRetrieveRequest.hpp b/common/dataStructures/CancelRetrieveRequest.hpp
index 1721fbf7ca4ffb3b81ae34efee31c25b89523b8a..02af7c07fd1f1ab14c9809fc537b7e783f3d5d3b 100644
--- a/common/dataStructures/CancelRetrieveRequest.hpp
+++ b/common/dataStructures/CancelRetrieveRequest.hpp
@@ -24,7 +24,7 @@
 #include <string>
 
 #include "common/dataStructures/DiskFileInfo.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 
 namespace cta {
 namespace common {
@@ -41,7 +41,7 @@ struct CancelRetrieveRequest {
 
   bool operator!=(const CancelRetrieveRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
   uint64_t archiveFileID;
   std::string dstURL;
   DiskFileInfo diskFileInfo;
diff --git a/common/dataStructures/DeleteArchiveRequest.hpp b/common/dataStructures/DeleteArchiveRequest.hpp
index fd0730d20c41338059904a169a10fd592e0b8d29..949222a61aaffdd9132fd6caf361f9187e9b2de1 100644
--- a/common/dataStructures/DeleteArchiveRequest.hpp
+++ b/common/dataStructures/DeleteArchiveRequest.hpp
@@ -23,7 +23,7 @@
 #include <stdint.h>
 #include <string>
 
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 
 namespace cta {
 namespace common {
@@ -41,7 +41,7 @@ struct DeleteArchiveRequest {
 
   bool operator!=(const DeleteArchiveRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
   uint64_t archiveFileID;
 
 }; // struct DeleteArchiveRequest
diff --git a/common/dataStructures/DiskFileInfo.cpp b/common/dataStructures/DiskFileInfo.cpp
index 948572716611eb82be1e0822234e2dc3999c957c..c9454e5b48fca67b6da1df1290b6ea829afc07cc 100644
--- a/common/dataStructures/DiskFileInfo.cpp
+++ b/common/dataStructures/DiskFileInfo.cpp
@@ -27,15 +27,21 @@ namespace dataStructures {
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-DiskFileInfo::DiskFileInfo() {}
+DiskFileInfo::DiskFileInfo() : owner_uid(0), gid(0) {}
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+DiskFileInfo::DiskFileInfo(const std::string &path, uint32_t owner_uid, uint32_t gid) :
+  path(path), owner_uid(owner_uid), gid(gid) {}
 
 //------------------------------------------------------------------------------
 // operator==
 //------------------------------------------------------------------------------
 bool DiskFileInfo::operator==(const DiskFileInfo &rhs) const {
   return path==rhs.path
-      && owner==rhs.owner
-      && group==rhs.group;
+      && owner_uid==rhs.owner_uid
+      && gid==rhs.gid;
 }
 
 //------------------------------------------------------------------------------
@@ -50,8 +56,8 @@ bool DiskFileInfo::operator!=(const DiskFileInfo &rhs) const {
 //------------------------------------------------------------------------------
 std::ostream &operator<<(std::ostream &os, const DiskFileInfo &obj) {
   os << "(path=" << obj.path
-     << " owner=" << obj.owner
-     << " group=" << obj.group << ")";
+     << " owner_uid=" << obj.owner_uid
+     << " gid=" << obj.gid << ")";
   return os;
 }
 
diff --git a/common/dataStructures/DiskFileInfo.hpp b/common/dataStructures/DiskFileInfo.hpp
index d0395d55404676cce04fa126068ba91e5c08792f..3266932ad3eed22aa379de5b66acbc6e173a088d 100644
--- a/common/dataStructures/DiskFileInfo.hpp
+++ b/common/dataStructures/DiskFileInfo.hpp
@@ -36,13 +36,15 @@ struct DiskFileInfo {
 
   DiskFileInfo();
 
+  DiskFileInfo(const std::string &path, uint32_t owner_uid, uint32_t gid);
+
   bool operator==(const DiskFileInfo &rhs) const;
 
   bool operator!=(const DiskFileInfo &rhs) const;
 
   std::string path;
-  std::string owner;
-  std::string group;
+  uint32_t    owner_uid;
+  uint32_t    gid;
 
 }; // struct DiskFileInfo
 
diff --git a/common/dataStructures/EntryLog.hpp b/common/dataStructures/EntryLog.hpp
index 5a29b9cf5fa9f188a2cef1913ef4b2acc5709193..9376f02c85c94168a34f68ef6e8db9f426ccc986 100644
--- a/common/dataStructures/EntryLog.hpp
+++ b/common/dataStructures/EntryLog.hpp
@@ -23,8 +23,6 @@
 #include <stdint.h>
 #include <string>
 
-#include "common/dataStructures/UserIdentity.hpp"
-
 namespace cta {
 namespace common {
 namespace dataStructures {
diff --git a/common/dataStructures/ListStorageClassRequest.hpp b/common/dataStructures/ListStorageClassRequest.hpp
index 9bb3a017853e7df39b161a218e5c1dfb2c527ed4..50faa19fe13181060becea5a54522f0f50428c77 100644
--- a/common/dataStructures/ListStorageClassRequest.hpp
+++ b/common/dataStructures/ListStorageClassRequest.hpp
@@ -23,7 +23,7 @@
 #include <stdint.h>
 #include <string>
 
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 
 namespace cta {
 namespace common {
@@ -40,7 +40,7 @@ struct ListStorageClassRequest {
 
   bool operator!=(const ListStorageClassRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
 
 }; // struct ListStorageClassRequest
 
diff --git a/common/UserIdentity.cpp b/common/dataStructures/OwnerIdentity.cpp
similarity index 70%
rename from common/UserIdentity.cpp
rename to common/dataStructures/OwnerIdentity.cpp
index 350523e4fdfd0114b9fbb7890f1f6a5b0a39b8cd..86e7fa3e2ead9921e6a24280a19dc5b1cf3e7451 100644
--- a/common/UserIdentity.cpp
+++ b/common/dataStructures/OwnerIdentity.cpp
@@ -1,6 +1,6 @@
 /*
  * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
+ * Copyright (C) 2019 CERN
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,47 +16,51 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "common/UserIdentity.hpp"
-
 #include <limits>
-#include <unistd.h>
-#include <ostream>
+
+#include "common/dataStructures/OwnerIdentity.hpp"
+#include "common/dataStructures/utils.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+namespace common {
+namespace dataStructures {
 
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-cta::UserIdentity::UserIdentity() throw():
+OwnerIdentity::OwnerIdentity() :
   uid(std::numeric_limits<uid_t>::max()),
   gid(std::numeric_limits<gid_t>::max()) {}
 
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-cta::UserIdentity::UserIdentity(
-  const uint32_t u,
-  const uint32_t g) throw():
-  uid(u),
-  gid(g) {
-}
+OwnerIdentity::OwnerIdentity(uint32_t uid, uint32_t gid) : uid(uid), gid(gid) {}
 
 //------------------------------------------------------------------------------
 // operator==
 //------------------------------------------------------------------------------
-bool cta::UserIdentity::operator==(const UserIdentity &rhs) const {
-  return uid == rhs.uid;
+bool OwnerIdentity::operator==(const OwnerIdentity &rhs) const {
+  return uid == rhs.uid && gid == rhs.gid;
 }
 
 //------------------------------------------------------------------------------
 // operator!=
 //------------------------------------------------------------------------------
-bool cta::UserIdentity::operator!=(const UserIdentity &rhs) const {
+bool OwnerIdentity::operator!=(const OwnerIdentity &rhs) const {
   return !operator==(rhs);
 }
 
 //------------------------------------------------------------------------------
 // operator<<
 //------------------------------------------------------------------------------
-std::ostream &operator<<(std::ostream &os, const cta::UserIdentity &obj) {
-  os << "(uid=" << obj.uid << " gid=" << obj.gid << ")";
+std::ostream &operator<<(std::ostream &os, const OwnerIdentity &obj) {
+  os << "(uid=" << obj.uid
+     << " gid=" << obj.gid << ")";
   return os;
 }
+
+} // namespace dataStructures
+} // namespace common
+} // namespace cta
diff --git a/common/dataStructures/OwnerIdentity.hpp b/common/dataStructures/OwnerIdentity.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0bd3660757e0aa3cceabfa2e9bcbf2ce8a403c92
--- /dev/null
+++ b/common/dataStructures/OwnerIdentity.hpp
@@ -0,0 +1,53 @@
+/**
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <list>
+#include <map>
+#include <stdint.h>
+#include <string>
+
+
+namespace cta {
+namespace common {
+namespace dataStructures {
+
+/**
+ * This struct holds the numeric user ID (uid) and group ID (gid) of a file owner
+ */
+struct OwnerIdentity {
+
+  OwnerIdentity();
+  
+  OwnerIdentity(uint32_t uid, uint32_t gid);
+
+  bool operator==(const OwnerIdentity &rhs) const;
+
+  bool operator!=(const OwnerIdentity &rhs) const;
+
+  uint32_t uid;
+  uint32_t gid;
+
+}; // struct OwnerIdentity
+
+std::ostream &operator<<(std::ostream &os, const OwnerIdentity &obj);
+
+} // namespace dataStructures
+} // namespace common
+} // namespace cta
diff --git a/common/dataStructures/QueueAndMountSummary.cpp b/common/dataStructures/QueueAndMountSummary.cpp
index c636d5dda528232df75dd436a0f3d9872eb05321..4b7c81cea533bc6baed0ffb0dda3543cd122ed39 100644
--- a/common/dataStructures/QueueAndMountSummary.cpp
+++ b/common/dataStructures/QueueAndMountSummary.cpp
@@ -30,15 +30,15 @@ QueueAndMountSummary &QueueAndMountSummary::getOrCreateEntry(std::list<QueueAndM
     const common::dataStructures::VidToTapeMap &vid_to_tapeinfo)
 {
   for (auto & summary: summaryList) {
-    if ((mountType==MountType::ArchiveForUser && summary.tapePool==tapePool) ||
+    if (((mountType==MountType::ArchiveForUser || mountType==MountType::ArchiveForRepack) && summary.tapePool==tapePool) ||
         (mountType==MountType::Retrieve && summary.vid==vid))
       return summary;
   }
-  if (std::set<MountType>({MountType::ArchiveForUser, MountType::Retrieve}).count(mountType)) {
+  if (std::set<MountType>({MountType::ArchiveForUser, MountType::Retrieve, MountType::ArchiveForRepack}).count(mountType)) {
     summaryList.push_back(QueueAndMountSummary());
     summaryList.back().mountType=mountType;
     summaryList.back().tapePool=tapePool;
-    if (MountType::ArchiveForUser==mountType) {
+    if (MountType::ArchiveForUser==mountType || MountType::ArchiveForRepack == mountType) {
       summaryList.back().vid="-";
       summaryList.back().logicalLibrary="-";
     } else {
diff --git a/common/dataStructures/QueueAndMountSummary.hpp b/common/dataStructures/QueueAndMountSummary.hpp
index 009a4811760980f6fec45d41fa94cac022302344..b6ef864047b88ba46c1eddae0fb52f4aa7eee6ec 100644
--- a/common/dataStructures/QueueAndMountSummary.hpp
+++ b/common/dataStructures/QueueAndMountSummary.hpp
@@ -48,6 +48,7 @@ struct QueueAndMountSummary {
   uint64_t filesOnTapes=0;
   uint64_t dataOnTapes=0;
   uint64_t fullTapes=0;
+  uint64_t readOnlyTapes=0;
   uint64_t emptyTapes=0;
   uint64_t disabledTapes=0;
   uint64_t writableTapes=0;
diff --git a/common/dataStructures/UserIdentity.cpp b/common/dataStructures/RequesterIdentity.cpp
similarity index 82%
rename from common/dataStructures/UserIdentity.cpp
rename to common/dataStructures/RequesterIdentity.cpp
index b075b08568eefccb7c42c905dd895442400a8a50..14d6f412158fa56ac0bfea35396683ee4909b527 100644
--- a/common/dataStructures/UserIdentity.cpp
+++ b/common/dataStructures/RequesterIdentity.cpp
@@ -1,6 +1,6 @@
 /*
  * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
+ * Copyright (C) 2019 CERN
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,7 +16,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 #include "common/dataStructures/utils.hpp"
 #include "common/exception/Exception.hpp"
 
@@ -27,19 +27,19 @@ namespace dataStructures {
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-UserIdentity::UserIdentity() { }
+RequesterIdentity::RequesterIdentity() { }
 
 
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-UserIdentity::UserIdentity(const std::string& name, const std::string& group):
+RequesterIdentity::RequesterIdentity(const std::string& name, const std::string& group):
   name(name), group(group) {}
 
 //------------------------------------------------------------------------------
 // operator==
 //------------------------------------------------------------------------------
-bool UserIdentity::operator==(const UserIdentity &rhs) const {
+bool RequesterIdentity::operator==(const RequesterIdentity &rhs) const {
   return name==rhs.name
       && group==rhs.group;
 }
@@ -47,14 +47,14 @@ bool UserIdentity::operator==(const UserIdentity &rhs) const {
 //------------------------------------------------------------------------------
 // operator!=
 //------------------------------------------------------------------------------
-bool UserIdentity::operator!=(const UserIdentity &rhs) const {
+bool RequesterIdentity::operator!=(const RequesterIdentity &rhs) const {
   return !operator==(rhs);
 }
 
 //------------------------------------------------------------------------------
 // operator<<
 //------------------------------------------------------------------------------
-std::ostream &operator<<(std::ostream &os, const UserIdentity &obj) {
+std::ostream &operator<<(std::ostream &os, const RequesterIdentity &obj) {
   os << "(name=" << obj.name
      << " group=" << obj.group << ")";
   return os;
diff --git a/common/dataStructures/UserIdentity.hpp b/common/dataStructures/RequesterIdentity.hpp
similarity index 73%
rename from common/dataStructures/UserIdentity.hpp
rename to common/dataStructures/RequesterIdentity.hpp
index 849a93ff4754f86f3f57c85b35207f8f3622d94e..d72937d515ec8f20c508900efecc7b1503877d37 100644
--- a/common/dataStructures/UserIdentity.hpp
+++ b/common/dataStructures/RequesterIdentity.hpp
@@ -1,6 +1,6 @@
-/*
+/**
  * The CERN Tape Archive (CTA) project
- * Copyright (C) 2015  CERN
+ * Copyright (C) 2019 CERN
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,22 +31,22 @@ namespace dataStructures {
 /**
  * This struct holds the username and group name of a given user 
  */
-struct UserIdentity {
+struct RequesterIdentity {
 
-  UserIdentity();
+  RequesterIdentity();
   
-  UserIdentity(const std::string &name, const std::string &group);
+  RequesterIdentity(const std::string &name, const std::string &group);
 
-  bool operator==(const UserIdentity &rhs) const;
+  bool operator==(const RequesterIdentity &rhs) const;
 
-  bool operator!=(const UserIdentity &rhs) const;
+  bool operator!=(const RequesterIdentity &rhs) const;
 
   std::string name;
   std::string group;
 
-}; // struct UserIdentity
+}; // struct RequesterIdentity
 
-std::ostream &operator<<(std::ostream &os, const UserIdentity &obj);
+std::ostream &operator<<(std::ostream &os, const RequesterIdentity &obj);
 
 } // namespace dataStructures
 } // namespace common
diff --git a/common/dataStructures/RetrieveRequest.hpp b/common/dataStructures/RetrieveRequest.hpp
index 5a82075162cd328bd241c6807dc4be8dd23073ce..ae4b691fbf5230ded5df02aca4d587e9c3551c30 100644
--- a/common/dataStructures/RetrieveRequest.hpp
+++ b/common/dataStructures/RetrieveRequest.hpp
@@ -25,7 +25,7 @@
 
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/EntryLog.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 #include "common/dataStructures/ArchiveRoute.hpp"
 #include "LifecycleTimings.hpp"
 #include "common/optional.hpp"
@@ -45,7 +45,7 @@ struct RetrieveRequest {
 
   bool operator!=(const RetrieveRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
   uint64_t archiveFileID;
   std::string dstURL;
   std::string errorReportURL;
@@ -60,4 +60,4 @@ std::ostream &operator<<(std::ostream &os, const RetrieveRequest &obj);
 
 } // namespace dataStructures
 } // namespace common
-} // namespace cta
\ No newline at end of file
+} // namespace cta
diff --git a/common/dataStructures/Tape.cpp b/common/dataStructures/Tape.cpp
index 78886b1436673f984855569706e8ba054ae952a3..3df67facd87a0cdd7ac816698586eb1bb8f606a2 100644
--- a/common/dataStructures/Tape.cpp
+++ b/common/dataStructures/Tape.cpp
@@ -29,8 +29,11 @@ namespace dataStructures {
 //------------------------------------------------------------------------------
 Tape::Tape():
   lastFSeq(0),
-  capacityInBytes(0),
-  dataOnTapeInBytes(0) {}
+  capacityInBytes(0), 
+  dataOnTapeInBytes(0),
+  full(false),
+  disabled(false),
+  readOnly(false) {}
 
 //------------------------------------------------------------------------------
 // operator==
@@ -45,6 +48,7 @@ bool Tape::operator==(const Tape &rhs) const {
       && encryptionKey==rhs.encryptionKey
       && full==rhs.full
       && disabled==rhs.disabled
+      && readOnly==rhs.readOnly
       && creationLog==rhs.creationLog
       && lastModificationLog==rhs.lastModificationLog
       && comment==rhs.comment
@@ -73,6 +77,7 @@ std::ostream &operator<<(std::ostream &os, const Tape &obj) {
      << " encryptionKey=" << (obj.encryptionKey ? obj.encryptionKey.value() : "null")
      << " full=" << obj.full
      << " disabled=" << obj.disabled
+     << " readOnly=" << obj.readOnly
      << " creationLog=" << obj.creationLog
      << " lastModificationLog=" << obj.lastModificationLog
      << " comment=" << obj.comment
diff --git a/common/dataStructures/Tape.hpp b/common/dataStructures/Tape.hpp
index 436e4040af58953145e45c5f7c829dbc032ee8e5..bbd884b9bc1162126f8ea335c9fe9b74284047b0 100644
--- a/common/dataStructures/Tape.hpp
+++ b/common/dataStructures/Tape.hpp
@@ -62,6 +62,10 @@ struct Tape {
 
   bool full;
   bool disabled;
+  bool readOnly;
+  bool isFromCastor;  
+  uint64_t readMountCount;
+  uint64_t writeMountCount;
   EntryLog creationLog;
   EntryLog lastModificationLog;
   std::string comment;
diff --git a/common/dataStructures/TapeFile.cpp b/common/dataStructures/TapeFile.cpp
index 91dc2601aa7886893f65e0f36ee6183088235632..a0435799838b8f2ca12107d2b9b2cb515cd138d6 100644
--- a/common/dataStructures/TapeFile.cpp
+++ b/common/dataStructures/TapeFile.cpp
@@ -30,7 +30,7 @@ namespace dataStructures {
 TapeFile::TapeFile():
   fSeq(0),
   blockId(0),
-  compressedSize(0),
+  fileSize(0),
   copyNb(0),
   creationTime(0) {}
 
@@ -41,7 +41,7 @@ bool TapeFile::operator==(const TapeFile &rhs) const {
   return vid==rhs.vid
       && fSeq==rhs.fSeq
       && blockId==rhs.blockId
-      && compressedSize==rhs.compressedSize
+      && fileSize==rhs.fileSize
       && copyNb==rhs.copyNb
       && creationTime==rhs.creationTime;
 }
@@ -67,7 +67,7 @@ std::ostream &operator<<(std::ostream &os, const TapeFile &obj) {
   os << "(vid=" << obj.vid
      << " fSeq=" << obj.fSeq
      << " blockId=" << obj.blockId
-     << " compressedSize=" << obj.compressedSize
+     << " fileSize=" << obj.fileSize
      << " copyNb=" << obj.copyNb
      << " creationTime=" << obj.creationTime << ")";
   return os;
diff --git a/common/dataStructures/TapeFile.hpp b/common/dataStructures/TapeFile.hpp
index 64f4d350ecba41de67fc322acdc5cece62bafa54..e679aeff717b2563e2f0b7de0d27a7a60cdf4e46 100644
--- a/common/dataStructures/TapeFile.hpp
+++ b/common/dataStructures/TapeFile.hpp
@@ -20,9 +20,9 @@
 
 #include <list>
 #include <map>
-#include <stdint.h>
 #include <string>
 
+#include <common/checksum/ChecksumBlob.hpp>
 
 namespace cta {
 namespace common {
@@ -56,10 +56,10 @@ struct TapeFile {
   // TODO: change denomination to match SCSI nomenclature (logical object identifier).
   uint64_t blockId;
   /**
-   * The compressed size of the tape file in bytes. In other words the 
-   * actual number of bytes it occupies on tape. 
+   * The uncompressed (logical) size of the tape file in bytes. This field is redundant as it already exists in the
+   * ArchiveFile class, so it may be removed in future.
    */
-  uint64_t compressedSize;
+  uint64_t fileSize;
   /**
    * The copy number of the file. Copy numbers start from 1. Copy number 0 
    * is an invalid copy number. 
@@ -71,14 +71,9 @@ struct TapeFile {
   time_t creationTime;
   
   /**
-   * The checksum type
+   * Set of checksum (type, value) pairs
    */
-  std::string checksumType;
-  
-  /**
-   * The checksum value 
-   */
-  std::string checksumValue;
+  checksum::ChecksumBlob checksumBlob;
   
   /**
    * The vid of the tape file superseding this one (or empty string if not)
diff --git a/common/dataStructures/UpdateFileStorageClassRequest.hpp b/common/dataStructures/UpdateFileStorageClassRequest.hpp
index a8e6c228e1941a7a3bc869dddb6b8d06a72d45f7..1725e7cc9ca3047d1954bff3bd8895a34fc865d1 100644
--- a/common/dataStructures/UpdateFileStorageClassRequest.hpp
+++ b/common/dataStructures/UpdateFileStorageClassRequest.hpp
@@ -24,7 +24,7 @@
 #include <string>
 
 #include "common/dataStructures/DiskFileInfo.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 
 namespace cta {
 namespace common {
@@ -42,7 +42,7 @@ struct UpdateFileStorageClassRequest {
 
   bool operator!=(const UpdateFileStorageClassRequest &rhs) const;
 
-  UserIdentity requester;
+  RequesterIdentity requester;
   uint64_t archiveFileID;
   std::string storageClass;
   DiskFileInfo diskFileInfo;
diff --git a/common/dataStructures/utils.cpp b/common/dataStructures/utils.cpp
index 9a1ffde04798eb6665a71c1ba5adea510d488a1e..6adeecb76a9380a279c87151c3f457ac3229fb1e 100644
--- a/common/dataStructures/utils.cpp
+++ b/common/dataStructures/utils.cpp
@@ -65,4 +65,4 @@ std::ostream &operator<<(std::ostream &os, const std::map<std::string,std::pair<
 
 } // namespace dataStructures
 } // namespace common
-} // namespace cta
\ No newline at end of file
+} // namespace cta
diff --git a/common/dataStructures/utils.hpp b/common/dataStructures/utils.hpp
index cdc92790094274857f8de59bdab6811115c307cd..c5110b8a74ba20042e901431215c88cab0676337 100644
--- a/common/dataStructures/utils.hpp
+++ b/common/dataStructures/utils.hpp
@@ -36,4 +36,4 @@ std::ostream &operator<<(std::ostream &os, const std::map<uint64_t,std::pair<std
 
 } // namespace dataStructures
 } // namespace common
-} // namespace cta
\ No newline at end of file
+} // namespace cta
diff --git a/common/exception/ChecksumBlobSizeMismatch.hpp b/common/exception/ChecksumBlobSizeMismatch.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d4a86e1ec6257aa6d86d4178c03da6dd855482df
--- /dev/null
+++ b/common/exception/ChecksumBlobSizeMismatch.hpp
@@ -0,0 +1,46 @@
+/*!
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+namespace exception {
+
+/**
+ * Exception representing an unexpected mismatch between checksum blob sizes.
+ */
+class ChecksumBlobSizeMismatch: public exception::Exception {
+public:
+  /**
+   * Constructor
+   *
+   * @param context optional context string added to the message at initialisation time
+   * @param embedBacktrace whether to embed a backtrace of where the exception was thrown in the message
+   */
+  ChecksumBlobSizeMismatch(const std::string &context = "", const bool embedBacktrace = true) :
+    Exception(context, embedBacktrace) {}
+
+  /**
+   * Destructor
+   */
+  ~ChecksumBlobSizeMismatch() override {}
+}; // class ChecksumBlobSizeMismatch
+
+}} // namespace cta::exception
diff --git a/catalogue/ChecksumTypeMismatch.hpp b/common/exception/ChecksumTypeMismatch.hpp
similarity index 89%
rename from catalogue/ChecksumTypeMismatch.hpp
rename to common/exception/ChecksumTypeMismatch.hpp
index f0597c42c7de565f6a16a828dd7d318f00af23af..19c5bb6dd9c35cf965c88913a005e475be608c32 100644
--- a/catalogue/ChecksumTypeMismatch.hpp
+++ b/common/exception/ChecksumTypeMismatch.hpp
@@ -21,7 +21,7 @@
 #include "common/exception/Exception.hpp"
 
 namespace cta {
-namespace catalogue {
+namespace exception {
 
 /**
  * Exception representing an unexpected mismatch between checksum types.
@@ -36,13 +36,13 @@ public:
    * @param embedBacktrace whether to embed a backtrace of where the
    * exception was throw in the message
    */
-  ChecksumTypeMismatch(const std::string &context = "", const bool embedBacktrace = true);
+  ChecksumTypeMismatch(const std::string &context = "", const bool embedBacktrace = true) :
+    Exception(context, embedBacktrace) {}
 
   /**
    * Destructor.
    */
-  ~ChecksumTypeMismatch() override;
+  ~ChecksumTypeMismatch() override {}
 }; // class ChecksumTypeMismatch
 
-} // namespace catalogue
-} // namespace cta
+}} // namespace cta::exception
diff --git a/catalogue/ChecksumValueMismatch.hpp b/common/exception/ChecksumValueMismatch.hpp
similarity index 88%
rename from catalogue/ChecksumValueMismatch.hpp
rename to common/exception/ChecksumValueMismatch.hpp
index 67fa9b84442c609559c79a25b770e936e2391e67..fb93da284fb8897361bc9cf10e1cb82c8ac0296d 100644
--- a/catalogue/ChecksumValueMismatch.hpp
+++ b/common/exception/ChecksumValueMismatch.hpp
@@ -21,7 +21,7 @@
 #include "common/exception/Exception.hpp"
 
 namespace cta {
-namespace catalogue {
+namespace exception {
 
 /**
  * Exception representing an unexpected mismatch between checksum values.
@@ -36,13 +36,13 @@ public:
    * @param embedBacktrace whether to embed a backtrace of where the
    * exception was throw in the message
    */
-  ChecksumValueMismatch(const std::string &context = "", const bool embedBacktrace = true);
+  ChecksumValueMismatch(const std::string &context = "", const bool embedBacktrace = true) :
+    Exception(context, embedBacktrace) {}
 
   /**
    * Destructor.
    */
-  ~ChecksumValueMismatch() override;
+  ~ChecksumValueMismatch() override {}
 }; // class ChecksumValueMismatch
 
-} // namespace catalogue
-} // namespace cta
+}} // namespace cta::exception
diff --git a/catalogue/FileSizeMismatch.hpp b/common/exception/FileSizeMismatch.hpp
similarity index 92%
rename from catalogue/FileSizeMismatch.hpp
rename to common/exception/FileSizeMismatch.hpp
index be6558424b136387fd72d4d68915403d297324a0..6847f9fa285365b3fdadb0d030520d28027833c3 100644
--- a/catalogue/FileSizeMismatch.hpp
+++ b/common/exception/FileSizeMismatch.hpp
@@ -36,12 +36,14 @@ public:
    * @param embedBacktrace whether to embed a backtrace of where the
    * exception was throw in the message
    */
-  FileSizeMismatch(const std::string &context = "", const bool embedBacktrace = true);
+  FileSizeMismatch(const std::string &context = "", const bool embedBacktrace = true):
+    cta::exception::Exception(context, embedBacktrace) {
+  }
 
   /**
    * Destructor.
    */
-  ~FileSizeMismatch() override;
+  ~FileSizeMismatch() override {}
 }; // class FileSizeMismatch
 
 } // namespace catalogue
diff --git a/common/exception/TapeFseqMismatch.hpp b/common/exception/TapeFseqMismatch.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e706d7645fcefcbeae4dcf9178746460f848ce6d
--- /dev/null
+++ b/common/exception/TapeFseqMismatch.hpp
@@ -0,0 +1,49 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+namespace exception {
+
+/**
+ * Exception representing an unexpected mismatch between tape file sequence
+ * numbers.
+ */
+class TapeFseqMismatch: public exception::Exception {
+public:
+  /**
+   * Constructor.
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was throw in the message
+   */
+  TapeFseqMismatch(const std::string &context = "", const bool embedBacktrace = true) :
+    Exception(context, embedBacktrace) {}
+
+  /**
+   * Destructor.
+   */
+  ~TapeFseqMismatch() override {}
+}; // class TapeFseqMismatch
+
+}} // namespace cta::exception
diff --git a/common/remoteFS/RemoteFileStatus.cpp b/common/remoteFS/RemoteFileStatus.cpp
index 510a4b75dcd11c8f7f3fc106168b0a6188e89f90..397c3c0106e41322a8d419a649a64dcfc1ec1a12 100644
--- a/common/remoteFS/RemoteFileStatus.cpp
+++ b/common/remoteFS/RemoteFileStatus.cpp
@@ -30,7 +30,7 @@ cta::RemoteFileStatus::RemoteFileStatus():
 // constructor
 //------------------------------------------------------------------------------
 cta::RemoteFileStatus::RemoteFileStatus(
-  const common::dataStructures::UserIdentity &owner,
+  const common::dataStructures::OwnerIdentity &owner,
   const mode_t mode,
   const uint64_t size):
   owner(owner),
diff --git a/common/remoteFS/RemoteFileStatus.hpp b/common/remoteFS/RemoteFileStatus.hpp
index f8d97fb0313bfc5e34017eb92c2c060a648749a1..47d86d181b2ed249511ce5ce996713aa8bcd08c0 100644
--- a/common/remoteFS/RemoteFileStatus.hpp
+++ b/common/remoteFS/RemoteFileStatus.hpp
@@ -18,7 +18,7 @@
 
 #pragma once
 
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/OwnerIdentity.hpp"
 
 #include <stdint.h>
 #include <string>
@@ -44,14 +44,14 @@ struct RemoteFileStatus {
    * @param size The size of the file in bytes.
    */
   RemoteFileStatus(
-    const common::dataStructures::UserIdentity &owner,
+    const common::dataStructures::OwnerIdentity &owner,
     const mode_t mode,
     const uint64_t size);
 
   /**
    * The identity of the owner.
    */
-  common::dataStructures::UserIdentity owner;
+  common::dataStructures::OwnerIdentity owner;
 
   /**
    * The mode bits of the directory entry.
diff --git a/common/utils/utils.cpp b/common/utils/utils.cpp
index 2ad57ee51dd9a1a762b46367182ab17e98a36d94..441f3c3b594fd847c4107afbcf1a97db99174582 100644
--- a/common/utils/utils.cpp
+++ b/common/utils/utils.cpp
@@ -36,7 +36,7 @@
 #include <sys/utsname.h>
 #include <sys/prctl.h>
 #include <iomanip>
-#include <xrootd/XrdClient/XrdClientUrlInfo.hh>
+#include <xrootd/XrdCl/XrdClURL.hh>
 
 using cta::exception::Exception;
 
@@ -720,18 +720,6 @@ uint32_t getAdler32(const uint8_t *buf, const uint32_t len)
   return adler32(checksum, (const Bytef*)buf, len);
 }
 
-//------------------------------------------------------------------------------
-// getAdler32String
-//------------------------------------------------------------------------------
-std::string getAdler32String(const uint8_t *buf, const uint32_t len)
-{
-  const uint32_t checksum = adler32(0L, Z_NULL, 0);
-  std::stringstream ret;
-  ret << "0X" << std::noshowbase << std::hex << std::setw(8) << std::setfill('0') << std::uppercase
-      << adler32(checksum, (const Bytef*)buf, len);
-  return ret.str();
-}
-
 //------------------------------------------------------------------------------
 // getShortHostname
 //------------------------------------------------------------------------------
@@ -859,8 +847,8 @@ std::string getCurrentLocalTime() {
 }
 
 std::string extractPathFromXrootdPath(const std::string& path){
-  XrdClientUrlInfo urlInfo(path.c_str());
-  return std::string(urlInfo.File.c_str());
+  XrdCl::URL urlInfo(path.c_str());
+  return urlInfo.GetPath();
 }
 
 //------------------------------------------------------------------------------
diff --git a/common/utils/utils.hpp b/common/utils/utils.hpp
index 9990ff22db3cdc49e7771839d7d7696de4416ce9..a0d0afa2f96831ae128241141fb12c7b8539dac2 100644
--- a/common/utils/utils.hpp
+++ b/common/utils/utils.hpp
@@ -325,16 +325,6 @@ namespace utils {
    */
   uint32_t getAdler32(const uint8_t *buf, const uint32_t len);
   
-  /**
-   * Returns the alder32 checksum of the specified buffer in string format,
-   * CTA style that is with leading 0x, 8 digits with leading 0 and uppercase.
-   *
-   * @param buf The buffer.
-   * @param len The length of the buffer in bytes.
-   * @return the alder32 checksum of the specified buffer.
-   */
-  std::string getAdler32String(const uint8_t *buf, const uint32_t len);
-
   /**
    * Returns true if the attributes of the current process indicate that it will
    * produce a core dump if it receives a signal whose behaviour is to produce a
diff --git a/continuousintegration/buildtree_runner/vmBootstrap/bootstrapCTA.sh b/continuousintegration/buildtree_runner/vmBootstrap/bootstrapCTA.sh
index 08b99c2c9a424e5833fb7ea810eb634eec67bf9e..219ed85fe634c48079ff3d54a9d11eaa7ffad4eb 100755
--- a/continuousintegration/buildtree_runner/vmBootstrap/bootstrapCTA.sh
+++ b/continuousintegration/buildtree_runner/vmBootstrap/bootstrapCTA.sh
@@ -19,10 +19,9 @@ mkdir -p ~/CTA-build-srpm
 (cd ~/CTA-build-srpm && cmake -DPackageOnly:Bool=true ../CTA; make cta_srpm)
 
 echo Installing repos
-for r in `ls -1 ../../docker/ctafrontend/cc7/etc/yum.repos.d/*.repo`; do
-  yum-config-manager --add-repo=$r
-done
-sudo yum-config-manager --add-repo=ceph-internal.repo
+for r in `ls ../../docker/ctafrontend/cc7/etc/yum.repos.d/*.repo` ceph-internal.repo; do
+  sudo yum-config-manager --add-repo=$r
+done
 sudo yum install -y yum-plugin-priorities
 
 echo Adding versionlock for xrootd:
diff --git a/continuousintegration/buildtree_runner/vmBootstrap/bootstrapKubernetes.sh b/continuousintegration/buildtree_runner/vmBootstrap/bootstrapKubernetes.sh
index 4c287c12ac80955c60174a69859d49ddf2bc1dcc..50168ef4ae096d3d0366c2be32694b8caaaa6301 100755
--- a/continuousintegration/buildtree_runner/vmBootstrap/bootstrapKubernetes.sh
+++ b/continuousintegration/buildtree_runner/vmBootstrap/bootstrapKubernetes.sh
@@ -7,6 +7,7 @@ sudo systemctl start etcd
 sudo mkdir -p /etc/kubernetes
 sudo cp -rv kubernetes/* /etc/kubernetes
 sudo perl -p -i -e 's/^(KUBELET_ARGS=).*$/$1"--allow_privileged=true --cluster-dns=10.254.199.254 --cluster-domain=cluster.local"/' /etc/kubernetes/kubelet
+sudo perl -p -i -e 's/^(KUBELET_POD_INFRA_CONTAINER=)/#$1/' /etc/kubernetes/kubelet
 # We put the config in 2 places as flanneld might fetch it from different places.
 curl -L http://localhost:2379/v2/keys/flannel/network/config -XPUT --data-urlencode value@kubernetes/flannel-config.json
 curl -L http://localhost:2379/v2/keys/atomic.io/network/config -XPUT --data-urlencode value@kubernetes/flannel-config.json
diff --git a/continuousintegration/docker/ctafrontend/cc7/buildtree-stage3-scripts/Dockerfile b/continuousintegration/docker/ctafrontend/cc7/buildtree-stage3-scripts/Dockerfile
index a44522ce4603e89f32eb15ff79eca546492fb0b3..51baca4a4e6e1a195e1ae814d2ff246ab8f4b517 100644
--- a/continuousintegration/docker/ctafrontend/cc7/buildtree-stage3-scripts/Dockerfile
+++ b/continuousintegration/docker/ctafrontend/cc7/buildtree-stage3-scripts/Dockerfile
@@ -32,9 +32,6 @@ ENV ORCHESTRATIONDIR="continuousintegration/orchestration/pods" \
 # Add pod specific configuration
 ADD ${BASEDIR}/config ${LOCALORCHESTRATIONDIR}
 
-# Add eos_wfe_scripts directory inside ctaeos specific folder
-ADD eos_wfe_scripts ${LOCALORCHESTRATIONDIR}/ctaeos/var/eos/wfe/bash/
-
 # Add orchestration run scripts locally
 ADD ${BASEDIR}/opt /opt
 
diff --git a/continuousintegration/docker/ctafrontend/cc7/ci_runner/Dockerfile b/continuousintegration/docker/ctafrontend/cc7/ci_runner/Dockerfile
index e0ced426652fb3fa94d2418326c28b3abf51f357..7e810579ba2173e0775afef4cced431f55280005 100644
--- a/continuousintegration/docker/ctafrontend/cc7/ci_runner/Dockerfile
+++ b/continuousintegration/docker/ctafrontend/cc7/ci_runner/Dockerfile
@@ -84,9 +84,6 @@ RUN yum-config-manager --enable epel --setopt="epel.priority=4" \
 # Add pod specific configuration
 ADD ${BASEDIR}/config ${LOCALORCHESTRATIONDIR}
 
-# Add eos_wfe_scripts directory inside ctaeos specific folder
-ADD eos_wfe_scripts ${LOCALORCHESTRATIONDIR}/ctaeos/var/eos/wfe/bash/
-
 # Add orchestration run scripts locally
 ADD ${BASEDIR}/opt /opt
 
diff --git a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf
new file mode 100644
index 0000000000000000000000000000000000000000..6f3d411e8d398fcaefa2004a8cedd4b1fbeeab0e
--- /dev/null
+++ b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/cta/cta-fst-gcd.conf
@@ -0,0 +1,8 @@
+[main]
+logfile = /var/log/eos/fst/cta-fst-gcd.log
+mgmhost = localhost.cern.ch
+minfreebytes = 0
+gcagesecs = 1
+queryperiodsecs = 20
+mainloopperiodsecs = 10
+xrdsecssskt = /etc/eos.keytab
diff --git a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
index ef935af2434f7c02f34be1f9507a6c37cbcf5c22..022a19576cad61680ab80d1952dec2e3502fa6bc 100644
--- a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
+++ b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/sysconfig/eos
@@ -55,3 +55,6 @@ export EOS_TTY_BROACAST_EGREP="\"CRIT|ALERT|EMERG|PROGRESS\""
 
 # define the fst delete rate
 export EOS_FST_DELETE_QUERY_INTERVAL=5
+
+# Tell clients that there is a CTA backend by starting the sitename with "cern_tape_archive_"
+XRDSITE=cern_tape_archive_ci
diff --git a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/xrd.cf.mgm b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/xrd.cf.mgm
index 3034d5c29a1959915f1a5bcb71dd0149f691b7c4..d95d45ce6075114ab85969f7c1b42bf306b67401 100644
--- a/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/xrd.cf.mgm
+++ b/continuousintegration/docker/ctafrontend/cc7/config/ctaeos/etc/xrd.cf.mgm
@@ -1,5 +1,10 @@
 ###########################################################
-xrootd.fslib libXrdEosMgm.so
+# A prepare request with the Prep_EVICT flag can only call
+# XrdMgmOfs::prepare() if XRootD believes an alternative
+# Prepare plugin is present.  "xrootd.fslib -2" invokes
+# XrdSfsGetFileSystem2() which tells XRootD that such
+# a plugin is present.
+xrootd.fslib -2 libXrdEosMgm.so
 xrootd.seclib libXrdSec.so
 xrootd.async off nosf
 xrootd.chksum adler32
@@ -64,6 +69,3 @@ mgmofs.autosaveconfig true
 # Set the endpoint and resources for EOS/CTA Workflows
 mgmofs.protowfendpoint ctafrontend:10955
 mgmofs.protowfresource /ctafrontend
-
-# Turn on the MGM LRU tape aware garbage collector
-mgmofs.tapeawaregc.defaultspace.enable true
diff --git a/continuousintegration/docker/ctafrontend/cc7/doublebuildtree-stage2b-scripts/Dockerfile b/continuousintegration/docker/ctafrontend/cc7/doublebuildtree-stage2b-scripts/Dockerfile
index 17d4bf8c2931801eb2562f01ed87412ae270a508..89ccd66f57c732ce24bacafd2f93efccd625290a 100644
--- a/continuousintegration/docker/ctafrontend/cc7/doublebuildtree-stage2b-scripts/Dockerfile
+++ b/continuousintegration/docker/ctafrontend/cc7/doublebuildtree-stage2b-scripts/Dockerfile
@@ -32,9 +32,6 @@ ENV ORCHESTRATIONDIR="continuousintegration/orchestration/pods" \
 # Add pod specific configuration
 ADD ${BASEDIR}/config ${LOCALORCHESTRATIONDIR}
 
-# Add eos_wfe_scripts directory inside ctaeos specific folder
-ADD eos_wfe_scripts ${LOCALORCHESTRATIONDIR}/ctaeos/var/eos/wfe/bash/
-
 # Add orchestration run scripts locally
 ADD ${BASEDIR}/opt /opt
 
diff --git a/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list b/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
index 07eae6d18d726206669e26efc00666a1a4f04594..4654466fe19b1333a2a3b387baf67fd05473b25e 100644
--- a/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
+++ b/continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list
@@ -1,34 +1,34 @@
-0:eos-archive-4.5.2-1.el7.cern.x86_64
-0:eos-cleanup-4.5.2-1.el7.cern.x86_64
-0:eos-client-4.5.2-1.el7.cern.x86_64
-0:eos-debuginfo-4.5.2-1.el7.cern.x86_64
-0:eos-fuse-4.5.2-1.el7.cern.x86_64
-0:eos-fuse-core-4.5.2-1.el7.cern.x86_64
-0:eos-fuse-sysv-4.5.2-1.el7.cern.x86_64
-0:eos-fusex-4.5.2-1.el7.cern.x86_64
-0:eos-fusex-core-4.5.2-1.el7.cern.x86_64
-0:eos-fusex-selinux-4.5.2-1.el7.cern.x86_64
-0:eos-server-4.5.2-1.el7.cern.x86_64
-0:eos-srm-4.5.2-1.el7.cern.x86_64
-0:eos-test-4.5.2-1.el7.cern.x86_64
-0:eos-testkeytab-4.5.2-1.el7.cern.x86_64
-1:python2-xrootd-4.10.0-0.rc4.el7.*
-1:python3-xrootd-4.10.0-0.rc4.el7.*
-1:xrootd-4.10.0-0.rc4.el7.*
-1:xrootd-client-4.10.0-0.rc4.el7.*
-1:xrootd-client-devel-4.10.0-0.rc4.el7.*
-1:xrootd-client-libs-4.10.0-0.rc4.el7.*
-1:xrootd-debuginfo-4.10.0-0.rc4.el7.*
-1:xrootd-devel-4.10.0-0.rc4.el7.*
-1:xrootd-doc-4.10.0-0.rc4.el7.*
-1:xrootd-fuse-4.10.0-0.rc4.el7.*
-1:xrootd-libs-4.10.0-0.rc4.el7.*
-1:xrootd-private-devel-4.10.0-0.rc4.el7.*
-1:xrootd-selinux-4.10.0-0.rc4.el7.*
-1:xrootd-server-4.10.0-0.rc4.el7.*
-1:xrootd-server-devel-4.10.0-0.rc4.el7.*
-1:xrootd-server-libs-4.10.0-0.rc4.el7.*
-1:xrootd-tests-4.10.0-0.rc4.el7.*
+0:eos-archive-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-cleanup-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-client-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-debuginfo-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fuse-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fuse-core-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fuse-sysv-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fusex-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fusex-core-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-fusex-selinux-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-server-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-srm-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-test-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+0:eos-testkeytab-4.5.3-20190730172748gitea30da3.el7.cern.x86_64
+1:python2-xrootd-4.10.0-1.el7.*
+1:python3-xrootd-4.10.0-1.el7.*
+1:xrootd-4.10.0-1.el7.*
+1:xrootd-client-4.10.0-1.el7.*
+1:xrootd-client-devel-4.10.0-1.el7.*
+1:xrootd-client-libs-4.10.0-1.el7.*
+1:xrootd-debuginfo-4.10.0-1.el7.*
+1:xrootd-devel-4.10.0-1.el7.*
+1:xrootd-doc-4.10.0-1.el7.*
+1:xrootd-fuse-4.10.0-1.el7.*
+1:xrootd-libs-4.10.0-1.el7.*
+1:xrootd-private-devel-4.10.0-1.el7.*
+1:xrootd-selinux-4.10.0-1.el7.*
+1:xrootd-server-4.10.0-1.el7.*
+1:xrootd-server-devel-4.10.0-1.el7.*
+1:xrootd-server-libs-4.10.0-1.el7.*
+1:xrootd-tests-4.10.0-1.el7.*
 2:ceph-12.2.2-0.el7.x86_64
 2:ceph-base-12.2.2-0.el7.x86_64
 2:ceph-common-12.2.2-0.el7.x86_64
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
index 472d0e9bc22725e1befe3df505a40cca1e6630ba..6917d8e37e6fb79917dab17dc59a053680dca832 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/ctaeos-mgm.sh
@@ -9,13 +9,18 @@ if [ ! -e /etc/buildtreeRunner ]; then
   yum-config-manager --enable eos-citrine
 
   # Install missing RPMs
-  yum -y install eos-client eos-server xrootd-client xrootd-debuginfo xrootd-server cta-cli cta-debuginfo sudo logrotate
+  yum -y install eos-client eos-server xrootd-client xrootd-debuginfo xrootd-server cta-cli cta-debuginfo sudo logrotate cta-fst-gcd
 
   ## Keep this temporary fix that may be needed if going to protobuf3-3.5.1 for CTA
   # Install eos-protobuf3 separately as eos is OK with protobuf3 but cannot use it..
   # yum -y install eos-protobuf3
 fi
 
+# Check that the /usr/bin/cta-fst-gcd executable has been installed
+test -e /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd EXISTS" || exit 1
+test -f /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd IS A REGULAR FILE" || exit 1
+test -x /usr/bin/cta-fst-gcd && echo "/usr/bin/cta-fst-gcd IS EXECUTABLE" || exit 1
+
 # create local users as the mgm is the only one doing the uid/user/group mapping in the full infrastructure
 groupadd --gid 1100 eosusers
 groupadd --gid 1200 powerusers
@@ -124,6 +129,11 @@ echo
 echo "Limits summary for user daemon:"
 sudo -u daemon bash -c 'ulimit -a'
 
+NB_STARTED_CTA_FST_GCD=0
+if test -f /var/log/eos/fst/cta-fst-gcd.log; then
+  NB_STARTED_CTA_FST_GCD=`grep "cta-fst-gcd started" /var/log/eos/fst/cta-fst-gcd.log | wc -l`
+fi
+
 if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
   # generate eos_env file for systemd
   cat /etc/sysconfig/eos | sed -e 's/^export\s*//' > /etc/sysconfig/eos_env
@@ -140,6 +150,8 @@ if [ "-${CI_CONTEXT}-" == '-systemd-' ]; then
 
   systemctl status eos@{mq,mgm,fst}
 
+  systemctl start cta-fst-gcd
+
 else
   # Using jemalloc as specified in
   # it-puppet-module-eos:
@@ -155,6 +167,27 @@ else
     /usr/bin/xrootd -n mq -c /etc/xrd.cf.mq -l /var/log/eos/xrdlog.mq -b -Rdaemon
     /usr/bin/xrootd -n mgm -c /etc/xrd.cf.mgm -m -l /var/log/eos/xrdlog.mgm -b -Rdaemon
     /usr/bin/xrootd -n fst -c /etc/xrd.cf.fst -l /var/log/eos/xrdlog.fst -b -Rdaemon
+
+
+  runuser -u daemon setsid /usr/bin/cta-fst-gcd > /dev/null 2>&1 < /dev/null &
+fi
+
+echo "Giving cta-fst-gcd 1 second to start logging"
+sleep 1
+
+let EXPECTED_NB_STARTED_CTA_FST_GCD=NB_STARTED_CTA_FST_GCD+1
+ACTUAL_NB_STARTED_CTA_FST_GCD=0
+if test -f /var/log/eos/fst/cta-fst-gcd.log; then
+  ACTUAL_NB_STARTED_CTA_FST_GCD=`grep "cta-fst-gcd started" /var/log/eos/fst/cta-fst-gcd.log | wc -l`
+else
+  echo "/usr/bin/cta-fst-gcd DOES NOT EXIST"
+  exit 1
+fi
+if test ${EXPECTED_NB_STARTED_CTA_FST_GCD} = ${ACTUAL_NB_STARTED_CTA_FST_GCD}; then
+  echo "/usr/bin/cta-fst-gcd LOGGED 'cta-fst-gcd started'"
+else
+  echo "/usr/bin/cta-fst-gcd DID NOT LOG 'cta-fst-gcd started'"
+  exit 1
 fi
 
   eos vid enable krb5
@@ -174,8 +207,7 @@ fi
   eos fs add -m ${TAPE_FS_ID} tape localhost:1234 /does_not_exist tape
   eos mkdir ${CTA_PROC_DIR}
   eos mkdir ${CTA_WF_DIR}
-  eos attr set CTA_TapeFsId=${TAPE_FS_ID} ${CTA_WF_DIR}
-  
+
   # ${CTA_TEST_DIR} must be writable by eosusers and powerusers
   # but as there is no sticky bit in eos, we need to remove deletion for non owner to eosusers members
   # this is achieved through the ACLs.
@@ -186,10 +218,6 @@ fi
 
   eos attr set CTA_StorageClass=ctaStorageClass ${CTA_TEST_DIR}
     
-  # hack before it is fixed in EOS
-    TAPE_FS_ID_TOSET=`eos attr ls ${CTA_WF_DIR} | grep CTA_TapeFsId= | tr '"' ' ' | cut -d ' ' -f 2`
-    eos attr set CTA_TapeFsId=${TAPE_FS_ID_TOSET} ${CTA_TEST_DIR}
-
   # Link the attributes of CTA worklow directory to the test directory
   eos attr link ${CTA_WF_DIR} ${CTA_TEST_DIR}
 
@@ -237,8 +265,6 @@ fi
 # prepare EOS garbage collectors
   # enable the 'file archived' garbage collector
   eos space config default space.filearchivedgc=on
-  # set the number of free bytes at which the MGM LRU tape aware garabge collector will start deleting redundant disk files
-  eos space config default space.tapeawaregc.minfreebytes=0
 
 # configure preprod directory separately
 /opt/run/bin/eos_configure_preprod.sh
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/eos_configure_preprod.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/eos_configure_preprod.sh
index 22dd1c4d301bb476cb15e465e0c20b5c52a1e4ac..d784ef2981300dc4756dbad0edcdf9e9e7fdfdc5 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/eos_configure_preprod.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/eos_configure_preprod.sh
@@ -6,13 +6,14 @@ eos chmod 555 ${PREPROD_DIR}
 eos attr set sys.acl=g:eosusers:rwx!d,u:poweruser1:rwx+dp,u:poweruser2:rwx+dp ${PREPROD_DIR}
 
 eos attr set CTA_StorageClass=ctaStorageClass ${PREPROD_DIR}
-eos attr set CTA_TapeFsId=65535 ${PREPROD_DIR}
 
 eos attr set sys.workflow.sync::create.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::closew.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::archived.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::archive_failed.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::prepare.default="proto" ${PREPROD_DIR}
+eos attr set sys.workflow.sync::abort_prepare.default="proto" ${PREPROD_DIR}
+eos attr set sys.workflow.sync::evict_prepare.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::closew.retrieve_written="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::retrieve_failed.default="proto" ${PREPROD_DIR}
 eos attr set sys.workflow.sync::delete.default="proto" ${PREPROD_DIR}
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
index 683ea0b52b3a6f3cab270cb22fb77ecbb8d31436..e620de65b92d48ca3f868184700e0afc493bc593 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
@@ -79,7 +79,7 @@ else
 fi
 
 
-
+: <<'COMMENT_LABEL_PART'
 if [ ! $LIBRARYTYPE == "mhvtl" ]; then
   echo "Real tapes, not labelling";
 else
@@ -108,5 +108,6 @@ else
     echo "OK"
   done
 fi
+COMMENT_LABEL_PART
 
 echo "### INIT COMPLETED ###"
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/taped.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/taped.sh
index d7fb9ae63476d711cfe18d061de329cee393d503..e3f919bc746c9de183857b1ef6d9b7ab49eb6209 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/taped.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/taped.sh
@@ -8,7 +8,7 @@ if [ ! -e /etc/buildtreeRunner ]; then
   yum-config-manager --enable castor
 
   # Install missing RPMs
-  yum -y install mt-st lsscsi sg3_utils cta-taped cta-debuginfo ceph-common
+  yum -y install mt-st lsscsi sg3_utils cta-taped cta-tape-label cta-debuginfo ceph-common
 fi
 
 echo "Using this configuration for library:"
diff --git a/continuousintegration/orchestration/create_instance.sh b/continuousintegration/orchestration/create_instance.sh
index 79d61dccfccc54c1d19284a59408c56a3b143adb..534c7113fc3f09d6f1825feb3f15e51bae94681e 100755
--- a/continuousintegration/orchestration/create_instance.sh
+++ b/continuousintegration/orchestration/create_instance.sh
@@ -363,7 +363,7 @@ CTA_ENDPOINT=ctafrontend:10955
 
 echo "Setting workflows in namespace ${instance} pod ctaeos:"
 CTA_WF_DIR=/eos/${EOSINSTANCE}/proc/cta/workflow
-for WORKFLOW in sync::create.default sync::closew.default sync::archived.default sync::archive_failed.default sync::prepare.default sync::closew.retrieve_written sync::retrieve_failed.default sync::delete.default
+for WORKFLOW in sync::create.default sync::closew.default sync::archived.default sync::archive_failed.default sync::prepare.default sync::abort_prepare.default sync::evict_prepare.default sync::closew.retrieve_written sync::retrieve_failed.default sync::delete.default
 do
   echo "eos attr set sys.workflow.${WORKFLOW}=\"proto\" ${CTA_WF_DIR}"
   kubectl --namespace=${instance} exec ctaeos -- bash -c "eos attr set sys.workflow.${WORKFLOW}=\"proto\" ${CTA_WF_DIR}"
diff --git a/continuousintegration/orchestration/tests/client_ar.sh b/continuousintegration/orchestration/tests/client_ar.sh
index 720e37999881e8da13371fece103fefd03e1da98..ae13afd26a0e92c1830ee5d1c40e5c7dc7da06c1 100644
--- a/continuousintegration/orchestration/tests/client_ar.sh
+++ b/continuousintegration/orchestration/tests/client_ar.sh
@@ -23,7 +23,6 @@ TAPEAWAREGC=0
 
 NB_BATCH_PROCS=500  # number of parallel batch processes
 BATCH_SIZE=20    # number of files per batch process
-GC_MINFREEBYTES=2000000000000000000 # value for space.tapeawaregc.minfreebytes initially (for default space). Set if TAPEAWAREGC=1
 
 SSH_OPTIONS='-o BatchMode=yes -o ConnectTimeout=10'
 
@@ -126,13 +125,7 @@ fi
 
 if [[ $TAPEAWAREGC == 1 ]]; then
     echo "Enabling tape aware garbage collector"
-    ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.tapeawaregc.minfreebytes=${GC_MINFREEBYTES} || die "Could not set space.tapeawaregc.minfreebytes to ${GC_MINFREEBYTES}"
     ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.filearchivedgc=off || die "Could not disable filearchivedgc"
-else
-    echo "Enabling file archived garbage collector"
-    # no ssh for CI
-    #ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.tapeawaregc.minfreebytes=0 || die "Could not set space.tapeawaregc.minfreebytes to 0"
-    #ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.filearchivedgc=on || die "Could not enable filearchivedgc"
 fi
 
 EOS_DIR=''
@@ -257,14 +250,6 @@ sleep 10
 echo "###"
 
 
-if [[ $TAPEAWAREGC == 1 ]]; then
-    echo "Disabling file tape aware garbage collector"
-    ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.tapeawaregc.minfreebytes=0 || die "Could not set space.tapeawaregc.minfreebytes to 0"
-    # we do not need it for retrieves
-    # ssh ${SSH_OPTIONS} -l root ${EOSINSTANCE} eos space config default space.filearchivedgc=on || die "Could not enable filearchivedgc"
-fi
-
-
 echo "$(date +%s): Trigerring EOS retrieve workflow as poweruser1:powerusers (12001:1200)"
 
 rm -f ${STATUS_FILE}
@@ -331,9 +316,9 @@ done
 
 TO_STAGERRM=$(cat ${STATUS_FILE} | wc -l)
 
-echo "$(date +%s): $TO_STAGERRM files to be stagerrm'ed from EOS"
+echo "$(date +%s): $TO_STAGERRM files to be stagerrm'ed from EOS using 'xrdfs prepare -e'"
 # We need the -s as we are staging the files from tape (see xrootd prepare definition)
-cat ${STATUS_FILE} | sed -e "s%^%${EOS_DIR}/%" | XrdSecPROTOCOL=krb5 KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 xargs --max-procs=10 -n 40 eos root://${EOSINSTANCE} stagerrm   > /dev/null
+cat ${STATUS_FILE} | sed -e "s%^%${EOS_DIR}/%" | XrdSecPROTOCOL=krb5 KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 xargs --max-procs=10 -n 40 xrdfs ${EOSINSTANCE} prepare -e > /dev/null
 
 
 LEFTOVER=0
@@ -342,7 +327,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do
 done
 
 STAGERRMED=$((${TO_STAGERRM}-${LEFTOVER}))
-echo "$(date +%s): $STAGERRMED files stagerrmed from EOS"
+echo "$(date +%s): $STAGERRMED files stagerrmed from EOS 'xrdfs prepare -e'"
 
 LASTCOUNT=${STAGERRMED}
 
diff --git a/continuousintegration/orchestration/tests/prepare_tests.sh b/continuousintegration/orchestration/tests/prepare_tests.sh
index a5571eca251c48d9031cf82f1068591c2756b647..2f80787d510d0508cf3f69a75a37250e8074f2b4 100755
--- a/continuousintegration/orchestration/tests/prepare_tests.sh
+++ b/continuousintegration/orchestration/tests/prepare_tests.sh
@@ -96,6 +96,7 @@ echo "Preparing CTA configuration for tests"
       --vid ${VID}                                                         \
       --disabled false                                                     \
       --full false                                                         \
+      --readonly false                                                     \
       --comment "ctasystest"
   done
   kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin storageclass add   \
@@ -134,6 +135,20 @@ echo "Preparing CTA configuration for tests"
      --name powerusers                                                  \
      --mountpolicy ctasystest --comment "ctasystest"
 
+echo "Labeling tapes:"
+  # add all tapes
+  for ((i=0; i<${#TAPES[@]}; i++)); do
+    VID=${TAPES[${i}]}
+    echo "  cta-tape-label --vid ${VID}"
+    # for debug use
+      # kubectl --namespace ${NAMESPACE} exec tpsrv01 -c taped  -- cta-tape-label --vid ${VID} --debug
+    kubectl --namespace ${NAMESPACE} exec tpsrv01 -c taped  -- cta-tape-label --vid ${VID}
+    if [ $? -ne 0 ]; then
+      echo "ERROR: failed to label the tape ${VID}"
+      exit 1
+    fi
+  done
+
 echo "Setting drive up: ${DRIVENAMES[${driveslot}]}"
   kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin drive up ${DRIVENAMES[${driveslot}]}
   kubectl --namespace ${NAMESPACE} exec ctacli -- cta-admin drive ls
diff --git a/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh b/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
index dabdec972d5e8a11b3c42689459d34b442cf0705..9b9babf38fde92c45765b365ec306c9f48551e45 100755
--- a/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
+++ b/continuousintegration/orchestration/tests/repack_systemtest_wrapper.sh
@@ -82,7 +82,7 @@ fi
 echo "Reclaiming tape ${VID_TO_REPACK}"
 kubectl -n ${NAMESPACE} exec ctacli -- cta-admin tape reclaim --vid ${VID_TO_REPACK}
 
-NB_FILES=1152
+NB_FILES=1153
 kubectl -n ${NAMESPACE} exec client -- bash /root/client_ar.sh -n ${NB_FILES} -s ${FILE_SIZE_KB} -p 100 -d /eos/ctaeos/preprod -v -A || exit 1
 
 VID_TO_REPACK=$(getFirstVidContainingFiles)
diff --git a/continuousintegration/orchestration/tests/simpletest.cli.sh b/continuousintegration/orchestration/tests/simpletest.cli.sh
index 4b936eeb4b3459a957dddb68f3f057d951cc007d..7067d4ff5898062e4880d22a418ad9c61ba316a0 100755
--- a/continuousintegration/orchestration/tests/simpletest.cli.sh
+++ b/continuousintegration/orchestration/tests/simpletest.cli.sh
@@ -6,7 +6,7 @@ cta logicallibrary add --name VLSTK --comment "ctasystest"
 
 cta tapepool add --name ctasystest --partialtapesnumber 5 --encrypted false --comment "ctasystest"
 
-cta tape add --logicallibrary VLSTK --tapepool ctasystest --capacity 1000000000 --comment "ctasystest" --vid ${VID} --disabled false --full false
+cta tape add --logicallibrary VLSTK --tapepool ctasystest --capacity 1000000000 --comment "ctasystest" --vid ${VID} --disabled false --full false --readonly false
 
 cta storageclass add --instance root --name ctaStorageClass --copynb 1 --comment "ctasystest"
 
diff --git a/continuousintegration/orchestration/tests/systest.sh b/continuousintegration/orchestration/tests/systest.sh
index 9b22b63aba15016f9c665e2cf9baefa2bb2703cd..d020e2f4e29f8bb54a67ed4dad76522d21886ef3 100755
--- a/continuousintegration/orchestration/tests/systest.sh
+++ b/continuousintegration/orchestration/tests/systest.sh
@@ -78,6 +78,7 @@ echo "Preparing CTA for tests"
       --vid ${VID}                                                    \
       --disabled false                                                \
       --full false                                                    \
+      --readonly false                                                \
       --comment "ctasystest"
   done
   kubectl --namespace ${NAMESPACE} exec ctacli -- cta storageclass add   \
diff --git a/continuousintegration/orchestration/tests/systest_xrdcp_many_concurrent_files.sh b/continuousintegration/orchestration/tests/systest_xrdcp_many_concurrent_files.sh
index e579c4e1f720174bb942bcaec05526686d1d4638..4f7d493c2468d95ce7d725cd3bd6ce2bab901a5f 100755
--- a/continuousintegration/orchestration/tests/systest_xrdcp_many_concurrent_files.sh
+++ b/continuousintegration/orchestration/tests/systest_xrdcp_many_concurrent_files.sh
@@ -78,6 +78,7 @@ for ((i=0; i<${#TAPES[@]}; i++)); do
     --vid ${VID}                                                    \
     --disabled false                                                \
     --full false                                                    \
+    --readonly false                                                \
     --comment "ctasystest"
 done
 
diff --git a/continuousintegration/orchestration/tests/systest_xrdcp_many_files.sh b/continuousintegration/orchestration/tests/systest_xrdcp_many_files.sh
index a2a3cbb5411b63b64fb4f55eb4eee47356256a86..00c4b76db3de2794f45d39df8569d8772680f585 100755
--- a/continuousintegration/orchestration/tests/systest_xrdcp_many_files.sh
+++ b/continuousintegration/orchestration/tests/systest_xrdcp_many_files.sh
@@ -78,6 +78,7 @@ for ((i=0; i<${#TAPES[@]}; i++)); do
     --vid ${VID}                                                    \
     --disabled false                                                \
     --full false                                                    \
+    --readonly false                                                \
     --comment "ctasystest"
 done
 
diff --git a/cta.spec.in b/cta.spec.in
index b1274b078f616e39beec6ad3b62bbd8eb69c0e5d..7d283f9150fddbc95ab6834e305c6a0648b9b973 100644
--- a/cta.spec.in
+++ b/cta.spec.in
@@ -58,6 +58,7 @@ BuildRequires: mariadb-devel
 BuildRequires: postgresql-devel
 BuildRequires: valgrind
 BuildRequires: valgrind-devel
+BuildRequires: grpc, grpc-devel, grpc-static, grpc-plugins
 %{?systemd_requires}
 BuildRequires: systemd
 # only build debug info if you're building the whole code
@@ -250,6 +251,7 @@ Unit tests and system tests with virtual tape drives
 %{_libdir}/libctadaemonunittests.so*
 %{_libdir}/libctamediachangerunittests.so*
 %{_libdir}/libctadiskunittests.so*
+%{_libdir}/libctatapelabelunittests.so*
 %{_bindir}/cta-systemTests
 %{_libdir}/libctadaemonunittests-multiprocess.so*
 %attr(0644,root,root) %{_datadir}/%{name}-%{ctaVersion}/unittest/*.suppr
@@ -287,12 +289,37 @@ Scripts and utilities to faciliate working with the CTA catalogue
 %attr(0755,root,root) %{_bindir}/cta-catalogue-schema-drop
 %attr(0755,root,root) %{_bindir}/cta-catalogue-schema-verify
 %attr(0755,root,root) %{_bindir}/cta-database-poll
+%attr(0755,root,root) %{_bindir}/cta-upgrade-db
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-catalogue-admin-user-create.1cta.gz
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-catalogue-schema-create.1cta.gz
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-catalogue-schema-drop.1cta.gz
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-catalogue-schema-verify.1cta.gz
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-database-poll.1cta.gz
 
+%package -n cta-migration-tools
+Summary: Tools for migrating CASTOR to CTA
+Group: Application/CTA
+Requires: cta-lib = %{version}-%{release}
+Requires: castor-dbtools >= 2.1.18
+%description -n cta-migration-tools
+CERN Tape Archive:
+Tools for migrating the CASTOR catalogue to CTA and injecting CASTOR file and
+directory metadata into the EOS namespace.
+%files -n cta-migration-tools
+%attr(0755,root,root) %{_bindir}/eos-import-dirs
+%attr(0755,root,root) %{_bindir}/eos-import-files
+%attr(0755,root,root) %{_bindir}/eos-test-dir-inject
+%attr(0755,root,root) %{_bindir}/eos-test-file-inject
+%attr(0755,root,root) %{_bindir}/eos-test-inject.sh
+%attr(0755,root,root) %{_bindir}/eos-insert-missing-dirs
+%attr(0755,root,root) %{_bindir}/json-pretty-print.sh
+%attr(0755,root,root) %{_bindir}/startvoexport.sh
+%attr(0755,root,root) %{_bindir}/exporttapepool.sh
+%attr(0755,root,root) %{_bindir}/undoexporttapepool.sh
+%attr(0755,root,root) %{_bindir}/tapepool_castor_to_cta.py
+%attr(0755,root,root) %{_bindir}/complete_tapepool_export.py
+%attr(0644,root,root) %config(noreplace) %{_sysconfdir}/cta/castor-migration.conf.example
+
 %package -n cta-rmcd
 Summary: The Remote Media Changer daemon (rmcd)
 Group: Application/CTA
@@ -333,6 +360,21 @@ The client of the Remote Media Changer Daemon (rmcd)
 %attr(0644,root,root) %config(noreplace) %{_sysconfdir}/cta/cta-smc.conf
 %attr(0644,root,root) %doc /usr/share/man/man1/cta-smc.1cta.gz
 
+%package -n cta-tape-label
+Summary: The command-line tool for pre-labelling a CTA tape
+Group: Application/CTA
+Requires: cta-lib = %{version}-%{release}
+Requires(post): /usr/sbin/setcap
+%description -n cta-tape-label
+CERN Tape Archive:
+The command-line tool for pre-labelling a CTA tape.
+%files -n cta-tape-label
+%defattr(-,root,root)
+%attr(0750,cta,tape) %{_bindir}/cta-tape-label
+%attr(0644,root,root) %doc /usr/share/man/man1/cta-tape-label.1cta.gz
+%post -n cta-tape-label
+/usr/sbin/setcap cap_sys_rawio+ep %{_bindir}/cta-tape-label
+
 %package -n cta-common
 Summary: CERN Tape Archive common items
 Group: Application/CTA
diff --git a/eos_cta/CMakeLists.txt b/eos_cta/CMakeLists.txt
index f1579decef1ad56eda4eb49fb96c5e4eb25db0fd..d6a1b5149ae360d56f9bcbf14f57ce8492ba1b36 100644
--- a/eos_cta/CMakeLists.txt
+++ b/eos_cta/CMakeLists.txt
@@ -1,5 +1,5 @@
 # The CERN Tape Archive (CTA) project
-# Copyright 2018 CERN
+# Copyright 2019 CERN
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -16,18 +16,33 @@
 cmake_minimum_required(VERSION 2.6)
 
 find_package(Protobuf3 REQUIRED)
-set(PROTOBUF3_INCLUDE_PATH ${XRD_SSI_PB_DIR}/eos_cta/protobuf)
-file(GLOB ProtoFiles "${PROTOBUF3_INCLUDE_PATH}/*.proto")
+find_package(GRPC REQUIRED)
 
-PROTOBUF3_GENERATE_CPP(ProtoSources ProtoHeaders ${ProtoFiles})
+include_directories(${PROTOBUF3_INCLUDE_DIRS})
+
+# Select protobuf files
+set(PROTOBUF_EOS_CTA_DIR ${XRD_SSI_PB_DIR}/eos_cta/protobuf)
+file(GLOB ProtoFilesEosCta "${PROTOBUF_EOS_CTA_DIR}/*.proto")
+set(PROTOBUF_MIGRATION_DIR ${PROJECT_SOURCE_DIR}/migration/grpc-proto/protobuf)
+file(GLOB ProtoFilesMigration "${PROTOBUF_MIGRATION_DIR}/*.proto")
+
+# Compile protobufs
+set(PROTOBUF3_INCLUDE_PATH ${PROTOBUF_EOS_CTA_DIR}:${PROTOBUF_MIGRATION_DIR})
+PROTOBUF3_GENERATE_CPP(ProtoSourcesEosCta ProtoHeadersEosCta ${ProtoFilesEosCta})
+PROTOBUF3_GENERATE_CPP(ProtoSourcesMigration ProtoHeadersMigration ${ProtoFilesMigration})
+
+# Compile gRPC code
+set(GRPC_PROTOBUF_PATH "${CMAKE_BINARY_DIR}/eos_cta/")
+grpc_generate_cpp(ProtoGrpcSourcesMigration ProtoGrpcHeadersMigration ${GRPC_PROTOBUF_PATH} ${ProtoFilesMigration})
 
 set_source_files_properties(
-  ${ProtoSources} ${ProtoHeaders}
-  PROPERTIES GENERATED 1)
+  ${ProtoSourcesEosCta} ${ProtoHeadersEosCta}
+  ${ProtoSourcesMigration} ${ProtoHeadersMigration}
+  ${ProtoGrpcSourcesMigration} ${ProtoGrpcHeadersMigration}
+  PROPERTIES GENERATED TRUE)
 
-foreach(PROTO_SRC ${ProtoSources})
+foreach(PROTO_SRC ${ProtoSourcesEosCta} ${ProtoSourcesMigration} ${ProtoGrpcSourcesMigration})
   set_property(SOURCE ${PROTO_SRC} PROPERTY COMPILE_FLAGS " -Wno-missing-field-initializers -fPIC -Wno-narrowing -Wno-implicit-fallthrough")
-
   # Add -Wno-narrowing -Wno-implicit-fallthrough compiler flags if using gcc version 7 or greater
   if(CMAKE_COMPILER_IS_GNUCC)
     if(GCC_VERSION VERSION_EQUAL 7 OR GCC_VERSION VERSION_GREATER 7)
@@ -35,9 +50,15 @@ foreach(PROTO_SRC ${ProtoSources})
     endif(GCC_VERSION VERSION_EQUAL 7 OR GCC_VERSION VERSION_GREATER 7)
   endif(CMAKE_COMPILER_IS_GNUCC)
 endforeach(PROTO_SRC)
-set(CTA_FRONT_END_MESSAGES_SRC_FILES ${ProtoSources})
 
-include_directories(${PROTOBUF3_INCLUDE_DIRS})
-add_library(XrdSsiPbEosCta ${CTA_FRONT_END_MESSAGES_SRC_FILES})
-set_target_properties(XrdSsiPbEosCta PROPERTIES LINKER_LANGUAGE CXX)
+add_library(XrdSsiPbEosCta ${ProtoSourcesEosCta})
+set_target_properties(XrdSsiPbEosCta PROPERTIES
+  LINKER_LANGUAGE CXX
+  POSITION_INDEPENDENT_CODE TRUE)
 target_link_libraries(XrdSsiPbEosCta ${PROTOBUF3_LIBRARIES})
+
+add_library(EosMigration ${ProtoSourcesMigration} ${ProtoGrpcSourcesMigration})
+set_target_properties(EosMigration PROPERTIES
+  LINKER_LANGUAGE CXX
+  POSITION_INDEPENDENT_CODE TRUE)
+target_link_libraries(EosMigration ${PROTOBUF3_GRPC_LIBRARIES})
diff --git a/eos_wfe_scripts/Makefile b/eos_wfe_scripts/Makefile
deleted file mode 100644
index 8cc3bc2ab2ee2b7cc65d061e9cabff74b2a4a791..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-SPECFILE    = $(shell find -maxdepth 1 -type f -name *.spec)
-PACKAGE     = $(shell awk '$$1 == "Name:"     { print $$2 }' $(SPECFILE) )
-VERSION     = $(shell awk '$$1 == "Version:"  { print $$2 }' $(SPECFILE) )
-RELEASE     = $(shell awk '$$1 == "Release:"  { print $$2 }' $(SPECFILE) )
-TARFILE     = $(PACKAGE)-$(VERSION).tar.gz
-RPMTOPDIR   = $(shell rpm --eval '%{_topdir}')
-BUILDARCH   = $(shell awk '$$1 == "BuildArch:"  { print $$2 }' $(SPECFILE) )
-BUILTRPM    = $(RPMTOPDIR)/RPMS/$(BUILDARCH)/$(PACKAGE)-$(VERSION)-$(RELEASE).$(BUILDARCH).rpm
-
-all: $(TARFILE)
-
-
-$(TARFILE):
-	tar cvzf $(TARFILE) --hard-dereference --dereference --exclude-vcs --transform 's,^,$(PACKAGE)-$(VERSION)/,' *
-
-clean:
-	rm $(TARFILE)
-
-build: $(TARFILE) $(SPECFILE)
-	mv $(TARFILE) $(RPMTOPDIR)/SOURCES
-	rpmbuild -ba $(SPECFILE)
-
-deb_build: build
-	sudo alien $(BUILTRPM) --scripts -k
diff --git a/eos_wfe_scripts/README b/eos_wfe_scripts/README
deleted file mode 100644
index 2ca63c0e36033a58b0a0e4c573cc9180e99576ee..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/README
+++ /dev/null
@@ -1,31 +0,0 @@
-This directory contains scripts to be executed by the EOS workflow engine.
-These scripts should be installed in the following directory on the EOS
-mgm node:
-
-    /var/eos/wfe/bash/
-
-For example the following script is executed when a tape server triggers
-an archived.default workflow event:
-
-    /var/eos/wfe/bash/create_tape_drop_disk_replicas
-
-The following "eos attr set" commands can be used to build the workflow
-actions of an EOSi/CTA instance.  Please remember to replace
-CTA_WF_DIR with the path specific to you're EOS instance, for example
-/eos/dev/proc/cta/workflow.  Please also replace CTA_BIN with the full
-path of the cta command-line tool.
-
-    EXECUTED WHEN AN END USER CLOSES A FILE THEY ARE WRITING TO EOS DISK
-    eos attr set sys.workflow.closew.default="bash:shell:cta XrdSecPROTOCOL=sss XrdSecSSSKT=${CTA_KT} ${CTA_BIN} archive --user <eos::wfe::rusername> --group <eos::wfe::rgroupname> --diskid <eos::wfe::fid> --instance eoscta --srcurl <eos::wfe::turl> --size <eos::wfe::size> --checksumtype <eos::wfe::checksumtype> --checksumvalue <eos::wfe::checksum> --storageclass <eos::wfe::cxattr:CTA_StorageClass> --diskfilepath <eos::wfe::path> --diskfileowner <eos::wfe::username> --diskfilegroup <eos::wfe::groupname> --recoveryblob:base64 <eos::wfe::base64:metadata> --reportURL 'eosQuery://${EOS_MGM_HOST}//eos/wfe/passwd?mgm.pcmd=event\&mgm.fid=<eos::wfe::fxid>\&mgm.logid=cta\&mgm.event=archived\&mgm.workflow=default\&mgm.path=/eos/wfe/passwd\&mgm.ruid=0\&mgm.rgid=0' --stderr" ${CTA_WF_DIR}
-
-    EXECUTED WHEN A TAPE SERVER REPORTS TO EOS THAT A FILE IS SAFELY STORED ON TAPE
-    eos attr set sys.workflow.archived.default="bash:create_tape_drop_disk_replicas:cta <eos::wfe::path> <eos::wfe::cxattr:CTA_TapeFsId>" ${CTA_WF_DIR}
-
-    EXECUTED WHEN AN END USER ON THE MGM NODE RUNS THE "xrdfs prepare" COMMAND
-    eos attr set sys.workflow.sync::prepare.default="bash:retrieve_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::turl> <eos::wfe::username> <eos::wfe::groupname> <eos::wfe::base64:metadata> <eos::wfe::path>" ${CTA_WF_DIR}
-
-    EXECUTED WHEN A TAPE SERVER CLOSES A FILE IT IS RETRIEVING TO DISK
-    eos attr set sys.workflow.closew.CTA_retrieve="bash:shell:cta eos attr set 'CTA_retrieved_timestamp=\"\`date\`\"' <eos::wfe::path>" ${CTA_WF_DIR}
-
-    EXECUTED WHEN AN END USER RUNS THE "eos rm" COMMAND
-    eos attr set sys.workflow.sync::delete.default="bash:delete_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::path>" ${CTA_WF_DIR}
diff --git a/eos_wfe_scripts/create_tape_drop_disk_replicas b/eos_wfe_scripts/create_tape_drop_disk_replicas
deleted file mode 100755
index 12acea3666a2c401387a444282d2171d63399038..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/create_tape_drop_disk_replicas
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-if test $# -ne 3; then
-  echo "Wrong number of command-line arguments"
-  echo "Usage: create_tape_drop_disk_replicas wf_tag file_path tape_fs_id"
-  exit -1
-fi
-
-export XRD_STREAMTIMEOUT=600     # increased from 60s
-export XRD_TIMEOUTRESOLUTION=600 # increased from 15s
-
-WF_TAG="$1"
-FILE_PATH="$2"
-TAPE_FS_ID="$3"
-
-LOG_DATE=`/usr/bin/date +%s | /usr/bin/tr '\n' ' ' ; /usr/bin/date`
-LOG_SCRIPT_NAME=`/usr/bin/basename $0`
-LOG_FILE="/var/log/eos/wfe/${WF_TAG}.log"
-
-# Creating tape replica
-echo "${LOG_DATE} ${LOG_SCRIPT_NAME} creating tape replica with fsid ${TAPE_FS_ID} for ${FILE_PATH}" >> ${LOG_FILE}
-OUTPUT=`/usr/bin/eos -r 0 0 file tag "${FILE_PATH}" +${TAPE_FS_ID} 2>&1`
-RESULT=$?
-if [ 0 -ne ${RESULT} ]; then
-  echo "${LOG_DATE} ${LOG_SCRIPT_NAME} failed to create tape replica with fsid ${TAPE_FS_ID} for ${FILE_PATH}: ${RESULT} ${OUTPUT}" >> ${LOG_FILE}
-  exit 1
-fi
-
-# Checking tape replica
-#echo "${LOG_DATE} ${LOG_SCRIPT_NAME} checking tape replica for ${FILE_PATH}" >> ${LOG_FILE}
-OUTPUT=`/usr/bin/eos -r 0 0 attr ls "${FILE_PATH}" 2>&1 | /usr/bin/grep -c '^sys.archiveFileId=\"'`
-RESULT=$?
-if [ 1 -gt ${OUTPUT} ]; then
-  echo "${LOG_DATE} ${LOG_SCRIPT_NAME} missing tape replica sys.archiveFileId for ${FILE_PATH}: ${RESULT} ${OUTPUT}" >> ${LOG_FILE}
-  exit 1
-fi
-OUTPUT=`/usr/bin/eos -r 0 0 ls -y "${FILE_PATH}" 2>&1 | /usr/bin/grep -c '^d.::t[123456789]'`
-RESULT=$?
-if [ 1 -gt ${OUTPUT} ]; then
-  echo "${LOG_DATE} ${LOG_SCRIPT_NAME} tape replica (expecting at least d?::t1) missing in EOS for ${FILE_PATH}: ${RESULT} ${OUTPUT}" >> ${LOG_FILE}
-  exit 1
-fi
-
-# Deleting disk replica
-for DISK_FSID in `/usr/bin/eos file info "${FILE_PATH}" -m | /usr/bin/sed s/\ /'\n'/g | /usr/bin/grep fsid | /usr/bin/sed s/fsid=// | /usr/bin/grep -v ${TAPE_FS_ID}`; do
-  echo "${LOG_DATE} ${LOG_SCRIPT_NAME} deleting disk replica with fsid ${DISK_FSID} for ${FILE_PATH}" >> ${LOG_FILE}
-  OUTPUT=`/usr/bin/eos -r 0 0 file drop "${FILE_PATH}" ${DISK_FSID} 2>&1`
-  RESULT=$?
-  if [ 0 -ne ${RESULT} ]; then
-    echo "${LOG_DATE} ${LOG_SCRIPT_NAME} failed to delete disk replica with fsid ${DISK_FSID} for ${FILE_PATH}: ${RESULT} ${OUTPUT}" >> ${LOG_FILE}
-    exit 1
-  fi
-done
diff --git a/eos_wfe_scripts/cta_eos_wfe_scripts.spec b/eos_wfe_scripts/cta_eos_wfe_scripts.spec
deleted file mode 100644
index cb6620974bfc903da9bfbdabfb74d7998b142270..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/cta_eos_wfe_scripts.spec
+++ /dev/null
@@ -1,42 +0,0 @@
-Summary: CERN Tape Archive workflow scripts for EOS
-Name: cta_eos_wfe_scripts
-Version: 0.1
-Release: 1
-License: GPL
-Group: Applications/System
-Buildroot: %{_tmppath}/%{name}-%{version}
-Source: %{name}-%{version}.tar.gz
-Group: Applications/System
-BuildArch: noarch
-requires: eos-server
-
-%description
-eos_wfe_scripts contains all the workflows needed for archival from EOS to CTA and for retrieva from CTA to EOS.
-This version contains all the file for the so called *preproduction* workflows.
-
-
-%prep
-%setup -n %{name}-%{version}
-
-
-%build
-
-
-%install
-[ -d %{buildroot} ] && rm -rf %{buildroot}
-
-mkdir -p %{buildroot}/var/eos/wfe/bash
-install -m 755 create_tape_drop_disk_replicas %{buildroot}/var/eos/wfe/bash/create_tape_drop_disk_replicas
-install -m 755 delete_archive_file %{buildroot}/var/eos/wfe/bash/delete_archive_file
-install -m 755 retrieve_archive_file %{buildroot}/var/eos/wfe/bash/retrieve_archive_file
-
-
-%clean
-rm -rf %{buildroot}
-
-
-%files
-%defattr(-,daemon,daemon)
-/var/eos/wfe/bash/create_tape_drop_disk_replicas
-/var/eos/wfe/bash/delete_archive_file
-/var/eos/wfe/bash/retrieve_archive_file
diff --git a/eos_wfe_scripts/current-workflows-on-EOSCTATAPE b/eos_wfe_scripts/current-workflows-on-EOSCTATAPE
deleted file mode 100644
index 1418a7a4c8e2f5d79ff460d9a91313193ac40aeb..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/current-workflows-on-EOSCTATAPE
+++ /dev/null
@@ -1,57 +0,0 @@
-This should be made into a script at some point.
-
-Vladimir Bahyl - 09/2017
-
--------------------------------------------------------------------------
-
-DATE: Fri Jul 28 10:23:12 CEST 2017
-
-[root@p06253947b39467 ~]# eos attr ls /eos/dev/proc/cta/workflow
-CTA_TapeFsId="65535"
-
-sys.workflow.archived.default="bash:create_tape_drop_disk_replicas:cta <eos::wfe::path> <eos::wfe::cxattr:CTA_TapeFsId>"
-
-sys.workflow.closew.CTA_retrieve="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 eos attr set 'CTA_retrieved_timestamp="`date`"' <eos::wfe::path>"
-
-sys.workflow.closew.default="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 XrdSecPROTOCOL=sss XrdSecSSSKT=/etc/cta/cta-cli.sss.keytab /usr/bin/cta archive --user <eos::wfe::rusername> --group <eos::wfe::rgroupname> --diskid <eos::wfe::fid> --instance eoscta --srcurl <eos::wfe::turl> --size <eos::wfe::size> --checksumtype <eos::wfe::checksumtype> --checksumvalue <eos::wfe::checksum> --storageclass <eos::wfe::cxattr:CTA_StorageClass> --diskfilepath <eos::wfe::path> --diskfileowner <eos::wfe::username> --diskfilegroup <eos::wfe::groupname> --recoveryblob:base64 <eos::wfe::base64:metadata> --reportURL 'eosQuery://p06253947b39467.cern.ch//eos/wfe/passwd?mgm.pcmd=event\&mgm.fid=<eos::wfe::fxid>\&mgm.logid=cta\&mgm.event=archived\&mgm.workflow=default\&mgm.path=/eos/wfe/passwd\&mgm.ruid=0\&mgm.rgid=0' --stderr"
-
-sys.workflow.sync::delete.default="bash:delete_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::path>"
-
-sys.workflow.sync::prepare.default="bash:retrieve_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::turl> <eos::wfe::username> <eos::wfe::groupname> <eos::wfe::base64:metadata> <eos::wfe::path>"
-
--------------------------------------------------------------------------
-
-DATE: Tue Sep 19 15:28:58 CEST 2017
-
-[root@p06253947b39467 ~]# eos attr ls /eos/dev/proc/cta/workflow
-CTA_TapeFsId="65535"
-
-sys.workflow.archived.default="bash:create_tape_drop_disk_replicas:cta <eos::wfe::path> <eos::wfe::cxattr:CTA_TapeFsId>"
-
-sys.workflow.closew.CTA_retrieve="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 eos attr set 'CTA_retrieved_timestamp="`date`"' <eos::wfe::path>"
-
-sys.workflow.closew.default="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 XrdSecPROTOCOL=sss XrdSecSSSKT=/etc/cta/cta-cli.sss.keytab /usr/bin/cta archive --user <eos::wfe::rusername> --group <eos::wfe::rgroupname> --diskid <eos::wfe::fid> --instance eoscta --srcurl <eos::wfe::turl> --size <eos::wfe::size> --checksumtype <eos::wfe::checksumtype> --checksumvalue <eos::wfe::checksum> --storageclass <eos::wfe::cxattr:CTA_StorageClass> --diskfilepath <eos::wfe::path> --diskfileowner <eos::wfe::username> --diskfilegroup <eos::wfe::groupname> --recoveryblob:base64 <eos::wfe::base64:metadata> --reportURL 'eosQuery://p06253947b39467.cern.ch//eos/wfe/passwd?mgm.pcmd=event\&mgm.fid=<eos::wfe::fxid>\&mgm.logid=cta\&mgm.event=archived\&mgm.workflow=default\&mgm.path=/eos/wfe/passwd\&mgm.ruid=0\&mgm.rgid=0' --stderr"
-
-sys.workflow.sync::delete.default="bash:delete_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::path>"
-
-sys.workflow.sync::prepare.default="bash:retrieve_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::turl> <eos::wfe::username> <eos::wfe::groupname> <eos::wfe::base64:metadata> <eos::wfe::path>"
-
--------------------------------------------------------------------------
-
-DATE: Tue Sep 19 15:34:10 CEST 2017
-
-[root@p06253947b39467 ~]# eos attr ls /eos/dev/proc/cta/workflow
-CTA_TapeFsId="65535"
-
-sys.workflow.archived.default="bash:create_tape_drop_disk_replicas:cta <eos::wfe::path> <eos::wfe::cxattr:CTA_TapeFsId>"
-
-sys.workflow.closew.CTA_retrieve="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 eos attr set 'CTA_retrieved_timestamp="`date`"' <eos::wfe::path>"
-
-sys.workflow.closew.default="bash:shell:cta XRD_STREAMTIMEOUT=600 XRD_TIMEOUTRESOLUTION=600 XrdSecPROTOCOL=sss XrdSecSSSKT=/etc/cta/cta-cli.sss.keytab /usr/bin/cta archive --user <eos::wfe::rusername> --group <eos::wfe::rgroupname> --diskid <eos::wfe::fid> --instance eoscta --srcurl <eos::wfe::turl> --size <eos::wfe::size> --checksumtype <eos::wfe::checksumtype> --checksumvalue <eos::wfe::checksum> --storageclass <eos::wfe::cxattr:CTA_StorageClass> --diskfilepath <eos::wfe::path> --diskfileowner <eos::wfe::username> --diskfilegroup <eos::wfe::groupname> --recoveryblob:base64 cmVjb3ZlcnkK --reportURL 'eosQuery://p06253947b39467.cern.ch//eos/wfe/passwd?mgm.pcmd=event\&mgm.fid=<eos::wfe::fxid>\&mgm.logid=cta\&mgm.event=archived\&mgm.workflow=default\&mgm.path=/eos/wfe/passwd\&mgm.ruid=0\&mgm.rgid=0' --stderr"
-
-sys.workflow.sync::delete.default="bash:delete_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::path>"
-
-sys.workflow.sync::prepare.default="bash:retrieve_archive_file:cta <eos::wfe::rusername> <eos::wfe::rgroupname> <eos::wfe::fxattr:sys.archiveFileId> <eos::wfe::turl> <eos::wfe::username> <eos::wfe::groupname> cmVjb3ZlcnkK <eos::wfe::path>"
-
--------------------------------------------------------------------------
-
diff --git a/eos_wfe_scripts/delete_archive_file b/eos_wfe_scripts/delete_archive_file
deleted file mode 100755
index ffa8ae5211a30f38f59f69cf7038a5bb3bcddd3a..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/delete_archive_file
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-EINVAL=22
-ECANCELED=125
-
-export XrdSecPROTOCOL=sss
-export XrdSecSSSKT=/etc/cta/cta-cli.sss.keytab   # This location is used on EOSCTATAPE AND CI
-
-export XRD_STREAMTIMEOUT=600     # increased from 60s
-export XRD_TIMEOUTRESOLUTION=600 # increased from 15s
-
-if test $# -ne 5; then
-  echo "Wrong number of command-line arguments"
-  echo "Usage: delete_archive_file wf_tag rusername rgroupname archive_file_id file_path"
-  exit ${EINVAL}
-fi
-
-CTA_BIN=/usr/bin/eoscta_stub
-
-WF_TAG="$1"
-RUSERNAME="$2"
-RGROUPNAME="$3"
-ARCHIVE_FILE_ID="$4"
-FILE_PATH="$5"
-
-LOG_DATE=`/usr/bin/date +%s | /usr/bin/tr '\n' ' ' ; /usr/bin/date`
-LOG_SCRIPT_NAME=`/usr/bin/basename $0`
-LOG_FILE="/var/log/eos/wfe/${WF_TAG}.log"
-
-if test UNDEF = ${ARCHIVE_FILE_ID}; then
-  echo "$LOG_DATE $LOG_SCRIPT_NAME ignoring deletion of non-existent tape archive file: rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit 0
-fi
-
-if RESULT=`2>&1 ${CTA_BIN} deletearchive --user ${RUSERNAME} --group ${RGROUPNAME} --id ${ARCHIVE_FILE_ID}`; then
-  echo "$LOG_DATE $LOG_SCRIPT_NAME deleted tape archive file: rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit 0
-else
-  echo "$LOG_DATE $LOG_SCRIPT_NAME failed to delete tape archive file: error=${RESULT} rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit ${ECANCELED}
-fi
diff --git a/eos_wfe_scripts/retrieve_archive_file b/eos_wfe_scripts/retrieve_archive_file
deleted file mode 100755
index 5085be1cf72769e4bccafd05afa2af3d8066b768..0000000000000000000000000000000000000000
--- a/eos_wfe_scripts/retrieve_archive_file
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-EINVAL=22
-ECANCELED=125
-
-export XrdSecPROTOCOL=sss
-export XrdSecSSSKT=/etc/cta/cta-cli.sss.keytab   # This location is used on EOSCTATAPE AND CI
-
-export XRD_STREAMTIMEOUT=600     # increased from 60s
-export XRD_TIMEOUTRESOLUTION=600 # increased from 15s
-
-if test $# -ne 9; then
-  echo "Wrong number of command-line arguments"
-  echo "Usage: retrieve_archive_file wf_tag rusername rgroupname archive_file_id turl disk_username disk_groupname metadata_base64 file_path"
-  exit ${EINVAL}
-fi
-
-CTA_BIN=/usr/bin/eoscta_stub
-
-WF_TAG="$1"
-RUSERNAME="$2"
-RGROUPNAME="$3"
-ARCHIVE_FILE_ID="$4"
-TURL="$5"
-DISK_USERNAME="$6"
-DISK_GROUPNAME="$7"
-METADATA_BASE64="$8"
-FILE_PATH="$9"
-
-DST_URL=${TURL}'&eos.ruid=0&eos.rgid=0&eos.injection=1&eos.workflow=CTA_retrieve'
-
-LOG_DATE=`/usr/bin/date +%s | /usr/bin/tr '\n' ' ' ; /usr/bin/date`
-LOG_SCRIPT_NAME=`/usr/bin/basename $0`
-LOG_FILE="/var/log/eos/wfe/${WF_TAG}.log"
-
-if test UNDEF = ${ARCHIVE_FILE_ID}; then
-  echo "$LOG_DATE $LOG_SCRIPT_NAME cannot retrieve an archive file without the sys.archiveFileId attribute being set: rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} dsturl=${DST_URL} disk_username=${DISK_USERNAME} disk_groupname=${DISK_GROUPNAME} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit ${ECANCELED}
-fi
-
-if ! RESULT=`unset XrdSecPROTOCOL XrdSecSSSKT ; /usr/bin/eos ls -y ${FILE_PATH} | /usr/bin/grep -s ^d0`; then
-  echo "$LOG_DATE $LOG_SCRIPT_NAME file already has at least one online replica on disk: rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} dsturl=${DST_URL} disk_username=${DISK_USERNAME} disk_groupname=${DISK_GROUPNAME} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit 0
-fi
-
-if RESULT=`2>&1 ${CTA_BIN} retrieve --user ${RUSERNAME} --group ${RGROUPNAME} --id ${ARCHIVE_FILE_ID} --dsturl ${DST_URL} --diskfilepath ${FILE_PATH} --diskfileowner ${DISK_USERNAME} --diskfilegroup ${DISK_GROUPNAME} --recoveryblob:base64 ${METADATA_BASE64}`; then
-  echo "$LOG_DATE $LOG_SCRIPT_NAME queued retrieve request: rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} dsturl=${DST_URL} disk_username=${DISK_USERNAME} disk_groupname=${DISK_GROUPNAME} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit 0
-else
-  echo "TURL=${TURL}" >> ${LOG_FILE}
-  echo "DST_URL=${DST_URL}" >> ${LOG_FILE}
-  echo ${CTA_BIN} retrieve --user ${RUSERNAME} --group ${RGROUPNAME} --id ${ARCHIVE_FILE_ID} --dsturl "${DST_URL}" --diskfilepath ${FILE_PATH} --diskfileowner ${DISK_USERNAME} --diskfilegroup ${DISK_GROUPNAME} >> ${LOG_FILE}
-  echo "$LOG_DATE $LOG_SCRIPT_NAME failed to queue retrieve request: error=${RESULT} rusername=${RUSERNAME} rgroupname=${RGROUPNAME} archiveFileId=${ARCHIVE_FILE_ID} dsturl=${DST_URL} disk_username=${DISK_USERNAME} disk_groupname=${DISK_GROUPNAME} path=${FILE_PATH}" >> ${LOG_FILE}
-  exit ${ECANCELED}
-fi
diff --git a/objectstore/Algorithms.hpp b/objectstore/Algorithms.hpp
index af76c69c0454e61c7f6122cbdf24bc2bc11f2578..6968fd165b36ba00eebd131b2155e1c76d6ce0a5 100644
--- a/objectstore/Algorithms.hpp
+++ b/objectstore/Algorithms.hpp
@@ -55,15 +55,28 @@ public:
     log::TimingList timingList;
     utils::Timer t;
     ContainerTraits<Q,C>::getLockedAndFetched(cont, contLock, m_agentReference, contId, lc);
+    timingList.insertAndReset("queueLockFetchTime", t);
+    auto contSummaryBefore = ContainerTraits<Q,C>::getContainerSummary(cont);
     ContainerTraits<Q,C>::addReferencesAndCommit(cont, elements, m_agentReference, lc);
+    timingList.insertAndReset("queueProcessAndCommitTime", t);
     auto failedOwnershipSwitchElements = ContainerTraits<Q,C>::switchElementsOwnership(elements, cont.getAddressIfSet(),
         prevContAddress, timingList, t, lc);
+    timingList.insertAndReset("requestsUpdatingTime", t);
     // If ownership switching failed, remove failed object from queue to not leave stale pointers.
     if (failedOwnershipSwitchElements.size()) {
       ContainerTraits<Q,C>::removeReferencesAndCommit(cont, failedOwnershipSwitchElements);
+      timingList.insertAndReset("queueRecommitTime", t);
     }
+    auto contSummaryAfter = ContainerTraits<Q,C>::getContainerSummary(cont);
     // We are now done with the container.
     contLock.release();
+    timingList.insertAndReset("queueUnlockTime", t);
+    log::ScopedParamContainer params(lc);
+    params.add("C", ContainerTraits<Q,C>::c_containerTypeName)
+          .add(ContainerTraits<Q,C>::c_identifierType, contId)
+          .add("containerAddress", cont.getAddressIfSet());
+    contSummaryAfter.addDeltaToLog(contSummaryBefore, params);
+    timingList.addToLog(params);
     if (failedOwnershipSwitchElements.empty()) {
       // The good case: all elements went through.
       std::list<std::string> transferedElements;
@@ -72,6 +85,7 @@ public:
       }
       m_agentReference.removeBatchFromOwnership(transferedElements, m_backend);
       // That's it, we're done.
+      lc.log(log::INFO, "In ContainerAlgorithms::referenceAndSwitchOwnership(): Requeued a batch of elements.");
       return;
     } else {
       // Bad case: we have to filter the elements and remove ownership only for the successful ones.
@@ -87,6 +101,9 @@ public:
       }
       if (transferedElements.size()) m_agentReference.removeBatchFromOwnership(transferedElements, m_backend);
       failureEx.failedElements = failedOwnershipSwitchElements;
+      params.add("errorCount", failedOwnershipSwitchElements.size());
+      lc.log(log::WARNING, "In ContainerAlgorithms::referenceAndSwitchOwnership(): "
+          "Encountered problems while requeuing a batch of elements");
       throw failureEx;
     }
   }
diff --git a/objectstore/AlgorithmsTest.cpp b/objectstore/AlgorithmsTest.cpp
index 2bfbfc165730e33d88cd759f937d29e2a60b9c1a..9505536bc402b3cd90e99c26cf2f1c5182eff82e 100644
--- a/objectstore/AlgorithmsTest.cpp
+++ b/objectstore/AlgorithmsTest.cpp
@@ -50,8 +50,7 @@ void fillRetrieveRequests(
     cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
     rqc.archiveFile.archiveFileID = 123456789L;
     rqc.archiveFile.diskFileId = "eos://diskFile";
-    rqc.archiveFile.checksumType = "";
-    rqc.archiveFile.checksumValue = "";
+    rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
     rqc.archiveFile.creationTime = 0;
     rqc.archiveFile.reconciliationTime = 0;
     rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -61,8 +60,7 @@ void fillRetrieveRequests(
     {
       cta::common::dataStructures::TapeFile tf;
       tf.blockId = 0;
-      tf.compressedSize = 1;
-      tf.compressedSize = 1;
+      tf.fileSize = 1;
       tf.copyNb = 1;
       tf.creationTime = time(nullptr);
       tf.fSeq = i;
@@ -131,8 +129,7 @@ TEST(ObjectStore, ArchiveQueueAlgorithms) {
     cta::common::dataStructures::ArchiveFile aFile;
     aFile.archiveFileID = 123456789L;
     aFile.diskFileId = "eos://diskFile";
-    aFile.checksumType = "";
-    aFile.checksumValue = "";
+    aFile.checksumBlob.insert(cta::checksum::NONE, "");
     aFile.creationTime = 0;
     aFile.reconciliationTime = 0;
     aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -150,7 +147,7 @@ TEST(ObjectStore, ArchiveQueueAlgorithms) {
     ar.setMountPolicy(mp);
     ar.setArchiveReportURL("");
     ar.setArchiveErrorReportURL("");
-    ar.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+    ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
     ar.setSrcURL("root://eoseos/myFile");
     ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
     ar.insert();
diff --git a/objectstore/ArchiveFileSerDeser.hpp b/objectstore/ArchiveFileSerDeser.hpp
index 3e3d205df2afb50e0bbf0dd20983ae8f7ed7f867..a404aee75cee3a3deda2cf880e13fa3df54cd570 100644
--- a/objectstore/ArchiveFileSerDeser.hpp
+++ b/objectstore/ArchiveFileSerDeser.hpp
@@ -18,7 +18,6 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
 #include "objectstore/cta.pb.h"
 #include "common/dataStructures/TapeFile.hpp"
 #include "EntryLogSerDeser.hpp"
@@ -43,8 +42,7 @@ public:
   void serialize (cta::objectstore::serializers::ArchiveFile & osaf) const {
     osaf.set_archivefileid(archiveFileID);
     osaf.set_creationtime(creationTime);
-    osaf.set_checksumtype(checksumType);
-    osaf.set_checksumvalue(checksumValue);
+    osaf.set_checksumblob(checksumBlob.serialize());
     osaf.set_creationtime(creationTime);
     DiskFileInfoSerDeser dfisd(diskFileInfo);
     dfisd.serialize(*osaf.mutable_diskfileinfo());
@@ -61,8 +59,7 @@ public:
     tapeFiles.clear();
     archiveFileID=osaf.archivefileid();
     creationTime=osaf.creationtime();
-    checksumType=osaf.checksumtype();
-    checksumValue=osaf.checksumvalue();
+    checksumBlob.deserialize(osaf.checksumblob());
     diskFileId=osaf.diskfileid();
     DiskFileInfoSerDeser dfisd;
     dfisd.deserialize(osaf.diskfileinfo());
diff --git a/objectstore/ArchiveQueueAlgorithms.hpp b/objectstore/ArchiveQueueAlgorithms.hpp
index 2cb093f97a5fa7c20c469bbec26aee3291d0f221..21b22c17260376b37793e7843bd481929dc61053 100644
--- a/objectstore/ArchiveQueueAlgorithms.hpp
+++ b/objectstore/ArchiveQueueAlgorithms.hpp
@@ -204,25 +204,33 @@ bool ContainerTraits<ArchiveQueue,C>::
 trimContainerIfNeeded(Container& cont, ScopedExclusiveLock & contLock,
   const ContainerIdentifier & cId, log::LogContext& lc)
 {
+  log::TimingList tl;
+  cta::utils::Timer t;
   if (!cont.isEmpty())  return false;
   // The current implementation is done unlocked.
   contLock.release();
+  tl.insertAndReset("queueUnlockTime",t);
   try {
     // The queue should be removed as it is empty.
     ContainerTraits<ArchiveQueue,C>::QueueType queueType;
     RootEntry re(cont.m_objectStore);
     ScopedExclusiveLock rexl(re);
+    tl.insertAndReset("rootEntryLockTime",t);
     re.fetch();
+    tl.insertAndReset("rootEntryFetchTime",t);
     re.removeArchiveQueueAndCommit(cId, queueType.value, lc);
+    tl.insertAndReset("rootEntryRemoveArchiveQueueAndCommitTime",t);
     log::ScopedParamContainer params(lc);
     params.add("tapepool", cId)
           .add("queueObject", cont.getAddressIfSet());
+    tl.addToLog(params);
     lc.log(log::INFO, "In ContainerTraits<ArchiveQueue_t,ArchiveQueue>::trimContainerIfNeeded(): deleted empty queue");
   } catch (cta::exception::Exception &ex) {
     log::ScopedParamContainer params(lc);
     params.add("tapepool", cId)
           .add("queueObject", cont.getAddressIfSet())
           .add("Message", ex.getMessageValue());
+    tl.addToLog(params);
     lc.log(log::INFO, "In ContainerTraits<ArchiveQueue_t,ArchiveQueue>::trimContainerIfNeeded(): could not delete a presumably empty queue");
   }
   //queueRemovalTime += localQueueRemovalTime = t.secs(utils::Timer::resetCounter);
@@ -244,12 +252,16 @@ getLockedAndFetchedNoCreate(Container& cont, ScopedExclusiveLock& contLock, cons
 {
   // Try and get access to a queue.
   size_t attemptCount = 0;
+  log::TimingList tl;
   retry:
+  cta::utils::Timer t;
   objectstore::RootEntry re(cont.m_objectStore);
   re.fetchNoLock();
+  tl.insertAndReset("rootEntryFetchNoLockTime",t);
   std::string aqAddress;
   ContainerTraits<ArchiveQueue,C>::QueueType queueType;
   auto aql = re.dumpArchiveQueues(queueType.value);
+  tl.insertAndReset("rootEntryDumpArchiveQueueTime",t);
   for (auto & aqp : aql) {
     if (aqp.tapePool == cId)
       aqAddress = aqp.address;
@@ -259,33 +271,45 @@ getLockedAndFetchedNoCreate(Container& cont, ScopedExclusiveLock& contLock, cons
   cont.setAddress(aqAddress);
   //findQueueTime += localFindQueueTime = t.secs(utils::Timer::resetCounter);
   try {
-    if (contLock.isLocked()) contLock.release();
+    if (contLock.isLocked()) {
+      contLock.release();
+      tl.insertAndReset("queueUnlockTime",t);
+    }
+    t.reset();
     contLock.lock(cont);
+    tl.insertAndReset("queueLockTime",t);
     cont.fetch();
+    tl.insertAndReset("queueFetchTime",t);
     //lockFetchQueueTime += localLockFetchQueueTime = t.secs(utils::Timer::resetCounter);
   } catch (cta::exception::Exception & ex) {
     // The queue is now absent. We can remove its reference in the root entry.
     // A new queue could have been added in the mean time, and be non-empty.
     // We will then fail to remove from the RootEntry (non-fatal).
     ScopedExclusiveLock rexl(re);
+    tl.insertAndReset("rootEntryLockTime",t);
     re.fetch();
+    tl.insertAndReset("rootEntryFetchTime",t);
     try {
       re.removeArchiveQueueAndCommit(cId, queueType.value, lc);
+      tl.insertAndReset("rootEntryRemoveArchiveQueueAndCommitTime",t);
       log::ScopedParamContainer params(lc);
       params.add("tapepool", cId)
             .add("queueObject", cont.getAddressIfSet());
+      tl.addToLog(params);
       lc.log(log::INFO, "In ContainerTraits<ArchiveQueue,C>::getLockedAndFetchedNoCreate(): de-referenced missing queue from root entry");
     } catch (RootEntry::ArchiveQueueNotEmpty & ex) {
       log::ScopedParamContainer params(lc);
       params.add("tapepool", cId)
             .add("queueObject", cont.getAddressIfSet())
             .add("Message", ex.getMessageValue());
+      tl.addToLog(params);
       lc.log(log::INFO, "In ContainerTraits<ArchiveQueue,C>::getLockedAndFetchedNoCreate(): could not de-referenced missing queue from root entry");
     } catch (RootEntry::NoSuchArchiveQueue & ex) {
       // Somebody removed the queue in the mean time. Barely worth mentioning.
       log::ScopedParamContainer params(lc);
       params.add("tapepool", cId)
             .add("queueObject", cont.getAddressIfSet());
+      tl.addToLog(params);
       lc.log(log::DEBUG, "In ContainerTraits<ArchiveQueue,C>::getLockedAndFetchedNoCreate(): could not de-referenced missing queue from root entry: already done.");
     }
     //emptyQueueCleanupTime += localEmptyCleanupQueueTime = t.secs(utils::Timer::resetCounter);
diff --git a/objectstore/ArchiveRequest.cpp b/objectstore/ArchiveRequest.cpp
index 779246b3906eb9c7bf844b7b1f2312808d58618b..c4a72d0dd1fb313c958639c90923a1989ea98922 100644
--- a/objectstore/ArchiveRequest.cpp
+++ b/objectstore/ArchiveRequest.cpp
@@ -212,14 +212,12 @@ void ArchiveRequest::setArchiveFile(const cta::common::dataStructures::ArchiveFi
   checkPayloadWritable();
   // TODO: factor out the archivefile structure from the flat ArchiveRequest.
   m_payload.set_archivefileid(archiveFile.archiveFileID);
-  m_payload.set_checksumtype(archiveFile.checksumType);
-  m_payload.set_checksumvalue(archiveFile.checksumValue);
+  m_payload.set_checksumblob(archiveFile.checksumBlob.serialize());
   m_payload.set_creationtime(archiveFile.creationTime);
   m_payload.set_diskfileid(archiveFile.diskFileId);
-  m_payload.mutable_diskfileinfo()->set_group(archiveFile.diskFileInfo.group);
-  m_payload.mutable_diskfileinfo()->set_owner(archiveFile.diskFileInfo.owner);
+  m_payload.mutable_diskfileinfo()->set_gid(archiveFile.diskFileInfo.gid);
+  m_payload.mutable_diskfileinfo()->set_owner_uid(archiveFile.diskFileInfo.owner_uid);
   m_payload.mutable_diskfileinfo()->set_path(archiveFile.diskFileInfo.path);
-  m_payload.mutable_diskfileinfo()->set_recoveryblob("");
   m_payload.set_diskinstance(archiveFile.diskInstance);
   m_payload.set_filesize(archiveFile.fileSize);
   m_payload.set_reconcilationtime(archiveFile.reconciliationTime);
@@ -233,12 +231,11 @@ cta::common::dataStructures::ArchiveFile ArchiveRequest::getArchiveFile() {
   checkPayloadReadable();
   cta::common::dataStructures::ArchiveFile ret;
   ret.archiveFileID = m_payload.archivefileid();
-  ret.checksumType = m_payload.checksumtype();
-  ret.checksumValue = m_payload.checksumvalue();
+  ret.checksumBlob.deserialize(m_payload.checksumblob());
   ret.creationTime = m_payload.creationtime();
   ret.diskFileId = m_payload.diskfileid();
-  ret.diskFileInfo.group = m_payload.diskfileinfo().group();
-  ret.diskFileInfo.owner = m_payload.diskfileinfo().owner();
+  ret.diskFileInfo.gid = m_payload.diskfileinfo().gid();
+  ret.diskFileInfo.owner_uid = m_payload.diskfileinfo().owner_uid();
   ret.diskFileInfo.path = m_payload.diskfileinfo().path();
   ret.diskInstance = m_payload.diskinstance();
   ret.fileSize = m_payload.filesize();
@@ -300,7 +297,7 @@ cta::common::dataStructures::MountPolicy ArchiveRequest::getMountPolicy() {
 //------------------------------------------------------------------------------
 // ArchiveRequest::setRequester()
 //------------------------------------------------------------------------------
-void ArchiveRequest::setRequester(const cta::common::dataStructures::UserIdentity &requester) {
+void ArchiveRequest::setRequester(const cta::common::dataStructures::RequesterIdentity &requester) {
   checkPayloadWritable();
   auto payloadRequester = m_payload.mutable_requester();
   payloadRequester->set_name(requester.name);
@@ -310,9 +307,9 @@ void ArchiveRequest::setRequester(const cta::common::dataStructures::UserIdentit
 //------------------------------------------------------------------------------
 // ArchiveRequest::getRequester()
 //------------------------------------------------------------------------------
-cta::common::dataStructures::UserIdentity ArchiveRequest::getRequester() {
+cta::common::dataStructures::RequesterIdentity ArchiveRequest::getRequester() {
   checkPayloadReadable();
-  cta::common::dataStructures::UserIdentity requester;
+  cta::common::dataStructures::RequesterIdentity requester;
   auto payloadRequester = m_payload.requester();
   requester.name=payloadRequester.name();
   requester.group=payloadRequester.group();
@@ -384,12 +381,10 @@ void ArchiveRequest::garbageCollect(const std::string &presumedOwner, AgentRefer
   auto * jl = m_payload.mutable_jobs();
   bool anythingGarbageCollected=false;
   using serializers::ArchiveJobStatus;
-  std::set<ArchiveJobStatus> statusesImplyingQueueing ({ArchiveJobStatus::AJS_ToTransferForUser, ArchiveJobStatus::AJS_ToReportToUserForTransfer,
-      ArchiveJobStatus::AJS_ToReportToUserForFailure, ArchiveJobStatus::AJS_Failed});
   for (auto j=jl->begin(); j!=jl->end(); j++) {
     auto owner=j->owner();
     auto status=j->status();
-    if ( statusesImplyingQueueing.count(status) && owner==presumedOwner) {
+    if ( c_statusesImplyingQueueing.count(status) && owner==presumedOwner) {
       // The job is in a state which implies queuing.
       std::string queueObject="Not defined yet";
       anythingGarbageCollected=true;
@@ -399,7 +394,13 @@ void ArchiveRequest::garbageCollect(const std::string &presumedOwner, AgentRefer
         // recreated (this will be done by helper).
         ArchiveQueue aq(m_objectStore);
         ScopedExclusiveLock aql;
-        Helpers::getLockedAndFetchedJobQueue<ArchiveQueue>(aq, aql, agentReference, j->tapepool(), getQueueType(status), lc);
+        std::string containerId;
+        if(!c_statusesImplyingQueueingByRepackRequestAddress.count(status)){
+          containerId = j->tapepool();
+        } else {
+          containerId = m_payload.repack_info().repack_request_address();
+        }
+        Helpers::getLockedAndFetchedJobQueue<ArchiveQueue>(aq, aql, agentReference, containerId, getQueueType(status), lc);
         queueObject=aq.getAddressIfSet();
         ArchiveRequest::JobDump jd;
         jd.copyNb = j->copynb();
@@ -545,12 +546,11 @@ ArchiveRequest::AsyncJobOwnerUpdater* ArchiveRequest::asyncUpdateJobOwner(uint32
             // TODO this is an unfortunate duplication of the getXXX() members of ArchiveRequesgetLockedAndFetchedJobQueuet.
             // We could try and refactor this.
             retRef.m_archiveFile.archiveFileID = payload.archivefileid();
-            retRef.m_archiveFile.checksumType = payload.checksumtype();
-            retRef.m_archiveFile.checksumValue = payload.checksumvalue();
+            retRef.m_archiveFile.checksumBlob.deserialize(payload.checksumblob());
             retRef.m_archiveFile.creationTime = payload.creationtime();
             retRef.m_archiveFile.diskFileId = payload.diskfileid();
-            retRef.m_archiveFile.diskFileInfo.group = payload.diskfileinfo().group();
-            retRef.m_archiveFile.diskFileInfo.owner = payload.diskfileinfo().owner();
+            retRef.m_archiveFile.diskFileInfo.gid = payload.diskfileinfo().gid();
+            retRef.m_archiveFile.diskFileInfo.owner_uid = payload.diskfileinfo().owner_uid();
             retRef.m_archiveFile.diskFileInfo.path = payload.diskfileinfo().path();
             retRef.m_archiveFile.diskInstance = payload.diskinstance();
             retRef.m_archiveFile.fileSize = payload.filesize();
@@ -689,7 +689,7 @@ ArchiveRequest::AsyncTransferSuccessfulUpdater * ArchiveRequest::asyncUpdateTran
             return oh.SerializeAsString();
           }
         }
-      } else { // Repack case, the report policy is different (report all jobs). So we just the job's status.
+      } else { // Repack case, the report policy is different (report all jobs). So we just change the job's status.
         for (auto j: *payload.mutable_jobs()) {
           if (j.copynb() == copyNumber) {
             j.set_status(serializers::ArchiveJobStatus::AJS_ToReportToRepackForSuccess);
@@ -752,6 +752,10 @@ JobQueueType ArchiveRequest::getQueueType(const serializers::ArchiveJobStatus& s
   case ArchiveJobStatus::AJS_ToReportToUserForTransfer:
   case ArchiveJobStatus::AJS_ToReportToUserForFailure:
     return JobQueueType::JobsToReportToUser;
+  case ArchiveJobStatus::AJS_ToReportToRepackForSuccess:
+    return JobQueueType::JobsToReportToRepackForSuccess;
+  case ArchiveJobStatus::AJS_ToReportToRepackForFailure:
+    return JobQueueType::JobsToReportToRepackForFailure; 
   case ArchiveJobStatus::AJS_Failed:
     return JobQueueType::FailedJobs;
   default:
@@ -819,7 +823,7 @@ auto ArchiveRequest::determineNextStep(uint32_t copyNumberUpdated, JobEvent jobE
   if (!currentStatus) {
     std::stringstream err;
     err << "In ArchiveRequest::updateJobStatus(): copynb not found : " << copyNumberUpdated
-        << "exiing ones: ";
+        << "existing ones: ";
     for (auto &j: jl) err << j.copynb() << "  ";
     throw cta::exception::Exception(err.str());
   }
diff --git a/objectstore/ArchiveRequest.hpp b/objectstore/ArchiveRequest.hpp
index 386f73b75e953ae43efa0ce2ea61431ebb72a7ae..6936f967a1c55b695804566d571279b364bfc9e8 100644
--- a/objectstore/ArchiveRequest.hpp
+++ b/objectstore/ArchiveRequest.hpp
@@ -22,7 +22,7 @@
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/EntryLog.hpp"
 #include "common/dataStructures/MountPolicy.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
+#include "common/dataStructures/RequesterIdentity.hpp"
 #include "common/dataStructures/ArchiveFile.hpp"
 #include "JobQueueType.hpp"
 #include "common/Timer.hpp"
@@ -83,6 +83,13 @@ public:
      * success/failure scenario. */
     serializers::ArchiveJobStatus nextStatus;
   };
+  const std::set<serializers::ArchiveJobStatus> c_statusesImplyingQueueing = {serializers::ArchiveJobStatus::AJS_ToTransferForUser, serializers::ArchiveJobStatus::AJS_ToReportToUserForTransfer,
+      serializers::ArchiveJobStatus::AJS_ToReportToUserForFailure, serializers::ArchiveJobStatus::AJS_Failed,
+      serializers::ArchiveJobStatus::AJS_ToTransferForRepack, serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure,
+      serializers::ArchiveJobStatus::AJS_ToReportToRepackForSuccess
+  };
+  const std::set<serializers::ArchiveJobStatus> c_statusesImplyingQueueingByRepackRequestAddress {serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure,
+      serializers::ArchiveJobStatus::AJS_ToReportToRepackForSuccess};
 private:
   /**
    * Determine and set the new status of the job and determine whether and where the request should be queued 
@@ -206,8 +213,8 @@ public:
   void setArchiveErrorReportURL(const std::string &URL);
   std::string getArchiveErrorReportURL();
 
-  void setRequester(const cta::common::dataStructures::UserIdentity &requester);
-  cta::common::dataStructures::UserIdentity getRequester();
+  void setRequester(const cta::common::dataStructures::RequesterIdentity &requester);
+  cta::common::dataStructures::RequesterIdentity getRequester();
 
   void setSrcURL(const std::string &srcURL);
   std::string getSrcURL();
diff --git a/objectstore/DiskFileInfoSerDeser.hpp b/objectstore/DiskFileInfoSerDeser.hpp
index b8564fa6528665f6eaa259a0d26bd1e8bd06cadb..9d6ad03556b0afdc8e0e5856701dd77b3e270fcc 100644
--- a/objectstore/DiskFileInfoSerDeser.hpp
+++ b/objectstore/DiskFileInfoSerDeser.hpp
@@ -1,4 +1,4 @@
-/*
+/**
  * The CERN Tape Archive (CTA) project
  * Copyright (C) 2015  CERN
  *
@@ -18,7 +18,6 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
 #include "objectstore/cta.pb.h"
 #include "common/dataStructures/DiskFileInfo.hpp"
 
@@ -27,33 +26,29 @@
 #include <limits>
 
 namespace cta { namespace objectstore {
+
 /**
  * A decorator class of scheduler's creation log adding serialization.
  */
-class DiskFileInfoSerDeser: public cta::common::dataStructures::DiskFileInfo {
-public:
-  DiskFileInfoSerDeser (): cta::common::dataStructures::DiskFileInfo() {}
-  DiskFileInfoSerDeser (const cta::common::dataStructures::DiskFileInfo & dfi): cta::common::dataStructures::DiskFileInfo(dfi) {}
-  DiskFileInfoSerDeser (const std::string & path, const std::string & owner, const std::string & group): 
-    cta::common::dataStructures::DiskFileInfo() {
-    this->path=path;
-    this->owner=owner;
-    this->group=group;
-  }
+struct DiskFileInfoSerDeser: public cta::common::dataStructures::DiskFileInfo {
+  DiskFileInfoSerDeser() : cta::common::dataStructures::DiskFileInfo() {}
+  DiskFileInfoSerDeser(const cta::common::dataStructures::DiskFileInfo &dfi) : cta::common::dataStructures::DiskFileInfo(dfi) {}
+
   operator cta::common::dataStructures::DiskFileInfo() {
     return cta::common::dataStructures::DiskFileInfo(*this);
-  } 
+  }
+
   void serialize (cta::objectstore::serializers::DiskFileInfo & osdfi) const {
     osdfi.set_path(path);
-    osdfi.set_owner(owner);
-    osdfi.set_group(group);
-    osdfi.set_recoveryblob("");
+    osdfi.set_owner_uid(owner_uid);
+    osdfi.set_gid(gid);
   }
+
   void deserialize (const cta::objectstore::serializers::DiskFileInfo & osdfi) {
-    path=osdfi.path();
-    owner=osdfi.owner();
-    group=osdfi.group();
+    path      = osdfi.path();
+    owner_uid = osdfi.owner_uid();
+    gid       = osdfi.gid();
   }
 };
-  
+
 }}
diff --git a/objectstore/EntryLogSerDeser.hpp b/objectstore/EntryLogSerDeser.hpp
index 4a99bc0e4a39957bea2b7392709a1f2999294448..e0d9cf701b4931f20b2ea4c9c836d9570b93f1f8 100644
--- a/objectstore/EntryLogSerDeser.hpp
+++ b/objectstore/EntryLogSerDeser.hpp
@@ -18,7 +18,6 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
 #include "objectstore/cta.pb.h"
 #include "common/dataStructures/EntryLog.hpp"
 
diff --git a/objectstore/GarbageCollector.cpp b/objectstore/GarbageCollector.cpp
index 1c527fe5121671581a57717654d3add3dd55075b..a2626922492b5b057526eea2540bf67fbfaa883f 100644
--- a/objectstore/GarbageCollector.cpp
+++ b/objectstore/GarbageCollector.cpp
@@ -303,14 +303,21 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
         obj.reset();
         bool jobRequeued=false;
         for (auto &j: ar->dumpJobs()) {
-          if ((j.owner == agent.getAddressIfSet())) {
+          if ((j.owner == agent.getAddressIfSet() && ar->c_statusesImplyingQueueing.count(j.status))) {
+            std::string containerIdentifier;
             try {
-              archiveQueuesAndRequests[std::make_tuple(j.tapePool, ar->getJobQueueType(j.copyNb))].emplace_back(ar);
+              if(ar->c_statusesImplyingQueueingByRepackRequestAddress.count(j.status)){
+                containerIdentifier = ar->getRepackInfo().repackRequestAddress;
+              } else {
+                containerIdentifier = j.tapePool;
+              }
+              archiveQueuesAndRequests[std::make_tuple(containerIdentifier, ar->getJobQueueType(j.copyNb),j.tapePool)].emplace_back(ar);
               log::ScopedParamContainer params3(lc);
               params3.add("tapePool", j.tapePool)
+                     .add("containerIdentifier", containerIdentifier)
                      .add("copynb", j.copyNb)
                      .add("fileId", ar->getArchiveFile().archiveFileID);
-              lc.log(log::INFO, "Selected archive request for requeueing to tape pool");
+              lc.log(log::INFO, "Selected archive request for requeueing to the corresponding queue");
               jobRequeued=true;
             } catch (ArchiveRequest::JobNotQueueable &) {}
           }
@@ -339,9 +346,17 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
         }
         // Small parenthesis for non transfer cases.
         if (candidateVids.empty()) {
-          // The request might need to be added to the failed to report of failed queue/container.
+          //If the queueType of the RetrieveRequest is FailedJobs or JobsToReportToUser, it needs to be requeued in a queue identified by the vid of the tape
+          //If queueType is JobsToReportToRepackForSuccess or JobsToReportToRepackForFailure, it needs to be requeued in a queue identified by the RepackRequest's address
           try {
-            retrieveQueuesAndRequests[std::make_tuple(rr->getArchiveFile().tapeFiles.begin()->vid, rr->getQueueType())].emplace_back(rr);
+            std::string vid = rr->getArchiveFile().tapeFiles.begin()->vid;
+            if(rr->getQueueType() != JobQueueType::FailedJobs && rr->getQueueType() != JobQueueType::JobsToReportToUser){
+              retrieveQueuesAndRequests[std::make_tuple(rr->getRepackInfo().repackRequestAddress, rr->getQueueType(),vid)].emplace_back(rr);
+            } else {
+              // The request has failed, so it might need to be added to the "to report" or "failed" queue/container.
+              retrieveQueuesAndRequests[std::make_tuple(vid, rr->getQueueType(),vid)].emplace_back(rr);
+            }
+            break;
           } catch (cta::exception::Exception & ex) {
             log::ScopedParamContainer params3(lc);
             params3.add("fileId", rr->getArchiveFile().archiveFileID)
@@ -362,7 +377,7 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
           otherObjects.emplace_back(new GenericObject(rr->getAddressIfSet(), objectStore));
           break;
         }
-        retrieveQueuesAndRequests[std::make_tuple(vid, JobQueueType::JobsToTransferForUser)].emplace_back(rr);
+        retrieveQueuesAndRequests[std::make_tuple(vid, JobQueueType::JobsToTransferForUser,vid)].emplace_back(rr);
         log::ScopedParamContainer params3(lc);
         // Find copyNb for logging
         size_t copyNb = std::numeric_limits<size_t>::max();
@@ -373,8 +388,8 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
                .add("tapeVid", vid)
                .add("fSeq", fSeq);
         lc.log(log::INFO, "Selected vid to be requeued for retrieve request.");
-        break;
       }
+      break;
       default:
         // For other objects, we will not implement any optimization and simply call
         // their individual garbageCollect method.
@@ -389,6 +404,109 @@ void GarbageCollector::OwnedObjectSorter::sortFetchedObjects(Agent& agent, std::
   fetchedObjects.clear();
 }
 
+template<typename ArchiveSpecificQueue>
+void GarbageCollector::OwnedObjectSorter::executeArchiveAlgorithm(std::list<std::shared_ptr<ArchiveRequest>> &jobs,std::string &queueAddress, const std::string& containerIdentifier, const std::string& tapepool, 
+        std::set<std::string> & jobsIndividuallyGCed, Agent& agent, AgentReference& agentReference, 
+        Backend &objectStore, log::LogContext& lc)
+{
+  typedef ContainerAlgorithms<ArchiveQueue,ArchiveSpecificQueue> AqAlgos;
+  AqAlgos aqcl(objectStore, agentReference);
+  typename decltype(aqcl)::InsertedElement::list jobsToAdd;
+  for (auto & ar: jobs) {
+    // Determine the copy number and feed the queue with it.
+    for (auto &j: ar->dumpJobs()) {
+      if (j.tapePool == tapepool) {
+        jobsToAdd.push_back({ar.get(), j.copyNb, ar->getArchiveFile(), ar->getMountPolicy(), cta::nullopt});         
+      }
+    }
+  }
+  std::set<std::string> jobsNotRequeued;
+  try {
+    aqcl.referenceAndSwitchOwnershipIfNecessary(containerIdentifier, agent.getAddressIfSet(), queueAddress, jobsToAdd, lc);
+  } catch (typename AqAlgos::OwnershipSwitchFailure & failure) {
+    for (auto &failedAR: failure.failedElements) {
+      try {
+        std::rethrow_exception(failedAR.failure);
+      } catch (cta::exception::Exception & e) {
+        // Update did not go through. It could be benign
+        std::string debugType=typeid(e).name();
+        auto & arup=*failedAR.element;
+        jobsNotRequeued.insert(arup.archiveRequest->getAddressIfSet());
+        if (typeid(e) == typeid(Backend::NoSuchObject) || typeid(e) == typeid(Backend::WrongPreviousOwner)) {
+          // The object was not present or not owned during update, so we skip it.
+          // This is nevertheless unexpected (from previous fetch, so this is an error).
+          log::ScopedParamContainer params(lc);
+          params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
+                .add("copyNb", arup.copyNb)
+                .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
+                .add("exceptionType", debugType);
+          lc.log(log::ERR, 
+              "In GarbageCollector::OwnedObjectSorter::executeArchiveAlgorithm(): "
+              "failed to requeue gone/not owned archive job. Removed from queue.");
+        } else {
+          // We have an unexpected error. We will handle this with the request-by-request garbage collection.
+          log::ScopedParamContainer params(lc);
+          params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
+                .add("copyNb", arup.copyNb)
+                .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
+                .add("exceptionType", debugType)
+                .add("exceptionMessage", e.getMessageValue());
+          lc.log(log::ERR, "In GarbageCollector::OwnedObjectSorter::executeArchiveAlgorithm(): "
+              "failed to requeue archive job with unexpected error. "
+              "Removing from queue and will re-run individual garbage collection.");
+          // We will re-run the individual GC for this one.
+          jobsIndividuallyGCed.insert(arup.archiveRequest->getAddressIfSet());
+          otherObjects.emplace_back(new GenericObject(arup.archiveRequest->getAddressIfSet(), objectStore));
+        }
+      }
+    }
+  }
+  // We can now log individually requeued jobs.
+  for (auto & arup: jobsToAdd) {
+    if (!jobsNotRequeued.count(arup.archiveRequest->getAddressIfSet())) {
+        // OK, the job made it to the queue
+        log::ScopedParamContainer params(lc);
+        params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
+              .add("copyNb", arup.copyNb)
+              .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
+              .add("tapePool", tapepool)
+              .add("archiveQueueObject", queueAddress)
+              .add("garbageCollectedPreviousOwner", agent.getAddressIfSet());
+        lc.log(log::INFO, "In GarbageCollector::OwnedObjectSorter::executeArchiveAlgorithm(): requeued archive job.");
+    }
+  }
+  jobsToAdd.clear();
+}
+
+std::string GarbageCollector::OwnedObjectSorter::dispatchArchiveAlgorithms(std::list<std::shared_ptr<ArchiveRequest>> &jobs,const JobQueueType& jobQueueType, const std::string& containerIdentifier,
+        const std::string& tapepool,std::set<std::string> & jobsIndividuallyGCed, 
+        Agent& agent, AgentReference& agentReference, Backend & objectstore, log::LogContext &lc) {
+  std::string queueAddress;
+  switch(jobQueueType){
+    case JobQueueType::JobsToTransferForUser:
+      executeArchiveAlgorithm<ArchiveQueueToTransferForUser>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    case JobQueueType::JobsToReportToUser:
+      executeArchiveAlgorithm<ArchiveQueueToReportForUser>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    case JobQueueType::JobsToTransferForRepack:
+      executeArchiveAlgorithm<ArchiveQueueToTransferForRepack>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    case JobQueueType::JobsToReportToRepackForSuccess:
+      executeArchiveAlgorithm<ArchiveQueueToReportToRepackForSuccess>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    case JobQueueType::JobsToReportToRepackForFailure:
+      executeArchiveAlgorithm<ArchiveQueueToReportToRepackForFailure>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    case JobQueueType::FailedJobs:
+      executeArchiveAlgorithm<ArchiveQueueFailed>(jobs,queueAddress,containerIdentifier,tapepool, jobsIndividuallyGCed, agent, agentReference, objectstore, lc);
+      break;
+    default:
+      break;
+  }
+  return queueAddress;
+}
+
 //TODO : We should record the VID in the ArchiveRequest object to allow the requeueing in the proper report queue (currently, the report queue is selected
 //by tapepool, which works but is not the most efficient way to report the request (contention problem)
 void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(Agent& agent, AgentReference& agentReference, Backend & objectStore,
@@ -401,9 +519,10 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(Agent& a
     // The number of objects to requeue could be very high. In order to limit the time taken by the
     // individual requeue operations, we limit the number of concurrently requeued objects to an 
     // arbitrary 500.
+    std::string containerIdentifier;
     std::string tapepool;
     JobQueueType queueType;
-    std::tie(tapepool, queueType) = archiveQueueIdAndReqs.first;
+    std::tie(containerIdentifier, queueType, tapepool) = archiveQueueIdAndReqs.first;
     auto & requestsList = archiveQueueIdAndReqs.second;
     while (requestsList.size()) {
       decltype (archiveQueueIdAndReqs.second) currentJobBatch;
@@ -411,76 +530,11 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(Agent& a
         currentJobBatch.emplace_back(std::move(requestsList.front()));
         requestsList.pop_front();
       }
-      utils::Timer t;
-      typedef ContainerAlgorithms<ArchiveQueue,ArchiveQueueToTransferForUser> AqAlgos;
-      AqAlgos aqcl(objectStore, agentReference);
-      decltype(aqcl)::InsertedElement::list jobsToAdd;
-      for (auto & ar: currentJobBatch) {
-        // Determine the copy number and feed the queue with it.
-        for (auto &j: ar->dumpJobs()) {
-          if (j.tapePool == tapepool) {
-            jobsToAdd.push_back({ar.get(), j.copyNb, ar->getArchiveFile(), ar->getMountPolicy(), cta::nullopt});         
-          }
-        }
-      }
       std::set<std::string> jobsIndividuallyGCed;
-      std::set<std::string> jobsNotRequeued;
-      std::string queueAddress;
-      try {
-        aqcl.referenceAndSwitchOwnershipIfNecessary(tapepool, agent.getAddressIfSet(), queueAddress, jobsToAdd, lc);
-      } catch (AqAlgos::OwnershipSwitchFailure & failure) {
-        for (auto &failedAR: failure.failedElements) {
-          try {
-            std::rethrow_exception(failedAR.failure);
-          } catch (cta::exception::Exception & e) {
-            // Update did not go through. It could be benign
-            std::string debugType=typeid(e).name();
-            auto & arup=*failedAR.element;
-            jobsNotRequeued.insert(arup.archiveRequest->getAddressIfSet());
-            if (typeid(e) == typeid(Backend::NoSuchObject) || typeid(e) == typeid(Backend::WrongPreviousOwner)) {
-              // The object was not present or not owned during update, so we skip it.
-              // This is nevertheless unexpected (from previous fetch, so this is an error).
-              log::ScopedParamContainer params(lc);
-              params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
-                    .add("copyNb", arup.copyNb)
-                    .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
-                    .add("exceptionType", debugType);
-              lc.log(log::ERR, 
-                  "In GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(): "
-                  "failed to requeue gone/not owned archive job. Removed from queue.");
-            } else {
-              // We have an unexpected error. We will handle this with the request-by-request garbage collection.
-              log::ScopedParamContainer params(lc);
-              params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
-                    .add("copyNb", arup.copyNb)
-                    .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
-                    .add("exceptionType", debugType)
-                    .add("exceptionMessage", e.getMessageValue());
-              lc.log(log::ERR, "In GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(): "
-                  "failed to requeue archive job with unexpected error. "
-                  "Removing from queue and will re-run individual garbage collection.");
-              // We will re-run the individual GC for this one.
-              jobsIndividuallyGCed.insert(arup.archiveRequest->getAddressIfSet());
-              otherObjects.emplace_back(new GenericObject(arup.archiveRequest->getAddressIfSet(), objectStore));
-            }
-          }
-        }
-      }
-      // We can now log individually requeued jobs.
-      for (auto & arup: jobsToAdd) {
-        if (!jobsNotRequeued.count(arup.archiveRequest->getAddressIfSet())) {
-            // OK, the job made it to the queue
-            log::ScopedParamContainer params(lc);
-            params.add("archiveRequestObject", arup.archiveRequest->getAddressIfSet())
-                  .add("copyNb", arup.copyNb)
-                  .add("fileId", arup.archiveRequest->getArchiveFile().archiveFileID)
-                  .add("tapePool", tapepool)
-                  .add("archiveQueueObject", queueAddress)
-                  .add("garbageCollectedPreviousOwner", agent.getAddressIfSet());
-            lc.log(log::INFO, "In GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateArchiveJobs(): requeued archive job.");
-        }
-      }
-      jobsToAdd.clear();
+      utils::Timer t;
+      //Dispatch the archive algorithms
+      dispatchArchiveAlgorithms(currentJobBatch,queueType,containerIdentifier,tapepool,jobsIndividuallyGCed,agent,agentReference,objectStore,lc);
+      
       // We can now forget pool level list. But before that, we can remove the objects 
       // from agent ownership if this was the last reference to it.
       // The usage of use_count() is safe here because we are in a single threaded environment.
@@ -520,9 +574,10 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateRetrieveJobs(Agent&
   // 2) Get the retrieve requests done. They are simpler as retrieve requests are fully owned.
   // Then should hence not have changes since we pre-fetched them.
   for (auto & retriveQueueIdAndReqs: retrieveQueuesAndRequests) {
-    std::string vid;
+    std::string containerIdentifier;
     JobQueueType queueType;
-    std::tie(vid, queueType) = retriveQueueIdAndReqs.first;
+    std::string vid;
+    std::tie(containerIdentifier, queueType, vid) = retriveQueueIdAndReqs.first;
     auto & requestsList = retriveQueueIdAndReqs.second;
     while (requestsList.size()) {
       decltype (retriveQueueIdAndReqs.second) currentJobBatch;
@@ -545,7 +600,7 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateRetrieveJobs(Agent&
       // Get the retrieve queue and add references to the jobs to it.
       RetrieveQueue rq(objectStore);
       ScopedExclusiveLock rql;
-      Helpers::getLockedAndFetchedJobQueue<RetrieveQueue>(rq,rql, agentReference, vid, queueType, lc);
+      Helpers::getLockedAndFetchedJobQueue<RetrieveQueue>(rq,rql, agentReference, containerIdentifier, queueType, lc);
       queueLockFetchTime = t.secs(utils::Timer::resetCounter);
       auto jobsSummary=rq.getJobsSummary();
       filesBefore=jobsSummary.jobs;
@@ -557,7 +612,7 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateRetrieveJobs(Agent&
       for (auto & rr: currentJobBatch) {
         // Determine the copy number and feed the queue with it.
         for (auto &tf: rr->getArchiveFile().tapeFiles) {
-          if (tf.vid == vid) {
+          if (tf.vid == vid) {
             jta.push_back({tf.copyNb, tf.fSeq, rr->getAddressIfSet(), rr->getArchiveFile().fileSize, 
                 rr->getRetrieveFileQueueCriteria().mountPolicy, rr->getEntryLog().time, rr->getActivity(), rr->getDiskSystemName()});
           }
diff --git a/objectstore/GarbageCollector.hpp b/objectstore/GarbageCollector.hpp
index f9eedf80f1ded862309dd40bfa573749ddac5de5..b84a2728ed98bc5ec9034db936aab9024cdcd00e 100644
--- a/objectstore/GarbageCollector.hpp
+++ b/objectstore/GarbageCollector.hpp
@@ -28,7 +28,7 @@
 
 /**
  * Plan => Garbage collector keeps track of the agents.
- * If an agent is declared dead => tape ownership of owned objects
+ * If an agent is declared dead => take ownership of owned objects
  * Using the backup owner, re-post the objet to the container.
  * All containers will have a "repost" method, which is more thorough 
  * (and expensive) than the usual one. It can for example prevent double posting.
@@ -56,8 +56,10 @@ public:
   /** Structure allowing the sorting of owned objects, so they can be requeued in batches,
     * one batch per queue. */
   struct OwnedObjectSorter {
-    std::map<std::tuple<std::string, JobQueueType>, std::list<std::shared_ptr <ArchiveRequest>>> archiveQueuesAndRequests;
-    std::map<std::tuple<std::string, JobQueueType>, std::list<std::shared_ptr <RetrieveRequest>>> retrieveQueuesAndRequests;
+    //tuple[0] = containerIdentifier (tapepool or Repack Request's address), tuple[1]=jobQueueType, tuple[2]=tapepoolOfTheJob
+    std::map<std::tuple<std::string, JobQueueType ,std::string>, std::list<std::shared_ptr <ArchiveRequest>>> archiveQueuesAndRequests;
+    //tuple[0] = containerIdentifier (vid or Repack Request's address), tuple[1]=jobQueueType, tuple[2]=vidOfTheJob
+    std::map<std::tuple<std::string, JobQueueType,std::string>, std::list<std::shared_ptr <RetrieveRequest>>> retrieveQueuesAndRequests;
     std::list<std::shared_ptr<GenericObject>> otherObjects;
     //Sorter m_sorter;
     /// Fill up the fetchedObjects with objects of interest.
@@ -74,6 +76,16 @@ public:
     void lockFetchAndUpdateOtherObjects(Agent & agent, AgentReference & agentReference, Backend & objectStore,
         cta::catalogue::Catalogue & catalogue, log::LogContext & lc);
     //Sorter& getSorter();
+    
+  private:
+    std::string dispatchArchiveAlgorithms(std::list<std::shared_ptr<ArchiveRequest>> &jobs,const JobQueueType& jobQueueType, const std::string& containerIdentifier,
+        const std::string& tapepool,std::set<std::string> & jobsIndividuallyGCed, 
+        Agent& agent, AgentReference& agentReference, Backend & objectstore, log::LogContext &lc);
+    
+    template<typename ArchiveSpecificQueue>
+    void executeArchiveAlgorithm(std::list<std::shared_ptr<ArchiveRequest>> &jobs,std::string &queueAddress, const std::string& containerIdentifier, const std::string& tapepool, 
+        std::set<std::string> & jobsIndividuallyGCed, Agent& agent, AgentReference& agentReference, 
+        Backend &objectStore, log::LogContext& lc);
   };
   
 private:
@@ -82,7 +94,6 @@ private:
   AgentReference & m_ourAgentReference;
   AgentRegister m_agentRegister;
   std::map<std::string, AgentWatchdog * > m_watchedAgents;
-  //void garbageCollectArchiveRequests(Agent& agent, OwnedObjectSorter &ownedObjectSorter,log::LogContext & lc);
 };
   
 }}
diff --git a/objectstore/GarbageCollectorTest.cpp b/objectstore/GarbageCollectorTest.cpp
index b6a97711c72e45240fceb60e9c0caea6a6b86cd1..ada1a4bce885e7609b6a55e06ed85e718048ba40 100644
--- a/objectstore/GarbageCollectorTest.cpp
+++ b/objectstore/GarbageCollectorTest.cpp
@@ -308,8 +308,7 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) {
   re.initialize();
   re.insert();
   // Create the agent register
-    cta::objectstore::EntryLogSerDeser el("user0",
-      "unittesthost", time(NULL));
+  cta::objectstore::EntryLogSerDeser el("user0", "unittesthost", time(NULL));
   cta::objectstore::ScopedExclusiveLock rel(re);
   // Create the agent for objects creation
   cta::objectstore::AgentReference agentRef("unitTestCreateEnv", dl);
@@ -361,8 +360,7 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) {
     cta::common::dataStructures::ArchiveFile aFile;
     aFile.archiveFileID = 123456789L;
     aFile.diskFileId = "eos://diskFile";
-    aFile.checksumType = "";
-    aFile.checksumValue = "";
+    aFile.checksumBlob.insert(cta::checksum::NONE, "");
     aFile.creationTime = 0;
     aFile.reconciliationTime = 0;
     aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -376,7 +374,7 @@ TEST(ObjectStore, GarbageCollectorArchiveRequest) {
     ar.setMountPolicy(mp);
     ar.setArchiveReportURL("");
     ar.setArchiveErrorReportURL("");
-    ar.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+    ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
     ar.setSrcURL("root://eoseos/myFile");
     ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
     ar.insert();
@@ -553,8 +551,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
     cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
     rqc.archiveFile.archiveFileID = 123456789L;
     rqc.archiveFile.diskFileId = "eos://diskFile";
-    rqc.archiveFile.checksumType = "";
-    rqc.archiveFile.checksumValue = "";
+    rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
     rqc.archiveFile.creationTime = 0;
     rqc.archiveFile.reconciliationTime = 0;
     rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -564,8 +561,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
     {
       cta::common::dataStructures::TapeFile tf;
       tf.blockId=0;
-      tf.compressedSize=1;
-      tf.compressedSize=1;
+      tf.fileSize=1;
       tf.copyNb=1;
       tf.creationTime=time(nullptr);
       tf.fSeq=pass;
@@ -575,8 +571,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
     {
       cta::common::dataStructures::TapeFile tf;
       tf.blockId=0;
-      tf.compressedSize=1;
-      tf.compressedSize=1;
+      tf.fileSize=1;
       tf.copyNb=2;
       tf.creationTime=time(nullptr);
       tf.fSeq=pass;
@@ -726,6 +721,7 @@ TEST(ObjectStore, GarbageCollectorRepackRequestPending) {
     repackRequest.setVid("VIDTest");
     repackRequest.setBufferURL("test/buffer/url");
     repackRequest.setOwner(agentReferenceRepackRequest.getAgentAddress());
+    repackRequest.setMountPolicy(cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack);
     repackRequest.insert();
   }
   {
@@ -806,6 +802,7 @@ TEST(ObjectStore, GarbageCollectorRepackRequestToExpand) {
     repackRequest.setVid("VID2Test");
     repackRequest.setBufferURL("test/buffer/url");
     repackRequest.setOwner(agentReferenceRepackRequest.getAgentAddress());
+    repackRequest.setMountPolicy(cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack);
     repackRequest.insert();
   }
   {
@@ -886,6 +883,7 @@ TEST(ObjectStore, GarbageCollectorRepackRequestRunningExpandNotFinished) {
     repackRequest.setBufferURL("test/buffer/url");
     repackRequest.setOwner(agentReferenceRepackRequest.getAgentAddress());
     repackRequest.setExpandFinished(false);
+    repackRequest.setMountPolicy(cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack);
     repackRequest.insert();
   }
   {
@@ -967,6 +965,7 @@ TEST(ObjectStore, GarbageCollectorRepackRequestRunningExpandFinished) {
     repackRequest.setBufferURL("test/buffer/url");
     repackRequest.setOwner(agentReferenceRepackRequest.getAgentAddress());
     repackRequest.setExpandFinished(true);
+    repackRequest.setMountPolicy(cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack);
     repackRequest.insert();
   }
   cta::log::StringLogger strLogger("dummy", "dummy", cta::log::DEBUG);
@@ -1065,6 +1064,7 @@ TEST(ObjectStore, GarbageCollectorRepackRequestStarting) {
     repackRequest.setBufferURL("test/buffer/url");
     repackRequest.setOwner(agentReferenceRepackRequest.getAgentAddress());
     repackRequest.setExpandFinished(true);
+    repackRequest.setMountPolicy(cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack);
     repackRequest.insert();
   }
   cta::log::StringLogger strLogger("dummy", "dummy", cta::log::DEBUG);
@@ -1089,4 +1089,1039 @@ TEST(ObjectStore, GarbageCollectorRepackRequestStarting) {
   ASSERT_NE(std::string::npos,logToCheck.find("MSG=\"In RepackRequest::garbageCollect(): failed to requeue the RepackRequest (leaving it as it is) : The status Starting have no corresponding queue.\""));
 }
 
+TEST(ObjectStore, GarbageCollectorRetrieveAllStatusesAndQueues) {
+// We will need a log object
+#ifdef STDOUT_LOGGING
+  cta::log::StdoutLogger dl("dummy", "unitTest");
+#else
+  cta::log::DummyLogger dl("dummy", "unitTest");
+#endif
+  cta::log::LogContext lc(dl);
+  // We need a dummy catalogue
+  cta::catalogue::DummyCatalogue catalogue;
+  // Here we check that we can successfully call RetrieveRequest's garbage collector
+  cta::objectstore::BackendVFS be;
+  // Create the root entry
+  cta::objectstore::RootEntry re(be);
+  re.initialize();
+  re.insert();
+  // Create the agent register
+  cta::objectstore::EntryLogSerDeser el("user0",
+      "unittesthost", time(NULL));
+  cta::objectstore::ScopedExclusiveLock rel(re);
+  // Create the agent for objects creation
+  cta::objectstore::AgentReference agentRef("unitTestCreateEnv", dl);
+  // Finish root creation.
+  re.addOrGetAgentRegisterPointerAndCommit(agentRef, el, lc);
+  rel.release();
+  // continue agent creation.
+  cta::objectstore::Agent agent(agentRef.getAgentAddress(), be);
+  agent.initialize();
+  agent.setTimeout_us(10000);
+  agent.insertAndRegisterSelf(lc);
+  // Create all agents to be garbage collected
+  cta::objectstore::AgentReference agentRefToTransferForUser("ToTransferForUser", dl);
+  cta::objectstore::Agent agentToTransferForUser(agentRefToTransferForUser.getAgentAddress(), be);
+  agentToTransferForUser.initialize();
+  agentToTransferForUser.setTimeout_us(0);
+  agentToTransferForUser.insertAndRegisterSelf(lc);
+  
+  std::string retrieveRequestAddress = agentRefToTransferForUser.nextId("RetrieveRequest");
+  agentRefToTransferForUser.addToOwnership(retrieveRequestAddress, be);
+  
+  cta::objectstore::RetrieveRequest rr(retrieveRequestAddress, be);
+  
+  rr.initialize();
+  cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
+  rqc.archiveFile.archiveFileID = 123456789L;
+  rqc.archiveFile.diskFileId = "eos://diskFile";
+  rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
+  rqc.archiveFile.creationTime = 0;
+  rqc.archiveFile.reconciliationTime = 0;
+  rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
+  rqc.archiveFile.diskInstance = "eoseos";
+  rqc.archiveFile.fileSize = 1000;
+  rqc.archiveFile.storageClass = "sc";
+  {
+    cta::common::dataStructures::TapeFile tf;
+    tf.blockId=0;
+    tf.fileSize=1;
+    tf.copyNb=2;
+    tf.creationTime=time(nullptr);
+    tf.fSeq=1;
+    tf.vid="Tape0";
+    rqc.archiveFile.tapeFiles.push_back(tf);
+  }
+  rqc.mountPolicy.archiveMinRequestAge = 1;
+  rqc.mountPolicy.archivePriority = 1;
+  rqc.mountPolicy.creationLog.time = time(nullptr);
+  rqc.mountPolicy.lastModificationLog.time = time(nullptr);
+  rqc.mountPolicy.maxDrivesAllowed = 1;
+  rqc.mountPolicy.retrieveMinRequestAge = 1;
+  rqc.mountPolicy.retrievePriority = 1;
+  rr.setRetrieveFileQueueCriteria(rqc);
+  cta::common::dataStructures::RetrieveRequest sReq;
+  sReq.archiveFileID = rqc.archiveFile.archiveFileID;
+  sReq.creationLog.time=time(nullptr);
+  rr.setSchedulerRequest(sReq);
+  rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToTransfer);
+  rr.setOwner(agentToTransferForUser.getAddressIfSet());
+  rr.setActiveCopyNumber(0);
+  rr.insert();
+  
+  // Create the garbage collector and run it once.
+  cta::objectstore::AgentReference gcAgentRef("unitTestGarbageCollector", dl);
+  cta::objectstore::Agent gcAgent(gcAgentRef.getAgentAddress(), be);
+  gcAgent.initialize();
+  gcAgent.setTimeout_us(0);
+  gcAgent.insertAndRegisterSelf(lc);
+
+  cta::objectstore::GarbageCollector gc(be, gcAgentRef, catalogue);
+  gc.runOnePass(lc);
+  
+  {
+    //The Retrieve Request should now be queued in the RetrieveQueueToTransferForUser
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    auto jobs = rq.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+    
+    rr.fetchNoLock();
+    ASSERT_EQ(rr.getOwner(),rq.getAddressIfSet());
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for RetrieveQueueToTransferForUser
+    cta::objectstore::AgentReference agentRefToTransferForUserAutoGc("ToTransferForUser", dl);
+    cta::objectstore::Agent agentToTransferForUserAutoGc(agentRefToTransferForUserAutoGc.getAgentAddress(), be);
+    agentToTransferForUserAutoGc.initialize();
+    agentToTransferForUserAutoGc.setTimeout_us(0);
+    agentToTransferForUserAutoGc.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ScopedExclusiveLock sel(rr);
+    rr.fetch();
+    rr.setOwner(agentRefToTransferForUserAutoGc.getAgentAddress());
+    agentRefToTransferForUserAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+ 
+    ASSERT_NO_THROW(rr.garbageCollect(agentRefToTransferForUserAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    sel.release();
+    //The Retrieve Request should now be queued in the RetrieveQueueToTransferForUser
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    auto jobs = rq.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+    
+    rr.fetchNoLock();
+    ASSERT_EQ(rr.getOwner(),rq.getAddressIfSet());
+  }
+  
+  {
+    //Test the Garbage collection of the RetrieveRequest with a reportToUserForFailure job
+    cta::objectstore::AgentReference agentRefToReportToUser("ToReportToUser", dl);
+    cta::objectstore::Agent agentToReportToUser(agentRefToReportToUser.getAgentAddress(), be);
+    agentToReportToUser.initialize();
+    agentToReportToUser.setTimeout_us(0);
+    agentToReportToUser.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToReportToUser.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToUserForFailure);
+      rr.commit();
+    }
+    
+    agentRefToReportToUser.addToOwnership(rr.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Retrieve Request should be queued in the RetrieveQueueToReportToUser
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToUser(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    rqToReportToUser.fetchNoLock();
+    
+    auto jobs = rqToReportToUser.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for ToReportToUserForFailure job
+    cta::objectstore::AgentReference agentRefToReportToUserAutoGc("ToReportForUser", dl);
+    cta::objectstore::Agent agentToReportToUserAutoGc(agentRefToReportToUserAutoGc.getAgentAddress(), be);
+    agentToReportToUserAutoGc.initialize();
+    agentToReportToUserAutoGc.setTimeout_us(0);
+    agentToReportToUserAutoGc.insertAndRegisterSelf(lc);
+    
+    
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToReportToUserAutoGc.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToUserForFailure);
+      rr.commit();
+
+      agentRefToReportToUserAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+
+      ASSERT_NO_THROW(rr.garbageCollect(agentRefToReportToUserAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    }
+    
+    //The Retrieve Request should now be queued in the RetrieveQueueToReportToUser
+    
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToUser(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    rqToReportToUser.fetchNoLock();
+    
+    auto jobs = rqToReportToUser.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+
+    rr.fetchNoLock();
+    ASSERT_EQ(rqToReportToUser.getAddressIfSet(),rr.getOwner());
+  }
+  
+  {
+    //Test the Garbage collection of the RetrieveRequest with a RJS_Failed job
+    cta::objectstore::AgentReference agentRefFailedJob("FailedJob", dl);
+    cta::objectstore::Agent agentFailedJob(agentRefFailedJob.getAgentAddress(), be);
+    agentFailedJob.initialize();
+    agentFailedJob.setTimeout_us(0);
+    agentFailedJob.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefFailedJob.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_Failed);
+      rr.commit();
+    }
+    agentRefFailedJob.addToOwnership(rr.getAddressIfSet(),be);
+    
+    gc.runOnePass(lc);
+    
+    //The Retrieve Request should be queued in the RetrieveQueueFailed
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqFailed(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::FailedJobs), be);
+    rqFailed.fetchNoLock();
+    
+    auto jobs = rqFailed.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for RJS_Failed job
+    cta::objectstore::AgentReference agentRefFailedJobAutoGc("FailedJob", dl);
+    cta::objectstore::Agent agentFailedJobAutoGc(agentRefFailedJobAutoGc.getAgentAddress(), be);
+    agentFailedJobAutoGc.initialize();
+    agentFailedJobAutoGc.setTimeout_us(0);
+    agentFailedJobAutoGc.insertAndRegisterSelf(lc);
+    
+    
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::FailedJobs), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefFailedJobAutoGc.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_Failed);
+      rr.commit();
+    
+    
+      agentRefFailedJobAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+
+      ASSERT_NO_THROW(rr.garbageCollect(agentRefFailedJobAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    }
+    
+    //The Retrieve Request should now be queued in the RetrieveQueueFailed
+    
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToUser(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::FailedJobs), be);
+    rqToReportToUser.fetchNoLock();
+    
+    auto jobs = rqToReportToUser.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+
+    rr.fetchNoLock();
+    ASSERT_EQ(rqToReportToUser.getAddressIfSet(),rr.getOwner());
+  }
+  
+  //Create a repack info object for the garbage collection of Jobs ToReportToRepackForSuccess and ToReportToRepackForFailure
+  cta::objectstore::RetrieveRequest::RepackInfo ri;
+  ri.isRepack = true;
+  ri.fSeq = 1;
+  ri.fileBufferURL = "testFileBufferURL";
+  ri.repackRequestAddress = "repackRequestAddress";
+  
+  {
+    //Test the Garbage collection of the RetrieveRequest with a Retrieve job ToReportToRepackForSuccess
+    cta::objectstore::AgentReference agentRefToReportToRepackForSuccess("ToReportToRepackForSuccess", dl);
+    cta::objectstore::Agent agentToReportToRepackForSuccess(agentRefToReportToRepackForSuccess.getAgentAddress(), be);
+    agentToReportToRepackForSuccess.initialize();
+    agentToReportToRepackForSuccess.setTimeout_us(0);
+    agentToReportToRepackForSuccess.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress("Tape0", cta::objectstore::JobQueueType::FailedJobs), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToReportToRepackForSuccess.getAgentAddress());
+      //Add the repack information to the RetrieveRequest
+      rr.setRepackInfo(ri);
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess);
+      rr.commit();
+    }
+    agentRefToReportToRepackForSuccess.addToOwnership(rr.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Retrieve Request should be queued in the RetrieveQueueToReportToRepackForSuccess
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToRepackForSuccess(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    rqToReportToRepackForSuccess.fetchNoLock();
+    
+    auto jobs = rqToReportToRepackForSuccess.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for RJS_ToReportToRepackForSuccess job
+    cta::objectstore::AgentReference agentRefToReportToRepackForSuccessJobAutoGc("ToReportToRepackForSuccessAutoGC", dl);
+    cta::objectstore::Agent agentToReportToRepackForSuccessJobAutoGc(agentRefToReportToRepackForSuccessJobAutoGc.getAgentAddress(), be);
+    agentToReportToRepackForSuccessJobAutoGc.initialize();
+    agentToReportToRepackForSuccessJobAutoGc.setTimeout_us(0);
+    agentToReportToRepackForSuccessJobAutoGc.insertAndRegisterSelf(lc);
+    
+    
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToReportToRepackForSuccessJobAutoGc.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess);
+      rr.commit();
+
+      agentRefToReportToRepackForSuccessJobAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+
+      ASSERT_NO_THROW(rr.garbageCollect(agentRefToReportToRepackForSuccessJobAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    }
+    
+    //The Retrieve Request should now be queued in the RetrieveQueueToReportToRepackForSuccess
+    
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToRepackForSuccess(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    rqToReportToRepackForSuccess.fetchNoLock();
+    
+    auto jobs = rqToReportToRepackForSuccess.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+
+    rr.fetchNoLock();
+    ASSERT_EQ(rqToReportToRepackForSuccess.getAddressIfSet(),rr.getOwner());
+  }
+  
+  {
+    //Test the Garbage collection of the RetrieveRequest with a Retrieve job ToReportToRepackForFailure
+    cta::objectstore::AgentReference agentRefToReportToRepackForFailure("ToReportToRepackForFailure", dl);
+    cta::objectstore::Agent agentToReportToRepackForFailure(agentRefToReportToRepackForFailure.getAgentAddress(), be);
+    agentToReportToRepackForFailure.initialize();
+    agentToReportToRepackForFailure.setTimeout_us(0);
+    agentToReportToRepackForFailure.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    cta::objectstore::ScopedExclusiveLock sel(rr);
+    rr.fetch();
+    rr.setOwner(agentRefToReportToRepackForFailure.getAgentAddress());
+  
+    rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToRepackForFailure);
+    rr.commit();
+    sel.release();
+    
+    agentRefToReportToRepackForFailure.addToOwnership(rr.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Retrieve Request should be queued in the RetrieveQueueToReportToRepackForFailure
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToRepackForFailure(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+    rqToReportToRepackForFailure.fetchNoLock();
+    
+    auto jobs = rqToReportToRepackForFailure.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+  }
+  
+  {
+    //Test the RetrieveRequest::garbageCollect method for RJS_ToReportToRepackForFailure job
+    cta::objectstore::AgentReference agentRefToReportToRepackForFailureJobAutoGc("ToReportToRepackForFailureAutoGC", dl);
+    cta::objectstore::Agent agentToReportToRepackForFailureJobAutoGc(agentRefToReportToRepackForFailureJobAutoGc.getAgentAddress(), be);
+    agentToReportToRepackForFailureJobAutoGc.initialize();
+    agentToReportToRepackForFailureJobAutoGc.setTimeout_us(0);
+    agentToReportToRepackForFailureJobAutoGc.insertAndRegisterSelf(lc);
+    
+    
+    cta::objectstore::RetrieveQueue rq(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+    cta::objectstore::ScopedExclusiveLock rql(rq);
+    rq.fetch();
+    rq.removeJobsAndCommit({rr.getAddressIfSet()});
+    rql.release();
+    
+    {
+      cta::objectstore::ScopedExclusiveLock sel(rr);
+      rr.fetch();
+      rr.setOwner(agentRefToReportToRepackForFailureJobAutoGc.getAgentAddress());
+      rr.setJobStatus(2,cta::objectstore::serializers::RetrieveJobStatus::RJS_ToReportToRepackForFailure);
+      rr.commit();
+
+      agentRefToReportToRepackForFailureJobAutoGc.addToOwnership(rr.getAddressIfSet(),be);
+
+      ASSERT_NO_THROW(rr.garbageCollect(agentRefToReportToRepackForFailureJobAutoGc.getAgentAddress(),agentRef,lc,catalogue));
+    }
+    
+    //The Retrieve Request should now be queued in the RetrieveQueueToReportToRepackForFailure
+    
+    re.fetchNoLock();
+    cta::objectstore::RetrieveQueue rqToReportToRepackForFailure(re.getRetrieveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+    rqToReportToRepackForFailure.fetchNoLock();
+    
+    auto jobs = rqToReportToRepackForFailure.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+
+    rr.fetchNoLock();
+    ASSERT_EQ(rqToReportToRepackForFailure.getAddressIfSet(),rr.getOwner());
+  }
+}
+
+TEST(ObjectStore, GarbageCollectorArchiveAllStatusesAndQueues) {
+  // We will need a log object
+#ifdef STDOUT_LOGGING
+  cta::log::StdoutLogger dl("dummy", "unitTest");
+#else
+  cta::log::DummyLogger dl("dummy", "unitTest");
+#endif
+  cta::log::LogContext lc(dl);
+  // We need a dummy catalogue
+  cta::catalogue::DummyCatalogue catalogue;
+  // Here we check that can successfully call RetrieveRequests's garbage collector
+  cta::objectstore::BackendVFS be;
+  // Create the root entry
+  cta::objectstore::RootEntry re(be);
+  re.initialize();
+  re.insert();
+  // Create the agent register
+  cta::objectstore::EntryLogSerDeser el("user0",
+      "unittesthost", time(NULL));
+  cta::objectstore::ScopedExclusiveLock rel(re);
+  // Create the agent for objects creation
+  cta::objectstore::AgentReference agentRef("unitTestCreateEnv", dl);
+  // Finish root creation.
+  re.addOrGetAgentRegisterPointerAndCommit(agentRef, el, lc);
+  rel.release();
+  // continue agent creation.
+  cta::objectstore::Agent agent(agentRef.getAgentAddress(), be);
+  agent.initialize();
+  agent.setTimeout_us(0);
+  agent.insertAndRegisterSelf(lc);
+  
+  // Create all agents to be garbage collected
+  cta::objectstore::AgentReference agentRefToTransferForUser("ToTransferForUser", dl);
+  cta::objectstore::Agent agentToTransferForUser(agentRefToTransferForUser.getAgentAddress(), be);
+  agentToTransferForUser.initialize();
+  agentToTransferForUser.setTimeout_us(0);
+  agentToTransferForUser.insertAndRegisterSelf(lc);
+  
+  std::string archiveRequestAddress = agentRefToTransferForUser.nextId("ArchiveRequest");
+  agentRefToTransferForUser.addToOwnership(archiveRequestAddress, be);
+  
+  std::string tapePool = "tapePool";
+  
+  cta::objectstore::ArchiveRequest ar(archiveRequestAddress, be);
+  ar.initialize();
+  cta::common::dataStructures::ArchiveFile aFile;
+  aFile.archiveFileID = 123456789L;
+  aFile.diskFileId = "eos://diskFile";
+  aFile.checksumBlob.insert(cta::checksum::NONE, "");
+  aFile.creationTime = 0;
+  aFile.reconciliationTime = 0;
+  aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
+  aFile.diskInstance = "eoseos";
+  aFile.fileSize = 667;
+  aFile.storageClass = "sc";
+  ar.setArchiveFile(aFile);
+  ar.addJob(2, tapePool, agentRefToTransferForUser.getAgentAddress(), 1, 1, 1);
+  cta::common::dataStructures::MountPolicy mp;
+  ar.setMountPolicy(mp);
+  ar.setArchiveReportURL("");
+  ar.setArchiveErrorReportURL("");
+  ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
+  ar.setSrcURL("root://eoseos/myFile");
+  ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
+  ar.insert();
+  
+  // Create the garbage collector and run it once.
+  cta::objectstore::AgentReference gcAgentRef("unitTestGarbageCollector", dl);
+  cta::objectstore::Agent gcAgent(gcAgentRef.getAgentAddress(), be);
+  gcAgent.initialize();
+  gcAgent.setTimeout_us(0);
+  gcAgent.insertAndRegisterSelf(lc);
+
+  cta::objectstore::GarbageCollector gc(be, gcAgentRef, catalogue);
+  gc.runOnePass(lc);
+  
+  {
+    //The Archive Request should now be queued in the ArchiveQueueToTransferForUser
+    re.fetchNoLock();
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    auto jobs = aq.dumpJobs();
+    ASSERT_EQ(1,jobs.size());
+
+    auto& job = jobs.front();
+    ASSERT_EQ(2,job.copyNb);
+    
+    ar.fetchNoLock();
+    ASSERT_EQ(ar.getJobOwner(2),aq.getAddressIfSet());
+  }
+  {
+    //Test the AJS_ToTransferForUser auto garbage collection
+    cta::objectstore::AgentReference agentRefToTransferForUserAutoGC("ToTransferForUserAutoGC", dl);
+    cta::objectstore::Agent agentToTransferForUserAutoGC(agentRefToTransferForUserAutoGC.getAgentAddress(), be);
+    agentToTransferForUserAutoGC.initialize();
+    agentToTransferForUserAutoGC.setTimeout_us(0);
+    agentToTransferForUserAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToTransferForUserAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToTransferForUser);
+    ar.commit();
+    agentRefToTransferForUserAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    
+    ar.garbageCollect(agentRefToTransferForUserAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    sel.release();
+    
+    {
+      //The Archive Request should be queued in the ArchiveQueueToTransferForUser
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToTransferForUser(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+
+      aqToTransferForUser.fetchNoLock();
+
+      auto jobs = aqToTransferForUser.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToTransferForUser.getAddressIfSet());
+    }
+  }
+  {
+    //Test the AJS_ToReportToUserForFailure Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToUserForFailure("ToReportToUserForFailure", dl);
+    cta::objectstore::Agent agentToReportToUserForFailure(agentRefToReportToUserForFailure.getAgentAddress(), be);
+    agentToReportToUserForFailure.initialize();
+    agentToReportToUserForFailure.setTimeout_us(0);
+    agentToReportToUserForFailure.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToTransferForUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToUserForFailure.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToUserForFailure);
+    ar.commit();
+    sel.release();
+    
+    agentRefToReportToUserForFailure.addToOwnership(ar.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportForUser
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToUserForFailure(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+
+      aqToReportToUserForFailure.fetchNoLock();
+
+      auto jobs = aqToReportToUserForFailure.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToUserForFailure.getAddressIfSet());
+    }
+  }
+  {
+    //Test the AJS_ToReportToUserForFailure Auto Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToUserForFailureAutoGC("ToReportToUserForFailureAutoGC", dl);
+    cta::objectstore::Agent agentToReportToUserForFailureAutoGC(agentRefToReportToUserForFailureAutoGC.getAgentAddress(), be);
+    agentToReportToUserForFailureAutoGC.initialize();
+    agentToReportToUserForFailureAutoGC.setTimeout_us(0);
+    agentToReportToUserForFailureAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToUserForFailureAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToUserForFailure);
+    ar.commit();
+    agentRefToReportToUserForFailureAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    ar.garbageCollect(agentRefToReportToUserForFailureAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportForUser
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToUserForFailure(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+
+      aqToReportToUserForFailure.fetchNoLock();
+
+      auto jobs = aqToReportToUserForFailure.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToUserForFailure.getAddressIfSet());
+    }
+  }
+  
+  {
+    //Test the AJS_ToReportToUserForTransfer Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToUserForTransfer("ToReportToUserForTransfer", dl);
+    cta::objectstore::Agent agentToReportToUserForTransfer(agentRefToReportToUserForTransfer.getAgentAddress(), be);
+    agentToReportToUserForTransfer.initialize();
+    agentToReportToUserForTransfer.setTimeout_us(0);
+    agentToReportToUserForTransfer.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToUserForTransfer.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToUserForTransfer);
+    ar.commit();
+    sel.release();
+    
+    agentRefToReportToUserForTransfer.addToOwnership(ar.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportForUser
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToUserForTransfer(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+
+      aqToReportToUserForTransfer.fetchNoLock();
+
+      auto jobs = aqToReportToUserForTransfer.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToUserForTransfer.getAddressIfSet());
+    }
+  }
+  {
+    //Test the AJS_ToReportToUserForTransfer Auto Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToUserForTransferAutoGC("ToReportToUserForTransferAutoGC", dl);
+    cta::objectstore::Agent agentToReportToUserForTransferAutoGC(agentRefToReportToUserForTransferAutoGC.getAgentAddress(), be);
+    agentToReportToUserForTransferAutoGC.initialize();
+    agentToReportToUserForTransferAutoGC.setTimeout_us(0);
+    agentToReportToUserForTransferAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToUserForTransferAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToUserForTransfer);
+    ar.commit();
+    agentRefToReportToUserForTransferAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    ar.garbageCollect(agentRefToReportToUserForTransferAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportForUser
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToUserForTransfer(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+
+      aqToReportToUserForTransfer.fetchNoLock();
+
+      auto jobs = aqToReportToUserForTransfer.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToUserForTransfer.getAddressIfSet());
+    }
+  }
+  {
+    //Test the garbage collection of an AJS_Failed job
+    cta::objectstore::AgentReference agentRefFailed("Failed", dl);
+    cta::objectstore::Agent agentFailed(agentRefFailed.getAgentAddress(), be);
+    agentFailed.initialize();
+    agentFailed.setTimeout_us(0);
+    agentFailed.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::JobsToReportToUser), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefFailed.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_Failed);
+    ar.commit();
+    sel.release();
+    
+    agentRefFailed.addToOwnership(ar.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Archive Request should be queued in the ArchiveQueueFailed
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqFailed(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::FailedJobs), be);
+
+      aqFailed.fetchNoLock();
+
+      auto jobs = aqFailed.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqFailed.getAddressIfSet());
+    }
+  }
+  
+  {
+    //Test the AJS_Failed job Auto Garbage collection
+    cta::objectstore::AgentReference agentRefFailedAutoGC("FailedAutoGC", dl);
+    cta::objectstore::Agent agentFailedAutoGC(agentRefFailedAutoGC.getAgentAddress(), be);
+    agentFailedAutoGC.initialize();
+    agentFailedAutoGC.setTimeout_us(0);
+    agentFailedAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::FailedJobs), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefFailedAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_Failed);
+    ar.commit();
+    agentRefFailedAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    ar.garbageCollect(agentRefFailedAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    
+    //The Archive Request should be queued in the ArchiveQueueFailed
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqFailed(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::FailedJobs), be);
+
+      aqFailed.fetchNoLock();
+
+      auto jobs = aqFailed.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqFailed.getAddressIfSet());
+    }
+  }
+  
+  //Add Repack information to test the garbage collection of Archive Requests for Repack
+  //Create a repack info object for the garbage collection of Jobs ToReportToRepackForSuccess and ToReportToRepackForFailure
+  cta::objectstore::ArchiveRequest::RepackInfo ri;
+  ri.isRepack = true;
+  ri.fSeq = 1;
+  ri.fileBufferURL = "testFileBufferURL";
+  ri.repackRequestAddress = "repackRequestAddress";
+  
+  {
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setRepackInfo(ri);
+    ar.commit();
+  }
+  
+  {
+    //Test the Garbage collection of an AJS_ToReportToRepackForSuccess job
+    cta::objectstore::AgentReference agentRefToReportToRepackForSuccess("ToReportToUserForTransfer", dl);
+    cta::objectstore::Agent agentToReportToRepackForSuccess(agentRefToReportToRepackForSuccess.getAgentAddress(), be);
+    agentToReportToRepackForSuccess.initialize();
+    agentToReportToRepackForSuccess.setTimeout_us(0);
+    agentToReportToRepackForSuccess.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(tapePool, cta::objectstore::JobQueueType::FailedJobs), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToRepackForSuccess.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToRepackForSuccess);
+    ar.commit();
+    sel.release();
+    
+    agentRefToReportToRepackForSuccess.addToOwnership(ar.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportToRepackForSuccess
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToRepackForSuccess(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+
+      aqToReportToRepackForSuccess.fetchNoLock();
+
+      auto jobs = aqToReportToRepackForSuccess.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToRepackForSuccess.getAddressIfSet());
+    }
+  }
+  
+  {
+    //Test the AJS_ToReportToRepackForSuccess job Auto Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToRepackForSuccessAutoGC("ToReportToRepackForSuccessAutoGC", dl);
+    cta::objectstore::Agent agentToReportToRepackForSuccessAutoGC(agentRefToReportToRepackForSuccessAutoGC.getAgentAddress(), be);
+    agentToReportToRepackForSuccessAutoGC.initialize();
+    agentToReportToRepackForSuccessAutoGC.setTimeout_us(0);
+    agentToReportToRepackForSuccessAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToRepackForSuccessAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToRepackForSuccess);
+    ar.commit();
+    agentRefToReportToRepackForSuccessAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    ar.garbageCollect(agentRefToReportToRepackForSuccessAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportToRepackForSuccess
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToRepackForSuccess(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+
+      aqToReportToRepackForSuccess.fetchNoLock();
+
+      auto jobs = aqToReportToRepackForSuccess.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToRepackForSuccess.getAddressIfSet());
+    }
+  }
+  
+  {
+    //Test the garbage collection of an AJS_ToReportToRepackForFailure job
+    cta::objectstore::AgentReference agentRefToReportToRepackForFailure("ToReportToRepackForFailure", dl);
+    cta::objectstore::Agent agentToReportToRepackForFailure(agentRefToReportToRepackForFailure.getAgentAddress(), be);
+    agentToReportToRepackForFailure.initialize();
+    agentToReportToRepackForFailure.setTimeout_us(0);
+    agentToReportToRepackForFailure.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForSuccess), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToRepackForFailure.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure);
+    ar.commit();
+    sel.release();
+    
+    agentRefToReportToRepackForFailure.addToOwnership(ar.getAddressIfSet(),be);
+
+    gc.runOnePass(lc);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportToRepackForFailure
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToRepackForFailure(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+
+      aqToReportToRepackForFailure.fetchNoLock();
+
+      auto jobs = aqToReportToRepackForFailure.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToRepackForFailure.getAddressIfSet());
+    }
+  }
+  {
+    //Test the AJS_ToReportToRepackForFailure job Auto Garbage collection
+    cta::objectstore::AgentReference agentRefToReportToRepackForFailureAutoGC("ToReportToRepackForFailureAutoGC", dl);
+    cta::objectstore::Agent agentToReportToRepackForFailureAutoGC(agentRefToReportToRepackForFailureAutoGC.getAgentAddress(), be);
+    agentToReportToRepackForFailureAutoGC.initialize();
+    agentToReportToRepackForFailureAutoGC.setTimeout_us(0);
+    agentToReportToRepackForFailureAutoGC.insertAndRegisterSelf(lc);
+  
+    cta::objectstore::ArchiveQueue aq(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+    cta::objectstore::ScopedExclusiveLock aql(aq);
+    aq.fetch();
+    aq.removeJobsAndCommit({ar.getAddressIfSet()});
+    aql.release();
+    
+    cta::objectstore::ScopedExclusiveLock sel(ar);
+    ar.fetch();
+    ar.setJobOwner(2,agentRefToReportToRepackForFailureAutoGC.getAgentAddress());
+    ar.setJobStatus(2,cta::objectstore::serializers::ArchiveJobStatus::AJS_ToReportToRepackForFailure);
+    ar.commit();
+    agentRefToReportToRepackForFailureAutoGC.addToOwnership(ar.getAddressIfSet(),be);
+    ar.garbageCollect(agentRefToReportToRepackForFailureAutoGC.getAgentAddress(),agentRef,lc,catalogue);
+    
+    //The Archive Request should be queued in the ArchiveQueueToReportToRepackForFailure
+    {
+      re.fetchNoLock();
+      cta::objectstore::ArchiveQueue aqToReportToRepackForFailure(re.getArchiveQueueAddress(ri.repackRequestAddress, cta::objectstore::JobQueueType::JobsToReportToRepackForFailure), be);
+
+      aqToReportToRepackForFailure.fetchNoLock();
+
+      auto jobs = aqToReportToRepackForFailure.dumpJobs();
+      ASSERT_EQ(1,jobs.size());
+
+      auto& job = jobs.front();
+      ASSERT_EQ(2,job.copyNb);
+      
+      ar.fetchNoLock();
+      ASSERT_EQ(ar.getJobOwner(2),aqToReportToRepackForFailure.getAddressIfSet());
+    }
+  }
+}
+
 }
diff --git a/objectstore/MountPolicySerDeser.hpp b/objectstore/MountPolicySerDeser.hpp
index b5a5ab7f97a5c1a2e5d3a1ae122a2f56916d0965..f64950c60294f52faf17be7672f5de2b7e85181d 100644
--- a/objectstore/MountPolicySerDeser.hpp
+++ b/objectstore/MountPolicySerDeser.hpp
@@ -18,7 +18,6 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
 #include "objectstore/cta.pb.h"
 #include "common/dataStructures/MountPolicy.hpp"
 
diff --git a/objectstore/OwnerIdentitySerDeser.hpp b/objectstore/OwnerIdentitySerDeser.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e54d396479b1124cdb4cadb197a1f925554ad62a
--- /dev/null
+++ b/objectstore/OwnerIdentitySerDeser.hpp
@@ -0,0 +1,49 @@
+/**
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <string>
+
+#include "common/dataStructures/OwnerIdentity.hpp"
+#include "objectstore/cta.pb.h"
+
+namespace cta { namespace objectstore {
+
+/**
+ * Helper for converting cta::common::dataStructures::OwnerIdentity to and
+ * from its protobuf representation (serializers::OwnerIdentity).
+ */
+struct OwnerIdentitySerDeser: public cta::common::dataStructures::OwnerIdentity {
+
+  /// Copy this object's uid/gid into the protobuf message.
+  void serialize(cta::objectstore::serializers::OwnerIdentity &user) const {
+    user.set_uid(uid);
+    user.set_gid(gid);
+  }
+
+  /// Copy uid/gid out of the protobuf message into this object.
+  /// (Fixed: the original used a constructor-style member initializer list
+  /// on a plain member function, which is not valid C++ and does not compile.)
+  void deserialize(const cta::objectstore::serializers::OwnerIdentity &user) {
+    uid = user.uid();
+    gid = user.gid();
+  }
+};
+
+}}
diff --git a/objectstore/RepackRequest.cpp b/objectstore/RepackRequest.cpp
index 4ac5aaaa398689227dad3de267385c6195b3f8f0..3e3e52b53502465f4d9f9c20ed5456570ed4e9fd 100644
--- a/objectstore/RepackRequest.cpp
+++ b/objectstore/RepackRequest.cpp
@@ -21,6 +21,7 @@
 #include "AgentReference.hpp"
 #include "RepackQueueAlgorithms.hpp"
 #include "Algorithms.hpp"
+#include "MountPolicySerDeser.hpp"
 #include <google/protobuf/util/json_util.h>
 #include <iostream>
 
@@ -71,6 +72,7 @@ void RepackRequest::initialize() {
   m_payload.set_archivedbytes(0);
   m_payload.set_failedtoretrievefiles(0);
   m_payload.set_failedtoretrievebytes(0);
+  m_payload.set_failedtocreatearchivereq(0);
   m_payload.set_failedtoarchivefiles(0);
   m_payload.set_failedtoarchivebytes(0);
   m_payload.set_lastexpandedfseq(0);
@@ -177,14 +179,27 @@ void RepackRequest::setTotalStats(const cta::SchedulerDatabase::RepackRequest::T
   setTotalBytesToRetrieve(totalStatsFiles.totalBytesToRetrieve);
 }
 
+void RepackRequest::setMountPolicy(const common::dataStructures::MountPolicy& mp){
+  checkPayloadWritable();
+  MountPolicySerDeser mpSerDeser(mp);
+  mpSerDeser.serialize(*m_payload.mutable_mount_policy());
+}
+
+common::dataStructures::MountPolicy RepackRequest::getMountPolicy(){
+  checkPayloadReadable();
+  MountPolicySerDeser mpSerDeser;
+  mpSerDeser.deserialize(m_payload.mount_policy());
+  return mpSerDeser;
+}
+
 void RepackRequest::setStatus(){
   checkPayloadWritable();
   checkPayloadReadable();
   
   if(m_payload.is_expand_started()){
-    //The expansion of the Repack Request have started
+    //The expansion of the Repack Request has started
     if(m_payload.is_expand_finished()){
-      if( (m_payload.retrievedfiles() + m_payload.failedtoretrievefiles() >= m_payload.totalfilestoretrieve()) && (m_payload.archivedfiles() + m_payload.failedtoarchivefiles() >= m_payload.totalfilestoarchive()) ){
+      if( (m_payload.retrievedfiles() + m_payload.failedtoretrievefiles() >= m_payload.totalfilestoretrieve()) && (m_payload.archivedfiles() + m_payload.failedtoarchivefiles() + m_payload.failedtocreatearchivereq() >= m_payload.totalfilestoarchive()) ){
         //We reached the end
         if (m_payload.failedtoretrievefiles() || m_payload.failedtoarchivefiles()) {
           //At least one retrieve or archive has failed
@@ -518,6 +533,30 @@ auto RepackRequest::getStats() -> std::map<StatsType, StatsValues> {
   return ret;
 }
 
+//------------------------------------------------------------------------------
+// RepackRequest::reportRetrieveCreationFailures()
+//------------------------------------------------------------------------------
+void RepackRequest::reportRetrieveCreationFailures(const std::list<cta::SchedulerDatabase::RepackRequest::Subrequest>& notCreatedSubrequests){
+  checkPayloadWritable();
+  uint64_t failedToRetrieveFiles = 0, failedToRetrieveBytes = 0, failedToCreateArchiveReq = 0;
+  for(auto & subreq: notCreatedSubrequests){
+    failedToRetrieveFiles++;
+    failedToRetrieveBytes+=subreq.archiveFile.fileSize;
+    for(auto & copyNb: subreq.copyNbsToRearchive){
+      (void) copyNb;
+      failedToCreateArchiveReq++;
+    }
+  }
+  m_payload.set_failedtoretrievebytes(m_payload.failedtoretrievebytes() + failedToRetrieveBytes);
+  m_payload.set_failedtoretrievefiles(m_payload.failedtoretrievefiles() + failedToRetrieveFiles);
+  reportArchiveCreationFailures(failedToCreateArchiveReq);
+  setStatus();
+}
+
+void RepackRequest::reportArchiveCreationFailures(uint64_t nbFailedToCreateArchiveRequests){
+  checkPayloadWritable();
+  m_payload.set_failedtocreatearchivereq(m_payload.failedtocreatearchivereq() + nbFailedToCreateArchiveRequests);
+}
 
 //------------------------------------------------------------------------------
 // RepackRequest::garbageCollect()
diff --git a/objectstore/RepackRequest.hpp b/objectstore/RepackRequest.hpp
index 29b1d618aed8fc04ec7c88f5341134d9f348585f..bfa7c812d8e30ba3b8d8a062fef6009b7493685c 100644
--- a/objectstore/RepackRequest.hpp
+++ b/objectstore/RepackRequest.hpp
@@ -48,6 +48,9 @@ public:
   void setExpandStarted(const bool expandStarted);
   void setTotalStats(const cta::SchedulerDatabase::RepackRequest::TotalStatsFiles& totalStatsFiles);
   cta::SchedulerDatabase::RepackRequest::TotalStatsFiles getTotalStatsFile();
+  void setMountPolicy(const common::dataStructures::MountPolicy &mp);
+  common::dataStructures::MountPolicy getMountPolicy();
+  
   /**
    * Automatically set the new status of the Repack Request
    * regarding multiple parameters
@@ -123,6 +126,10 @@ public:
   };
   std::map<StatsType, StatsValues> getStats();
   
+  void reportRetrieveCreationFailures(const std::list<cta::SchedulerDatabase::RepackRequest::Subrequest>& notCreatedSubrequests);
+  
+  void reportArchiveCreationFailures(uint64_t nbFailedToCreateArchiveRequests);
+  
   void garbageCollect(const std::string &presumedOwner, AgentReference & agentReference, log::LogContext & lc,
     cta::catalogue::Catalogue & catalogue) override;
     
diff --git a/objectstore/UserIdentity.hpp b/objectstore/RequesterIdentitySerDeser.hpp
similarity index 58%
rename from objectstore/UserIdentity.hpp
rename to objectstore/RequesterIdentitySerDeser.hpp
index a3f9087196f2b5a4bd75cecb8ab2d936c3e93f7d..c4ba50f29e1118b235bb1a4915ac0b52e5a66564 100644
--- a/objectstore/UserIdentity.hpp
+++ b/objectstore/RequesterIdentitySerDeser.hpp
@@ -16,30 +16,24 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "common/UserIdentity.hpp"
-#include "objectstore/cta.pb.h"
+#pragma once
 
 #include <string>
-#include <stdint.h>
+
+#include "common/dataStructures/RequesterIdentity.hpp"
+#include "objectstore/cta.pb.h"
 
 namespace cta { namespace objectstore {
 
-class UserIdentity: public cta::common::dataStructures::UserIdentity {
-public:
-  UserIdentity (): cta::common::dataStructures::UserIdentity() {}
-  UserIdentity (const std::string &n, const std::string &g) {
-    name = n;
-    group = g;
-  }
-  UserIdentity(const cta::common::dataStructures::UserIdentity & user): cta::common::dataStructures::UserIdentity(user) {}
-  void serialize (cta::objectstore::serializers::UserIdentity & user) const {
+struct RequesterIdentitySerDeser: public cta::common::dataStructures::RequesterIdentity {
+
+  void serialize (cta::objectstore::serializers::RequesterIdentity & user) const {
     user.set_name(name);
     user.set_group(group);
   }
-  void deserialize (const cta::objectstore::serializers::UserIdentity & user) {
-    name = user.name();
-    group = user.group();
-  }
+
+  void deserialize (const cta::objectstore::serializers::RequesterIdentity & user)
+    { name = user.name(); group = user.group(); }
 };
 
 }}
diff --git a/objectstore/RetrieveRequest.cpp b/objectstore/RetrieveRequest.cpp
index 14eff484bad0eb9e35e698c0350be1e77767c314..704eb05e735a3d4ba74e5452e789836e5bef17da 100644
--- a/objectstore/RetrieveRequest.cpp
+++ b/objectstore/RetrieveRequest.cpp
@@ -27,6 +27,8 @@
 #include "Helpers.hpp"
 #include "common/utils/utils.hpp"
 #include "LifecycleTimingsSerDeser.hpp"
+#include "Sorter.hpp"
+#include "AgentWrapper.hpp"
 #include <google/protobuf/util/json_util.h>
 #include <cmath>
 
@@ -84,21 +86,65 @@ void RetrieveRequest::garbageCollect(const std::string& presumedOwner, AgentRefe
   using serializers::RetrieveJobStatus;
   std::set<std::string> candidateVids;
   for (auto &j: m_payload.jobs()) {
-    if (j.status() == RetrieveJobStatus::RJS_ToTransfer) {
-      // Find the job details in tape file
-      for (auto &tf: m_payload.archivefile().tapefiles()) {
-        if (tf.copynb() == j.copynb()) {
-          candidateVids.insert(tf.vid());
-          goto found;
+    switch(j.status()){
+      case RetrieveJobStatus::RJS_ToTransfer:
+        // Find the job details in tape file
+        for (auto &tf: m_payload.archivefile().tapefiles()) {
+          if (tf.copynb() == j.copynb()) {
+            candidateVids.insert(tf.vid());
+            goto found;
+          }
         }
-      }
-      {
-        std::stringstream err;
-        err << "In RetrieveRequest::garbageCollect(): could not find tapefile for copynb " << j.copynb();
-        throw exception::Exception(err.str());
-      }
-    found:;
+        {
+          std::stringstream err;
+          err << "In RetrieveRequest::garbageCollect(): could not find tapefile for copynb " << j.copynb();
+          throw exception::Exception(err.str());
+        }
+        break;
+      case RetrieveJobStatus::RJS_ToReportToRepackForSuccess:
+      case RetrieveJobStatus::RJS_ToReportToRepackForFailure:
+        //We don't have any vid to find, we just need to
+        //Requeue it into RetrieveQueueToReportToRepackForSuccess or into the RetrieveQueueToReportToRepackForFailure (managed by the sorter)
+        for (auto &tf: m_payload.archivefile().tapefiles()) {
+          if (tf.copynb() == j.copynb()) {
+            Sorter sorter(agentReference,m_objectStore,catalogue);
+            std::shared_ptr<RetrieveRequest> rr = std::make_shared<RetrieveRequest>(*this);
+            cta::objectstore::Agent agentRR(getOwner(),m_objectStore);
+            cta::objectstore::AgentWrapper agentRRWrapper(agentRR);
+            sorter.insertRetrieveRequest(rr,agentRRWrapper,cta::optional<uint32_t>(tf.copynb()),lc);
+            std::string retrieveQueueAddress = rr->getRepackInfo().repackRequestAddress;
+            this->m_exclusiveLock->release();
+            cta::objectstore::Sorter::MapRetrieve allRetrieveJobs = sorter.getAllRetrieve();
+            std::list<std::tuple<cta::objectstore::Sorter::RetrieveJob,std::future<void>>> allFutures;
+            cta::utils::Timer t;
+            cta::log::TimingList tl;
+            for(auto& kv: allRetrieveJobs){
+              for(auto& job: kv.second){
+                allFutures.emplace_back(std::make_tuple(std::get<0>(job->jobToQueue),std::get<1>(job->jobToQueue).get_future()));
+              }
+            }
+            sorter.flushAll(lc);
+            tl.insertAndReset("sorterFlushingTime",t);
+            for(auto& future: allFutures){
+              //Throw an exception in case of failure
+              std::get<1>(future).get();
+            }
+            log::ScopedParamContainer params(lc);
+            params.add("jobObject", getAddressIfSet())
+                  .add("fileId", m_payload.archivefile().archivefileid())
+                  .add("queueObject", retrieveQueueAddress)
+                  .add("copynb", tf.copynb())
+                  .add("tapeVid", tf.vid());
+            tl.addToLog(params);
+            lc.log(log::INFO, "In RetrieveRequest::garbageCollect(): requeued the repack retrieve request.");
+            return;
+          }
+        }
+        break;
+      default:
+        break;
     }
+    found:;
   }
   std::string bestVid;
   // If no tape file is a candidate, we just need to skip to queueing to the failed queue
@@ -107,7 +153,7 @@ void RetrieveRequest::garbageCollect(const std::string& presumedOwner, AgentRefe
   // filter on tape availability.
   try {
     // If we have to fetch the status of the tapes and queued for the non-disabled vids.
-    auto bestVid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, m_objectStore);
+    bestVid=Helpers::selectBestRetrieveQueue(candidateVids, catalogue, m_objectStore);
     goto queueForTransfer;
   } catch (Helpers::NoTapeAvailableForRetrieve &) {}
 queueForFailure:;
@@ -153,7 +199,7 @@ queueForFailure:;
     // We now need to grab the failed queue and queue the request.
     RetrieveQueue rq(m_objectStore);
     ScopedExclusiveLock rql;
-    Helpers::getLockedAndFetchedJobQueue<RetrieveQueue>(rq, rql, agentReference, bestVid, JobQueueType::JobsToReportToUser, lc);
+    Helpers::getLockedAndFetchedJobQueue<RetrieveQueue>(rq, rql, agentReference, activeVid, getQueueType(), lc);
     // Enqueue the job
     objectstore::MountPolicySerDeser mp;
     std::list<RetrieveQueue::JobToAdd> jta;
@@ -1047,7 +1093,7 @@ void RetrieveRequest::AsyncJobDeleter::wait() {
 RetrieveRequest::AsyncJobSucceedForRepackReporter * RetrieveRequest::asyncReportSucceedForRepack(uint32_t copyNb)
 {
   std::unique_ptr<AsyncJobSucceedForRepackReporter> ret(new AsyncJobSucceedForRepackReporter);
-  ret->m_updaterCallback = [copyNb](const std::string &in)->std::string{ 
+  ret->m_updaterCallback = [reporter = ret.get(), copyNb](const std::string &in)->std::string{ 
        // We have a locked and fetched object, so we just need to work on its representation.
        cta::objectstore::serializers::ObjectHeader oh;
        if (!oh.ParseFromString(in)) {
@@ -1080,6 +1126,7 @@ RetrieveRequest::AsyncJobSucceedForRepackReporter * RetrieveRequest::asyncReport
+            reporter->m_MountPolicy.deserialize(payload.mountpolicy());
            return oh.SerializeAsString();
          }
        }
        throw cta::exception::Exception("In RetrieveRequest::asyncReportSucceedForRepack::lambda(): copyNb not found");
     };
     ret->m_backendUpdater.reset(m_objectStore.asyncUpdate(getAddressIfSet(),ret->m_updaterCallback));
@@ -1126,8 +1173,7 @@ RetrieveRequest::AsyncRetrieveToArchiveTransformer * RetrieveRequest::asyncTrans
     serializers::ArchiveRequest archiveRequestPayload;
     const cta::objectstore::serializers::ArchiveFile& archiveFile = retrieveRequestPayload.archivefile();
     archiveRequestPayload.set_archivefileid(archiveFile.archivefileid());
-    archiveRequestPayload.set_checksumtype(archiveFile.checksumtype());
-    archiveRequestPayload.set_checksumvalue(archiveFile.checksumvalue());
+    archiveRequestPayload.set_checksumblob(archiveFile.checksumblob());
     archiveRequestPayload.set_creationtime(archiveFile.creationtime()); //TODO : should the creation time be the same as the archiveFile creation time ?
     archiveRequestPayload.set_diskfileid(archiveFile.diskfileid());
     archiveRequestPayload.set_diskinstance(archiveFile.diskinstance());
@@ -1157,7 +1203,7 @@ RetrieveRequest::AsyncRetrieveToArchiveTransformer * RetrieveRequest::asyncTrans
     //ArchiveRequest source url is the same as the retrieveRequest destination URL
     const cta::objectstore::serializers::SchedulerRetrieveRequest schedulerRetrieveRequest = retrieveRequestPayload.schedulerrequest();
     archiveRequestPayload.set_srcurl(schedulerRetrieveRequest.dsturl());
-    cta::objectstore::serializers::UserIdentity *archiveRequestUser = archiveRequestPayload.mutable_requester();
+    cta::objectstore::serializers::RequesterIdentity *archiveRequestUser = archiveRequestPayload.mutable_requester();
     archiveRequestUser->CopyFrom(schedulerRetrieveRequest.requester());
     
     //Copy the RetrieveRequest MountPolicy into the new ArchiveRequest
diff --git a/objectstore/RetrieveRequest.hpp b/objectstore/RetrieveRequest.hpp
index 0ad3cd77fae0a58c15a3d68d19a1e492bea11af2..b5a11ff62d5663aeca6090a5cd4516f687046b05 100644
--- a/objectstore/RetrieveRequest.hpp
+++ b/objectstore/RetrieveRequest.hpp
@@ -26,13 +26,14 @@
 #include <list>
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/EntryLog.hpp"
-#include "common/dataStructures/UserIdentity.hpp"
 #include "common/dataStructures/TapeFile.hpp"
 #include "common/dataStructures/ArchiveFile.hpp"
 #include "common/dataStructures/RetrieveRequest.hpp"
 #include "common/dataStructures/RetrieveFileQueueCriteria.hpp"
 #include "common/dataStructures/LifecycleTimings.hpp"
 #include "AgentReference.hpp"
+#include "SorterArchiveJob.hpp"
+#include "MountPolicySerDeser.hpp"
 
 namespace cta { 
   namespace objectstore {
@@ -77,6 +78,7 @@ public:
      * Wait for the end of the execution of the updater callback
      */
     void wait();
+    MountPolicySerDeser m_MountPolicy;
   private:
     //Hold the AsyncUpdater that will run asynchronously the m_updaterCallback
     std::unique_ptr<Backend::AsyncUpdater> m_backendUpdater;
diff --git a/objectstore/RootEntry.hpp b/objectstore/RootEntry.hpp
index de1f6f0fc1251dbfa4c1d642009abc7f7d74a7d9..0bd47c6d185b5dbc37df1dca3e67a393714433a0 100644
--- a/objectstore/RootEntry.hpp
+++ b/objectstore/RootEntry.hpp
@@ -25,7 +25,6 @@
 #include "Backend.hpp"
 #include "ObjectOps.hpp"
 #include "EntryLogSerDeser.hpp"
-#include "UserIdentity.hpp"
 #include "common/MountControl.hpp"
 #include <list>
 
diff --git a/objectstore/Sorter.cpp b/objectstore/Sorter.cpp
index 65928feb7f3aa2579e5237fc050e1b1bd6f229b6..8f5945084e51fae2ea5a25fb9a7ffe07a1253c57 100644
--- a/objectstore/Sorter.cpp
+++ b/objectstore/Sorter.cpp
@@ -39,7 +39,7 @@ void Sorter::executeArchiveAlgorithm(const std::string tapePool, std::string& qu
   std::map<uint64_t,std::shared_ptr<ArchiveJobQueueInfo>> succeededJobs;
   std::string previousOwner;
   for(auto& jobToAdd: jobs){
-    Sorter::ArchiveJob job = std::get<0>(jobToAdd->jobToQueue);
+    SorterArchiveJob job = std::get<0>(jobToAdd->jobToQueue);
     succeededJobs[job.jobDump.copyNb] = jobToAdd;
     previousOwner = job.previousOwner->getAgentAddress();
     jobsToAdd.push_back({ job.archiveRequest.get() ,job.jobDump.copyNb,job.archiveFile, job.mountPolicy,cta::nullopt });
@@ -89,7 +89,7 @@ void Sorter::dispatchArchiveAlgorithm(const std::string tapePool, const JobQueue
 
 void Sorter::insertArchiveRequest(std::shared_ptr<ArchiveRequest> archiveRequest, AgentReferenceInterface& previousOwner, log::LogContext& lc){
   for(auto& job: archiveRequest->dumpJobs()){
-    ArchiveJob jobToInsert;
+    SorterArchiveJob jobToInsert;
     jobToInsert.archiveRequest = archiveRequest;
     jobToInsert.archiveFile = archiveRequest->getArchiveFile();
     jobToInsert.jobDump = job;
@@ -109,13 +109,13 @@ void Sorter::insertArchiveRequest(std::shared_ptr<ArchiveRequest> archiveRequest
 
 void Sorter::insertArchiveRequest(const SorterArchiveRequest& archiveRequest, AgentReferenceInterface& previousOwner, log::LogContext& lc) {
   for(auto& archiveJob: archiveRequest.archiveJobs){
-    ArchiveJob jobToInsert = archiveJob;
+    SorterArchiveJob jobToInsert = archiveJob;
     jobToInsert.previousOwner = &previousOwner;
     insertArchiveJob(jobToInsert);
   }
 }
 
-void Sorter::insertArchiveJob(const ArchiveJob& job){
+void Sorter::insertArchiveJob(const SorterArchiveJob& job){
   auto ajqi = std::make_shared<ArchiveJobQueueInfo>();
   ajqi->jobToQueue = std::make_tuple(job,std::promise<void>());
   threading::MutexLocker mapLocker(m_mutex);
diff --git a/objectstore/Sorter.hpp b/objectstore/Sorter.hpp
index 21198193760cb46d20939750c1189f5902c3a32a..064b924cbf40a01b217c4374a92b7b1c3ffbd02e 100644
--- a/objectstore/Sorter.hpp
+++ b/objectstore/Sorter.hpp
@@ -36,6 +36,7 @@
 #include "Algorithms.hpp"
 #include "ArchiveQueueAlgorithms.hpp"
 #include "RetrieveQueueAlgorithms.hpp"
+#include "SorterArchiveJob.hpp"
 
 namespace cta { namespace objectstore {  
   
@@ -57,26 +58,6 @@ public:
   typedef std::map<std::tuple<std::string, JobQueueType>, std::list<std::shared_ptr<ArchiveJobQueueInfo>>> MapArchive;
   typedef std::map<std::tuple<std::string, JobQueueType>, std::list<std::shared_ptr<RetrieveJobQueueInfo>>> MapRetrieve;
   
-  /**
-   * This structure holds the necessary data to queue a job taken from the ArchiveRequest that needs to be queued.
-   */
-  struct ArchiveJob{
-    std::shared_ptr<ArchiveRequest> archiveRequest;
-    ArchiveRequest::JobDump jobDump;
-    common::dataStructures::ArchiveFile archiveFile;
-    AgentReferenceInterface * previousOwner;
-    common::dataStructures::MountPolicy mountPolicy;
-    cta::objectstore::JobQueueType jobQueueType;
-  };
-  
-  /**
-   * This structure holds the datas the user have to 
-   * give to insert an ArchiveRequest without any fetch needed on the Request
-   */
-  struct SorterArchiveRequest{
-    std::list<ArchiveJob> archiveJobs;
-  };
-  
   /* Archive-related methods */
   /**
    * This method allows to insert the ArchiveRequest passed in parameter into the sorter.
@@ -221,7 +202,7 @@ private:
   /* Archive-related methods */
   void queueArchiveRequests(const std::string tapePool, const JobQueueType jobQueueType, std::list<std::shared_ptr<ArchiveJobQueueInfo>>& requests, log::LogContext &lc);
   
-  void insertArchiveJob(const ArchiveJob& job); 
+  void insertArchiveJob(const SorterArchiveJob& job); 
   
   void dispatchArchiveAlgorithm(const std::string tapePool, const JobQueueType jobQueueType, std::string& queueAddress, std::list<std::shared_ptr<ArchiveJobQueueInfo>>& archiveJobsInfos, log::LogContext &lc);
 
@@ -231,7 +212,7 @@ private:
 };
 
 struct ArchiveJobQueueInfo{
-  std::tuple<Sorter::ArchiveJob,std::promise<void>> jobToQueue;
+  std::tuple<SorterArchiveJob,std::promise<void>> jobToQueue;
   //TODO : Job reporting
 };
 
diff --git a/objectstore/SorterArchiveJob.hpp b/objectstore/SorterArchiveJob.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9f22c5ea4218301c278e035d521952f6cde480d
--- /dev/null
+++ b/objectstore/SorterArchiveJob.hpp
@@ -0,0 +1,47 @@
+/**
+ * The CERN Tape Archive (CTA) project
+ * Copyright © 2018 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SORTERARCHIVEJOB_HPP
+#define SORTERARCHIVEJOB_HPP
+#include "ArchiveRequest.hpp"
+
+namespace cta { namespace objectstore {
+/**
+ * This structure holds the necessary data to queue a job taken from the ArchiveRequest that needs to be queued.
+ */
+struct SorterArchiveJob{
+  std::shared_ptr<ArchiveRequest> archiveRequest;
+  ArchiveRequest::JobDump jobDump;
+  common::dataStructures::ArchiveFile archiveFile;
+  AgentReferenceInterface * previousOwner;
+  common::dataStructures::MountPolicy mountPolicy;
+  cta::objectstore::JobQueueType jobQueueType;
+};
+  
+/**
+ * This structure holds the datas the user have to 
+ * give to insert an ArchiveRequest without any fetch needed on the Request
+ */
+struct SorterArchiveRequest{
+  std::list<SorterArchiveJob> archiveJobs;
+};
+
+
+}} 
+#endif /* SORTERARCHIVEJOB_HPP */
+
diff --git a/objectstore/SorterTest.cpp b/objectstore/SorterTest.cpp
index 2fbf55b12c3b318fa1563624207a3a3a835f12d4..13f63a0e61f544fd59b40d3dd6ac0bbf94805e43 100644
--- a/objectstore/SorterTest.cpp
+++ b/objectstore/SorterTest.cpp
@@ -80,8 +80,7 @@ TEST(ObjectStore,SorterInsertArchiveRequest){
   cta::common::dataStructures::ArchiveFile aFile;
   aFile.archiveFileID = 123456789L;
   aFile.diskFileId = "eos://diskFile";
-  aFile.checksumType = "checksumType";
-  aFile.checksumValue = "checksumValue";
+  aFile.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   aFile.creationTime = 0;
   aFile.reconciliationTime = 0;
   aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -98,7 +97,7 @@ TEST(ObjectStore,SorterInsertArchiveRequest){
   ar.setMountPolicy(mp);
   ar.setArchiveReportURL("");
   ar.setArchiveErrorReportURL("");
-  ar.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+  ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
   ar.setSrcURL("root://eoseos/myFile");
   ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
   ar.insert();
@@ -111,7 +110,7 @@ TEST(ObjectStore,SorterInsertArchiveRequest){
   atfrl.release();
   //Get the future
   cta::objectstore::Sorter::MapArchive allArchiveJobs = sorter.getAllArchive();
-  std::list<std::tuple<cta::objectstore::Sorter::ArchiveJob,std::future<void>>> allFutures;
+  std::list<std::tuple<cta::objectstore::SorterArchiveJob,std::future<void>>> allFutures;
   
   for(auto& kv: allArchiveJobs){
     for(auto& job: kv.second){
@@ -145,8 +144,7 @@ TEST(ObjectStore,SorterInsertArchiveRequest){
       ASSERT_EQ(archiveFile.archiveFileID,aFile.archiveFileID);
 
       ASSERT_EQ(archiveFile.diskFileId,aFile.diskFileId);
-      ASSERT_EQ(archiveFile.checksumType,aFile.checksumType);
-      ASSERT_EQ(archiveFile.checksumValue,aFile.checksumValue);
+      ASSERT_EQ(archiveFile.checksumBlob,aFile.checksumBlob);
       ASSERT_EQ(archiveFile.creationTime,aFile.creationTime);
       ASSERT_EQ(archiveFile.reconciliationTime,aFile.reconciliationTime);
       ASSERT_EQ(archiveFile.diskFileInfo,aFile.diskFileInfo);
@@ -175,8 +173,7 @@ TEST(ObjectStore,SorterInsertArchiveRequest){
       ASSERT_EQ(archiveFile.archiveFileID,aFile.archiveFileID);
 
       ASSERT_EQ(archiveFile.diskFileId,aFile.diskFileId);
-      ASSERT_EQ(archiveFile.checksumType,aFile.checksumType);
-      ASSERT_EQ(archiveFile.checksumValue,aFile.checksumValue);
+      ASSERT_EQ(archiveFile.checksumBlob,aFile.checksumBlob);
       ASSERT_EQ(archiveFile.creationTime,aFile.creationTime);
       ASSERT_EQ(archiveFile.reconciliationTime,aFile.reconciliationTime);
       ASSERT_EQ(archiveFile.diskFileInfo,aFile.diskFileInfo);
@@ -231,8 +228,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequest){
   cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
   rqc.archiveFile.archiveFileID = 123456789L;
   rqc.archiveFile.diskFileId = "eos://diskFile";
-  rqc.archiveFile.checksumType = "";
-  rqc.archiveFile.checksumValue = "";
+  rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
   rqc.archiveFile.creationTime = 0;
   rqc.archiveFile.reconciliationTime = 0;
   rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -242,8 +238,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequest){
   {
     cta::common::dataStructures::TapeFile tf;
     tf.blockId=0;
-    tf.compressedSize=1;
-    tf.compressedSize=1;
+    tf.fileSize=1;
     tf.copyNb=1;
     tf.creationTime=time(nullptr);
     tf.fSeq=2;
@@ -253,8 +248,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequest){
   {
     cta::common::dataStructures::TapeFile tf;
     tf.blockId=0;
-    tf.compressedSize=2;
-    tf.compressedSize=2;
+    tf.fileSize=2;
     tf.copyNb=2;
     tf.creationTime=time(nullptr);
     tf.fSeq=2;
@@ -319,8 +313,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequest){
     cta::common::dataStructures::ArchiveFile aFile = elt.archiveFile;
     ASSERT_EQ(aFile.archiveFileID,rqc.archiveFile.archiveFileID);
     ASSERT_EQ(aFile.diskFileId,rqc.archiveFile.diskFileId);
-    ASSERT_EQ(aFile.checksumType,rqc.archiveFile.checksumType);
-    ASSERT_EQ(aFile.checksumValue,rqc.archiveFile.checksumValue);
+    ASSERT_EQ(aFile.checksumBlob,rqc.archiveFile.checksumBlob);
     ASSERT_EQ(aFile.creationTime,rqc.archiveFile.creationTime);
     ASSERT_EQ(aFile.reconciliationTime,rqc.archiveFile.reconciliationTime);
     ASSERT_EQ(aFile.diskFileInfo,rqc.archiveFile.diskFileInfo);
@@ -369,15 +362,14 @@ TEST(ObjectStore,SorterInsertRetrieveRequest){
     cta::common::dataStructures::ArchiveFile aFile = elt.archiveFile;
     ASSERT_EQ(aFile.archiveFileID,rqc.archiveFile.archiveFileID);
     ASSERT_EQ(aFile.diskFileId,rqc.archiveFile.diskFileId);
-    ASSERT_EQ(aFile.checksumType,rqc.archiveFile.checksumType);
-    ASSERT_EQ(aFile.checksumValue,rqc.archiveFile.checksumValue);
+    ASSERT_EQ(aFile.checksumBlob,rqc.archiveFile.checksumBlob);
     ASSERT_EQ(aFile.creationTime,rqc.archiveFile.creationTime);
     ASSERT_EQ(aFile.reconciliationTime,rqc.archiveFile.reconciliationTime);
     ASSERT_EQ(aFile.diskFileInfo,rqc.archiveFile.diskFileInfo);
     ASSERT_EQ(aFile.fileSize,rqc.archiveFile.fileSize);
     ASSERT_EQ(aFile.storageClass,rqc.archiveFile.storageClass);
     ASSERT_EQ(elt.copyNb,2);
-    ASSERT_EQ(elt.archiveFile.tapeFiles.at(2).compressedSize,2);
+    ASSERT_EQ(elt.archiveFile.tapeFiles.at(2).fileSize,2);
     ASSERT_EQ(elt.bytes,1000);
     ASSERT_EQ(elt.reportType,cta::SchedulerDatabase::RetrieveJob::ReportType::NoReportRequired);
     ASSERT_EQ(elt.rr.archiveFileID,aFile.archiveFileID);
@@ -446,8 +438,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
   rqc.archiveFile.archiveFileID = 1L;
   rqc.archiveFile.diskFileId = "eos://diskFile1";
-  rqc.archiveFile.checksumType = "";
-  rqc.archiveFile.checksumValue = "";
+  rqc.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
   rqc.archiveFile.creationTime = 0;
   rqc.archiveFile.reconciliationTime = 0;
   rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -457,8 +448,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   {
     cta::common::dataStructures::TapeFile tf;
     tf.blockId=0;
-    tf.compressedSize=1;
-    tf.compressedSize=1;
+    tf.fileSize=1;
     tf.copyNb=1;
     tf.creationTime=time(nullptr);
     tf.fSeq=1;
@@ -490,8 +480,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   cta::common::dataStructures::RetrieveFileQueueCriteria rqc2;
   rqc2.archiveFile.archiveFileID = 2L;
   rqc2.archiveFile.diskFileId = "eos://diskFile2";
-  rqc2.archiveFile.checksumType = "";
-  rqc2.archiveFile.checksumValue = "";
+  rqc2.archiveFile.checksumBlob.insert(cta::checksum::NONE, "");
   rqc2.archiveFile.creationTime = 0;
   rqc2.archiveFile.reconciliationTime = 0;
   rqc2.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -501,8 +490,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   {
     cta::common::dataStructures::TapeFile tf;
     tf.blockId=0;
-    tf.compressedSize=1;
-    tf.compressedSize=1;
+    tf.fileSize=1;
     tf.copyNb=2;
     tf.creationTime=time(nullptr);
     tf.fSeq=2;
@@ -569,8 +557,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   cta::common::dataStructures::ArchiveFile aFile;
   aFile.archiveFileID = 3L;
   aFile.diskFileId = "eos://diskFile";
-  aFile.checksumType = "checksumType";
-  aFile.checksumValue = "checksumValue";
+  aFile.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   aFile.creationTime = 0;
   aFile.reconciliationTime = 0;
   aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -583,7 +570,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   ar.setMountPolicy(mp);
   ar.setArchiveReportURL("");
   ar.setArchiveErrorReportURL("");
-  ar.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+  ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
   ar.setSrcURL("root://eoseos/myFile");
   ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
   ar.insert();
@@ -595,8 +582,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   cta::common::dataStructures::ArchiveFile aFile2;
   aFile2.archiveFileID = 4L;
   aFile2.diskFileId = "eos://diskFile";
-  aFile2.checksumType = "checksumType";
-  aFile2.checksumValue = "checksumValue";
+  aFile2.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   aFile2.creationTime = 0;
   aFile2.reconciliationTime = 0;
   aFile2.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -610,7 +596,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   ar2.setMountPolicy(mp);
   ar2.setArchiveReportURL("");
   ar2.setArchiveErrorReportURL("");
-  ar2.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+  ar2.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
   ar2.setSrcURL("root://eoseos/myFile");
   ar2.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
   ar2.insert();
@@ -634,7 +620,7 @@ TEST(ObjectStore,SorterInsertDifferentTypesOfRequests){
   }
   
   cta::objectstore::Sorter::MapArchive allArchiveJobs = sorter.getAllArchive();
-  std::list<std::tuple<cta::objectstore::Sorter::ArchiveJob,std::future<void>>> allFuturesArchive;
+  std::list<std::tuple<cta::objectstore::SorterArchiveJob,std::future<void>>> allFuturesArchive;
   ASSERT_EQ(allArchiveJobs.size(),2);
   for(auto& kv: allArchiveJobs){
     for(auto& job: kv.second){
@@ -757,8 +743,7 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
   cta::common::dataStructures::ArchiveFile aFile;
   aFile.archiveFileID = 3L;
   aFile.diskFileId = "eos://diskFile";
-  aFile.checksumType = "checksumType";
-  aFile.checksumValue = "checksumValue";
+  aFile.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   aFile.creationTime = 0;
   aFile.reconciliationTime = 0;
   aFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -772,17 +757,17 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
   ar.setMountPolicy(mp);
   ar.setArchiveReportURL("");
   ar.setArchiveErrorReportURL("");
-  ar.setRequester(cta::common::dataStructures::UserIdentity("user0", "group0"));
+  ar.setRequester(cta::common::dataStructures::RequesterIdentity("user0", "group0"));
   ar.setSrcURL("root://eoseos/myFile");
   ar.setEntryLog(cta::common::dataStructures::EntryLog("user0", "host0", time(nullptr)));
   ar.insert();
 
   
   
-  Sorter::SorterArchiveRequest request;
-  std::list<Sorter::ArchiveJob>& jobs = request.archiveJobs;
+  SorterArchiveRequest request;
+  std::list<SorterArchiveJob>& jobs = request.archiveJobs;
   jobs.emplace_back();
-  Sorter::ArchiveJob& job1 = jobs.back();
+  SorterArchiveJob& job1 = jobs.back();
   job1.archiveRequest = std::make_shared<cta::objectstore::ArchiveRequest>(ar);
   job1.archiveFile = aFile;
   job1.jobDump.copyNb = 1;
@@ -795,7 +780,7 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
   job1.previousOwner = &agentRef;
   
   jobs.emplace_back();
-  Sorter::ArchiveJob& job2 = jobs.back();
+  SorterArchiveJob& job2 = jobs.back();
   job2.archiveRequest = std::make_shared<cta::objectstore::ArchiveRequest>(ar);
   job2.archiveFile = aFile;
   job2.jobDump.copyNb = 2;
@@ -811,7 +796,7 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
   sorter.insertArchiveRequest(request,agentRef,lc);
   
   cta::objectstore::Sorter::MapArchive allArchiveJobs = sorter.getAllArchive();
-  std::list<std::tuple<cta::objectstore::Sorter::ArchiveJob,std::future<void>>> allFuturesArchive;
+  std::list<std::tuple<cta::objectstore::SorterArchiveJob,std::future<void>>> allFuturesArchive;
   ASSERT_EQ(allArchiveJobs.size(),1);
   for(auto& kv: allArchiveJobs){
     for(auto& job: kv.second){
@@ -840,8 +825,7 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
     
     ASSERT_EQ(elt.copyNb,1);
     ASSERT_EQ(elt.archiveFile.archiveFileID,3L);
-    ASSERT_EQ(elt.archiveFile.checksumType,"checksumType");
-    ASSERT_EQ(elt.archiveFile.checksumValue,"checksumValue");
+    ASSERT_EQ(elt.archiveFile.checksumBlob,cta::checksum::ChecksumBlob(cta::checksum::ADLER32, "1234"));
     ASSERT_EQ(elt.archiveFile.diskInstance,"eoseos");
     ASSERT_EQ(elt.archiveFile.diskFileId,"eos://diskFile");
     
@@ -849,8 +833,7 @@ TEST(ObjectStore,SorterInsertArchiveRequestNotFetched){
     
     ASSERT_EQ(elt2.copyNb,2);
     ASSERT_EQ(elt2.archiveFile.archiveFileID,3L);
-    ASSERT_EQ(elt2.archiveFile.checksumType,"checksumType");
-    ASSERT_EQ(elt2.archiveFile.checksumValue,"checksumValue");
+    ASSERT_EQ(elt2.archiveFile.checksumBlob,cta::checksum::ChecksumBlob(cta::checksum::ADLER32, "1234"));
     ASSERT_EQ(elt.archiveFile.diskInstance,"eoseos");
     ASSERT_EQ(elt.archiveFile.diskFileId,"eos://diskFile");
   }
@@ -903,8 +886,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequestNotFetched){
   cta::common::dataStructures::RetrieveFileQueueCriteria rqc;
   rqc.archiveFile.archiveFileID = 1L;
   rqc.archiveFile.diskFileId = "eos://diskFile";
-  rqc.archiveFile.checksumType = "checksumType";
-  rqc.archiveFile.checksumValue = "checksumValue";
+  rqc.archiveFile.checksumBlob.insert(cta::checksum::ADLER32, "1234");
   rqc.archiveFile.creationTime = 0;
   rqc.archiveFile.reconciliationTime = 0;
   rqc.archiveFile.diskFileInfo = cta::common::dataStructures::DiskFileInfo();
@@ -914,8 +896,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequestNotFetched){
   {
     cta::common::dataStructures::TapeFile tf;
     tf.blockId=0;
-    tf.compressedSize=1;
-    tf.compressedSize=1;
+    tf.fileSize=1;
     tf.copyNb=1;
     tf.creationTime=time(nullptr);
     tf.fSeq=1;
@@ -988,8 +969,7 @@ TEST(ObjectStore,SorterInsertRetrieveRequestNotFetched){
     ASSERT_EQ(elt.copyNb,1);
     ASSERT_EQ(elt.archiveFile.tapeFiles.at(1).vid,"Tape0");
     ASSERT_EQ(elt.archiveFile.tapeFiles.at(1).fSeq,1);
-    ASSERT_EQ(elt.archiveFile.checksumValue,"checksumValue");
-    ASSERT_EQ(elt.archiveFile.checksumType,"checksumType");
+    ASSERT_EQ(elt.archiveFile.checksumBlob,cta::checksum::ChecksumBlob(cta::checksum::ADLER32, "1234"));
     ASSERT_EQ(elt.archiveFile.diskInstance,"eoseos");
     ASSERT_EQ(elt.archiveFile.fileSize,1000);
     ASSERT_EQ(elt.archiveFile.storageClass,"sc");
@@ -997,4 +977,4 @@ TEST(ObjectStore,SorterInsertRetrieveRequestNotFetched){
   
 }
 
-}
\ No newline at end of file
+}
diff --git a/objectstore/TapeFileSerDeser.hpp b/objectstore/TapeFileSerDeser.hpp
index 3fb283613df0bb5dfd7cb8b3cd9579cd846558df..a7150e5a9bd614b55d5c1af98ef87e65cd402e7e 100644
--- a/objectstore/TapeFileSerDeser.hpp
+++ b/objectstore/TapeFileSerDeser.hpp
@@ -18,7 +18,6 @@
 
 #pragma once
 
-#include "common/UserIdentity.hpp"
 #include "objectstore/cta.pb.h"
 #include "common/dataStructures/TapeFile.hpp"
 #include "EntryLogSerDeser.hpp"
@@ -42,22 +41,20 @@ public:
     ostf.set_vid(vid);
     ostf.set_fseq(fSeq);
     ostf.set_blockid(blockId);
-    ostf.set_compressedsize(compressedSize);
+    ostf.set_filesize(fileSize);
     ostf.set_copynb(copyNb);
     ostf.set_creationtime(creationTime);
-    ostf.set_checksumtype(checksumType);
-    ostf.set_checksumvalue(checksumValue);
+    ostf.set_checksumblob(checksumBlob.serialize());
   }
   
   void deserialize (const cta::objectstore::serializers::TapeFile & ostf) {
     vid=ostf.vid();
     fSeq=ostf.fseq();
     blockId=ostf.blockid();
-    compressedSize=ostf.compressedsize();
+    fileSize=ostf.filesize();
     copyNb=ostf.copynb();
     creationTime=ostf.creationtime();
-    checksumType=ostf.checksumtype();
-    checksumValue=ostf.checksumvalue();
+    checksumBlob.deserialize(ostf.checksumblob());
   }
 };
   
diff --git a/objectstore/cta.proto b/objectstore/cta.proto
index fe020386077c930f70109d62f369e6664f0e7afc..1c90c23a9a99d4e88ae2a7dbf5fdbc2723670a6d 100644
--- a/objectstore/cta.proto
+++ b/objectstore/cta.proto
@@ -65,8 +65,8 @@ message GenericObject {
 // ===========================  Root Entry =====================================
 // The objects making up the root entry.
 
-// A user information record
-message UserIdentity {
+// User information record for requesters
+message RequesterIdentity {
   required string name = 10;
   required string group = 11;
 }
@@ -178,17 +178,20 @@ message TapeFile {
   required string vid = 9120;
   required uint64 fseq = 9121;
   required uint64 blockid = 9122;
-  required uint64 compressedsize = 9123;
+  required uint64 filesize = 9123;
   required uint32 copynb = 9124;
   required uint64 creationtime = 9125;
-  required string checksumtype = 9126;
-  required string checksumvalue = 9127;
+  //required string checksumtype = 9126;     DEPRECATED
+  //required string checksumvalue = 9127;    DEPRECATED
+  required bytes checksumblob = 9128;
 }
 
 message DiskFileInfo {
-  required bytes recoveryblob = 8900;
-  required string group = 8910;
-  required string owner = 8930;
+  //required bytes recoveryblob = 8900;      DEPRECATED
+  //required string group = 8910;            DEPRECATED
+  //required string owner = 8930;            DEPRECATED
+  required uint32 owner_uid = 8920;
+  required uint32 gid = 8925;
   required string path = 8940;
 }
 
@@ -198,8 +201,9 @@ message ArchiveFile {
   required string diskfileid = 4353;
   required string diskinstance= 4354;
   required DiskFileInfo diskfileinfo= 4355;
-  required string checksumtype = 4356;
-  required string checksumvalue = 4357;
+  //required string checksumtype = 4356;     DEPRECATED
+  //required string checksumvalue = 4357;    DEPRECATED
+  required bytes checksumblob = 4362;
   required uint64 creationtime = 4358;
   repeated TapeFile tapefiles = 4359;
   required uint64 reconciliationtime = 4360;
@@ -330,8 +334,9 @@ message ArchiveRequestRepackInfo {
 message ArchiveRequest {
   required uint64 archivefileid = 8990;
   required MountPolicy mountpolicy = 8995;
-  required string checksumtype = 9000;
-  required string checksumvalue = 9010;
+  //required string checksumtype = 9000;     DEPRECATED
+  //required string checksumvalue = 9010;    DEPRECATED
+  required bytes checksumblob = 9011;
   required uint64 creationtime = 9015;
   required uint64 reconcilationtime = 9017;
   required DiskFileInfo diskfileinfo = 9040;
@@ -340,7 +345,7 @@ message ArchiveRequest {
   required string archivereporturl = 9057;
   required string archiveerrorreporturl = 9058;
   required uint64 filesize = 9060;
-  required UserIdentity requester = 9070;
+  required RequesterIdentity requester = 9070;
   required string srcurl = 9080;
   required string storageclass = 9090;
   required EntryLog creationlog = 9091;
@@ -368,7 +373,7 @@ enum RetrieveJobStatus {
 }
 
 message SchedulerRetrieveRequest {
-  required UserIdentity requester = 9100;
+  required RequesterIdentity requester = 9100;
   required uint64 ArchiveFileId = 9101;
   required string dstURL = 9102;
   required DiskFileInfo diskfileinfo = 9103;
@@ -576,6 +581,7 @@ message RepackRequest {
   required uint64 archivedbytes = 11510;
   required uint64 failedtoretrievefiles = 11520;
   required uint64 failedtoretrievebytes = 11530;
+  required uint64 failedtocreatearchivereq = 11535;
   required uint64 failedtoarchivefiles = 11540;
   required uint64 failedtoarchivebytes = 11550;
   required uint64 lastexpandedfseq = 11560;
@@ -584,6 +590,7 @@ message RepackRequest {
   //the expansion of the RepackRequest is done or not
   required bool is_expand_finished = 11561;
   required bool is_expand_started = 11562;
+  required MountPolicy mount_policy = 11563;
   repeated RepackSubRequestPointer subrequests = 11570;
 }
 
diff --git a/operations/tape/supply-pool-refill-mechanism b/operations/tape/supply-pool-refill-mechanism
deleted file mode 100755
index 3f8ca6d50d360f258eb9cc92c1d359bcb95081ba..0000000000000000000000000000000000000000
--- a/operations/tape/supply-pool-refill-mechanism
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/python36
-
-# The CERN Tape Archive (CTA) project
-# Copyright (C) 2019  CERN
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import json
-import logging
-from random import shuffle
-from subprocess import STDOUT, check_output, CalledProcessError
-
-##########
-#
-# CONFIGURATION
-#
-##########
-
-timeout = 10	# how many seconds to wait for the external command to finish
-separator = ","	# characted which separates multiple tape pools in the CTA "supply" column
-
-# Configure logging
-logfile = sys.argv[0] + ".log"
-logging.basicConfig (
-    level    = logging.DEBUG,				# only messages with log level above this are logged
-    format   = "[%(levelname)-5.5s] %(message)s",	# format of the logging (can be very complex - see the manual)
-    handlers = [
-        logging.FileHandler(logfile), 			# log to a logfile
-        logging.StreamHandler()				# log to console STDOUT
-    ])
-
-##########
-#
-# FUNCTIONS
-#
-##########
-
-# Extract eligible (not DISABLED, not FULL and NOT in a DISABLED library) tapes from a given tape pool
-def extract_tapes(tapelibrary, tapepool):
-    tapes = []
-    command = ["/usr/bin/cta-admin", "--json", "tape", "ls", "--tapepool", tapepool, "--disabled", "false", "--full", "false"]
-    if ((tapelibrary) and (tapelibrary != "ALL")):
-        command.extend(["--logicallibrary", tapelibrary])
-    try:
-        logging.debug("Executing command " + format(command) + " with timeout of " + format(timeout) + " seconds")
-        output = check_output(command, stderr = STDOUT, timeout = timeout).decode("UTF-8")
-    except Exception as error:
-        logging.error(format(error))
-        sys.exit(-1)
-
-    if output == "]": output = "" # TEMPORARY UNTIL --json FIXED !!!
-    if output:
-        tapes = [line["vid"] for line in json.loads(output)]
-    else:
-        logging.warn("No eligible tapes extracted from the tape pool: " + tapepool)
-
-    return tapes
-
-##########
-#
-# MAIN
-#
-##########
-
-# Extract the list of all tape pools in CTA in JSON format
-command = ["/usr/bin/cta-admin", "--json", "tapepool", "ls"]
-try:
-    logging.debug("Executing command " + format(command) + " with timeout of " + format(timeout) + " seconds")
-    output = check_output(command, stderr = STDOUT, timeout = timeout).decode("UTF-8")
-except Exception as error:
-    logging.error(format(error))
-    sys.exit(-1)
-if output:
-    tapepools = json.loads(output)
-else:
-    logging.error("List of CTA tape pools is empty, nothing to do, exiting")
-    sys.exit(0)
-
-# Extract the list of DISABLED tape libraries
-# LATER as --json not yet implemented
-
-#logging.debug("List of extracted tape pools from CTA:\n" + format(tapepools))
-
-# Iterate over the extracted CTA tape pools and re-fill them with supply tapes as needed
-for tapepool in tapepools:
-    logging.info("Tape pool: " + tapepool["name"] + " which should have at least: " + tapepool["numPartialTapes"] + " partial tape(s) is supplied from: " + tapepool["supply"])
-
-    if (tapepool["numPartialTapes"] and tapepool["supply"]):
-
-        # Check if re-filling is actually needed
-        currentpartialtapes = len([tape for tape in extract_tapes("ALL", tapepool["name"])])
-        if (currentpartialtapes < int(tapepool["numPartialTapes"])):
-            logging.info("Tape pool: " + tapepool["name"] + " only has: " + format(currentpartialtapes) + " partial tape(s) available, re-filling")
-        else:
-            logging.info("Tape pool: " + tapepool["name"] + " already has: " + format(currentpartialtapes) + " partial tape(s) available, skipping")
-            continue
-
-        # Prepare the eligible supply tapes from a given supply pool(s)
-        supplytapes = []
-        for supplypool in tapepool["supply"].split(separator):
-            supplytapes.extend([tape for tape in extract_tapes("ALL", supplypool)])
-        shuffle(supplytapes) # Randomize it (so that tapes are picked at random from multiple pools)
-
-        # Move the required number of supply tapes to the given tape pool (if any eligible tapes were identified)
-        if len(supplytapes):
-            logging.info("Identified: " + format(len(supplytapes)) + " supply tapes, moving " + format(int(tapepool["numPartialTapes"]) - currentpartialtapes) + " to the pool: " + tapepool["name"])
-            for i in range(int(tapepool["numPartialTapes"]) - currentpartialtapes):
-                command = ["/usr/bin/cta-admin", "tape", "ch", "--vid", supplytapes[i], "--tapepool", tapepool["name"]]
-                try:
-                    logging.debug("Executing command " + format(command) + " with timeout of " + format(timeout) + " seconds")
-#                    output = check_output(command, stderr = STDOUT, timeout = timeout).decode("UTF-8")
-                except Exception as error:
-                    logging.error(format(error))
-                    sys.exit(-1)
-                logging.info("Tape: " + format(supplytapes[i]) + " moved to the pool: " + tapepool["name"])
-        else:
-            logging.warn("Unable to re-fill the tape pool: " + tapepool["name"] + ", no eligible supply tapes identified")
-
-    else:
-        logging.warn("Unable to re-fill the tape pool: " + tapepool["name"] + " because either the number of partial tapes: " + tapepool["numPartialTapes"] + " or the supply pool: " + tapepool["supply"] + " is not properly configured")
diff --git a/operations/tape/tape-config-generate b/operations/tape/tape-config-generate
deleted file mode 100755
index fcb5ff47998e6fe76b3e73c591a1ccab2e28fe0b..0000000000000000000000000000000000000000
--- a/operations/tape/tape-config-generate
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/perl -w
-#######################################################################
-#
-# This script will generate /etc/cta/TPCONFIG file with the data from
-# TOMS (Tape Operations Management System) URL:
-#
-# https://apex.cern.ch/pls/htmldb_castorns/f?p=toms_prod:250:163672298908022::NO::P250_TAPESERVER:HOSTNAME
-#
-# Vladimir Bahyl - 05/2019
-#
-#######################################################################
-
-use strict;
-use XML::DOM;
-use Sys::Hostname;
-use LWP::UserAgent;
-use LC::Check qw(file);
-
-#use Data::Dumper;
-
-my $today = localtime;
-
-my %TPCONFIG = ();
-my $hostname = '';
-
-my $tpconfigfile = '/etc/castor/TPCONFIG';
-my $tpconfig = "#######################################################################
-#
-# CTA Tape Server Configuration file
-#
-# This tape server is not configured.
-#
-#######################################################################
-#
-# Generated on $today by $0
-";
-
-my $changes = 0;
-
-($hostname = hostname()) =~ s/\.cern\.ch$//io;
-
-my $configUrl = 'https://apex.cern.ch/pls/htmldb_castorns/f?p=toms_prod:250:163672298908022::NO::P250_TAPESERVER:HOSTNAME';
-die ("$0: missing configuration URL") unless ($configUrl);
-$configUrl =~ s/HOSTNAME/$hostname/o;
-
-#
-# Fetch the data
-#
-print("$0: Fetching the data over HTTP from the Oracle APEX database ... please be patient ...\n");
-%TPCONFIG = &GetData($configUrl);
-
-#
-# Prepare the TPCONFIG file
-#
-my $i = 0;
-while (%TPCONFIG and defined($TPCONFIG{$i}{'tapeserver'}) and (lc($TPCONFIG{$i}{'tapeserver'}) eq lc($hostname))) {
-  $tpconfig = "#######################################################################
-#
-# CTA Tape Server Configuration file
-#
-# unit      device    system                control
-# name      group     device                method
-
-" if ($i == 0);
-
-  $tpconfig .= "$TPCONFIG{$i}{'tapedrive'}    $TPCONFIG{$i}{'devicegroup'}    $TPCONFIG{$i}{'unixdevice'}    $TPCONFIG{$i}{'controlmethod'}
-
-# Tape Drive Comment: $TPCONFIG{$i}{'tapedrivecomment'}
-# Tape Service Comment: $TPCONFIG{$i}{'tapeservicecomment'}
-# Modified by: $TPCONFIG{$i}{'modifuser'}
-# Modify date: $TPCONFIG{$i}{'modifdate'}
-
-";
-  $i++;
-}
-
-$tpconfig .= "#
-#######################################################################
-#
-# Generated on $today by $0
-" if (%TPCONFIG and (lc($TPCONFIG{0}{'tapeserver'}) eq lc($hostname)));
-
-# Change the TPCONFIG location if comment mentions CTA
-$tpconfigfile = '/etc/cta/TPCONFIG' if (%TPCONFIG and (defined $TPCONFIG{0}{'tapeservicecomment'}) and ($TPCONFIG{0}{'tapeservicecomment'} =~ /CTA/oi));
-
-#
-# Configure TPCONFIG and SSI files
-#
-$changes += &UpdateFile($tpconfigfile, $tpconfig);
-
-LC::Check::link('/etc/TPCONFIG', $tpconfigfile,
-  backup  => '.old',
-  nocheck => 1,
-  force   => 1
-);
-
-##########################################################################
-sub GetData {
-##########################################################################
-  my ($url) = @_;
-
-  my %TPCONFIG = ();
-
-  my $xmlParser = new XML::DOM::Parser;
-
-  # Download the XML formated configuration data
-  # Use cookies because of APEX (otherwise, nothing will be downloaded; redirection will not work)
-  my $UserAgent = LWP::UserAgent->new;
-  $UserAgent->cookie_jar({ file => undef });
-  my $xml = $UserAgent->get($url);
-
-  if (defined ($xml->content)) {
-    if ($xml->content =~/TAPESERVER/oi) {
-      my $xmlDoc = $xmlParser->parse($xml->content);
-
-      # pcitpdp39 ~ > lynx -source "http://oraweb.cern.ch/pls/cdbsqldev/web.show_tpconfig?p_tapeserver=tpsrv027&p_output=xml"
-      # <?xml version = '1.0'?>
-      # <TPCONFIGSEARCHLIST> <TAPESERVER NAME="tpsrv027" TAPEDRIVE="994B53A6" DEVICEGROUP="994BR5" UNIXDEVICE="/dev/nst0" DENSITY="200G" COMPRESSION="Y" INITSTATUS="DOWN" CONTROLMETHOD="acs0,3,10,6" MODEL="9940" ROBOTHOST="sunstk62" /> </TPCONFIGSEARCHLIST>
-
-      for my $i (0 .. ($xmlDoc->getElementsByTagName("TAPESERVER")->getLength())-1) {
-        $TPCONFIG{$i}{'tapeserver'}         = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("NAME");
-        $TPCONFIG{$i}{'tapedrive'}          = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("TAPEDRIVE");
-        $TPCONFIG{$i}{'devicegroup'}        = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("DEVICEGROUP");
-        $TPCONFIG{$i}{'unixdevice'}         = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("UNIXDEVICE");
-        $TPCONFIG{$i}{'initstatus'}         = lc($xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("INITSTATUS"));
-        $TPCONFIG{$i}{'controlmethod'}      = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("CONTROLMETHOD");
-        $TPCONFIG{$i}{'modifdate'}          = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("MODIFDATE");
-        $TPCONFIG{$i}{'modifuser'}          = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("MODIFUSER");
-        $TPCONFIG{$i}{'tapedrivecomment'}   = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("TAPEDRIVECOMMENT");
-        $TPCONFIG{$i}{'tapeservicecomment'} = $xmlDoc->getElementsByTagName("TAPESERVER")->item($i)->getAttribute("TAPESERVICECOMMENT");
-
-        warn("$0: database entry nr. ".($i+1)." missing tape server hostname\n")    unless ($TPCONFIG{$i}{'tapeserver'});
-        warn("$0: database entry nr. ".($i+1)." missing tape drive name\n")         unless ($TPCONFIG{$i}{'tapedrive'});
-        warn("$0: database entry nr. ".($i+1)." missing device group name\n")       unless ($TPCONFIG{$i}{'devicegroup'});
-        warn("$0: database entry nr. ".($i+1)." missing unix device\n")             unless ($TPCONFIG{$i}{'unixdevice'});
-        warn("$0: database entry nr. ".($i+1)." missing init status\n")             unless ($TPCONFIG{$i}{'initstatus'});
-        warn("$0: database entry nr. ".($i+1)." missing control method\n")          unless ($TPCONFIG{$i}{'controlmethod'});
-        warn("$0: database entry nr. ".($i+1)." missing the modification date\n")   unless ($TPCONFIG{$i}{'modifdate'});
-        warn("$0: database entry nr. ".($i+1)." missing user name\n")               unless ($TPCONFIG{$i}{'modifuser'});
-        print("$0: database entry nr. ".($i+1)." no tape service comment\n")        unless ($TPCONFIG{$i}{'tapeservicecomment'});
-        print("$0: database entry nr. ".($i+1)." no tape drive comment\n")          unless ($TPCONFIG{$i}{'tapedrivecomment'});
-      }
-
-      $xmlDoc->dispose;
-    } else {
-      warn("$0: URL $url is not returning any usable data for $hostname. This tape server will not be configured. Please check whether there is a tape drive assigned to this tape server.\n");
-    }
-  } else {
-    warn("$0: URL $url doesn't seem to work. There could be a problem with the Web server or the Oracle APEX database server.\n");
-  }
-
-  return %TPCONFIG;
-}
-
-##########################################################################
-sub UpdateFile {
-##########################################################################
-  my ($filename, $newcontent) = @_;
-
-  my $changes = 0;
-
-  if ((-f $filename) and (-r $filename) and (-s $filename)) {
-      # Check the content of the file and correct it if there are some differences
-      $changes += LC::Check::file($filename,
-      source => $filename,
-      owner  => 0,
-      group  => 0,
-      mode   => 0644,
-      backup => '.old',
-      code   => sub {
-        my($oldcontent) = @_;
-        return() unless $oldcontent;
-
-        (my $oldfile = $oldcontent) =~ s/^#.*$//gim; # remove lines with comments
-        $oldfile =~ s/^\s*$//gim;                    # remove empty lines
-
-        (my $newfile = $newcontent) =~ s/^#.*$//gim; # remove lines with comments
-        $newfile =~ s/^\s*$//gim;                    # remove empty lines
-
-        $oldcontent = $newcontent unless ($oldfile eq $newfile);
-
-        return($oldcontent);
-      }
-    );
-  } else {
-    # The file is missing, create a new one
-    $changes += LC::File::file_contents($filename, $newcontent);
-    print "$0: created new $filename\n";
-  }
-  die ("$0: error modifying $filename\n") unless (defined($changes));
-
-  return $changes;
-}
diff --git a/operations/tape/tape-devices-namer b/operations/tape/tape-devices-namer
deleted file mode 100755
index 708bea830bd59e1d3f8d4002363092e72fb6d892..0000000000000000000000000000000000000000
--- a/operations/tape/tape-devices-namer
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/python
-
-"""
-This script creates links to tape and medium changer devices.
-The association between tape and smc device is made based on
-the serial numbers stored in the TOMS DB.
-"""
-
-import re
-import os
-import sys
-import socket 
-import pprint
-import urllib2
-import optparse
-import cookielib
-import subprocess 
-pp = pprint.PrettyPrinter(width=200)
-
-#------------------------------------------------------------
-def mklink(dev, sn, drivename, type):
-    if not options.noaction and drivename is not None:
-        link = '/dev/' + type + '_' + drivename
-        subprocess.Popen(['/bin/ln', '-f',  '-s', dev, link]).wait()
-        print 'Created link', link
-    else:
-        print 'Cannot create link to ' + dev
-        print 'Drivename for serial number ' + sn + ' not found'
-
-#------------------------------------------------------------
-def fix_mismatch(mm_tape_dev, toms_drives, hostname):
-    #this fucntions assumes that there is only one mismatch,
-    #i.e. only one tape device with S/N not found in TOMS
-    #and only one drives in TOSM with S/N not found in the server.
-    l = []
-    cj = cookielib.CookieJar()
-    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
-    for d in toms_drives:
-        if d['match'] == 0:
-            #assigning drivename to the mismatched tape device 
-            mm_tape_dev['drivename'] = d['drivename']
-            #fixing the S/N in TOMS
-            tomsurl = 'http://castortapeweb.cern.ch/cgi-bin/serial-nr-update.cgi?tapedrive=' + d['drivename'] + '&tapeserver=' + hostname + '&serialnumber=' + mm_tape_dev['sn']
-            if options.debug: print 'Opening', tomsurl
-            try:
-                urlfh = opener.open(tomsurl)
-            except:
-                print 'Cannot open ' + tomsurl
-                
-            
-#------------------------------------------------------------
-#main
-#------------------------------------------------------------
-
-# options ---------------------------------------------------
-usage = "usage: %prog [options]"
-parser = optparse.OptionParser(usage)
-parser.add_option("-d", "--debug", action="store_true", dest="debug", help="print debug messages")
-parser.add_option("--noaction", action="store_true", dest="noaction", help="do nothing")
-(options, args) = parser.parse_args()
-
-tape_devices = []
-smc_devices = []
-
-# find tape and smc devices
-if options.debug: print 'Searching tape devices'
-try:
-    p = subprocess.Popen(['/usr/bin/lsscsi', '-g'], stdout=subprocess.PIPE)
-    p.wait()
-except:
-    print 'Cannot run lsscsi. Exit.'
-    sys.exit(0)
-    
-for line in p.stdout:
-    fields = line.split() 
-    scsi_address = fields[0][1:-1]
-    type = fields[1]
-    scsi_generic = fields.pop()
-    scsi_tape = fields.pop()
-    if type == 'tape':
-        tape_devices.append({'scsi_address' : scsi_address, 'scsi_generic' : scsi_generic, 'scsi_tape' : scsi_tape, 'sn' : None, 'drivename' : None})
-    if type == 'mediumx':
-        smc_devices.append({'scsi_address' : scsi_address, 'scsi_generic' : scsi_generic, 'scsi_tape' : scsi_tape, 'sn' : None, 'drivename' : None})
-
-ntpdev=len(tape_devices)
-
-if options.debug:
-    print 'tape_devices:'
-    pp.pprint(tape_devices)
-    print 'smc_devices:'
-    pp.pprint(smc_devices)
-
-# associate tape and smc devices
-if options.debug: print 'Coupling tape and smc devices (if any)'
-pairs = []
-for tapedev in tape_devices:
-    for smcdev in smc_devices:
-        if tapedev['scsi_address'][:-2] == smcdev['scsi_address'][:-2]:
-            pairs.append([tapedev, smcdev])
-if options.debug:
-    print 'pairs:'
-    pp.pprint(pairs)
-
-if len(tape_devices)>len(smc_devices) and len(smc_devices)>0:
-    print 'Number of control paths lower that number of drives'
-    sys.exit(0)
-
-
-# find the serial number of the tape device
-# sg_inq will not work if the /dev/sgX device is not reachable
-# sg_inq will not work if the /dev/nstY device is being used
-# run sg_inq against the nst dev so that if it is already being used we exit
-if options.debug: print 'Reading serial numbers from tape devices'
-
-for tapedev in tape_devices:
-    tapedn = '/dev/nst'+tapedev['scsi_tape'][-1]
-    p = subprocess.Popen(['/usr/bin/sg_inq', tapedn], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    if p.wait() != 0:
-        print 'Cannot run sg_inq on ' +  tapedn + '. Exit'
-        sys.exit(0)
-    else:
-        for line in p.stdout:
-            #reg = re.search('(?<=Unit serial number: )(\d+)', line)
-            if re.search('Unit serial number', line):
-                l = line.split(':')
-                tapedev['sn'] = l[1][1:-1]
-
-    if tapedev['sn'] is None:
-        print 'Could not extract the serial number from the output of sg_inq ' + tapedn
-        sys.exit(0)
-                
-if options.debug:
-    print 'tape_devices:'
-    pp.pprint(tape_devices)
-
-
-# search the drive names in toms by serial number
-toms_drives = []
-if options.debug: print 'Looking into TOMS for drive names'
-hostname = socket.gethostname().split('.')[0]
-tomsurl =  'https://apex.cern.ch/pls/htmldb_castorns/f?p=toms_prod:250:::NO::P250_TAPESERVER:' + hostname
-if options.debug: print 'Opening', tomsurl
-
-cj = cookielib.CookieJar()
-opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
-
-try:
-    urlfh = opener.open(tomsurl)
-except:
-    print 'Cannot open ' + tomsurl
-    sys.exit(0)
-
-for line in urlfh:
-
-    if options.debug: print line
-
-    if not re.search('TAPESERVER', line): continue
-
-    drivename, serialnumber = '', ''
-
-    l = line.split()
-    for item in l:
-        g = item.split('=')
-        if g[0] == 'CURRENTSERIALNR': serialnumber =  g[1][1:-1]
-        if g[0] == 'TAPEDRIVE': drivename =  g[1][1:-1]
-
-    if drivename == '':
-        print 'drive name for host ' + hostname + ' not found in TOMS'
-        sys.exit(0)
-
-    if serialnumber  == '':
-        print 'Serial number for drive', drivename, 'not found in TOMS'
-        #here we don't exit, in case of a signle mismatch we update the s/n
-
-    toms_drives.append({'drivename' : drivename, 'sn': serialnumber, 'match' : 0})
-        
-    for tapedev in tape_devices:
-        if tapedev['sn'] == serialnumber:
-            tapedev['drivename'] = drivename
-            for d in toms_drives:
-                if d['drivename'] == drivename: d['match'] = 1
-
-if options.debug:
-    print 'tape_devices:'
-    pp.pprint(tape_devices)
-    print 'toms_drives:'
-    pp.pprint(toms_drives)
-
-
-#Check how many S/N are missing.
-#1. If there is only one assume that the drive has been replaced and update the S/N in TOMS.
-#2. If there are more than one the script does nothing (new or changed drives/devices
-#   will not be configured (link not created).
-
-devs_mm_sn = 0
-mm_tape_dev = None
-for t in tape_devices:
-    if t['drivename'] is None:
-        devs_mm_sn += 1
-        mm_tape_dev = t
-
-if devs_mm_sn == 1:
-    print 'One S/N mismatch. Going to fix S/N in TOMS'
-    fix_mismatch(mm_tape_dev, toms_drives, hostname)
-elif devs_mm_sn == 0:
-    if options.debug: print 'No S/N mismatches'
-else:
-    if options.debug: print 'Too many S/N mismatches'
-    
-
-# created links
-if pairs == []:
-    # this is a SUN tape server
-    for tapedev in tape_devices:
-        tapedn = '/dev/nst'+tapedev['scsi_tape'][-1]
-        mklink(tapedn, tapedev['sn'], tapedev['drivename'], 'tape')
-else:
-    #this is a IBM tape server
-    for pair in pairs:
-        tapedev = pair[0]
-        tapedn = '/dev/nst'+tapedev['scsi_tape'][-1]
-        mklink(tapedn, tapedev['sn'], tapedev['drivename'], 'tape')
-        smcdev = pair[1]
-        mklink(smcdev['scsi_generic'], tapedev['sn'], tapedev['drivename'], 'smc')
-
-
-
diff --git a/python/eosfstgcd/CMakeLists.txt b/python/eosfstgcd/CMakeLists.txt
index 781060864fd6a9b48dcf2246171619f663dddf38..9427af1e16ccf04d02d3b7fc103d03baa7440fb2 100644
--- a/python/eosfstgcd/CMakeLists.txt
+++ b/python/eosfstgcd/CMakeLists.txt
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 cmake_minimum_required (VERSION 2.6)
 
-install (PROGRAMS ctafstgcd.py DESTINATION usr/bin/cta-fst-gcd)
+install (PROGRAMS ctafstgcd.py DESTINATION usr/bin RENAME cta-fst-gcd)
 install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-fst-gcd.1cta DESTINATION /usr/share/man/man1)
 install (FILES cta-fst-gcd.service DESTINATION /etc/systemd/system)
 install (FILES cta-fst-gcd.conf.example DESTINATION /etc/cta)
diff --git a/rdbms/CMakeLists.txt b/rdbms/CMakeLists.txt
index c6e926561262358a0dc6080292cd491f74d97f8e..a3fa9ad9df1423869bfd41044a83b795f10f201a 100644
--- a/rdbms/CMakeLists.txt
+++ b/rdbms/CMakeLists.txt
@@ -45,6 +45,7 @@ set(RDBMS_UNIT_TESTS_LIB_SRC_FILES
   ConnPoolTest.cpp
   LoginTest.cpp
   RdbmsTest.cpp
+  RsetTest.cpp
   StmtPoolTest.cpp)
 
 add_library (ctardbmsunittests SHARED
diff --git a/rdbms/Conn.hpp b/rdbms/Conn.hpp
index b9849a9b4518d8655a141a5869e5ced3de4dcaf3..4d3f6b140c6c8e0379a1477e6c0fec6bd6ce3efa 100644
--- a/rdbms/Conn.hpp
+++ b/rdbms/Conn.hpp
@@ -209,6 +209,15 @@ public:
    */
   std::list<std::string> getTriggerNames();
 
+  /**
+   * Get a pointer to the connection wrapper implementation
+   *
+   * Required for Postgres PQescapeByteaConn()
+   */
+  wrapper::ConnWrapper *getConnWrapperPtr() {
+    return m_connAndStmts->conn.get();
+  }
+
 private:
 
   /**
diff --git a/rdbms/InvalidResultSet.hpp b/rdbms/InvalidResultSet.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a6a431e0052f4957555a9e1bd90af116fc7879a
--- /dev/null
+++ b/rdbms/InvalidResultSet.hpp
@@ -0,0 +1,50 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+namespace rdbms {
+
+/**
+ * Invalid result set.
+ */
+class InvalidResultSet : public cta::exception::Exception {
+public:
+      
+  /**
+   * Constructor
+   *
+   * @param context optional context string added to the message
+   * at initialisation time.
+   * @param embedBacktrace whether to embed a backtrace of where the
+   * exception was throw in the message
+   */
+  InvalidResultSet(const std::string &context = "", const bool embedBacktrace = true):
+    Exception(context, embedBacktrace) {}
+
+  /**
+   * Destructor.
+   */
+  virtual ~InvalidResultSet() {}
+}; // class InvalidResultSet
+      
+} // namespace rdbms
+} // namespace cta
diff --git a/rdbms/Rset.cpp b/rdbms/Rset.cpp
index f811cb387357b10371aed2aac73cb8b8163d51f9..7950e08c95560c5c804cd4b667dc7f3095efe783 100644
--- a/rdbms/Rset.cpp
+++ b/rdbms/Rset.cpp
@@ -16,6 +16,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include "rdbms/InvalidResultSet.hpp"
 #include "rdbms/NullDbValue.hpp"
 #include "rdbms/Rset.hpp"
 #include "rdbms/wrapper/RsetWrapper.hpp"
@@ -57,13 +58,23 @@ Rset &Rset::operator=(Rset &&rhs) {
   return *this;
 }
 
+//------------------------------------------------------------------------------
+// columnBlob
+//------------------------------------------------------------------------------
+std::string Rset::columnBlob(const std::string &colName) const {
+  if(nullptr == m_impl) {
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
+  }
+  return m_impl->columnBlob(colName);
+}
+
 //------------------------------------------------------------------------------
 // columnString
 //------------------------------------------------------------------------------
 std::string Rset::columnString(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw exception::Exception("This result set is invalid");
+      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
     }
 
     const optional<std::string> col = columnOptionalString(colName);
@@ -83,7 +94,7 @@ std::string Rset::columnString(const std::string &colName) const {
 uint64_t Rset::columnUint64(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw exception::Exception("This result set is invalid");
+      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
     }
 
     const optional<uint64_t> col = columnOptionalUint64(colName);
@@ -103,7 +114,7 @@ uint64_t Rset::columnUint64(const std::string &colName) const {
 bool Rset::columnBool(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw exception::Exception("This result set is invalid");
+      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
     }
 
     const optional<bool> col = columnOptionalBool(colName);
@@ -123,7 +134,7 @@ bool Rset::columnBool(const std::string &colName) const {
 optional<bool> Rset::columnOptionalBool(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw exception::Exception("This result set is invalid");
+      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
     }
 
     const auto column = columnOptionalUint64(colName);
@@ -142,8 +153,7 @@ optional<bool> Rset::columnOptionalBool(const std::string &colName) const {
 //------------------------------------------------------------------------------
 const std::string &Rset::getSql() const {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
   return m_impl->getSql();
 }
@@ -153,10 +163,25 @@ const std::string &Rset::getSql() const {
 //------------------------------------------------------------------------------
 bool Rset::next() {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
-  return m_impl->next();
+
+  const bool aRowHasBeenRetrieved = m_impl->next();
+
+  // Release resources of result set when its end has been reached
+  if(!aRowHasBeenRetrieved) {
+    m_impl.reset(nullptr);
+  }
+
+  return aRowHasBeenRetrieved;
+}
+
+//------------------------------------------------------------------------------
+// isEmpty
+//------------------------------------------------------------------------------
+bool Rset::isEmpty() const
+{
+  return nullptr == m_impl;
 }
 
 //------------------------------------------------------------------------------
@@ -164,8 +189,7 @@ bool Rset::next() {
 //------------------------------------------------------------------------------
 bool Rset::columnIsNull(const std::string &colName) const {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
   return m_impl->columnIsNull(colName);
 }
@@ -175,8 +199,7 @@ bool Rset::columnIsNull(const std::string &colName) const {
 //------------------------------------------------------------------------------
 optional<std::string> Rset::columnOptionalString(const std::string &colName) const {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
   return m_impl->columnOptionalString(colName);
 }
@@ -186,8 +209,7 @@ optional<std::string> Rset::columnOptionalString(const std::string &colName) con
 //------------------------------------------------------------------------------
 optional<uint64_t> Rset::columnOptionalUint64(const std::string &colName) const {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
   return m_impl->columnOptionalUint64(colName);
 }
@@ -198,7 +220,7 @@ optional<uint64_t> Rset::columnOptionalUint64(const std::string &colName) const
 double Rset::columnDouble(const std::string &colName) const {
   try {
     if(nullptr == m_impl) {
-      throw exception::Exception("This result set is invalid");
+      throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
     }
 
     const optional<double> col = columnOptionalDouble(colName);
@@ -217,8 +239,7 @@ double Rset::columnDouble(const std::string &colName) const {
 //------------------------------------------------------------------------------
 optional<double> Rset::columnOptionalDouble(const std::string &colName) const {
   if(nullptr == m_impl) {
-    throw exception::Exception(std::string(__FUNCTION__) + " failed: "
-      "This result set is invalid");
+    throw InvalidResultSet(std::string(__FUNCTION__) + " failed: This result set is invalid");
   }
   return m_impl->columnOptionalDouble(colName);
 }
diff --git a/rdbms/Rset.hpp b/rdbms/Rset.hpp
index cbf17fd856d4fee550a1e1b335f648170beb1707..a56e74b6156551633eccba074d175c5fcb3e41dc 100644
--- a/rdbms/Rset.hpp
+++ b/rdbms/Rset.hpp
@@ -19,6 +19,7 @@
 #pragma once
 
 #include "common/optional.hpp"
+#include "rdbms/InvalidResultSet.hpp"
 
 #include <memory>
 #include <stdint.h>
@@ -86,17 +87,37 @@ public:
    *
    * @return True if a row has been retrieved else false if there are no more
    * rows in the result set.
+   * @throw InvalidResultSet if the result is invalid.
    */
   bool next();
 
+  /**
+   * Returns true if the result set does not contain any more rows.
+   * @return True if the result set does not contain any more rows.
+   */
+  bool isEmpty() const;
+
   /**
    * Returns true if the specified column contains a null value.
    *
    * @param colName The name of the column.
    * @return True if the specified column contains a null value.
+   * @throw InvalidResultSet if the result is invalid.
    */
   bool columnIsNull(const std::string &colName) const;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * Note: the MySQL and Postgres wrapper implementations return an empty
+   * string rather than throwing when the column value is NULL.
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
+   */
+  std::string columnBlob(const std::string &colName) const;
+
   /**
    * Returns the value of the specified column as a string.
    *
@@ -105,6 +126,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The string value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   std::string columnString(const std::string &colName) const;
 
@@ -115,6 +137,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The string value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   optional<std::string> columnOptionalString(const std::string &colName) const;
 
@@ -126,6 +149,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   uint64_t columnUint64(const std::string &colName) const;
 
@@ -140,6 +164,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   bool columnBool(const std::string &colName) const;
 
@@ -150,6 +175,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   optional<uint64_t> columnOptionalUint64(const std::string &colName) const;
 
@@ -163,6 +189,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   optional<bool> columnOptionalBool(const std::string &colName) const;
 
@@ -174,6 +201,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   double columnDouble(const std::string &colName) const;
 
@@ -184,6 +212,7 @@ public:
    *
    * @param colName The name of the column.
    * @return The value of the specified column.
+   * @throw InvalidResultSet if the result is invalid.
    */
   optional<double> columnOptionalDouble(const std::string &colName) const;
 
diff --git a/rdbms/RsetTest.cpp b/rdbms/RsetTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5313c29a228bf1f57de6306b323b949ca4ed0ea2
--- /dev/null
+++ b/rdbms/RsetTest.cpp
@@ -0,0 +1,83 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "common/exception/Exception.hpp"
+#include "rdbms/ConnPool.hpp"
+#include "rdbms/Rset.hpp"
+#include "rdbms/wrapper/ConnFactoryFactory.hpp"
+
+#include <gtest/gtest.h>
+#include <sstream>
+
+namespace unitTests {
+
+class cta_rdbms_RsetTest : public ::testing::Test {
+protected:
+
+  virtual void SetUp() {
+  }
+
+  virtual void TearDown() {
+  }
+};
+
+TEST_F(cta_rdbms_RsetTest, constructor) {
+  using namespace cta::rdbms;
+
+  Rset rset;
+
+  ASSERT_TRUE(rset.isEmpty());
+}
+
+TEST_F(cta_rdbms_RsetTest, next) {
+  using namespace cta::rdbms;
+
+  const Login login(Login::DBTYPE_SQLITE, "", "", "file::memory:?cache=shared", "", 0);
+  auto connFactory = wrapper::ConnFactoryFactory::create(login);
+  auto conn = connFactory->create();
+  StmtPool pool;
+  {
+    const std::string sql = "CREATE TABLE RSET_TEST(ID INTEGER)";
+    Stmt stmt = pool.getStmt(*conn, sql);
+    stmt.executeNonQuery();
+  }
+  
+  {
+    const std::string sql = "INSERT INTO RSET_TEST(ID) VALUES(1)";
+    Stmt stmt = pool.getStmt(*conn, sql);
+    stmt.executeNonQuery();
+  }
+
+  {
+    const std::string sql = "SELECT ID AS ID FROM RSET_TEST ORDER BY ID";
+    Stmt stmt = pool.getStmt(*conn, sql);
+    auto rset = stmt.executeQuery();
+
+    ASSERT_FALSE(rset.isEmpty());
+    ASSERT_TRUE(rset.next());
+    ASSERT_EQ(1, rset.columnUint64("ID"));
+
+    ASSERT_FALSE(rset.next());
+
+    ASSERT_THROW(rset.next(), InvalidResultSet);
+
+    ASSERT_TRUE(rset.isEmpty());
+  }
+}
+
+} // namespace unitTests
diff --git a/rdbms/Stmt.cpp b/rdbms/Stmt.cpp
index 0069bbf3b0343c03a1ce3e34335ca0db818031c0..c465d85f6f5fd7742d9b919e45d24b85baea6bc9 100644
--- a/rdbms/Stmt.cpp
+++ b/rdbms/Stmt.cpp
@@ -172,6 +172,17 @@ void Stmt::bindOptionalBool(const std::string &paramName, const optional<bool> &
   }
 }
 
+//-----------------------------------------------------------------------------
+// bindBlob
+//-----------------------------------------------------------------------------
+void Stmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
+  if(nullptr != m_stmt) {
+    return m_stmt->bindBlob(paramName, paramValue);
+  } else {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed: Stmt does not contain a cached statement");
+  }
+}
+
 //-----------------------------------------------------------------------------
 // bindString
 //-----------------------------------------------------------------------------
diff --git a/rdbms/Stmt.hpp b/rdbms/Stmt.hpp
index 5a3e7a6076b9192fbc17d68967437500723a816b..4aaa0b65776f95079512efb96878b9e8e1d2479d 100644
--- a/rdbms/Stmt.hpp
+++ b/rdbms/Stmt.hpp
@@ -151,6 +151,14 @@ public:
    */
   void bindOptionalBool(const std::string &paramName, const optional<bool> &paramValue);
 
+  /** 
+   * Binds an SQL parameter of type binary blob.
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  void bindBlob(const std::string &paramName, const std::string &paramValue);
+
   /** 
    * Binds an SQL parameter of type string.
    *
diff --git a/rdbms/wrapper/MysqlConn.cpp b/rdbms/wrapper/MysqlConn.cpp
index 188ecc28b56c9dd645ab49158eb6e2e018d034d9..b82ef563a7f000e61909c689ebc8306203f507c1 100644
--- a/rdbms/wrapper/MysqlConn.cpp
+++ b/rdbms/wrapper/MysqlConn.cpp
@@ -55,13 +55,14 @@ MysqlConn::MysqlConn(const std::string& host,
     throw exception::Exception(std::string(" errno: ") + std::to_string(rc) + " " +  msg);
   }
 
-  // we can use mysql_options() to change the connect options
-
   // connect
 
+  // setting the CLIENT_FOUND_ROWS flag so that the reported number of rows
+  // affected by a DML statement matches that of Oracle, PostgreSQL and SQLite
+
   if (mysql_real_connect(m_mysqlConn, host.c_str(),
                          user.c_str(), passwd.c_str(), db.c_str(), port, 
-                         NULL, 0) == NULL) {
+                         NULL, CLIENT_FOUND_ROWS) == NULL) {
     unsigned int rc = mysql_errno(m_mysqlConn);
     std::string msg = mysql_error(m_mysqlConn);
     throw exception::Exception(std::string(" errno: ") + std::to_string(rc) + " " +  msg);
diff --git a/rdbms/wrapper/MysqlRset.cpp b/rdbms/wrapper/MysqlRset.cpp
index 66a3f649e96d92fff386d2b610a623cc20d22b64..ee45544fba3c269359e8da4a089b412232dff518 100644
--- a/rdbms/wrapper/MysqlRset.cpp
+++ b/rdbms/wrapper/MysqlRset.cpp
@@ -79,7 +79,6 @@ bool MysqlRset::next() {
   }
 
   return true;
-  // throw exception::Exception(std::string(__FUNCTION__) + " not implemented.");
 }
 
 //------------------------------------------------------------------------------
@@ -96,6 +95,11 @@ bool MysqlRset::columnIsNull(const std::string &colName) const {
   return *holder->get_is_null();
 }
 
+std::string MysqlRset::columnBlob(const std::string &colName) const {
+  auto blob = columnOptionalString(colName);
+  return blob ? *blob : std::string();
+}
+
 //------------------------------------------------------------------------------
 // columnOptionalString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/MysqlRset.hpp b/rdbms/wrapper/MysqlRset.hpp
index ff263a397c9d59fde25476e5f8bcf778752a532c..9aeb4d3244093ed1107554a05cbf0d92daff1d32 100644
--- a/rdbms/wrapper/MysqlRset.hpp
+++ b/rdbms/wrapper/MysqlRset.hpp
@@ -76,6 +76,14 @@ public:
    */
   bool columnIsNull(const std::string &colName) const override;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   */
+  std::string columnBlob(const std::string &colName) const override;
+
   /**
    * Returns the value of the specified column as a string.
    *
diff --git a/rdbms/wrapper/MysqlStmt.cpp b/rdbms/wrapper/MysqlStmt.cpp
index 6d22de3bc87deb981ca45705b33976fb2fb0c0a0..d007cf8e109a91c8112488a4abd81b0af087026c 100644
--- a/rdbms/wrapper/MysqlStmt.cpp
+++ b/rdbms/wrapper/MysqlStmt.cpp
@@ -260,6 +260,14 @@ void MysqlStmt::bindOptionalDouble(const std::string &paramName, const optional<
   }
 }
 
+void MysqlStmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
+  try {
+    bindOptionalString(paramName, paramValue);
+  } catch(exception::Exception &ex) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+  }
+}
+
 //------------------------------------------------------------------------------
 // bindString
 //------------------------------------------------------------------------------
@@ -307,7 +315,8 @@ void MysqlStmt::bindOptionalString(const std::string &paramName, const optional<
 
       // reset memory
       holder->reset();
-      snprintf(holder->val, holder->get_buffer_length(), paramValue.value().c_str());
+      // need to use memcpy for VARBINARY strings, which are not null-terminated
+      memcpy(holder->val, paramValue.value().c_str(), holder->get_buffer_length());
     } else {
       holder->length = 0;
     }
@@ -317,7 +326,6 @@ void MysqlStmt::bindOptionalString(const std::string &paramName, const optional<
     // delete m_placeholder[idx]; // remove the previous placeholder
 
     m_placeholder[idx] = holder;
-
   } catch(exception::Exception &ex) {
     throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " +
       getSqlForException() + ": " + ex.getMessage().str()); 
diff --git a/rdbms/wrapper/MysqlStmt.hpp b/rdbms/wrapper/MysqlStmt.hpp
index 5f36b0f19c8637d24cb9825ff81d12f02c6d50df..7d5de999eecfed47db252c13f88a8e3714abd429 100644
--- a/rdbms/wrapper/MysqlStmt.hpp
+++ b/rdbms/wrapper/MysqlStmt.hpp
@@ -116,6 +116,14 @@ public:
    */
   void bindOptionalDouble(const std::string &paramName, const optional<double> &paramValue) override;
 
+  /** 
+   * Binds an SQL parameter of type binary string (byte array).
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  void bindBlob(const std::string &paramName, const std::string &paramValue) override;
+
   /** 
    * Binds an SQL parameter of type string.
    *
diff --git a/rdbms/wrapper/OcciColumn.cpp b/rdbms/wrapper/OcciColumn.cpp
index 86c7f11902ae8eaffb3a6e94c32707dde38c5813..aeaf4c7494fc60801bd30217d01e6829613c26a1 100644
--- a/rdbms/wrapper/OcciColumn.cpp
+++ b/rdbms/wrapper/OcciColumn.cpp
@@ -150,6 +150,26 @@ void OcciColumn::copyStrIntoField(const size_t index, const std::string &str) {
   }
 }
 
+//------------------------------------------------------------------------------
+// setFieldValueToRaw
+//------------------------------------------------------------------------------
+void OcciColumn::setFieldValueToRaw(size_t index, const std::string &blob) {
+  try {
+    size_t maxlen = m_maxFieldLength < 2000 ? m_maxFieldLength : 2000;
+    if(blob.length() + 2 > maxlen) {
+      throw exception::Exception("Blob length=" + std::to_string(blob.length()) +
+        " exceeds maximum field length (" + std::to_string(maxlen-2) + ") bytes)");
+    }
+    uint16_t len = blob.length();
+    char *const buf = getBuffer();
+    char *const element = buf + index * m_maxFieldLength;
+    memcpy(element, &len, 2);
+    memcpy(element + 2, blob.c_str(), len);
+  } catch(exception::Exception &ex) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed: colName=" + m_colName + ": " + ex.getMessage().str());
+  }
+}
+
 } // namespace wrapper
 } // namespace rdbms
 } // namespace cta
diff --git a/rdbms/wrapper/OcciColumn.hpp b/rdbms/wrapper/OcciColumn.hpp
index 8457f695c54a0f120407ad3a1280638a12583964..a6c47653663e0fd0b2a3ddbfee5ddbd13b4e7d16 100644
--- a/rdbms/wrapper/OcciColumn.hpp
+++ b/rdbms/wrapper/OcciColumn.hpp
@@ -105,7 +105,15 @@ public:
     setFieldValue(index, value, std::is_integral<T>());
   }
 
-private:
+  /**
+   * Sets the field at the specified index to the specified raw byte array.
+   *
+   * Stores a two-byte length prefix followed by the raw blob bytes in the
+   * field buffer at the specified index.
+   * @param index The index of the field.
+   * @param blob The raw byte-array value of the field.
+   */
+  void setFieldValueToRaw(size_t index, const std::string &blob);
 
   /**
    * Sets the length of the field at the specified index.
@@ -115,6 +123,8 @@ private:
    */
   void setFieldLen(const size_t index, const ub2 length);
 
+private:
+
   /**
    * Sets the length of the field at the specified index to the length of the
    * specified value.
diff --git a/rdbms/wrapper/OcciRset.cpp b/rdbms/wrapper/OcciRset.cpp
index e1e16e158c5f858e8d73a5ed05c6ce425bd18ef8..1401a174b00b33355c7f58d153d4437a1ed09c68 100644
--- a/rdbms/wrapper/OcciRset.cpp
+++ b/rdbms/wrapper/OcciRset.cpp
@@ -126,6 +126,22 @@ void OcciRset::close() {
   }
 }
 
+std::string OcciRset::columnBlob(const std::string &colName) const {
+  try {
+    const int colIdx = m_colNameToIdx.getIdx(colName);
+    auto raw = m_rset->getBytes(colIdx);
+    std::unique_ptr<unsigned char[]> bytearray(new unsigned char[raw.length()]());
+    raw.getBytes(bytearray.get(), raw.length());
+    return std::string(reinterpret_cast<char*>(bytearray.get()), raw.length());
+  } catch(exception::Exception &ne) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + m_stmt.getSql() + ": " +
+      ne.getMessage().str());
+  } catch(std::exception &se) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " + m_stmt.getSql() + ": " +
+      se.what());
+  }
+}
+
 //------------------------------------------------------------------------------
 // columnOptionalString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/OcciRset.hpp b/rdbms/wrapper/OcciRset.hpp
index 27fc061f8c0a90a48018361343b9c05f64eeae5c..416765b0a06bc780bb71cad83bc39f97d15bfa78 100644
--- a/rdbms/wrapper/OcciRset.hpp
+++ b/rdbms/wrapper/OcciRset.hpp
@@ -80,6 +80,14 @@ public:
    */
   bool columnIsNull(const std::string &colName) const override;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   */
+  std::string columnBlob(const std::string &colName) const override;
+
   /**
    * Returns the value of the specified column as a string.
    *
diff --git a/rdbms/wrapper/OcciStmt.cpp b/rdbms/wrapper/OcciStmt.cpp
index ced47e61d31e3a53de3bb3f05be326622e22e135..44451197d3bb020166e1053b3bcc1217fc393f99 100644
--- a/rdbms/wrapper/OcciStmt.cpp
+++ b/rdbms/wrapper/OcciStmt.cpp
@@ -115,6 +115,10 @@ void OcciStmt::bindOptionalUint64(const std::string &paramName, const optional<u
   }
 }
 
+void OcciStmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
+  throw exception::Exception("OcciStmt::bindBlob not implemented.");
+}
+
 //------------------------------------------------------------------------------
 // bindDouble
 //------------------------------------------------------------------------------
@@ -291,11 +295,11 @@ oracle::occi::Statement *OcciStmt::operator->() const {
 //------------------------------------------------------------------------------
 // setColumn
 //------------------------------------------------------------------------------
-void OcciStmt::setColumn(OcciColumn &col) {
+void OcciStmt::setColumn(OcciColumn &col, oracle::occi::Type type) {
   const std::string paramName = std::string(":") + col.getColName();
   const auto paramIdx = getParamIdx(paramName);
-  m_stmt->setDataBuffer(paramIdx, col.getBuffer(), oracle::occi::OCCI_SQLT_STR,
-    col.getMaxFieldLength(), col.getFieldLengths());
+  m_stmt->setDataBuffer(paramIdx, col.getBuffer(), type, col.getMaxFieldLength(),
+    col.getFieldLengths());
 }
 
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/OcciStmt.hpp b/rdbms/wrapper/OcciStmt.hpp
index 6db7e1b80ab028ab14cef37271636c4595643ef7..7e48ae39168ac92c07e7fc15738478d22fc5d3f1 100644
--- a/rdbms/wrapper/OcciStmt.hpp
+++ b/rdbms/wrapper/OcciStmt.hpp
@@ -113,6 +113,14 @@ public:
    */
   void bindOptionalDouble(const std::string &paramName, const optional<double> &paramValue) override;
 
+  /** 
+   * Binds an SQL parameter of type binary string (byte array).
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  void bindBlob(const std::string &paramName, const std::string &paramValue) override;
+
   /** 
    * Binds an SQL parameter of type string.
    *
@@ -174,9 +182,10 @@ public:
   /**
    * Sets the specified column data for a batch-based database access.
    *
-   * @param The column data.
+   * @param  col   The column data
+   * @param  type  The type of the data
    */
-  void setColumn(OcciColumn &col);
+  void setColumn(OcciColumn &col, oracle::occi::Type type = oracle::occi::OCCI_SQLT_STR);
 
   /**
    * Determines whether or not the connection should be closed based on the
diff --git a/rdbms/wrapper/PostgresColumn.cpp b/rdbms/wrapper/PostgresColumn.cpp
index 90636678b28f4165b9e4a9a3d37842ca037dd058..7e58d0032e718bde1d3773456559eb74b8046e21 100644
--- a/rdbms/wrapper/PostgresColumn.cpp
+++ b/rdbms/wrapper/PostgresColumn.cpp
@@ -18,6 +18,7 @@
 
 #include "common/exception/Exception.hpp"
 #include "rdbms/wrapper/PostgresColumn.hpp"
+#include "rdbms/wrapper/PostgresConn.hpp"
 
 namespace cta {
 namespace rdbms {
@@ -46,6 +47,22 @@ size_t PostgresColumn::getNbRows() const {
   return m_nbRows;
 }
 
+//------------------------------------------------------------------------------
+// setFieldByteA
+//------------------------------------------------------------------------------
+void PostgresColumn::setFieldByteA(rdbms::Conn &conn, const size_t index, const std::string &value) {
+  auto pgconn_ptr = dynamic_cast<PostgresConn*>(conn.getConnWrapperPtr());
+  auto pgconn = pgconn_ptr->get();
+
+  size_t escaped_length;
+  auto escapedByteA = PQescapeByteaConn(pgconn, reinterpret_cast<const unsigned char*>(value.c_str()),
+    value.length(), &escaped_length);
+  std::string escapedStr(reinterpret_cast<const char*>(escapedByteA), escaped_length);
+  PQfreemem(escapedByteA);
+
+  copyStrIntoField(index, escapedStr);
+}
+
 //------------------------------------------------------------------------------
 // getValue
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/PostgresColumn.hpp b/rdbms/wrapper/PostgresColumn.hpp
index 741f9d4348ecc7e25aaf5e9c0727c0a1efcb7d3b..64f1679d1628d7d8f88d39f7705e69b37213d5e1 100644
--- a/rdbms/wrapper/PostgresColumn.hpp
+++ b/rdbms/wrapper/PostgresColumn.hpp
@@ -22,6 +22,8 @@
 #include <string.h>
 #include <typeinfo>
 #include <vector>
+#include <libpq-fe.h>
+#include <rdbms/Conn.hpp>
 
 namespace cta {
 namespace rdbms {
@@ -73,6 +75,15 @@ public:
     setFieldValue(index, value, std::is_integral<T>());
   }
 
+  /**
+   * Sets the BYTEA field at the specified index to the value of a byte array
+   *
+   * @param conn  The connection to the Postgres database
+   * @param index The index of the field
+   * @param value The value of the field expressed as a byte array
+   */
+  void setFieldByteA(rdbms::Conn &conn, const size_t index, const std::string &value);
+
 private:
 
   /**
diff --git a/rdbms/wrapper/PostgresConn.hpp b/rdbms/wrapper/PostgresConn.hpp
index 53ef5facb6dd2df84674d14cf850771cb54348de..ce67db9c29127f10de1bc327628b5e46cb7b484b 100644
--- a/rdbms/wrapper/PostgresConn.hpp
+++ b/rdbms/wrapper/PostgresConn.hpp
@@ -33,6 +33,7 @@ namespace wrapper {
 
 class PostgresStmt;
 class PostgresRset;
+class PostgresColumn;
 
 class PostgresConn: public ConnWrapper {
 public:
@@ -42,7 +43,7 @@ public:
    */
   friend PostgresStmt;
   friend PostgresRset;
-
+  friend PostgresColumn;
 
   /**
    * Constructor.
diff --git a/rdbms/wrapper/PostgresRset.cpp b/rdbms/wrapper/PostgresRset.cpp
index 8ade84da2175cf0c3d74b9080fbaaaa04b32db90..951b90402c8c3ef908ad52537614c19dd19d8dca 100644
--- a/rdbms/wrapper/PostgresRset.cpp
+++ b/rdbms/wrapper/PostgresRset.cpp
@@ -73,6 +73,20 @@ bool PostgresRset::columnIsNull(const std::string &colName) const {
   return PQgetisnull(m_resItr->get(), 0, ifield);
 }
 
+std::string PostgresRset::columnBlob(const std::string &colName) const {
+  auto blob = columnOptionalString(colName);
+  if(blob) {
+    size_t blob_len;
+    unsigned char *blob_ptr = PQunescapeBytea(reinterpret_cast<const unsigned char*>(blob->c_str()), &blob_len);
+    if(blob_ptr != nullptr) {
+      std::string blob_str(reinterpret_cast<const char*>(blob_ptr), blob_len);
+      PQfreemem(blob_ptr);
+      return blob_str;
+    }
+  }
+  return std::string();
+}
+
 //------------------------------------------------------------------------------
 // columnOptionalString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/PostgresRset.hpp b/rdbms/wrapper/PostgresRset.hpp
index 4936da35e9a70fde804492c86b0903b5a585dbc1..854f91209d163c7fc685d1cb0427426837607737 100644
--- a/rdbms/wrapper/PostgresRset.hpp
+++ b/rdbms/wrapper/PostgresRset.hpp
@@ -60,6 +60,14 @@ public:
    */
   bool columnIsNull(const std::string &colName) const override;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   */
+  std::string columnBlob(const std::string &colName) const override;
+
   /**
    * Returns the value of the specified column as a string.
    *
diff --git a/rdbms/wrapper/PostgresStmt.cpp b/rdbms/wrapper/PostgresStmt.cpp
index d00e43f99802cf5aca845e038bc2189028486198..a39c012bfa6bf5e10e47e6108e6d8975b70a0bbd 100644
--- a/rdbms/wrapper/PostgresStmt.cpp
+++ b/rdbms/wrapper/PostgresStmt.cpp
@@ -130,6 +130,10 @@ void PostgresStmt::bindOptionalUint64(const std::string &paramName, const option
   }
 }
 
+void PostgresStmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
+  throw exception::Exception("PostgresStmt::bindBlob not implemented.");
+}
+
 //------------------------------------------------------------------------------
 // bindString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/PostgresStmt.hpp b/rdbms/wrapper/PostgresStmt.hpp
index 396b09c75799ae14e5d2349ec024d262d0907e70..c1a6bc47f2afbf5ec42ac0d22ad5b398282b69cf 100644
--- a/rdbms/wrapper/PostgresStmt.hpp
+++ b/rdbms/wrapper/PostgresStmt.hpp
@@ -83,6 +83,14 @@ public:
    */
   void bindOptionalUint64(const std::string &paramName, const optional<uint64_t> &paramValue) override;
 
+  /** 
+   * Binds an SQL parameter of type binary string (byte array).
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  void bindBlob(const std::string &paramName, const std::string &paramValue) override;
+
   /** 
    * Binds an SQL parameter of type string.
    *
diff --git a/rdbms/wrapper/RsetWrapper.hpp b/rdbms/wrapper/RsetWrapper.hpp
index aceff03b11e04242f7d86695273f877659761eeb..8bda702c0db2150b9ed25ede80a9258419239dd5 100644
--- a/rdbms/wrapper/RsetWrapper.hpp
+++ b/rdbms/wrapper/RsetWrapper.hpp
@@ -62,6 +62,14 @@ public:
    */
   virtual bool columnIsNull(const std::string &colName) const = 0;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   */
+  virtual std::string columnBlob(const std::string &colName) const = 0;
+
   /**
    * Returns the value of the specified column as a string.
    *
diff --git a/rdbms/wrapper/SqliteConn.cpp b/rdbms/wrapper/SqliteConn.cpp
index 4a0a2eb63ca9c79a6299fe7bb566aa77a99c4fad..0b230de140033fcf964bb14412a3ba3db45f2149 100644
--- a/rdbms/wrapper/SqliteConn.cpp
+++ b/rdbms/wrapper/SqliteConn.cpp
@@ -248,7 +248,8 @@ std::map<std::string, std::string> SqliteConn::getColumns(const std::string &tab
     "INTEGER|"
     "CHAR|"
     "VARCHAR|"
-    "VARCHAR2";
+    "VARCHAR2|"
+    "BLOB";
     
     auto stmt = createStmt(sql);
     stmt->bindString(":TABLE_NAME", tableName);
diff --git a/rdbms/wrapper/SqliteRset.cpp b/rdbms/wrapper/SqliteRset.cpp
index 9aea8f932faa8f497e81f03c1812fd35d2784301..e332320f23a93855c1597eda39986c3dfe00bfc2 100644
--- a/rdbms/wrapper/SqliteRset.cpp
+++ b/rdbms/wrapper/SqliteRset.cpp
@@ -215,6 +215,24 @@ bool SqliteRset::columnIsNull(const std::string &colName) const {
   }
 }
 
+std::string SqliteRset::columnBlob(const std::string &colName) const {
+  try {
+    const ColumnNameToIdxAndType::IdxAndType idxAndType = m_colNameToIdxAndType.getIdxAndType(colName);
+    if(SQLITE_NULL == idxAndType.colType) {
+      return std::string();
+    } else {
+      const char *const colValue = reinterpret_cast<const char*>(sqlite3_column_blob(m_stmt.get(), idxAndType.colIdx));
+      if(NULL == colValue) {
+        return std::string();
+      }
+      int blobsize = sqlite3_column_bytes(m_stmt.get(), idxAndType.colIdx);
+      return std::string(colValue,blobsize);
+    }
+  } catch(exception::Exception &ex) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
+  }
+}
+
 //------------------------------------------------------------------------------
 // columnOptionalString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/SqliteRset.hpp b/rdbms/wrapper/SqliteRset.hpp
index bc9262d53594eec9ef8fd493665ab75917483281..4a21908f2ba69d84fd6e18011d7bee789bcd0f41 100644
--- a/rdbms/wrapper/SqliteRset.hpp
+++ b/rdbms/wrapper/SqliteRset.hpp
@@ -75,6 +75,14 @@ public:
    */
   bool columnIsNull(const std::string &colName) const override;
 
+  /**
+   * Returns the value of the specified column as a binary string (byte array).
+   *
+   * @param colName The name of the column.
+   * @return The string value of the specified column.
+   */
+  std::string columnBlob(const std::string &colName) const override;
+
   /**
    * Returns the value of the specified column as a string.
    *
diff --git a/rdbms/wrapper/SqliteStmt.cpp b/rdbms/wrapper/SqliteStmt.cpp
index 7f09732a3751552a7394feb53e6547312fe08d04..325df3704a7d6e274f3995b5d18289bcb75b980b 100644
--- a/rdbms/wrapper/SqliteStmt.cpp
+++ b/rdbms/wrapper/SqliteStmt.cpp
@@ -211,6 +211,21 @@ void SqliteStmt::bindOptionalDouble(const std::string &paramName, const optional
   }
 }
 
+void SqliteStmt::bindBlob(const std::string &paramName, const std::string &paramValue) {
+  try {
+    const unsigned int paramIdx = getParamIdx(paramName);
+    int bindRc = sqlite3_bind_blob(m_stmt, paramIdx, paramValue.c_str(), paramValue.length(), SQLITE_TRANSIENT);
+    if(SQLITE_OK != bindRc) {
+      exception::Exception ex;
+      ex.getMessage() << "sqlite3_bind_blob() failed: " << Sqlite::rcToStr(bindRc);
+      throw ex;
+    }
+  } catch(exception::Exception &ex) {
+    throw exception::Exception(std::string(__FUNCTION__) + " failed for SQL statement " +
+      getSqlForException() + ": " + ex.getMessage().str()); 
+  }
+}
+
 //------------------------------------------------------------------------------
 // bindString
 //------------------------------------------------------------------------------
diff --git a/rdbms/wrapper/SqliteStmt.hpp b/rdbms/wrapper/SqliteStmt.hpp
index 54f104b845fc2f49f229a2d35a3a55455f7f8cdf..65d2b6da7ddd57682930d90c97d13926d2f61034 100644
--- a/rdbms/wrapper/SqliteStmt.hpp
+++ b/rdbms/wrapper/SqliteStmt.hpp
@@ -107,6 +107,14 @@ public:
    */
   void bindOptionalDouble(const std::string &paramName, const optional<double> &paramValue) override;
 
+  /** 
+   * Binds an SQL parameter of type binary string (byte array).
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  void bindBlob(const std::string &paramName, const std::string &paramValue) override;
+
   /** 
    * Binds an SQL parameter of type string.
    *
diff --git a/rdbms/wrapper/StmtWrapper.hpp b/rdbms/wrapper/StmtWrapper.hpp
index e14221e1e96271c53bdae88a3c502988d46444f1..aee6cb5faaa356b2ee8e90feedd22cccab1b4983 100644
--- a/rdbms/wrapper/StmtWrapper.hpp
+++ b/rdbms/wrapper/StmtWrapper.hpp
@@ -143,6 +143,14 @@ public:
    */
   void bindOptionalBool(const std::string &paramName, const optional<bool> &paramValue);
 
+  /** 
+   * Binds an SQL parameter of type binary string (byte array).
+   *
+   * @param paramName The name of the parameter.
+   * @param paramValue The value to be bound.
+   */ 
+  virtual void bindBlob(const std::string &paramName, const std::string &paramValue) = 0;
+
   /** 
    * Binds an SQL parameter of type string.
    *
diff --git a/scheduler/ArchiveJob.cpp b/scheduler/ArchiveJob.cpp
index 98e26951421d843c9d5b64add0f1c5091b8f016c..e9cba75bfd37e53caea8d89f4c255560a0a41317 100644
--- a/scheduler/ArchiveJob.cpp
+++ b/scheduler/ArchiveJob.cpp
@@ -58,13 +58,11 @@ cta::catalogue::TapeItemWrittenPointer cta::ArchiveJob::validateAndGetTapeFileWr
   auto & fileReport = *fileReportUP;
   fileReport.archiveFileId = archiveFile.archiveFileID;
   fileReport.blockId = tapeFile.blockId;
-  fileReport.checksumType = tapeFile.checksumType;
-  fileReport.checksumValue = tapeFile.checksumValue;
-  fileReport.compressedSize = tapeFile.compressedSize;
+  fileReport.checksumBlob = tapeFile.checksumBlob;
   fileReport.copyNb = tapeFile.copyNb;
   fileReport.diskFileId = archiveFile.diskFileId;
-  fileReport.diskFileUser = archiveFile.diskFileInfo.owner;
-  fileReport.diskFileGroup = archiveFile.diskFileInfo.group;
+  fileReport.diskFileOwnerUid = archiveFile.diskFileInfo.owner_uid;
+  fileReport.diskFileGid = archiveFile.diskFileInfo.gid;
   fileReport.diskFilePath = archiveFile.diskFileInfo.path;
   fileReport.diskInstance = archiveFile.diskInstance;
   fileReport.fSeq = tapeFile.fSeq;
@@ -84,43 +82,50 @@ void cta::ArchiveJob::validate(){
       std::numeric_limits<decltype(tapeFile.blockId)>::max())
     throw BlockIdNotSet("In cta::ArchiveJob::validate(): Block ID not set");
   // Also check the checksum has been set
-  if (archiveFile.checksumType.empty() || archiveFile.checksumValue.empty() || 
-      tapeFile.checksumType.empty() || tapeFile.checksumValue.empty())
+  if (archiveFile.checksumBlob.empty() || tapeFile.checksumBlob.empty())
     throw ChecksumNotSet("In cta::ArchiveJob::validate(): checksums not set");
   // And matches
-  if (archiveFile.checksumType != tapeFile.checksumType || 
-      archiveFile.checksumValue != tapeFile.checksumValue)
-    throw ChecksumMismatch(std::string("In cta::ArchiveJob::validate(): checksum mismatch!")
-            +" Archive file checksum type: "+archiveFile.checksumType
-            +" Archive file checksum value: "+archiveFile.checksumValue
-            +" Tape file checksum type: "+tapeFile.checksumType
-            +" Tape file checksum value: "+tapeFile.checksumValue);
+  archiveFile.checksumBlob.validate(tapeFile.checksumBlob);
 }
 
 //------------------------------------------------------------------------------
 // ArchiveJob::reportURL
 //------------------------------------------------------------------------------
-std::string cta::ArchiveJob::reportURL() {
+std::string cta::ArchiveJob::exceptionThrowingReportURL() {
   switch (m_dbJob->reportType) {
   case SchedulerDatabase::ArchiveJob::ReportType::CompletionReport:
     return m_dbJob->archiveReportURL;
-  case SchedulerDatabase::ArchiveJob::ReportType::FailureReport:
-    {
-      if (m_dbJob->latestError.empty()) {
-        throw exception::Exception("In ArchiveJob::reportURL(): empty failure reason.");
-      }
-      std::string base64ErrorReport;
-      // Construct a pipe: msg -> sign -> Base64 encode -> result goes into ret.
-      const bool noNewLineInBase64Output = false;
-      CryptoPP::StringSource ss1(m_dbJob->latestError, true, 
-        new CryptoPP::Base64Encoder(
-          new CryptoPP::StringSink(base64ErrorReport), noNewLineInBase64Output));
-      return m_dbJob->errorReportURL + base64ErrorReport;
-    }
-  default:
-    { 
-      throw exception::Exception("In ArchiveJob::reportURL(): job status does not require reporting.");
+  case SchedulerDatabase::ArchiveJob::ReportType::FailureReport: {
+    if (m_dbJob->latestError.empty()) {
+      throw exception::Exception("In ArchiveJob::exceptionThrowingReportURL(): empty failure reason.");
     }
+    std::string base64ErrorReport;
+    // Construct a pipe: msg -> sign -> Base64 encode -> result goes into ret.
+    const bool noNewLineInBase64Output = false;
+    CryptoPP::StringSource ss1(m_dbJob->latestError, true, 
+      new CryptoPP::Base64Encoder(
+        new CryptoPP::StringSink(base64ErrorReport), noNewLineInBase64Output));
+    return m_dbJob->errorReportURL + base64ErrorReport;
+  }
+  case SchedulerDatabase::ArchiveJob::ReportType::NoReportRequired:
+    throw exception::Exception("In ArchiveJob::exceptionThrowingReportURL(): job status NoReportRequired does not require reporting.");
+  case SchedulerDatabase::ArchiveJob::ReportType::Report:
+    throw exception::Exception("In ArchiveJob::exceptionThrowingReportURL(): job status Report does not require reporting.");
+  }
+  throw exception::Exception("In ArchiveJob::exceptionThrowingReportURL(): invalid report type reportType=" +
+    std::to_string(static_cast<uint8_t>(m_dbJob->reportType)));
+}
+
+//------------------------------------------------------------------------------
+// ArchiveJob::reportURL
+//------------------------------------------------------------------------------
+std::string cta::ArchiveJob::reportURL() noexcept {
+  try {
+    return exceptionThrowingReportURL();
+  } catch(exception::Exception &ex) {
+    return ex.what();
+  } catch(...) {
+    return "In ArchiveJob::reportURL(): unknown exception";
   }
 }
 
@@ -142,6 +147,7 @@ std::string cta::ArchiveJob::reportType() {
       throw exception::Exception("In ArchiveJob::reportType(): job status does not require reporting.");
     }
   }
+  throw exception::Exception("In ArchiveJob::reportType(): invalid report type.");
 }
 
 //------------------------------------------------------------------------------
diff --git a/scheduler/ArchiveJob.hpp b/scheduler/ArchiveJob.hpp
index d4852926cd911eb989f37d7a88afad82eb227239..2dc28c02405ec3cfc5f795d52855d48ca2873806 100644
--- a/scheduler/ArchiveJob.hpp
+++ b/scheduler/ArchiveJob.hpp
@@ -74,7 +74,6 @@ public:
   
   CTA_GENERATE_EXCEPTION_CLASS(BlockIdNotSet);
   CTA_GENERATE_EXCEPTION_CLASS(ChecksumNotSet);
-  CTA_GENERATE_EXCEPTION_CLASS(ChecksumMismatch);
   
   /**
    * Start an asynchronous update for a batch of jobs and then make sure they complete.
@@ -110,7 +109,14 @@ public:
    * Get the URL used for reporting
    * @return The URL used to report to the disk system.
    */
-  virtual std::string reportURL();
+  virtual std::string exceptionThrowingReportURL();
+
+  /**
+   * Same as exceptionThrowingReportURL() except it doesn't throw exceptions.
+   * Errors are returned in the output string.
+   * @return The URL used to report to the disk system.
+   */
+  virtual std::string reportURL() noexcept;
 
   /**
    * Get the report type.
diff --git a/scheduler/ArchiveMount.cpp b/scheduler/ArchiveMount.cpp
index 818f703a145990c1c236c4ac53275f12d361a07c..fbfe60f96f809b8b48d790412c6f28a77bc64a87 100644
--- a/scheduler/ArchiveMount.cpp
+++ b/scheduler/ArchiveMount.cpp
@@ -43,7 +43,7 @@ cta::ArchiveMount::ArchiveMount(catalogue::Catalogue & catalogue,
 // getMountType
 //------------------------------------------------------------------------------
 cta::common::dataStructures::MountType cta::ArchiveMount::getMountType() const {
-  return cta::common::dataStructures::MountType::ArchiveForUser;
+  return m_dbMount->mountInfo.mountType;
 }
 
 //------------------------------------------------------------------------------
@@ -278,6 +278,25 @@ void cta::ArchiveMount::setTapeSessionStats(const castor::tape::tapeserver::daem
   m_dbMount->setTapeSessionStats(stats);
 }
 
+//------------------------------------------------------------------------------
+// setTapeMounted()
+//------------------------------------------------------------------------------
+void cta::ArchiveMount::setTapeMounted(cta::log::LogContext& logContext) const {
+  utils::Timer t;    
+  log::ScopedParamContainer spc(logContext);
+  try {
+    m_catalogue.tapeMountedForArchive(m_dbMount->getMountInfo().vid, m_dbMount->getMountInfo().drive);
+    auto catalogueTime = t.secs(cta::utils::Timer::resetCounter);
+    spc.add("catalogueTime", catalogueTime);
+    logContext.log(log::INFO, "In ArchiveMount::setTapeMounted(): success.");
+  } catch (cta::exception::Exception &ex) {
+    auto catalogueTimeFailed = t.secs(cta::utils::Timer::resetCounter);
+    spc.add("catalogueTime", catalogueTimeFailed);
+    logContext.log(cta::log::WARNING,
+      "Failed to update catalogue for the tape mounted for archive.");
+  }    
+}
+
 //------------------------------------------------------------------------------
 // setTapeFull()
 //------------------------------------------------------------------------------
diff --git a/scheduler/ArchiveMount.hpp b/scheduler/ArchiveMount.hpp
index 5ff30a24ba8982e6a20db21d3f8114ac85bd6573..06bf04077baacaa8076075e71a3a5a0c1f9f5737 100644
--- a/scheduler/ArchiveMount.hpp
+++ b/scheduler/ArchiveMount.hpp
@@ -41,12 +41,15 @@ namespace cta {
 
     /**
      * Constructor.
+     * 
+     * @param catalogue The file catalogue interface.
      */
     ArchiveMount(catalogue::Catalogue & catalogue);
 
     /**
      * Constructor.
      *
+     * @param catalogue The file catalogue interface. 
      * @param dbMount The database representation of this mount.
      */
     ArchiveMount(catalogue::Catalogue & catalogue, std::unique_ptr<cta::SchedulerDatabase::ArchiveMount> dbMount);
@@ -113,6 +116,12 @@ namespace cta {
      */
     void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override;
     
+    /**
+     * Report a tape mounted event
+     * @param logContext
+     */
+    void setTapeMounted(log::LogContext &logContext) const override;
+    
     /**
      * Report that the tape is full.
      */
diff --git a/scheduler/CMakeLists.txt b/scheduler/CMakeLists.txt
index 9ca733facb41ebbac93d9dc910963c5782a6a30c..5c07a3d40aa72022ec8b6aac0c667e587a741af4 100644
--- a/scheduler/CMakeLists.txt
+++ b/scheduler/CMakeLists.txt
@@ -23,6 +23,7 @@ set (CTA_SCHEDULER_SRC_FILES
   OStoreDB/OStoreDBWithAgent.cpp
   LabelMount.cpp
   DiskReportRunner.cpp
+  RepackReportThread.cpp
   RepackRequestManager.cpp)
 
 find_package(Protobuf3 REQUIRED)
diff --git a/scheduler/LabelMount.cpp b/scheduler/LabelMount.cpp
index e7ad828d554160116db0430a1934690d14343df4..31e0d484eeea643b0579b3bb82cbed0ff34fe11f 100644
--- a/scheduler/LabelMount.cpp
+++ b/scheduler/LabelMount.cpp
@@ -65,6 +65,11 @@ void LabelMount::setTapeSessionStats(const castor::tape::tapeserver::daemon::Tap
   // TODO
 }
 
+void LabelMount::setTapeMounted(log::LogContext &logContext) const {
+  throw 0;
+  // TODO
+}
+
 LabelMount::LabelMount(catalogue::Catalogue& catalogue, std::unique_ptr<cta::SchedulerDatabase::LabelMount> dbMount): 
   m_catalogue(catalogue) {
   throw 0;
diff --git a/scheduler/LabelMount.hpp b/scheduler/LabelMount.hpp
index 613ceb3411b0582241bcd7512e132e1585d4677e..3392aea811457f21d742dcfe55115f8f9d646e5d 100644
--- a/scheduler/LabelMount.hpp
+++ b/scheduler/LabelMount.hpp
@@ -96,6 +96,12 @@ namespace cta {
      * Report a tape session statistics
      */
     void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override;
+    
+    /**
+     * Report a tape mounted event
+     * @param logContext
+     */ 
+    void setTapeMounted(log::LogContext &logContext) const override;
 
     CTA_GENERATE_EXCEPTION_CLASS(SessionNotRunning);
     
diff --git a/scheduler/OStoreDB/OStoreDB.cpp b/scheduler/OStoreDB/OStoreDB.cpp
index ffb5cd0e1d69a53b25fcead31c5f03bc736547f5..2272862e60e3c4ea8b67f5b21d6e2b189013cc11 100644
--- a/scheduler/OStoreDB/OStoreDB.cpp
+++ b/scheduler/OStoreDB/OStoreDB.cpp
@@ -604,8 +604,7 @@ void OStoreDB::queueArchive(const std::string &instanceName, const cta::common::
   // Summarize all as an archiveFile
   cta::common::dataStructures::ArchiveFile aFile;
   aFile.archiveFileID = criteria.fileId;
-  aFile.checksumType = request.checksumType;
-  aFile.checksumValue = request.checksumValue;
+  aFile.checksumBlob = request.checksumBlob;
   // TODO: fully fledged archive file should not be the representation of the request.
   aFile.creationTime = std::numeric_limits<decltype(aFile.creationTime)>::min();
   aFile.reconciliationTime = 0;
@@ -1119,7 +1118,7 @@ void OStoreDB::setRetrieveJobBatchReportedToUser(std::list<cta::SchedulerDatabas
         "In OStoreDB::setRetrieveJobBatchReported(): tape copy not found"
       );
       insertedElements.emplace_back(CaRQF::InsertedElement{
-        &j.job->m_retrieveRequest, tf_it->copyNb, tf_it->fSeq, tf_it->compressedSize,
+        &j.job->m_retrieveRequest, tf_it->copyNb, tf_it->fSeq, j.job->archiveFile.fileSize,
         common::dataStructures::MountPolicy(), serializers::RetrieveJobStatus::RJS_Failed,
         j.job->m_activityDescription, j.job->m_diskSystemName
       });
@@ -1423,7 +1422,7 @@ OStoreDB::RetrieveQueueItor_t* OStoreDB::getRetrieveJobItorPtr(const std::string
 // OStoreDB::queueRepack()
 //------------------------------------------------------------------------------
 void OStoreDB::queueRepack(const std::string& vid, const std::string& bufferURL,
-    common::dataStructures::RepackInfo::Type repackType, log::LogContext & lc) {
+    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy& mountPolicy, log::LogContext & lc) {
   // Prepare the repack request object in memory.
   assertAgentAddressSet();
   cta::utils::Timer t;
@@ -1434,6 +1433,7 @@ void OStoreDB::queueRepack(const std::string& vid, const std::string& bufferURL,
   rr->setVid(vid);
   rr->setType(repackType);
   rr->setBufferURL(bufferURL);
+  rr->setMountPolicy(mountPolicy);
   // Try to reference the object in the index (will fail if there is already a request with this VID.
   try {
     Helpers::registerRepackRequestToIndex(vid, rr->getAddressIfSet(), *m_agentReference, m_objectStore, lc);
@@ -1714,7 +1714,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
 
     // Try to get jobs from the first queue. If it is empty, it will be trimmed, so we can go for another round.
     Carqtrtrfs::PopCriteria criteria;
-    criteria.files = c_repackReportBatchSize;
+    criteria.files = c_repackRetrieveReportBatchSize;
     auto jobs = algo.popNextBatch(queueList.front().vid, criteria, lc);
     if(jobs.elements.empty()) continue;
     std::unique_ptr<RepackRetrieveSuccessesReportBatch> privateRet;
@@ -1726,13 +1726,14 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
       auto & sr = privateRet->m_subrequestList.back();
       sr.repackInfo = j.repackInfo;
       sr.archiveFile = j.archiveFile;
+      sr.owner = m_agentReference;
       sr.subrequest.reset(j.retrieveRequest.release());
       repackRequestAddresses.insert(j.repackInfo.repackRequestAddress);
     }
     // As we are popping from a single report queue, all requests should concern only one repack request.
     if (repackRequestAddresses.size() != 1) {
       std::stringstream err;
-      err << "In OStoreDB::getNextSuccessfulRetrieveRepackReportBatch(): reports for several repack requests in the same queue. ";
+      err << "In OStoreDB::getNextSuccessfulArchiveRepackReportBatch(): reports for several repack requests in the same queue. ";
       for (auto & rr: repackRequestAddresses) { err << rr << " "; }
       throw exception::Exception(err.str());
     }
@@ -1758,7 +1759,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextFailedRet
 
     // Try to get jobs from the first queue. If it is empty, it will be trimmed, so we can go for another round.
     CaRqtrtrff::PopCriteria criteria;
-    criteria.files = c_repackReportBatchSize;
+    criteria.files = c_repackRetrieveReportBatchSize;
     auto jobs = algo.popNextBatch(queueList.front().vid, criteria, lc);
     if(jobs.elements.empty()) continue;
     std::unique_ptr<RepackRetrieveFailureReportBatch> privateRet;
@@ -1802,7 +1803,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
 
     // Try to get jobs from the first queue. If it is empty, it will be trimmed, so we can go for another round.
     Caaqtrtrfs::PopCriteria criteria;
-    criteria.files = c_repackReportBatchSize;
+    criteria.files = c_repackArchiveReportBatchSize;
     auto jobs = algo.popNextBatch(queueList.front().tapePool, criteria, lc);
     if(jobs.elements.empty()) continue;
     std::unique_ptr<RepackArchiveSuccessesReportBatch> privateRet;
@@ -1830,7 +1831,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
     
     return std::unique_ptr<SchedulerDatabase::RepackReportBatch>(privateRet.release());
   }
-  throw NoRepackReportBatchFound("In OStoreDB::getNextSuccessfulRetrieveRepackReportBatch(): no report found.");
+  throw NoRepackReportBatchFound("In OStoreDB::getNextSuccessfulArchiveRepackReportBatch(): no report found.");
 }
 
 std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextFailedArchiveRepackReportBatch(log::LogContext& lc){
@@ -1844,7 +1845,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextFailedArc
     if (queueList.empty()) throw NoRepackReportBatchFound("In OStoreDB::getNextFailedArchiveRepackReportBatch(): no queue found.");
     // Try to get jobs from the first queue. If it is empty, it will be trimmed, so we can go for another round.
     Caaqtrtrff::PopCriteria criteria;
-    criteria.files = c_repackReportBatchSize;
+    criteria.files = c_repackArchiveReportBatchSize;
     auto jobs = algo.popNextBatch(queueList.front().tapePool, criteria, lc);
     if(jobs.elements.empty()) continue;
     std::unique_ptr<RepackArchiveFailureReportBatch> privateRet;
@@ -1884,7 +1885,7 @@ void OStoreDB::RepackRetrieveSuccessesReportBatch::report(log::LogContext& lc) {
   // As usual there are many opportunities for failure.
   utils::Timer t;
   log::TimingList timingList;
-  
+  cta::common::dataStructures::MountPolicy mountPolicy;
   // 1) Update statistics. As the repack request is protected against double reporting, we can release its lock
   // before the next step.
   {
@@ -1901,6 +1902,7 @@ void OStoreDB::RepackRetrieveSuccessesReportBatch::report(log::LogContext& lc) {
     objectstore::ScopedExclusiveLock rrl(m_repackRequest);
     timingList.insertAndReset("successStatsLockTime", t);
     m_repackRequest.fetch();
+    mountPolicy = m_repackRequest.getMountPolicy();
     timingList.insertAndReset("successStatsFetchTime", t);
     m_repackRequest.reportRetriveSuccesses(ssl);
     timingList.insertAndReset("successStatsUpdateTime", t);
@@ -1913,8 +1915,10 @@ void OStoreDB::RepackRetrieveSuccessesReportBatch::report(log::LogContext& lc) {
   struct SuccessfullyTranformedRequest {
     std::shared_ptr<objectstore::ArchiveRequest> archiveRequest;
     SubrequestInfo & subrequestInfo;
+    SorterArchiveRequest sorterArchiveRequest;
   };
   std::list<SuccessfullyTranformedRequest> successfullyTransformedSubrequests;
+  uint64_t nbTransformedSubrequest = 0;
   {
     objectstore::RepackRequest::SubrequestStatistics::List failedArchiveSSL;
     std::list<SubrequestInfo *> failedSubrequests;
@@ -1958,17 +1962,33 @@ void OStoreDB::RepackRetrieveSuccessesReportBatch::report(log::LogContext& lc) {
     for (auto &atar: asyncTransformsAndReqs) {
       try {
         atar.transformer->wait();
+        nbTransformedSubrequest++;
         // Log the transformation
         log::ScopedParamContainer params(lc);
         params.add("fileId", atar.subrequestInfo.archiveFile.archiveFileID)
               .add("subrequestAddress", atar.subrequestInfo.subrequest->getAddressIfSet());
         timingList.addToLog(params);
         lc.log(log::INFO, "In OStoreDB::RepackRetrieveSuccessesReportBatch::report(), turned successful retrieve request in archive request.");
+        SorterArchiveRequest sorterArchiveRequest;
+        for(auto & copyNbToArchive: atar.subrequestInfo.repackInfo.copyNbsToRearchive){
+          SorterArchiveJob sorterArchiveJob;
+          sorterArchiveJob.archiveFile = atar.subrequestInfo.archiveFile;
+          sorterArchiveJob.archiveRequest = std::make_shared<objectstore::ArchiveRequest>(
+                atar.subrequestInfo.subrequest->getAddressIfSet(),
+                m_oStoreDb.m_objectStore);
+          sorterArchiveJob.jobDump.copyNb = copyNbToArchive;
+          sorterArchiveJob.jobDump.tapePool = atar.subrequestInfo.repackInfo.archiveRouteMap[copyNbToArchive];
+          sorterArchiveJob.jobQueueType = cta::objectstore::JobQueueType::JobsToTransferForRepack;
+          sorterArchiveJob.mountPolicy = mountPolicy;
+          sorterArchiveJob.previousOwner = atar.subrequestInfo.owner;
+          sorterArchiveRequest.archiveJobs.push_back(sorterArchiveJob);
+        }
         successfullyTransformedSubrequests.push_back(SuccessfullyTranformedRequest{
           std::make_shared<objectstore::ArchiveRequest>(
               atar.subrequestInfo.subrequest->getAddressIfSet(),
               m_oStoreDb.m_objectStore), 
-          atar.subrequestInfo
+          atar.subrequestInfo,
+          sorterArchiveRequest
         });
       } catch (exception::Exception & ex) {
         // We failed to archive the file (to create the request, in fact). So all the copyNbs 
@@ -2060,18 +2080,14 @@ void OStoreDB::RepackRetrieveSuccessesReportBatch::report(log::LogContext& lc) {
   // 3. We now just need to queue the freshly created archive jobs into their respective queues
   {
     objectstore::Sorter sorter(*m_oStoreDb.m_agentReference, m_oStoreDb.m_objectStore, m_oStoreDb.m_catalogue);
-    std::list<std::unique_ptr<objectstore::ScopedExclusiveLock>> locks;
-    // TODO: swich to "lockfree" sorter interface.
     for (auto &sts: successfullyTransformedSubrequests) {
-      locks.push_back(cta::make_unique<objectstore::ScopedExclusiveLock>(*sts.archiveRequest));
-      sts.archiveRequest->fetch();
-      sorter.insertArchiveRequest(sts.archiveRequest, *m_oStoreDb.m_agentReference, lc);
+      sorter.insertArchiveRequest(sts.sorterArchiveRequest, *m_oStoreDb.m_agentReference, lc);
     }
-    locks.clear();
     sorter.flushAll(lc);
   }
   timingList.insertAndReset("archiveRequestsQueueingTime", t);
   log::ScopedParamContainer params(lc);
+  params.add("numberOfTransformedSubrequests",nbTransformedSubrequest);
   timingList.addToLog(params);
   lc.log(log::INFO,"In OStoreDB::RepackRetrieveSuccessesReportBatch::report(): Processed a batch of reports.");
 }
@@ -2091,12 +2107,17 @@ void OStoreDB::RepackRetrieveFailureReportBatch::report(log::LogContext& lc){
   {
     // Prepare the report
     objectstore::RepackRequest::SubrequestStatistics::List ssl;
+    uint64_t failedToCreateArchiveReq = 0;
     for (auto &rr: m_subrequestList) {
       ssl.push_back(objectstore::RepackRequest::SubrequestStatistics());
       ssl.back().bytes = rr.archiveFile.fileSize;
       ssl.back().files = 1;
       ssl.back().fSeq = rr.repackInfo.fSeq;
       fSeqsToDelete.push_back(rr.repackInfo.fSeq);
+      for(auto& copyNb: rr.repackInfo.copyNbsToRearchive){
+        (void) copyNb;
+        failedToCreateArchiveReq++;
+      }
     }
     // Record it.
     timingList.insertAndReset("failureStatsPrepareTime", t);
@@ -2106,6 +2127,8 @@ void OStoreDB::RepackRetrieveFailureReportBatch::report(log::LogContext& lc){
     timingList.insertAndReset("failureStatsFetchTime", t);
     m_repackRequest.reportSubRequestsForDeletion(fSeqsToDelete);
     timingList.insertAndReset("failureStatsReportSubRequestsForDeletionTime", t);
+    m_repackRequest.reportArchiveCreationFailures(failedToCreateArchiveReq);
+    timingList.insertAndReset("failureArchiveCreationStatsUpdateTime",t);
     m_repackRequest.reportRetriveFailures(ssl);
     timingList.insertAndReset("failureStatsUpdateTime", t);
     m_repackRequest.commit();
@@ -2195,8 +2218,8 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
   for (auto rsr: repackSubrequests) fSeqs.insert(rsr.fSeq);
   auto subrequestsNames = m_repackRequest.getOrPrepareSubrequestInfo(fSeqs, *m_oStoreDB.m_agentReference);
   m_repackRequest.setTotalStats(totalStatsFiles);
-  uint64_t fSeq = std::max(maxFSeqLowBound+1, maxAddedFSeq + 1);
-  m_repackRequest.setLastExpandedFSeq(fSeq);
+  uint64_t fSeq = std::max(maxFSeqLowBound + 1, maxAddedFSeq + 1);
+  common::dataStructures::MountPolicy mountPolicy = m_repackRequest.getMountPolicy();
   // We make sure the references to subrequests exist persistently before creating them.
   m_repackRequest.commit();
   // We keep holding the repack request lock: we need to ensure de deleted boolean of each subrequest does
@@ -2206,9 +2229,8 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
   for (auto &rn: subrequestsNames) { subReqInfoMap[rn.fSeq] = rn; }
   // Try to create the retrieve subrequests (owned by this process, to be queued in a second step)
   // subrequests can already fail at that point if we cannot find a copy on a valid tape.
-  std::list<uint64_t> failedFSeqs;
-  uint64_t failedFiles = 0;
-  uint64_t failedBytes = 0;
+  std::list<Subrequest> notCreatedSubrequests;
+  objectstore::RepackRequest::StatsValues failedCreationStats;
   // First loop: we will issue the async insertions of the subrequests.
   struct AsyncInsertionInfo {
     Subrequest & rsr;
@@ -2245,9 +2267,9 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
           rRRepackInfo.archiveRouteMap[ar.second.copyNb] = ar.second.tapePoolName;
         }
       } catch (std::out_of_range &) {
-        failedFSeqs.emplace_back(rsr.fSeq);
-        failedFiles++;
-        failedBytes += rsr.archiveFile.fileSize;
+        notCreatedSubrequests.emplace_back(rsr);
+        failedCreationStats.files++;
+        failedCreationStats.bytes+=rsr.archiveFile.fileSize;
         log::ScopedParamContainer params(lc);
         params.add("fileID", rsr.archiveFile.archiveFileID)
               .add("diskInstance", rsr.archiveFile.diskInstance)
@@ -2272,7 +2294,7 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
       // Set the queueing parameters
       common::dataStructures::RetrieveFileQueueCriteria fileQueueCriteria;
       fileQueueCriteria.archiveFile = rsr.archiveFile;
-      fileQueueCriteria.mountPolicy = common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack;
+      fileQueueCriteria.mountPolicy = mountPolicy;
       rr->setRetrieveFileQueueCriteria(fileQueueCriteria);
       // Decide of which vid we are going to retrieve from. Here, if we can retrieve from the repack VID, we
       // will set the initial recall on it. Retries will we requeue to best VID as usual if needed.
@@ -2298,9 +2320,9 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
           bestVid = Helpers::selectBestRetrieveQueue(candidateVids, m_oStoreDB.m_catalogue, m_oStoreDB.m_objectStore);
         } catch (Helpers::NoTapeAvailableForRetrieve &) {
           // Count the failure for this subrequest. 
-          failedFSeqs.emplace_back(rsr.fSeq);
-          failedFiles++;
-          failedBytes += rsr.archiveFile.fileSize;
+          notCreatedSubrequests.emplace_back(rsr);
+          failedCreationStats.files++;
+          failedCreationStats.bytes += rsr.archiveFile.fileSize;
           log::ScopedParamContainer params(lc);
           params.add("fileId", rsr.archiveFile.archiveFileID)
                 .add("repackVid", repackInfo.vid);
@@ -2316,9 +2338,9 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
         }
       {
         // Count the failure for this subrequest. 
-        failedFSeqs.emplace_back(rsr.fSeq);
-        failedFiles++;
-        failedBytes += rsr.archiveFile.fileSize;
+        notCreatedSubrequests.emplace_back(rsr);
+        failedCreationStats.files++;
+        failedCreationStats.bytes += rsr.archiveFile.fileSize;
         log::ScopedParamContainer params(lc);
         params.add("fileId", rsr.archiveFile.archiveFileID)
               .add("repackVid", repackInfo.vid)
@@ -2336,15 +2358,23 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
       try {
         std::shared_ptr<objectstore::RetrieveRequest::AsyncInserter> rrai(rr->asyncInsert());
         asyncInsertionInfoList.emplace_back(AsyncInsertionInfo{rsr, rr, rrai, bestVid, activeCopyNumber});
+      } catch (cta::objectstore::ObjectOpsBase::NotNewObject &objExists){
+        // The retrieve subrequest already exists in the objectstore and is not deleted; log it and skip this subrequest.
+        log::ScopedParamContainer params(lc);
+        params.add("copyNb",activeCopyNumber)
+              .add("repackVid",repackInfo.vid)
+              .add("bestVid",bestVid)
+              .add("fileId",rsr.archiveFile.archiveFileID);
+        lc.log(log::ERR, "In OStoreDB::RepackRequest::addSubrequests(): could not asyncInsert the subrequest because it already exists, continuing expansion");
+        continue;
       } catch (exception::Exception & ex) {
         // We can fail to serialize here...
         // Count the failure for this subrequest. 
-        failedFSeqs.emplace_back(rsr.fSeq);
-        failedFiles++;
-        failedBytes += rsr.archiveFile.fileSize;
-        failedFSeqs.emplace_back(rsr.fSeq);
+        notCreatedSubrequests.emplace_back(rsr);
+        failedCreationStats.files++;
+        failedCreationStats.bytes += rsr.archiveFile.fileSize;
         log::ScopedParamContainer params(lc);
-        params.add("fileId", rsr.archiveFile)
+        params.add("fileId", rsr.archiveFile.archiveFileID)
               .add("repackVid", repackInfo.vid)
               .add("bestVid", bestVid)
               .add("ExceptionMessage", ex.getMessageValue());
@@ -2377,9 +2407,9 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
       asyncInsertedSubrequestInfoList.emplace_back(AsyncInsertedSubrequestInfo{aii.rsr, aii.bestVid, aii.activeCopyNb, aii.request});
     } catch (exception::Exception & ex) {
       // Count the failure for this subrequest. 
-      failedFSeqs.emplace_back(aii.rsr.fSeq);
-      failedFiles++;
-      failedBytes += aii.rsr.archiveFile.fileSize;
+      notCreatedSubrequests.emplace_back(aii.rsr);
+      failedCreationStats.files++;
+      failedCreationStats.bytes += aii.rsr.archiveFile.fileSize;
       log::ScopedParamContainer params(lc);
       params.add("fileId", aii.rsr.archiveFile)
             .add("repackVid", repackInfo.vid)
@@ -2390,6 +2420,14 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
           "In OStoreDB::RepackRequest::addSubrequests(): could not asyncInsert the subrequest.");
     }
   }
+  if(notCreatedSubrequests.size()){
+    log::ScopedParamContainer params(lc);
+    params.add("files", failedCreationStats.files);
+    params.add("bytes", failedCreationStats.bytes);
+    m_repackRequest.reportRetrieveCreationFailures(notCreatedSubrequests);
+    m_repackRequest.commit();
+    lc.log(log::ERR, "In OStoreDB::RepackRequest::addSubrequests(), reported the failed creation of Retrieve Requests to the Repack request");
+  }
   // We now have created the subrequests. Time to enqueue.
   // TODO: the lock/fetch could be parallelized
   {
@@ -2403,6 +2441,8 @@ void OStoreDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>
     locks.clear();
     sorter.flushAll(lc);
   }
+  m_repackRequest.setLastExpandedFSeq(fSeq);
+  m_repackRequest.commit();
 }
 
 //------------------------------------------------------------------------------
@@ -3249,6 +3289,7 @@ std::unique_ptr<SchedulerDatabase::ArchiveMount>
   am.mountInfo.vendor = vendor;
   am.mountInfo.mountId = m_schedulerGlobalLock->getIncreaseCommitMountId();
   am.mountInfo.capacityInBytes = capacityInBytes;
+  am.mountInfo.mountType = type;
   m_schedulerGlobalLock->commit();
   am.mountInfo.tapePool = tape.tapePool;
   am.mountInfo.logicalLibrary = logicalLibrary;
@@ -3262,7 +3303,7 @@ std::unique_ptr<SchedulerDatabase::ArchiveMount>
     driveInfo.logicalLibrary=logicalLibrary;
     driveInfo.host=hostName;
     ReportDriveStatusInputs inputs;
-    inputs.mountType = common::dataStructures::MountType::ArchiveForUser;
+    inputs.mountType = type;
     inputs.byteTransferred = 0;
     inputs.filesTransferred = 0;
     inputs.latestBandwidth = 0;
@@ -3749,12 +3790,14 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
   // for report and remove them from ownership.
   SchedulerDatabase::DiskSpaceReservationRequest diskSpaceReservationRequest;
   // 1) Check the async update result.
+  common::dataStructures::MountPolicy mountPolicy;
   for (auto & sDBJob: jobsBatch) {
     auto osdbJob = castFromSchedDBJob(sDBJob);
     if (osdbJob->diskSystemName) diskSpaceReservationRequest.addRequest(osdbJob->diskSystemName.value(), osdbJob->archiveFile.fileSize);
     if (osdbJob->isRepack) {
       try {
         osdbJob->m_jobSucceedForRepackReporter->wait();
+        mountPolicy = osdbJob->m_jobSucceedForRepackReporter->m_MountPolicy;
         jobsToRequeueForRepackMap[osdbJob->m_repackInfo.repackRequestAddress].emplace_back(osdbJob);
       } catch (cta::exception::Exception & ex) {
         log::ScopedParamContainer params(lc);
@@ -3802,10 +3845,10 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
     for (auto & req: repackRequestQueue.second) {
       insertedRequests.push_back(RQTRTRFSAlgo::InsertedElement{&req->m_retrieveRequest, req->selectedCopyNb, 
           req->archiveFile.tapeFiles.at(req->selectedCopyNb).fSeq, req->archiveFile.fileSize,
-          cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,
+          mountPolicy,
           serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess, req->m_activityDescription, req->m_diskSystemName});
       requestToJobMap[&req->m_retrieveRequest] = req;
-    }
+       }
     RQTRTRFSAlgo rQTRTRFSAlgo(m_oStoreDB.m_objectStore, *m_oStoreDB.m_agentReference);
     try {
       rQTRTRFSAlgo.referenceAndSwitchOwnership(repackRequestQueue.first, insertedRequests, lc);
@@ -3860,7 +3903,7 @@ void OStoreDB::ArchiveMount::setDriveStatus(cta::common::dataStructures::DriveSt
   driveInfo.logicalLibrary=mountInfo.logicalLibrary;
   driveInfo.host=mountInfo.host;
   ReportDriveStatusInputs inputs;
-  inputs.mountType = common::dataStructures::MountType::ArchiveForUser;
+  inputs.mountType = m_queueType == JobQueueType::JobsToTransferForUser ? common::dataStructures::MountType::ArchiveForUser : common::dataStructures::MountType::ArchiveForRepack;
   inputs.mountSessionId = mountInfo.mountId;
   inputs.reportTime = completionTime;
   inputs.status = status;
@@ -4412,6 +4455,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
   };
   Deleters::List deletersList;
   JobOwnerUpdaters::List jobOwnerUpdatersList;
+  cta::objectstore::serializers::ArchiveJobStatus newStatus = getNewStatus();
   for (auto &sri: m_subrequestList) {
     bufferURL = sri.repackInfo.fileBufferURL;
     bool moreJobsToDo = false;
@@ -4428,7 +4472,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
       try {
         jobOwnerUpdatersList.push_back(JobOwnerUpdaters{std::unique_ptr<objectstore::ArchiveRequest::AsyncJobOwnerUpdater> (
               ar.asyncUpdateJobOwner(sri.archivedCopyNb, "", m_oStoreDb.m_agentReference->getAgentAddress(),
-              getNewStatus())), 
+              newStatus)), 
             sri});
       } catch (cta::exception::Exception & ex) {
         // Log the error
@@ -4494,7 +4538,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
       params.add("fileId", dfr.subrequestInfo.archiveFile.archiveFileID)
             .add("subrequestAddress", dfr.subrequestInfo.subrequest->getAddressIfSet())
             .add("fileBufferURL", dfr.subrequestInfo.repackInfo.fileBufferURL);
-      lc.log(log::INFO, "In OStoreDB::RepackArchiveFailureReportBatch::report(): async deleted file.");
+      lc.log(log::INFO, "In OStoreDB::RepackArchiveReportBatch::report(): async deleted file.");
     } catch (const cta::exception::Exception& ex){
       // Log the error
       log::ScopedParamContainer params(lc);
@@ -4502,7 +4546,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
             .add("subrequestAddress", dfr.subrequestInfo.subrequest->getAddressIfSet())
             .add("fileBufferURL", dfr.subrequestInfo.repackInfo.fileBufferURL)
             .add("exceptionMsg", ex.getMessageValue());
-      lc.log(log::ERR, "In OStoreDB::RepackArchiveFailureReportBatch::report(): async file not deleted.");
+      lc.log(log::ERR, "In OStoreDB::RepackArchiveReportBatch::report(): async file not deleted.");
     }
   }
   if(repackRequestStatus == objectstore::serializers::RepackRequestStatus::RRS_Complete){
@@ -4515,12 +4559,12 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
       directory->rmdir();
       log::ScopedParamContainer params(lc);
       params.add("repackRequestAddress", m_repackRequest.getAddressIfSet());
-      lc.log(log::INFO, "In OStoreDB::RepackArchiveFailureReportBatch::report(): deleted the "+directoryPath+" directory");
+      lc.log(log::INFO, "In OStoreDB::RepackArchiveReportBatch::report(): deleted the "+directoryPath+" directory");
     } catch (const cta::exception::Exception &ex){
       log::ScopedParamContainer params(lc);
       params.add("repackRequestAddress", m_repackRequest.getAddressIfSet())
             .add("exceptionMsg", ex.getMessageValue());
-      lc.log(log::ERR, "In OStoreDB::RepackArchiveFailureReportBatch::report(): failed to remove the "+directoryPath+" directory");
+      lc.log(log::ERR, "In OStoreDB::RepackArchiveReportBatch::report(): failed to remove the "+directoryPath+" directory");
     }
   }
   for (auto & jou: jobOwnerUpdatersList) {
@@ -4529,14 +4573,14 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
       log::ScopedParamContainer params(lc);
       params.add("fileId", jou.subrequestInfo.archiveFile.archiveFileID)
             .add("subrequestAddress", jou.subrequestInfo.subrequest->getAddressIfSet());
-      lc.log(log::INFO, "In OStoreDB::RepackArchiveFailureReportBatch::report(): async updated job.");
+      lc.log(log::INFO, "In OStoreDB::RepackArchiveReportBatch::report(): async updated job.");
     } catch (cta::exception::Exception & ex) {
       // Log the error
       log::ScopedParamContainer params(lc);
       params.add("fileId", jou.subrequestInfo.archiveFile.archiveFileID)
             .add("subrequestAddress", jou.subrequestInfo.subrequest->getAddressIfSet())
             .add("exceptionMsg", ex.getMessageValue());
-      lc.log(log::ERR, "In OStoreDB::RepackArchiveFailureReportBatch::report(): async job update.");
+      lc.log(log::ERR, "In OStoreDB::RepackArchiveReportBatch::report(): async job update.");
     }    
   }
   timingList.insertAndReset("asyncUpdateOrDeleteCompletionTime", t);
@@ -4547,6 +4591,7 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
   timingList.insertAndReset("ownershipRemoval", t);
   log::ScopedParamContainer params(lc);
   timingList.addToLog(params);
+  params.add("archiveReportType",( newStatus == cta::objectstore::serializers::ArchiveJobStatus::AJS_Complete) ? "ArchiveSuccesses" : "ArchiveFailures");
   lc.log(log::INFO, "In OStoreDB::RepackArchiveReportBatch::report(): reported a batch of jobs.");
 }
 
diff --git a/scheduler/OStoreDB/OStoreDB.hpp b/scheduler/OStoreDB/OStoreDB.hpp
index 6b0b8c7ba0b668c794d3d1fc533534863984f203..3b1d75cdc34c5e6e3df1e06eb4a25c0a3c507554 100644
--- a/scheduler/OStoreDB/OStoreDB.hpp
+++ b/scheduler/OStoreDB/OStoreDB.hpp
@@ -348,7 +348,7 @@ public:
   
   /* === Repack requests handling =========================================== */
   void queueRepack(const std::string& vid, const std::string& bufferURL, 
-    common::dataStructures::RepackInfo::Type repackType, log::LogContext &logContext) override;
+    common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext &logContext) override;
   
   std::list<common::dataStructures::RepackInfo> getRepackInfo() override;
   CTA_GENERATE_EXCEPTION_CLASS(NoSuchRepackRequest);
@@ -450,6 +450,7 @@ public:
        * in order to save a read in the most common case (only one job), and trigger immediate deletion of
        * the request after succeeding/failing. */
       std::map<uint32_t, objectstore::serializers::ArchiveJobStatus> archiveJobsStatusMap;
+      cta::objectstore::AgentReference * owner;
       std::shared_ptr<SR> subrequest;
       common::dataStructures::ArchiveFile archiveFile;
       typename SR::RepackInfo repackInfo;
@@ -523,13 +524,14 @@ public:
   
   std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> getRepackReportBatches(log::LogContext &lc) override;
   
-private:
-  CTA_GENERATE_EXCEPTION_CLASS(NoRepackReportBatchFound);
-  const size_t c_repackReportBatchSize = 500;
   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc);
   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext& lc);
   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc);
   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext &lc);
+  CTA_GENERATE_EXCEPTION_CLASS(NoRepackReportBatchFound);
+private:
+  const size_t c_repackArchiveReportBatchSize = 10000;
+  const size_t c_repackRetrieveReportBatchSize = 10000;
 public:
 
   /* === Drive state handling  ============================================== */
diff --git a/scheduler/OStoreDB/OStoreDBFactory.hpp b/scheduler/OStoreDB/OStoreDBFactory.hpp
index c407eaf74d06e72d58f27be5a66f73a696b161a2..5e3a6345c0123b51ccd0ad917a8a932da1fc1eb7 100644
--- a/scheduler/OStoreDB/OStoreDBFactory.hpp
+++ b/scheduler/OStoreDB/OStoreDBFactory.hpp
@@ -158,6 +158,22 @@ public:
     return m_OStoreDB.getNextRepackReportBatch(lc);
   }
   
+  std::unique_ptr<RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc) override {
+    return m_OStoreDB.getNextSuccessfulRetrieveRepackReportBatch(lc);
+  }
+  
+  std::unique_ptr<RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc) override {
+    return m_OStoreDB.getNextSuccessfulArchiveRepackReportBatch(lc);
+  }
+  
+  std::unique_ptr<RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext& lc) override {
+    return m_OStoreDB.getNextFailedRetrieveRepackReportBatch(lc);
+  }
+  
+  std::unique_ptr<RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext& lc) override {
+    return m_OStoreDB.getNextFailedArchiveRepackReportBatch(lc);
+  }
+  
   std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> getRepackReportBatches(log::LogContext &lc) override {
     return m_OStoreDB.getRepackReportBatches(lc);
   }
@@ -210,8 +226,8 @@ public:
   }
   
 
-  void queueRepack(const std::string& vid, const std::string& bufferURL, common::dataStructures::RepackInfo::Type repackType, log::LogContext& lc) override {
-    m_OStoreDB.queueRepack(vid, bufferURL, repackType, lc);
+  void queueRepack(const std::string& vid, const std::string& bufferURL, common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext& lc) override {
+    m_OStoreDB.queueRepack(vid, bufferURL, repackType, mountPolicy, lc);
   }
   
   std::list<common::dataStructures::RepackInfo> getRepackInfo() override {
diff --git a/scheduler/OStoreDB/OStoreDBTest.cpp b/scheduler/OStoreDB/OStoreDBTest.cpp
index 9c26e501277d5dfb878cc2726815b9db9065f070..42469fc0fd1057a4e9c5221b9fdeac39eb4d40b8 100644
--- a/scheduler/OStoreDB/OStoreDBTest.cpp
+++ b/scheduler/OStoreDB/OStoreDBTest.cpp
@@ -198,7 +198,7 @@ TEST_P(OStoreDBTest, MemQueuesSharedAddToArchiveQueue) {
       aReq.initialize();
       cta::common::dataStructures::ArchiveFile aFile;
       cta::common::dataStructures::MountPolicy mountPolicy;
-      cta::common::dataStructures::UserIdentity requester;
+      cta::common::dataStructures::RequesterIdentity requester;
       cta::common::dataStructures::EntryLog entryLog;
       aFile.archiveFileID = i;
       aReq.setArchiveFile(aFile);
diff --git a/scheduler/OStoreDB/QueueItor.cpp b/scheduler/OStoreDB/QueueItor.cpp
index dee57e6f037cf06c6c45e8f43fe7b7eaf396db27..457c93abcc4838618a6f84894c3ffed7077041ee 100644
--- a/scheduler/OStoreDB/QueueItor.cpp
+++ b/scheduler/OStoreDB/QueueItor.cpp
@@ -108,8 +108,7 @@ getQueueJobs(const jobQueue_t &jobQueueChunk)
         job.tapePool                 = j.tapePool;
         job.copyNumber               = j.copyNb;
         job.archiveFileID            = osar.first.getArchiveFile().archiveFileID;
-        job.request.checksumType     = osar.first.getArchiveFile().checksumType;
-        job.request.checksumValue    = osar.first.getArchiveFile().checksumValue;
+        job.request.checksumBlob     = osar.first.getArchiveFile().checksumBlob;
         job.request.creationLog      = osar.first.getEntryLog();
         job.request.diskFileID       = osar.first.getArchiveFile().diskFileId;
         job.request.diskFileInfo     = osar.first.getArchiveFile().diskFileInfo;
diff --git a/scheduler/RepackReportThread.cpp b/scheduler/RepackReportThread.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e694bc8842283b2d22f4908cc776699748343cf
--- /dev/null
+++ b/scheduler/RepackReportThread.cpp
@@ -0,0 +1,70 @@
+/**
+ * The CERN Tape Archive (CTA) project
+ * Copyright © 2018 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "RepackReportThread.hpp"
+namespace cta {
+
+  RepackReportThread::~RepackReportThread() {
+  }
+  
+  void RepackReportThread::run() {
+    utils::Timer totalTime;
+    bool moreBatch = true;
+    log::ScopedParamContainer params(m_lc);
+    params.add("reportingType",getReportingType());
+    uint64_t numberOfBatchReported = 0;
+    while(totalTime.secs() < c_maxTimeToReport && moreBatch){
+      utils::Timer t;
+      log::TimingList tl;
+      cta::Scheduler::RepackReportBatch reportBatch = getNextRepackReportBatch(m_lc);
+      tl.insertAndReset("getNextRepackReportBatchTime",t);
+      if(!reportBatch.empty()) {
+        reportBatch.report(m_lc);
+        numberOfBatchReported++;
+        tl.insertAndReset("reportingTime",t);
+        log::ScopedParamContainer paramsReport(m_lc);
+        tl.addToLog(paramsReport);
+        m_lc.log(log::INFO,"In RepackReportThread::run(), reported a batch of reports.");
+      } else {
+        moreBatch = false;
+      }
+    }
+    if(numberOfBatchReported > 0){
+      params.add("numberOfBatchReported",numberOfBatchReported);
+      params.add("totalRunTime",totalTime.secs());
+      params.add("moreBatchToDo",moreBatch);
+      m_lc.log(log::INFO,"In RepackReportThread::run(), exiting.");
+    }
+  }
+  
+  cta::Scheduler::RepackReportBatch RetrieveSuccessesRepackReportThread::getNextRepackReportBatch(log::LogContext &lc){
+    return m_scheduler.getNextSuccessfulRetrieveRepackReportBatch(lc);
+  }
+  
+  cta::Scheduler::RepackReportBatch ArchiveSuccessesRepackReportThread::getNextRepackReportBatch(log::LogContext &lc){
+    return m_scheduler.getNextSuccessfulArchiveRepackReportBatch(lc);
+  }
+  
+  cta::Scheduler::RepackReportBatch RetrieveFailedRepackReportThread::getNextRepackReportBatch(log::LogContext &lc){
+    return m_scheduler.getNextFailedRetrieveRepackReportBatch(lc);
+  }
+  
+  cta::Scheduler::RepackReportBatch ArchiveFailedRepackReportThread::getNextRepackReportBatch(log::LogContext &lc){
+    return m_scheduler.getNextFailedArchiveRepackReportBatch(lc);
+  }
+}
\ No newline at end of file
diff --git a/scheduler/RepackReportThread.hpp b/scheduler/RepackReportThread.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..670e2f76a639698a3b7b43bf5be7fdc517258aeb
--- /dev/null
+++ b/scheduler/RepackReportThread.hpp
@@ -0,0 +1,70 @@
+/**
+ * The CERN Tape Archive (CTA) project
+ * Copyright © 2018 CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include "Scheduler.hpp"
+#include "common/threading/Thread.hpp"
+
+namespace cta {
+  
+class RepackReportThread: public cta::threading::Thread {
+public:
+  RepackReportThread(Scheduler& scheduler, log::LogContext &lc):m_scheduler(scheduler),m_lc(lc){}
+  virtual ~RepackReportThread();
+  void run();
+protected:
+  virtual cta::Scheduler::RepackReportBatch getNextRepackReportBatch(log::LogContext &lc) = 0;
+  virtual std::string getReportingType() = 0;
+  Scheduler& m_scheduler;
+  log::LogContext& m_lc;
+  const double c_maxTimeToReport = 30.0;
+};
+
+class RetrieveSuccessesRepackReportThread: public RepackReportThread{
+public:
+  RetrieveSuccessesRepackReportThread(Scheduler& scheduler,log::LogContext& lc):RepackReportThread(scheduler,lc) {}
+private:
+  virtual cta::Scheduler::RepackReportBatch getNextRepackReportBatch(log::LogContext &lc);
+  virtual std::string getReportingType(){ return "RetrieveSuccesses"; }
+};
+
+class ArchiveSuccessesRepackReportThread: public RepackReportThread{
+public:
+  ArchiveSuccessesRepackReportThread(Scheduler& scheduler,log::LogContext& lc):RepackReportThread(scheduler,lc) {}
+private:
+  virtual cta::Scheduler::RepackReportBatch getNextRepackReportBatch(log::LogContext &lc);
+  virtual std::string getReportingType(){ return "ArchiveSuccesses"; }
+};
+
+class RetrieveFailedRepackReportThread: public RepackReportThread{
+public:
+  RetrieveFailedRepackReportThread(Scheduler& scheduler,log::LogContext& lc):RepackReportThread(scheduler,lc) {}
+private:
+  virtual cta::Scheduler::RepackReportBatch getNextRepackReportBatch(log::LogContext &lc);
+  virtual std::string getReportingType(){ return "RetrieveFailed"; }
+};
+
+class ArchiveFailedRepackReportThread: public RepackReportThread{
+public:
+  ArchiveFailedRepackReportThread(Scheduler& scheduler,log::LogContext& lc):RepackReportThread(scheduler,lc) {}
+private:
+  virtual cta::Scheduler::RepackReportBatch getNextRepackReportBatch(log::LogContext &lc);
+  virtual std::string getReportingType(){ return "ArchiveFailed"; }
+};
+
+}
\ No newline at end of file
diff --git a/scheduler/RepackRequestManager.cpp b/scheduler/RepackRequestManager.cpp
index 09c69a09f9f4d02820ee9e4b08bbb566c30b9b3c..62a615b5f2c1189cda4f81845d0f18879da19a25 100644
--- a/scheduler/RepackRequestManager.cpp
+++ b/scheduler/RepackRequestManager.cpp
@@ -18,15 +18,17 @@
 
 #include "RepackRequestManager.hpp"
 #include "Scheduler.hpp"
+#include "common/make_unique.hpp"
+#include "OStoreDB/OStoreDB.hpp"
+#include "RepackReportThread.hpp"
 
 namespace cta {
-
+  
 void RepackRequestManager::runOnePass(log::LogContext& lc) {
   // We give ourselves a budget of 30s for those operations...
   utils::Timer t;
   log::TimingList timingList;
   // First expand any request to expand
-  // TODO: implement expansion
   // Next promote requests to ToExpand if needed
   
   //Putting pending repack request into the RepackQueueToExpand queue
@@ -48,10 +50,14 @@ void RepackRequestManager::runOnePass(log::LogContext& lc) {
   }
   
   {
-    // Do all round of repack subrequest reporting (heavy lifting is done internally).
-    for(auto& reportBatch: m_scheduler.getRepackReportBatches(lc)){
-      reportBatch.report(lc);
-    }
+    RetrieveSuccessesRepackReportThread rsrrt(m_scheduler,lc);
+    rsrrt.run();
+    ArchiveSuccessesRepackReportThread asrrt(m_scheduler,lc);
+    asrrt.run();
+    RetrieveFailedRepackReportThread rfrrt(m_scheduler,lc);
+    rfrrt.run();
+    ArchiveFailedRepackReportThread afrrt(m_scheduler,lc);
+    afrrt.run();
   }
   
 }
diff --git a/scheduler/RepackRequestManager.hpp b/scheduler/RepackRequestManager.hpp
index e78ae76a3d94b541bf17fe3bf37d80893067c753..a780f7095a843f271f3c91f89afb09f0adf8c611 100644
--- a/scheduler/RepackRequestManager.hpp
+++ b/scheduler/RepackRequestManager.hpp
@@ -26,7 +26,8 @@ class Scheduler;
 
 class RepackRequestManager {
 public:
-  RepackRequestManager(Scheduler & scheduler): m_scheduler(scheduler) {}
+  RepackRequestManager(Scheduler & scheduler): m_scheduler(scheduler){
+  }
   
   void runOnePass(log::LogContext & lc);
   
diff --git a/scheduler/RetrieveMount.cpp b/scheduler/RetrieveMount.cpp
index d4c548e740f57236516707e85d6d0d15c5a02770..6ed0f8f5ec0356e2fa900057d1b840fda2496e9e 100644
--- a/scheduler/RetrieveMount.cpp
+++ b/scheduler/RetrieveMount.cpp
@@ -24,15 +24,16 @@
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-cta::RetrieveMount::RetrieveMount():
-  m_sessionRunning(false) {}
+cta::RetrieveMount::RetrieveMount(cta::catalogue::Catalogue &catalogue): m_sessionRunning(false), m_catalogue(catalogue) {
+}
 
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
 cta::RetrieveMount::RetrieveMount(
+  cta::catalogue::Catalogue &catalogue,
   std::unique_ptr<SchedulerDatabase::RetrieveMount> dbMount): 
-  m_sessionRunning(false) {
+  m_sessionRunning(false), m_catalogue(catalogue) {
   m_dbMount.reset(dbMount.release());
 }
 
@@ -140,7 +141,7 @@ std::list<std::unique_ptr<cta::RetrieveJob> > cta::RetrieveMount::getNextJobBatc
     throw SessionNotRunning("In RetrieveMount::getNextJobBatch(): trying to get job from complete/not started session");
   // Get the current file systems list from the catalogue
   disk::DiskSystemList diskSystemList;
-  if (m_catalogue) diskSystemList = m_catalogue->getAllDiskSystems();
+  diskSystemList = m_catalogue.getAllDiskSystems();
   // TODO: the diskSystemFreeSpaceList could be made a member of the retrieve mount and cache the fetched values, limiting the re-querying
   // of the disk systems free space.
   disk::DiskSystemFreeSpaceList diskSystemFreeSpaceList (diskSystemList);
@@ -237,17 +238,6 @@ cta::disk::DiskReporter* cta::RetrieveMount::createDiskReporter(std::string& URL
   return m_reporterFactory.createDiskReporter(URL);
 }
 
-//------------------------------------------------------------------------------
-// setCatalogue()
-//------------------------------------------------------------------------------
-void cta::RetrieveMount::setCatalogue(catalogue::Catalogue* catalogue) {
-  if (m_catalogue)
-    throw exception::Exception("In RetrieveMount::setCatalogue(): catalogue already set.");
-  if (!catalogue)
-    throw exception::Exception("In RetrieveMount::setCatalogue(): trying to set a null catalogue.");
-  m_catalogue = catalogue;
-}
-
 //------------------------------------------------------------------------------
 // tapeComplete()
 //------------------------------------------------------------------------------
@@ -302,6 +292,25 @@ void cta::RetrieveMount::setTapeSessionStats(const castor::tape::tapeserver::dae
   m_dbMount->setTapeSessionStats(stats);
 }
 
+//------------------------------------------------------------------------------
+// setTapeMounted()
+//------------------------------------------------------------------------------
+void cta::RetrieveMount::setTapeMounted(cta::log::LogContext& logContext) const {
+  utils::Timer t;    
+  log::ScopedParamContainer spc(logContext);
+  try {
+    m_catalogue.tapeMountedForRetrieve(m_dbMount->getMountInfo().vid, m_dbMount->getMountInfo().drive);
+    auto catalogueTime = t.secs(cta::utils::Timer::resetCounter);
+    spc.add("catalogueTime", catalogueTime);
+    logContext.log(log::INFO, "In RetrieveMount::setTapeMounted(): success.");
+  } catch (cta::exception::Exception &ex) {
+    auto catalogueTimeFailed = t.secs(cta::utils::Timer::resetCounter);
+    spc.add("catalogueTime", catalogueTimeFailed);
+    logContext.log(cta::log::WARNING,
+      "Failed to update catalogue for the tape mounted for retrieve.");
+  }    
+}
+
 //------------------------------------------------------------------------------
 // bothSidesComplete()
 //------------------------------------------------------------------------------
diff --git a/scheduler/RetrieveMount.hpp b/scheduler/RetrieveMount.hpp
index 0a237e72c8667d7d7a900927d5f52ae7602d4eda..5a3495f31a7d7013e3c70f9ede114de58439d4a5 100644
--- a/scheduler/RetrieveMount.hpp
+++ b/scheduler/RetrieveMount.hpp
@@ -38,18 +38,19 @@ namespace cta {
    */
   class RetrieveMount: public TapeMount {
     friend class Scheduler;
-  protected:
+  protected:    
     /**
-     * Trivial constructor
+     * Constructor.
+     * @param catalogue The file catalogue interface.
      */
-    RetrieveMount();
+    RetrieveMount(cta::catalogue::Catalogue &catalogue);
     
     /**
      * Constructor.
-     *
+     * @param catalogue The file catalogue interface.
      * @param dbMount The database representation of this mount.
      */
-    RetrieveMount(std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbMount);
+    RetrieveMount(cta::catalogue::Catalogue &catalogue, std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbMount);
 
   public:
 
@@ -132,7 +133,13 @@ namespace cta {
      * Report a tape session statistics
      */
     virtual void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats);
-
+    
+    /**
+     * Report a tape mounted event
+     * @param logContext Log context used to record the success or failure of the catalogue update.
+     */
+    virtual void setTapeMounted(log::LogContext &logContext) const;
+    
     /**
      * Indicates that the disk thread of the mount was completed. This
      * will implicitly trigger the transition from DrainingToDisk to Up if necessary.
@@ -192,12 +199,6 @@ namespace cta {
      */
     disk::DiskReporter * createDiskReporter(std::string & URL);
     
-    /**
-     * Passes a reference to the catalogue, used for disk space back pressure 
-     * @param catalogue
-     */
-    void setCatalogue(catalogue::Catalogue *catalogue);
-    
     /**
      * Destructor.
      */
@@ -210,11 +211,6 @@ namespace cta {
      */
     std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> m_dbMount;
     
-    /**
-     * A reference to the catalogue
-     */
-    catalogue::Catalogue * m_catalogue = nullptr;
-    
     /**
      * Internal tracking of the session completion
      */
@@ -237,6 +233,11 @@ namespace cta {
      * Internal tracking of the full disk systems. It is one strike out (for the mount duration).
      */
     std::set<std::string> m_fullDiskSystems;
+    
+    /**
+     * A reference to the file catalogue.
+     */
+    cta::catalogue::Catalogue &m_catalogue; 
 
   }; // class RetrieveMount
 
diff --git a/scheduler/Scheduler.cpp b/scheduler/Scheduler.cpp
index a00b7610bce4df6c965b8294138077e02aa6dc16..d7106341f133df264766ed74beb792e2bdb84a89 100644
--- a/scheduler/Scheduler.cpp
+++ b/scheduler/Scheduler.cpp
@@ -32,6 +32,7 @@
 #include "RetrieveRequestDump.hpp"
 #include "disk/DiskFileImplementations.hpp"
 #include "disk/RadosStriperPool.hpp"
+#include "OStoreDB/OStoreDB.hpp"
 
 #include <iostream>
 #include <sstream>
@@ -101,7 +102,7 @@ void Scheduler::authorizeAdmin(const common::dataStructures::SecurityIdentity &c
 // checkAndGetNextArchiveFileId
 //------------------------------------------------------------------------------
 uint64_t Scheduler::checkAndGetNextArchiveFileId(const std::string &instanceName,
-  const std::string &storageClassName, const common::dataStructures::UserIdentity &user, log::LogContext &lc) {
+  const std::string &storageClassName, const common::dataStructures::RequesterIdentity &user, log::LogContext &lc) {
   cta::utils::Timer t;
   const uint64_t archiveFileId = m_catalogue.checkAndGetNextArchiveFileId(instanceName, storageClassName, user);
   const auto catalogueTime = t.secs();
@@ -157,10 +158,9 @@ void Scheduler::queueArchiveWithGivenId(const uint64_t archiveFileId, const std:
      .add("policyArchivePriority", catalogueInfo.mountPolicy.archivePriority)
      .add("policyMaxDrives", catalogueInfo.mountPolicy.maxDrivesAllowed)
      .add("diskFilePath", request.diskFileInfo.path)
-     .add("diskFileOwner", request.diskFileInfo.owner)
-     .add("diskFileGroup", request.diskFileInfo.group)
-     .add("checksumValue", request.checksumValue)
-     .add("checksumType", request.checksumType)
+     .add("diskFileOwnerUid", request.diskFileInfo.owner_uid)
+     .add("diskFileGid", request.diskFileInfo.gid)
+     .add("checksumBlob", request.checksumBlob)
      .add("archiveReportURL", midEllipsis(request.archiveReportURL, 50, 15))
      .add("archiveErrorReportURL", midEllipsis(request.archiveErrorReportURL, 50, 15))
      .add("creationHost", request.creationLog.host)
@@ -225,8 +225,8 @@ void Scheduler::queueRetrieve(
   spc.add("fileId", request.archiveFileID)
      .add("instanceName", instanceName)
      .add("diskFilePath", request.diskFileInfo.path)
-     .add("diskFileOwner", request.diskFileInfo.owner)
-     .add("diskFileGroup", request.diskFileInfo.group)
+     .add("diskFileOwnerUid", request.diskFileInfo.owner_uid)
+     .add("diskFileGid", request.diskFileInfo.gid)
      .add("dstURL", request.dstURL)
      .add("errorReportURL", request.errorReportURL)
      .add("creationHost", request.creationLog.host)
@@ -235,12 +235,11 @@ void Scheduler::queueRetrieve(
      .add("requesterName", request.requester.name)
      .add("requesterGroup", request.requester.group)
      .add("criteriaArchiveFileId", queueCriteria.archiveFile.archiveFileID)
-     .add("criteriaChecksumType", queueCriteria.archiveFile.checksumType)
-     .add("criteriaChecksumValue", queueCriteria.archiveFile.checksumValue)
+     .add("criteriaChecksumBlob", queueCriteria.archiveFile.checksumBlob)
      .add("criteriaCreationTime", queueCriteria.archiveFile.creationTime)
      .add("criteriaDiskFileId", queueCriteria.archiveFile.diskFileId)
      .add("criteriaDiskFilePath", queueCriteria.archiveFile.diskFileInfo.path)
-     .add("criteriaDiskFileOwner", queueCriteria.archiveFile.diskFileInfo.owner)
+     .add("criteriaDiskFileOwnerUid", queueCriteria.archiveFile.diskFileInfo.owner_uid)
      .add("criteriaDiskInstance", queueCriteria.archiveFile.diskInstance)
      .add("criteriaFileSize", queueCriteria.archiveFile.fileSize)
      .add("reconciliationTime", queueCriteria.archiveFile.reconciliationTime)
@@ -345,13 +344,13 @@ void Scheduler::checkTapeFullBeforeRepack(std::string vid){
 // repack
 //------------------------------------------------------------------------------
 void Scheduler::queueRepack(const common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, 
-    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, log::LogContext & lc) {
+    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc) {
   // Check request sanity
   if (vid.empty()) throw exception::UserError("Empty VID name.");
   if (bufferURL.empty()) throw exception::UserError("Empty buffer URL.");
   utils::Timer t;
   checkTapeFullBeforeRepack(vid);
-  m_db.queueRepack(vid, bufferURL, repackType, lc);
+  m_db.queueRepack(vid, bufferURL, repackType, mountPolicy, lc);
   log::TimingList tl;
   tl.insertAndReset("schedulerDbTime", t);
   log::ScopedParamContainer params(lc);
@@ -482,24 +481,28 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
   timingList.insertAndReset("fillTotalStatsFileBeforeExpandTime",t);
   cta::catalogue::ArchiveFileItor archiveFilesForCatalogue = m_catalogue.getArchiveFilesForRepackItor(repackInfo.vid, fSeq);
   timingList.insertAndReset("catalogueGetArchiveFilesForRepackItorTime",t);
+  
   std::stringstream dirBufferURL;
   dirBufferURL << repackInfo.repackBufferBaseURL << "/" << repackInfo.vid << "/";
-  cta::disk::DirectoryFactory dirFactory;
-  std::unique_ptr<cta::disk::Directory> dir;
-  dir.reset(dirFactory.createDirectory(dirBufferURL.str()));
   std::set<std::string> filesInDirectory;
-  if(dir->exist()){
-    filesInDirectory = dir->getFilesName();
-  } else {
-    dir->mkdir();
+  if(archiveFilesForCatalogue.hasMore()){
+    // Only create the buffer directory if there are actually files to repack
+    cta::disk::DirectoryFactory dirFactory;
+    std::unique_ptr<cta::disk::Directory> dir;
+    dir.reset(dirFactory.createDirectory(dirBufferURL.str()));
+    if(dir->exist()){
+      filesInDirectory = dir->getFilesName();
+    } else {
+      dir->mkdir();
+    }
   }
   double elapsedTime = 0;
   bool stopExpansion = false;
+  repackRequest->m_dbReq->setExpandStartedAndChangeStatus();
   while(archiveFilesForCatalogue.hasMore() && !stopExpansion) {
     size_t filesCount = 0;
     uint64_t maxAddedFSeq = 0;
     std::list<SchedulerDatabase::RepackRequest::Subrequest> retrieveSubrequests;
-    repackRequest->m_dbReq->setExpandStartedAndChangeStatus();
     while(filesCount < c_defaultMaxNbFilesForRepack && !stopExpansion && archiveFilesForCatalogue.hasMore())
     {
       filesCount++;
@@ -515,14 +518,19 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
       if (repackInfo.type == RepackType::MoveAndAddCopies || repackInfo.type == RepackType::MoveOnly) {
         // determine which fSeq(s) (normally only one) lives on this tape.
         for (auto & tc: archiveFile.tapeFiles) if (tc.vid == repackInfo.vid) {
-          retrieveSubRequest.copyNbsToRearchive.insert(tc.copyNb);
           // We make the (reasonable) assumption that the archive file only has one copy on this tape.
           // If not, we will ensure the subrequest is filed under the lowest fSeq existing on this tape.
           // This will prevent double subrequest creation (we already have such a mechanism in case of crash and 
           // restart of expansion.
-          retrieveSubRequest.fSeq = std::min(tc.fSeq, retrieveSubRequest.fSeq);
-          totalStatsFile.totalFilesToArchive += 1;
-          totalStatsFile.totalBytesToArchive += retrieveSubRequest.archiveFile.fileSize;
+          if(tc.supersededByVid.empty()){
+            // Only re-archive the "active" copies on this tape, i.e. those not superseded by a copy on another tape;
+            // likewise, retrieve using the fSeq of the active copy.
+            totalStatsFile.totalFilesToArchive += 1;
+            totalStatsFile.totalBytesToArchive += retrieveSubRequest.archiveFile.fileSize;
+            retrieveSubRequest.copyNbsToRearchive.insert(tc.copyNb);
+            retrieveSubRequest.fSeq = tc.fSeq;
+          }
+          //retrieveSubRequest.fSeq = (retrieveSubRequest.fSeq == std::numeric_limits<decltype(retrieveSubRequest.fSeq)>::max()) ? tc.fSeq : std::max(tc.fSeq, retrieveSubRequest.fSeq);
         }
       }
       std::stringstream fileName;
@@ -570,8 +578,16 @@ void Scheduler::expandRepackRequest(std::unique_ptr<RepackRequest>& repackReques
     // value in case of crash.
     auto diskSystemList = m_catalogue.getAllDiskSystems();
     timingList.insertAndReset("getDisksystemsListTime",t);
-    repackRequest->m_dbReq->addSubrequestsAndUpdateStats(retrieveSubrequests, archiveRoutesMap, fSeq - 1, maxAddedFSeq, totalStatsFile, diskSystemList, lc);
+    repackRequest->m_dbReq->addSubrequestsAndUpdateStats(retrieveSubrequests, archiveRoutesMap, fSeq, maxAddedFSeq, totalStatsFile, diskSystemList, lc);
     timingList.insertAndReset("addSubrequestsAndUpdateStatsTime",t);
+    {
+      if(!stopExpansion && archiveFilesForCatalogue.hasMore()){
+        log::ScopedParamContainer params(lc);
+        params.add("tapeVid",repackInfo.vid);
+        timingList.addToLog(params);
+        lc.log(log::INFO,"Max number of files expanded reached ("+std::to_string(c_defaultMaxNbFilesForRepack)+"), doing some reporting before continuing expansion.");
+      }
+    }
   }
   log::ScopedParamContainer params(lc);
   params.add("tapeVid",repackInfo.vid);
@@ -596,6 +612,10 @@ Scheduler::RepackReportBatch Scheduler::getNextRepackReportBatch(log::LogContext
   return ret;
 }
 
+
+//------------------------------------------------------------------------------
+// Scheduler::getRepackReportBatches
+//------------------------------------------------------------------------------
 std::list<Scheduler::RepackReportBatch> Scheduler::getRepackReportBatches(log::LogContext &lc){
   std::list<Scheduler::RepackReportBatch> ret;
   for(auto& reportBatch: m_db.getRepackReportBatches(lc)){
@@ -606,6 +626,58 @@ std::list<Scheduler::RepackReportBatch> Scheduler::getRepackReportBatches(log::L
   return ret;
 }
 
+//------------------------------------------------------------------------------
+// Scheduler::getNextSuccessfulRetrieveRepackReportBatch
+//------------------------------------------------------------------------------
+Scheduler::RepackReportBatch Scheduler::getNextSuccessfulRetrieveRepackReportBatch(log::LogContext &lc){
+  Scheduler::RepackReportBatch ret;
+  try{
+    ret.m_DbBatch.reset(m_db.getNextSuccessfulRetrieveRepackReportBatch(lc).release());
+  } catch (OStoreDB::NoRepackReportBatchFound &){
+    ret.m_DbBatch = nullptr;
+  }
+  return ret;
+}
+
+//------------------------------------------------------------------------------
+// Scheduler::getNextFailedRetrieveRepackReportBatch
+//------------------------------------------------------------------------------
+Scheduler::RepackReportBatch Scheduler::getNextFailedRetrieveRepackReportBatch(log::LogContext &lc){
+  Scheduler::RepackReportBatch ret;
+  try{
+    ret.m_DbBatch.reset(m_db.getNextFailedRetrieveRepackReportBatch(lc).release());
+  } catch (OStoreDB::NoRepackReportBatchFound &){
+    ret.m_DbBatch = nullptr;
+  }
+  return ret;
+}
+
+//------------------------------------------------------------------------------
+// Scheduler::getNextSuccessfulArchiveRepackReportBatch
+//------------------------------------------------------------------------------
+Scheduler::RepackReportBatch Scheduler::getNextSuccessfulArchiveRepackReportBatch(log::LogContext &lc){
+  Scheduler::RepackReportBatch ret;
+  try{
+    ret.m_DbBatch.reset(m_db.getNextSuccessfulArchiveRepackReportBatch(lc).release());
+  } catch (OStoreDB::NoRepackReportBatchFound &){
+    ret.m_DbBatch = nullptr;
+  }
+  return ret;
+}
+
+//------------------------------------------------------------------------------
+// Scheduler::getNextFailedArchiveRepackReportBatch
+//------------------------------------------------------------------------------
+Scheduler::RepackReportBatch Scheduler::getNextFailedArchiveRepackReportBatch(log::LogContext &lc){
+  Scheduler::RepackReportBatch ret;
+  try{
+    ret.m_DbBatch.reset(m_db.getNextFailedArchiveRepackReportBatch(lc).release());
+  } catch (OStoreDB::NoRepackReportBatchFound &){
+    ret.m_DbBatch = nullptr;
+  }
+  return ret;
+}
+
 //------------------------------------------------------------------------------
 // Scheduler::RepackReportBatch::report
 //------------------------------------------------------------------------------
@@ -932,6 +1004,23 @@ void Scheduler::sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::T
   }
 }
 
+//------------------------------------------------------------------------------
+// getLogicalLibrary
+//------------------------------------------------------------------------------
+cta::optional<common::dataStructures::LogicalLibrary> Scheduler::getLogicalLibrary(const std::string& libraryName, double& getLogicalLibraryTime){
+  utils::Timer timer;
+  auto logicalLibraries = m_catalogue.getLogicalLibraries();
+  cta::optional<common::dataStructures::LogicalLibrary> ret;
+  auto logicalLibraryItor = std::find_if(logicalLibraries.begin(),logicalLibraries.end(),[libraryName](const cta::common::dataStructures::LogicalLibrary& ll){
+    return (ll.name == libraryName); 
+  });
+  getLogicalLibraryTime += timer.secs(utils::Timer::resetCounter);
+  if(logicalLibraryItor != logicalLibraries.end()){
+    ret = *logicalLibraryItor;
+  }
+  return ret;
+}
+
 //------------------------------------------------------------------------------
 // getNextMountDryRun
 //------------------------------------------------------------------------------
@@ -946,6 +1035,25 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, const
   double decisionTime = 0;
   double schedulerDbTime = 0;
   double catalogueTime = 0;
+  double getLogicalLibrariesTime = 0;
+  
+  auto logicalLibrary = getLogicalLibrary(logicalLibraryName,getLogicalLibrariesTime);
+  if(logicalLibrary){
+    if(logicalLibrary.value().isDisabled){
+      log::ScopedParamContainer params(lc);
+      params.add("logicalLibrary",logicalLibraryName)
+            .add("catalogueTime",getLogicalLibrariesTime);
+      lc.log(log::INFO,"In Scheduler::getNextMountDryRun(): logicalLibrary is disabled");
+      return false;
+    }
+  } else {
+    log::ScopedParamContainer params(lc);
+    params.add("logicalLibrary",logicalLibraryName)
+          .add("catalogueTime",getLogicalLibrariesTime);
+    lc.log(log::INFO,"In Scheduler::getNextMountDryRun(): logicalLibrary does not exist");
+    return false;
+  }
+  
   std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> mountInfo;
   mountInfo = m_db.getMountInfoNoLock(lc);
   getMountInfoTime = timer.secs(utils::Timer::resetCounter);
@@ -1038,7 +1146,7 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, const
     }
   }
   schedulerDbTime = getMountInfoTime;
-  catalogueTime = getTapeInfoTime + getTapeForWriteTime;
+  catalogueTime = getTapeInfoTime + getTapeForWriteTime + getLogicalLibrariesTime;
   decisionTime += timer.secs(utils::Timer::resetCounter);
   log::ScopedParamContainer params(lc);
   params.add("getMountInfoTime", getMountInfoTime)
@@ -1080,7 +1188,26 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
   double mountCreationTime = 0;
   double driveStatusSetTime = 0;
   double schedulerDbTime = 0;
+  double getLogicalLibrariesTime = 0;
   double catalogueTime = 0;
+  
+  auto logicalLibrary = getLogicalLibrary(logicalLibraryName,getLogicalLibrariesTime);
+  if(logicalLibrary){
+    if(logicalLibrary.value().isDisabled){
+      log::ScopedParamContainer params(lc);
+      params.add("logicalLibrary",logicalLibraryName)
+            .add("catalogueTime",getLogicalLibrariesTime);
+      lc.log(log::INFO,"In Scheduler::getNextMount(): logicalLibrary is disabled");
+      return std::unique_ptr<TapeMount>();
+    }
+  } else {
+    log::ScopedParamContainer params(lc);
+    params.add("logicalLibrary",logicalLibraryName)
+          .add("catalogueTime",getLogicalLibrariesTime);
+    lc.log(log::CRIT,"In Scheduler::getNextMount(): logicalLibrary does not exist");
+    return std::unique_ptr<TapeMount>();
+  }
+
   std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> mountInfo;
   mountInfo = m_db.getMountInfo(lc);
   getMountInfoTime = timer.secs(utils::Timer::resetCounter);
@@ -1182,8 +1309,8 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
             m->activityNameAndWeightedMountCount.value().activity,
             m->activityNameAndWeightedMountCount.value().weight };
         }
-        std::unique_ptr<RetrieveMount> internalRet (
-          new RetrieveMount(mountInfo->createRetrieveMount(m->vid, 
+        std::unique_ptr<RetrieveMount> internalRet(new RetrieveMount(m_catalogue));
+        internalRet->m_dbMount.reset(mountInfo->createRetrieveMount(m->vid, 
             m->tapePool,
             driveName,
             logicalLibraryName, 
@@ -1192,8 +1319,7 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
             m->mediaType,
             m->vendor,
             m->capacityInBytes,
-            time(NULL), actvityAndWeight)));
-        internalRet->setCatalogue(&m_catalogue);
+            time(NULL), actvityAndWeight).release());
         mountCreationTime += timer.secs(utils::Timer::resetCounter);
         internalRet->m_sessionRunning = true;
         internalRet->m_diskRunning = true;
@@ -1249,7 +1375,7 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
     }
   }
   schedulerDbTime = getMountInfoTime + queueTrimingTime + mountCreationTime + driveStatusSetTime;
-  catalogueTime = getTapeInfoTime + getTapeForWriteTime;
+  catalogueTime = getTapeInfoTime + getTapeForWriteTime + getLogicalLibrariesTime;
   decisionTime += timer.secs(utils::Timer::resetCounter);
   log::ScopedParamContainer params(lc);
   params.add("getMountInfoTime", getMountInfoTime)
@@ -1285,6 +1411,7 @@ std::list<common::dataStructures::QueueAndMountSummary> Scheduler::getQueuesAndM
     auto &summary = common::dataStructures::QueueAndMountSummary::getOrCreateEntry(ret, pm.type, pm.tapePool, pm.vid, vid_to_tapeinfo);
     switch (pm.type) {
     case common::dataStructures::MountType::ArchiveForUser:
+    case common::dataStructures::MountType::ArchiveForRepack:
       summary.mountPolicy.archivePriority = pm.priority;
       summary.mountPolicy.archiveMinRequestAge = pm.minRequestAge;
       summary.mountPolicy.maxDrivesAllowed = pm.maxDrivesAllowed;
@@ -1316,6 +1443,7 @@ std::list<common::dataStructures::QueueAndMountSummary> Scheduler::getQueuesAndM
     auto &summary = common::dataStructures::QueueAndMountSummary::getOrCreateEntry(ret, em.type, em.tapePool, em.vid, vid_to_tapeinfo);
     switch (em.type) {
     case common::dataStructures::MountType::ArchiveForUser:
+    case common::dataStructures::MountType::ArchiveForRepack:
     case common::dataStructures::MountType::Retrieve:
       if (em.currentMount) 
         summary.currentMounts++;
@@ -1332,7 +1460,7 @@ std::list<common::dataStructures::QueueAndMountSummary> Scheduler::getQueuesAndM
   mountDecisionInfo.reset();
   // Add the tape information where useful (archive queues).
   for (auto & mountOrQueue: ret) {
-    if (common::dataStructures::MountType::ArchiveForUser==mountOrQueue.mountType) {
+    if (common::dataStructures::MountType::ArchiveForUser==mountOrQueue.mountType || common::dataStructures::MountType::ArchiveForRepack==mountOrQueue.mountType) {
       // Get all the tape for this pool
       cta::catalogue::TapeSearchCriteria tsc;
       tsc.tapePool = mountOrQueue.tapePool;
@@ -1345,7 +1473,8 @@ std::list<common::dataStructures::QueueAndMountSummary> Scheduler::getQueuesAndM
           mountOrQueue.emptyTapes++;
         if (t.disabled) mountOrQueue.disabledTapes++;
         if (t.full) mountOrQueue.fullTapes++;
-        if (!t.full && !t.disabled) mountOrQueue.writableTapes++;
+        if (t.readOnly) mountOrQueue.readOnlyTapes++;
+        if (!t.full && !t.disabled && !t.readOnly) mountOrQueue.writableTapes++;
       }
     } else if (common::dataStructures::MountType::Retrieve==mountOrQueue.mountType) {
       // Get info for this tape.
@@ -1363,7 +1492,8 @@ std::list<common::dataStructures::QueueAndMountSummary> Scheduler::getQueuesAndM
         mountOrQueue.emptyTapes++;
       if (t.disabled) mountOrQueue.disabledTapes++;
       if (t.full) mountOrQueue.fullTapes++;
-      if (!t.full && !t.disabled) mountOrQueue.writableTapes++;
+      if (t.readOnly) mountOrQueue.readOnlyTapes++;
+      if (!t.full && !t.disabled && !t.readOnly) mountOrQueue.writableTapes++;
       mountOrQueue.tapePool = t.tapePoolName;
     }
   }
@@ -1459,7 +1589,7 @@ void Scheduler::reportArchiveJobsBatch(std::list<std::unique_ptr<ArchiveJob> >&
     auto & current = pendingReports.back();
     // We could fail to create the disk reporter or to get the report URL. This should not impact the other jobs.
     try {
-      current.reporter.reset(reporterFactory.createDiskReporter(j->reportURL()));
+      current.reporter.reset(reporterFactory.createDiskReporter(j->exceptionThrowingReportURL()));
       current.reporter->asyncReport();
       current.archiveJob = j.get();
     } catch (cta::exception::Exception & ex) {
diff --git a/scheduler/Scheduler.hpp b/scheduler/Scheduler.hpp
index 5cddb3c05e23cd21744967f7b3db27f150eddd21..341052b588c8590732ac0b10d75c1a3069c83001 100644
--- a/scheduler/Scheduler.hpp
+++ b/scheduler/Scheduler.hpp
@@ -119,7 +119,7 @@ public:
   uint64_t checkAndGetNextArchiveFileId(
     const std::string &diskInstanceName,
     const std::string &storageClassName,
-    const common::dataStructures::UserIdentity &user,
+    const common::dataStructures::RequesterIdentity &user,
     log::LogContext &lc);
 
   /** 
@@ -200,7 +200,7 @@ public:
     const bool force);
 
   void queueRepack(const common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, 
-    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, log::LogContext & lc);
+    const std::string & bufferURL, const common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc);
   void cancelRepack(const cta::common::dataStructures::SecurityIdentity &cliIdentity, const std::string &vid, log::LogContext & lc);
   std::list<cta::common::dataStructures::RepackInfo> getRepacks();
   cta::common::dataStructures::RepackInfo getRepack(const std::string &vid);
@@ -299,6 +299,8 @@ private:
    */
   void checkTapeFullBeforeRepack(std::string vid);
   
+  cta::optional<common::dataStructures::LogicalLibrary> getLogicalLibrary(const std::string &libraryName, double &getLogicalLibraryTime);
+  
 public:
   /**
    * Run the mount decision logic lock free, so we have no contention in the 
@@ -358,6 +360,11 @@ public:
   RepackReportBatch getNextRepackReportBatch(log::LogContext & lc);
   std::list<Scheduler::RepackReportBatch> getRepackReportBatches(log::LogContext &lc);
   
+  RepackReportBatch getNextSuccessfulRetrieveRepackReportBatch(log::LogContext &lc);
+  RepackReportBatch getNextFailedRetrieveRepackReportBatch(log::LogContext &lc);
+  RepackReportBatch getNextSuccessfulArchiveRepackReportBatch(log::LogContext &lc);
+  RepackReportBatch getNextFailedArchiveRepackReportBatch(log::LogContext &lc);
+  
   /*======================= Failed archive jobs support ======================*/
   SchedulerDatabase::JobsFailedSummary getArchiveJobsFailedSummary(log::LogContext &lc);
 
diff --git a/scheduler/SchedulerDatabase.hpp b/scheduler/SchedulerDatabase.hpp
index 415815c29ff669fe42ef6856a1ab917eead52ec7..cbadd29cc2b1b56a2bd22affe1db55f1a29f9dcb 100644
--- a/scheduler/SchedulerDatabase.hpp
+++ b/scheduler/SchedulerDatabase.hpp
@@ -74,7 +74,6 @@ class StorageClass;
 class Tape;
 class TapeMount;
 class TapeSession;
-class UserIdentity;
 class RepackRequest;
 namespace objectstore{
   class RetrieveRequest;
@@ -159,6 +158,7 @@ public:
       std::string host;
       uint64_t mountId;
       uint64_t capacityInBytes;
+      cta::common::dataStructures::MountType mountType;
     } mountInfo;
     virtual const MountInfo & getMountInfo() = 0;
     virtual std::list<std::unique_ptr<ArchiveJob>> getNextJobBatch(uint64_t filesRequested,
@@ -398,7 +398,7 @@ public:
 
   /*============ Repack management: user side ================================*/
   virtual void queueRepack(const std::string & vid, const std::string & bufferURL,
-      common::dataStructures::RepackInfo::Type repackType, log::LogContext & lc) = 0;
+      common::dataStructures::RepackInfo::Type repackType, const common::dataStructures::MountPolicy &mountPolicy, log::LogContext & lc) = 0;
   virtual std::list<common::dataStructures::RepackInfo> getRepackInfo() = 0;
   virtual common::dataStructures::RepackInfo getRepackInfo(const std::string & vid) = 0;
   virtual void cancelRepack(const std::string & vid, log::LogContext & lc) = 0;
@@ -505,6 +505,14 @@ public:
    */
   virtual std::unique_ptr<RepackReportBatch> getNextRepackReportBatch(log::LogContext & lc) = 0;
   
+  virtual std::unique_ptr<RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext &lc) = 0;
+  
+  virtual std::unique_ptr<RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext &lc) = 0;
+  
+  virtual std::unique_ptr<RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext &lc) = 0;
+  
+  virtual std::unique_ptr<RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext &lc) = 0;
+  
   /**
    * Return all batches of subrequests from the database to be reported to repack.
    * @param lc log context
diff --git a/scheduler/SchedulerDatabaseTest.cpp b/scheduler/SchedulerDatabaseTest.cpp
index d0a0d4ec50ed71c130f74051c0c8b545494e8b5c..b1bf033919133d5c55c17bd55aa4a069f552788d 100644
--- a/scheduler/SchedulerDatabaseTest.cpp
+++ b/scheduler/SchedulerDatabaseTest.cpp
@@ -18,7 +18,6 @@
 
 #include "objectstore/BackendRadosTestSwitch.hpp"
 #include "tests/TestsCompileTimeSwitches.hpp"
-#include "common/UserIdentity.hpp"
 #include "scheduler/SchedulerDatabase.hpp"
 #include "scheduler/SchedulerDatabaseFactory.hpp"
 #include "common/dataStructures/SecurityIdentity.hpp"
@@ -37,6 +36,9 @@
 
 namespace unitTests {
 
+const uint32_t DISK_FILE_OWNER_UID = 9751;
+const uint32_t DISK_FILE_GID = 9752;
+
 /**
  * This structure is used to parameterize scheduler database tests.
  */
@@ -165,7 +167,7 @@ TEST_P(SchedulerDatabaseTest, createManyArchiveJobs) {
       afqc.mountPolicy.comment = "comment";
       afqc.fileId = i;
       ar.archiveReportURL="";
-      ar.checksumType="";
+      ar.checksumBlob.insert(cta::checksum::NONE, "");
       ar.creationLog = { "user", "host", time(nullptr)};
       uuid_t fileUUID;
       uuid_generate(fileUUID);
@@ -173,8 +175,8 @@ TEST_P(SchedulerDatabaseTest, createManyArchiveJobs) {
       uuid_unparse(fileUUID, fileUUIDStr);
       ar.diskFileID = fileUUIDStr;
       ar.diskFileInfo.path = std::string("/uuid/")+fileUUIDStr;
-      ar.diskFileInfo.owner = "user";
-      ar.diskFileInfo.group = "group";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       ar.fileSize = 1000;
       ar.requester = { "user", "group" };
       ar.srcURL = std::string("root:/") + ar.diskFileInfo.path;
@@ -245,7 +247,7 @@ TEST_P(SchedulerDatabaseTest, createManyArchiveJobs) {
       afqc.mountPolicy.comment = "comment";
       afqc.fileId = i;
       ar.archiveReportURL="";
-      ar.checksumType="";
+      ar.checksumBlob.insert(cta::checksum::NONE, "");
       ar.creationLog = { "user", "host", time(nullptr)};
       uuid_t fileUUID;
       uuid_generate(fileUUID);
@@ -253,8 +255,8 @@ TEST_P(SchedulerDatabaseTest, createManyArchiveJobs) {
       uuid_unparse(fileUUID, fileUUIDStr);
       ar.diskFileID = fileUUIDStr;
       ar.diskFileInfo.path = std::string("/uuid/")+fileUUIDStr;
-      ar.diskFileInfo.owner = "user";
-      ar.diskFileInfo.group = "group";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       ar.fileSize = 1000;
       ar.requester = { "user", "group" };
       ar.srcURL = std::string("root:/") + ar.diskFileInfo.path;
@@ -350,8 +352,6 @@ TEST_P(SchedulerDatabaseTest, popRetrieveRequestsWithDisksytem) {
       char fileUUIDStr[37];
       uuid_unparse(fileUUID, fileUUIDStr);
       rr.diskFileInfo.path = std::string("/uuid/")+fileUUIDStr;
-      rr.diskFileInfo.owner = "user";
-      rr.diskFileInfo.group = "group";
       rr.requester = { "user", "group" };
       rr.dstURL = std::string ("root://") + (i%2?"b":"a") + ".disk.system/" + std::to_string(i);
       std::string dsName = (i%2?"ds-B":"ds-A");
@@ -438,9 +438,7 @@ TEST_P(SchedulerDatabaseTest, popRetrieveRequestsWithBackpressure) {
       uuid_generate(fileUUID);
       char fileUUIDStr[37];
       uuid_unparse(fileUUID, fileUUIDStr);
-      rr.diskFileInfo.path = std::string("/uuid/")+fileUUIDStr;
-      rr.diskFileInfo.owner = "user";
-      rr.diskFileInfo.group = "group";
+      rr.diskFileInfo.path = std::string("/uuid/")+fileUUIDStr;
       rr.requester = { "user", "group" };
       std::string dsName;
       if (i%2) {
diff --git a/scheduler/SchedulerTest.cpp b/scheduler/SchedulerTest.cpp
index 43d1da307cae7acfc24503539cee69a48029a45d..1bf432a28ea8ca130d0488ed2a01d3fd17d2e24b 100644
--- a/scheduler/SchedulerTest.cpp
+++ b/scheduler/SchedulerTest.cpp
@@ -57,6 +57,11 @@
 
 namespace unitTests {
 
+const uint32_t CMS_USER = 9751;
+const uint32_t GROUP_2  = 9752;
+const uint32_t PUBLIC_OWNER_UID = 9753;
+const uint32_t PUBLIC_GID = 9754;
+
 namespace {
 
 /**
@@ -189,7 +194,6 @@ public:
     ASSERT_EQ(mountPolicyComment, group.comment);
 
     const std::string ruleComment = "create requester mount-rule";
-    cta::common::dataStructures::UserIdentity userIdentity;
     catalogue.createRequesterMountRule(s_adminOnAdminHost, mountPolicyName, s_diskInstance, s_userName, ruleComment);
 
     const std::list<common::dataStructures::RequesterMountRule> rules = catalogue.getRequesterMountRules();
@@ -264,17 +268,16 @@ TEST_P(SchedulerTest, archive_to_new_file) {
   creationLog.time=0;
   creationLog.username="admin1";
   cta::common::dataStructures::DiskFileInfo diskFileInfo;
-  diskFileInfo.group="group2";
-  diskFileInfo.owner="cms_user";
+  diskFileInfo.gid=GROUP_2;
+  diskFileInfo.owner_uid=CMS_USER;
   diskFileInfo.path="path/to/file";
   cta::common::dataStructures::ArchiveRequest request;
-  request.checksumType="ADLER32";
-  request.checksumValue="1111";
+  request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
   request.creationLog=creationLog;
   request.diskFileInfo=diskFileInfo;
   request.diskFileID="diskFileID";
   request.fileSize=100*1000*1000;
-  cta::common::dataStructures::UserIdentity requester;
+  cta::common::dataStructures::RequesterIdentity requester;
   requester.name = s_userName;
   requester.group = "userGroup";
   request.requester = requester;
@@ -325,17 +328,16 @@ TEST_P(SchedulerTest, archive_to_new_file) {
 //  creationLog.time=0;
 //  creationLog.username="admin1";
 //  cta::common::dataStructures::DiskFileInfo diskFileInfo;
-//  diskFileInfo.group="group2";
-//  diskFileInfo.owner="cms_user";
+//  diskFileInfo.gid=GROUP_2;
+//  diskFileInfo.owner_uid=CMS_USER;
 //  diskFileInfo.path="path/to/file";
 //  cta::common::dataStructures::ArchiveRequest request;
-//  request.checksumType="ADLER32";
-//  request.checksumValue="1111";
+//  request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
 //  request.creationLog=creationLog;
 //  request.diskFileInfo=diskFileInfo;
 //  request.diskFileID="diskFileID";
 //  request.fileSize=100*1000*1000;
-//  cta::common::dataStructures::UserIdentity requester;
+//  cta::common::dataStructures::RequesterIdentity requester;
 //  requester.name = s_userName;
 //  requester.group = "userGroup";
 //  request.requester = requester;
@@ -399,17 +401,16 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1234abcd";
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -435,7 +436,7 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
 
   // Create the environment for the migration to happen (library + tape) 
   const std::string libraryComment = "Library comment";
-  const bool libraryIsDisabled = false;
+  const bool libraryIsDisabled = true;
   catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
     libraryIsDisabled, libraryComment);
   {
@@ -448,8 +449,9 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
 
   const std::string driveName = "tape_drive";
 
@@ -463,6 +465,11 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
     scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
     mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+    //Test that no mount is available when a logical library is disabled
+    ASSERT_EQ(nullptr, mount.get());
+    catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,false);
+    //continue our test
+    mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
     ASSERT_NE(nullptr, mount.get());
     ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
     auto & osdb=getSchedulerDB();
@@ -478,9 +485,8 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
     archiveJob->tapeFile.blockId = 1;
     archiveJob->tapeFile.fSeq = 1;
-    archiveJob->tapeFile.checksumType = "ADLER32";
-    archiveJob->tapeFile.checksumValue = "1234abcd";
-    archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
     archiveJob->tapeFile.copyNb = 1;
     archiveJob->validate();
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
@@ -509,8 +515,8 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::RetrieveRequest request;
     request.archiveFileID = archiveFileId;
@@ -599,17 +605,16 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1234abcd";
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -648,8 +653,9 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
 
   const std::string driveName = "tape_drive";
 
@@ -678,9 +684,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
     std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
     archiveJob->tapeFile.blockId = 1;
     archiveJob->tapeFile.fSeq = 1;
-    archiveJob->tapeFile.checksumType = "ADLER32";
-    archiveJob->tapeFile.checksumValue = "1234abcd";
-    archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
     archiveJob->tapeFile.copyNb = 1;
     archiveJob->validate();
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
@@ -709,8 +714,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::RetrieveRequest request;
     request.archiveFileID = archiveFileId;
@@ -850,17 +855,16 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1234abcd";
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -899,8 +903,9 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, "mediatype", "vendor", s_libraryName, s_tapePoolName,
-    capacityInBytes, notDisabled, notFull, tapeComment);
+    capacityInBytes, notDisabled, notFull, notReadOnly, tapeComment);
 
   const std::string driveName = "tape_drive";
 
@@ -929,9 +934,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
     std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
     archiveJob->tapeFile.blockId = 1;
     archiveJob->tapeFile.fSeq = 1;
-    archiveJob->tapeFile.checksumType = "ADLER32";
-    archiveJob->tapeFile.checksumValue = "1234abcd";
-    archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
     archiveJob->tapeFile.copyNb = 1;
     archiveJob->validate();
     std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
@@ -960,8 +964,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::RetrieveRequest request;
     request.archiveFileID = archiveFileId;
@@ -1107,17 +1111,16 @@ TEST_P(SchedulerTest, retry_archive_until_max_reached) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1111";
+    request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -1144,8 +1147,9 @@ TEST_P(SchedulerTest, retry_archive_until_max_reached) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
 
   catalogue.tapeLabelled(s_vid, "tape_drive");
 
@@ -1190,8 +1194,8 @@ TEST_P(SchedulerTest, retrieve_non_existing_file) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::RetrieveRequest request;
     request.archiveFileID = 12345;
@@ -1222,17 +1226,16 @@ TEST_P(SchedulerTest, showqueues) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1111";
+    request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -1273,17 +1276,18 @@ TEST_P(SchedulerTest, repack) {
   cliId.username = s_userName;
   std::string tape1 = "Tape";
   
-  catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,false,"Comment");
+  const bool notReadOnly = false;
+  catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,false, notReadOnly, "Comment");
   
   //The queueing of a repack request should fail if the tape to repack is not full
-  ASSERT_THROW(scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, lc),cta::exception::UserError);
+  ASSERT_THROW(scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc),cta::exception::UserError);
   //The queueing of a repack request in a vid that does not exist should throw an exception
-  ASSERT_THROW(scheduler.queueRepack(cliId, "NOT_EXIST", "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, lc),cta::exception::UserError);
+  ASSERT_THROW(scheduler.queueRepack(cliId, "NOT_EXIST", "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc),cta::exception::UserError);
   
   catalogue.setTapeFull(cliId,tape1,true);
   
   // Create and then cancel repack
-  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, lc);
+  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
   {
     auto repacks = scheduler.getRepacks();
     ASSERT_EQ(1, repacks.size());
@@ -1294,8 +1298,8 @@ TEST_P(SchedulerTest, repack) {
   ASSERT_EQ(0, scheduler.getRepacks().size());
   // Recreate a repack and get it moved to ToExpand
   std::string tape2 = "Tape2";
-  catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true,"Comment");
-  scheduler.queueRepack(cliId, tape2, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, lc);
+  catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
+  scheduler.queueRepack(cliId, tape2, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
   {
     auto repacks = scheduler.getRepacks();
     ASSERT_EQ(1, repacks.size());
@@ -1332,16 +1336,17 @@ TEST_P(SchedulerTest, getNextRepackRequestToExpand) {
   cliId.host = "host";
   cliId.username = s_userName;
   std::string tape1 = "Tape";
-  catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true,"Comment");
+  const bool notReadOnly = false;
+  catalogue.createTape(cliId,tape1,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
   
   //Queue the first repack request
-  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, lc);
+  scheduler.queueRepack(cliId, tape1, "file://"+tempDirectory.path(), common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
   
   std::string tape2 = "Tape2";
-  catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true,"Comment");
+  catalogue.createTape(cliId,tape2,"mediaType","vendor",s_libraryName,s_tapePoolName,500,false,true, notReadOnly, "Comment");
   
   //Queue the second repack request
-  scheduler.queueRepack(cliId,tape2,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::AddCopiesOnly,lc);
+  scheduler.queueRepack(cliId,tape2,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::AddCopiesOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
   
   //Test the repack request queued has status Pending
   ASSERT_EQ(scheduler.getRepack(tape1).status,common::dataStructures::RepackInfo::Status::Pending);
@@ -1398,12 +1403,11 @@ TEST_P(SchedulerTest, expandRepackRequest) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = true;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
   cta::common::dataStructures::SecurityIdentity admin;
   admin.username = "admin_user_name";
   admin.host = "admin_host";
-  const std::string diskFileUser = "public_disk_user";
-  const std::string diskFileGroup = "public_disk_group";
   
   //Create a logical library in the catalogue
   const bool libraryIsDisabled = false;
@@ -1421,7 +1425,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
     std::string vid = ossVid.str();
     allVid.push_back(vid);
     catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-      disabledValue, fullValue, comment);
+      disabledValue, fullValue, readOnlyValue, comment);
   }
   
   //Create a storage class in the catalogue
@@ -1430,16 +1434,14 @@ TEST_P(SchedulerTest, expandRepackRequest) {
   storageClass.name = s_storageClassName;
   storageClass.nbCopies = 2;
   storageClass.comment = "Create storage class";
-
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
   const uint64_t nbArchiveFilesPerTape = 10;
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
 
   //Simulate the writing of 10 files per tape in the catalogue
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  checksum::ChecksumBlob checksumBlob;
+  checksumBlob.insert(cta::checksum::ADLER32, "1234");
   {
     uint64_t archiveFileId = 1;
     for(uint64_t i = 1; i<= nbTapesToRepack;++i){
@@ -1455,16 +1457,14 @@ TEST_P(SchedulerTest, expandRepackRequest) {
         fileWritten.diskInstance = storageClass.diskInstance;
         fileWritten.diskFileId = diskFileId.str();
         fileWritten.diskFilePath = diskFilePath.str();
-        fileWritten.diskFileUser = diskFileUser;
-        fileWritten.diskFileGroup = diskFileGroup;
+        fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+        fileWritten.diskFileGid = PUBLIC_GID;
         fileWritten.size = archiveFileSize;
-        fileWritten.checksumType = checksumType;
-        fileWritten.checksumValue = checksumValue;
+        fileWritten.checksumBlob = checksumBlob;
         fileWritten.storageClassName = s_storageClassName;
         fileWritten.vid = currentVid;
         fileWritten.fSeq = j;
         fileWritten.blockId = j * 100;
-        fileWritten.compressedSize = compressedFileSize;
         fileWritten.copyNb = 1;
         fileWritten.tapeDrive = tapeDrive;
         tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -1478,7 +1478,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   {
     for(uint64_t i = 0; i < nbTapesToRepack ; ++i) {
-      scheduler.queueRepack(admin,allVid.at(i),"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,lc);
+      scheduler.queueRepack(admin,allVid.at(i),"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
     }
     scheduler.waitSchedulerDbSubthreadsComplete();
     //scheduler.waitSchedulerDbSubthreadsComplete();
@@ -1513,15 +1513,14 @@ TEST_P(SchedulerTest, expandRepackRequest) {
         //Test that the informations are correct for each file
         //ASSERT_EQ(retrieveJob.request.tapePool,s_tapePoolName);
         ASSERT_EQ(retrieveJob.request.archiveFileID,archiveFileId++);
-        ASSERT_EQ(retrieveJob.fileSize,compressedFileSize);
+        ASSERT_EQ(retrieveJob.fileSize,archiveFileSize);
         std::stringstream ss;
         ss<<"file://"<<tempDirectory.path()<<"/"<<allVid.at(i-1)<<"/"<<std::setw(9)<<std::setfill('0')<<j;
         ASSERT_EQ(retrieveJob.request.dstURL, ss.str());
         ASSERT_EQ(retrieveJob.tapeCopies[vid].second.copyNb,1);
-        ASSERT_EQ(retrieveJob.tapeCopies[vid].second.checksumType,checksumType);
-        ASSERT_EQ(retrieveJob.tapeCopies[vid].second.checksumValue,checksumValue);
+        ASSERT_EQ(retrieveJob.tapeCopies[vid].second.checksumBlob,checksumBlob);
         ASSERT_EQ(retrieveJob.tapeCopies[vid].second.blockId,j*100);
-        ASSERT_EQ(retrieveJob.tapeCopies[vid].second.compressedSize,compressedFileSize);
+        ASSERT_EQ(retrieveJob.tapeCopies[vid].second.fileSize,archiveFileSize);
         ASSERT_EQ(retrieveJob.tapeCopies[vid].second.fSeq,j);
         ASSERT_EQ(retrieveJob.tapeCopies[vid].second.vid,vid);
         ++j;
@@ -1609,9 +1608,8 @@ TEST_P(SchedulerTest, expandRepackRequest) {
           ASSERT_EQ(tapeFile.vid,allVid.at(i-1));
           ASSERT_EQ(tapeFile.blockId,j * 100);
           ASSERT_EQ(tapeFile.fSeq,j);
-          ASSERT_EQ(tapeFile.checksumType, checksumType);
-          ASSERT_EQ(tapeFile.checksumValue,checksumValue);
-          ASSERT_EQ(tapeFile.compressedSize, compressedFileSize);
+          ASSERT_EQ(tapeFile.checksumBlob, checksumBlob);
+          ASSERT_EQ(tapeFile.fileSize, archiveFileSize);
 
           //Testing scheduler retrieve request
           ASSERT_EQ(schedulerRetrieveRequest.archiveFileID,archiveFileId++);
@@ -1676,16 +1674,15 @@ TEST_P(SchedulerTest, expandRepackRequest) {
         ASSERT_LE(1, fileIndex);
         ASSERT_GE(nbArchiveFilesPerTape, fileIndex);
         //Test the ArchiveRequest
-        ASSERT_EQ(archiveFile.checksumType,checksumType);
-        ASSERT_EQ(archiveFile.checksumValue,checksumValue);
+        ASSERT_EQ(archiveFile.checksumBlob,checksumBlob);
         std::ostringstream diskFilePath;
         diskFilePath << "/public_dir/public_file_"<<tapeIndex<<"_"<<fileIndex;
         std::ostringstream diskFileId;
         diskFileId << (12345677 + archiveFile.archiveFileID);
         ASSERT_EQ(archiveFile.diskFileId,diskFileId.str());
         ASSERT_EQ(archiveFile.diskFileInfo.path,diskFilePath.str());
-        ASSERT_EQ(archiveFile.diskFileInfo.group,diskFileGroup);
-        ASSERT_EQ(archiveFile.diskFileInfo.owner,diskFileUser);
+        ASSERT_EQ(archiveFile.diskFileInfo.gid,PUBLIC_GID);
+        ASSERT_EQ(archiveFile.diskFileInfo.owner_uid,PUBLIC_OWNER_UID);
         ASSERT_EQ(archiveFile.fileSize,archiveFileSize);
         ASSERT_EQ(archiveFile.storageClass,s_storageClassName);
         std::stringstream ss;
@@ -1734,12 +1731,11 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = true;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
   cta::common::dataStructures::SecurityIdentity admin;
   admin.username = "admin_user_name";
   admin.host = "admin_host";
-  const std::string diskFileUser = "public_disk_user";
-  const std::string diskFileGroup = "public_disk_group";
   
   //Create a logical library in the catalogue
   const bool libraryIsDisabled = false;
@@ -1749,7 +1745,7 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
   ossVid << s_vid << "_" << 1;
   std::string vid = ossVid.str();
   catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   
   //Create a storage class in the catalogue
   common::dataStructures::StorageClass storageClass;
@@ -1758,12 +1754,9 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
   storageClass.nbCopies = 2;
   storageClass.comment = "Create storage class";
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
   const uint64_t nbArchiveFilesPerTape = 10;
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
   
   //Simulate the writing of 10 files per tape in the catalogue
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
@@ -1781,16 +1774,15 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = diskFileUser;
-      fileWritten.diskFileGroup = diskFileGroup;
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
       fileWritten.storageClassName = s_storageClassName;
       fileWritten.vid = currentVid;
       fileWritten.fSeq = j;
       fileWritten.blockId = j * 100;
-      fileWritten.compressedSize = compressedFileSize;
+      fileWritten.size = archiveFileSize;
       fileWritten.copyNb = 1;
       fileWritten.tapeDrive = tapeDrive;
       tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -1803,7 +1795,7 @@ TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
  
     log::TimingList tl;
@@ -1974,12 +1966,11 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = true;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
   cta::common::dataStructures::SecurityIdentity admin;
   admin.username = "admin_user_name";
   admin.host = "admin_host";
-  const std::string diskFileUser = "public_disk_user";
-  const std::string diskFileGroup = "public_disk_group";
   
   //Create a logical library in the catalogue
   const bool libraryIsDisabled = false;
@@ -1989,11 +1980,11 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
   ossVid << s_vid << "_" << 1;
   std::string vid = ossVid.str();
   catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   //Create a repack destination tape
   std::string vidDestination = "vidDestination";
   catalogue.createTape(s_adminOnAdminHost,vidDestination, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    disabledValue, false, comment);
+    disabledValue, false, readOnlyValue, comment);
   
   //Create a storage class in the catalogue
   common::dataStructures::StorageClass storageClass;
@@ -2002,12 +1993,9 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
   storageClass.nbCopies = 2;
   storageClass.comment = "Create storage class";
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
   const uint64_t nbArchiveFilesPerTape = 10;
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
   
   //Simulate the writing of 10 files per tape in the catalogue
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
@@ -2025,16 +2013,15 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = diskFileUser;
-      fileWritten.diskFileGroup = diskFileGroup;
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
       fileWritten.storageClassName = s_storageClassName;
       fileWritten.vid = currentVid;
       fileWritten.fSeq = j;
       fileWritten.blockId = j * 100;
-      fileWritten.compressedSize = compressedFileSize;
+      fileWritten.size = archiveFileSize;
       fileWritten.copyNb = 1;
       fileWritten.tapeDrive = tapeDrive;
       tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -2047,7 +2034,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
     //scheduler.waitSchedulerDbSubthreadsComplete();
  
@@ -2119,7 +2106,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
     std::unique_ptr<cta::TapeMount> mount;
     mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
     ASSERT_NE(nullptr, mount.get());
-    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
     
     std::unique_ptr<cta::ArchiveMount> archiveMount;
     archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
@@ -2132,9 +2119,8 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
       auto jobBatch = archiveMount->getNextJobBatch(1,archiveFileSize,lc);
       archiveJob.reset(jobBatch.front().release());
       archiveJob->tapeFile.blockId = j * 101;
-      archiveJob->tapeFile.checksumType = checksumType;
-      archiveJob->tapeFile.checksumValue = checksumValue;
-      archiveJob->tapeFile.compressedSize = compressedFileSize;
+      archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32,"1234");
+      archiveJob->tapeFile.fileSize = archiveFileSize;
       ASSERT_NE(nullptr,archiveJob.get());
       executedJobs.push_back(std::move(archiveJob));
     }
@@ -2225,12 +2211,11 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = true;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
   cta::common::dataStructures::SecurityIdentity admin;
   admin.username = "admin_user_name";
   admin.host = "admin_host";
-  const std::string diskFileUser = "public_disk_user";
-  const std::string diskFileGroup = "public_disk_group";
   
   //Create a logical library in the catalogue
   const bool libraryIsDisabled = false;
@@ -2240,12 +2225,12 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
   ossVid << s_vid << "_" << 1;
   std::string vid = ossVid.str();
   catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
 
   //Create a repack destination tape
   std::string vidDestinationRepack = "vidDestinationRepack";
   catalogue.createTape(s_adminOnAdminHost,vidDestinationRepack, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-   disabledValue, false, comment);
+   disabledValue, false, readOnlyValue, comment);
   
   //Create a storage class in the catalogue
   common::dataStructures::StorageClass storageClass;
@@ -2254,12 +2239,9 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
   storageClass.nbCopies = 2;
   storageClass.comment = "Create storage class";
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
   const uint64_t nbArchiveFilesPerTape = 10;
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
   
   //Simulate the writing of 10 files per tape in the catalogue
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
@@ -2277,16 +2259,15 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = diskFileUser;
-      fileWritten.diskFileGroup = diskFileGroup;
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
       fileWritten.storageClassName = s_storageClassName;
       fileWritten.vid = currentVid;
       fileWritten.fSeq = j;
       fileWritten.blockId = j * 100;
-      fileWritten.compressedSize = compressedFileSize;
+      fileWritten.size = archiveFileSize;
       fileWritten.copyNb = 1;
       fileWritten.tapeDrive = tapeDrive;
       tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -2299,7 +2280,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
   scheduler.waitSchedulerDbSubthreadsComplete();
   
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly, common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
 
     log::TimingList tl;
@@ -2367,7 +2348,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
     std::unique_ptr<cta::TapeMount> mount;
     mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
     ASSERT_NE(nullptr, mount.get());
-    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
     
     std::unique_ptr<cta::ArchiveMount> archiveMount;
     archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
@@ -2380,9 +2361,8 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
       auto jobBatch = archiveMount->getNextJobBatch(1,archiveFileSize,lc);
       archiveJob.reset(jobBatch.front().release());
       archiveJob->tapeFile.blockId = j * 101;
-      archiveJob->tapeFile.checksumType = checksumType;
-      archiveJob->tapeFile.checksumValue = checksumValue;
-      archiveJob->tapeFile.compressedSize = compressedFileSize;
+      archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32,"1234");
+      archiveJob->tapeFile.fileSize = archiveFileSize;
       ASSERT_NE(nullptr,archiveJob.get());
       executedJobs.push_back(std::move(archiveJob));
     }
@@ -2440,7 +2420,7 @@ TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
       std::unique_ptr<cta::TapeMount> mount;
       mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
       ASSERT_NE(nullptr, mount.get());
-      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForRepack, mount.get()->getMountType());
       std::unique_ptr<cta::ArchiveMount> archiveMount;
       archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
       ASSERT_NE(nullptr, archiveMount.get());
@@ -2530,12 +2510,11 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
   const bool disabledValue = false;
   const bool fullValue = true;
+  const bool readOnlyValue = false;
   const std::string comment = "Create tape";
   cta::common::dataStructures::SecurityIdentity admin;
   admin.username = "admin_user_name";
   admin.host = "admin_host";
-  const std::string diskFileUser = "public_disk_user";
-  const std::string diskFileGroup = "public_disk_group";
   
   //Create a logical library in the catalogue
   const bool logicalLibraryIsDisabled = false;
@@ -2545,7 +2524,7 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   ossVid << s_vid << "_" << 1;
   std::string vid = ossVid.str();
   catalogue.createTape(s_adminOnAdminHost,vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    disabledValue, fullValue, comment);
+    disabledValue, fullValue, readOnlyValue, comment);
   
   //Create a storage class in the catalogue
   common::dataStructures::StorageClass storageClass;
@@ -2554,12 +2533,9 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   storageClass.nbCopies = 2;
   storageClass.comment = "Create storage class";
 
-  const std::string checksumType = "checksum_type";
-  const std::string checksumValue = "checksum_value";
   const std::string tapeDrive = "tape_drive";
   const uint64_t nbArchiveFilesPerTape = 10;
   const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
-  const uint64_t compressedFileSize = archiveFileSize;
   
   //Simulate the writing of 10 files in 1 tape in the catalogue
   std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
@@ -2577,16 +2553,15 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
       fileWritten.diskInstance = storageClass.diskInstance;
       fileWritten.diskFileId = diskFileId.str();
       fileWritten.diskFilePath = diskFilePath.str();
-      fileWritten.diskFileUser = diskFileUser;
-      fileWritten.diskFileGroup = diskFileGroup;
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
       fileWritten.size = archiveFileSize;
-      fileWritten.checksumType = checksumType;
-      fileWritten.checksumValue = checksumValue;
+      fileWritten.checksumBlob.insert(cta::checksum::ADLER32,"1234");
       fileWritten.storageClassName = s_storageClassName;
       fileWritten.vid = currentVid;
       fileWritten.fSeq = j;
       fileWritten.blockId = j * 100;
-      fileWritten.compressedSize = compressedFileSize;
+      fileWritten.size = archiveFileSize;
       fileWritten.copyNb = 1;
       fileWritten.tapeDrive = tapeDrive;
       tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
@@ -2599,7 +2574,7 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   //one retrieve request
   scheduler.waitSchedulerDbSubthreadsComplete();
   {
-    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,lc);
+    scheduler.queueRepack(admin,vid,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack, lc);
     scheduler.waitSchedulerDbSubthreadsComplete();
 
     log::TimingList tl;
@@ -2650,19 +2625,18 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     diskFileInfo.path += std::to_string(i);
     cta::common::dataStructures::ArchiveRequest request;
-    request.checksumType="ADLER32";
-    request.checksumValue="1234abcd";
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
     request.creationLog=creationLog;
     request.diskFileInfo=diskFileInfo;
     request.diskFileID="diskFileID";
     request.diskFileID += std::to_string(i);
     request.fileSize=100*1000*1000;
-    cta::common::dataStructures::UserIdentity requester;
+    cta::common::dataStructures::RequesterIdentity requester;
     requester.name = s_userName;
     requester.group = "userGroup";
     request.requester = requester;
@@ -2705,10 +2679,11 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   const std::string driveName = "tape_drive";
   for (auto i:fileRange) {
     catalogue.createTape(s_adminOnAdminHost, s_vid + std::to_string(i), s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-      notDisabled, notFull, tapeComment);
+      notDisabled, notFull, notReadOnly, tapeComment);
     catalogue.tapeLabelled(s_vid + std::to_string(i), "tape_drive");    
   }
 
@@ -2738,9 +2713,8 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
       std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
       archiveJob->tapeFile.blockId = 1;
       archiveJob->tapeFile.fSeq = 1;
-      archiveJob->tapeFile.checksumType = "ADLER32";
-      archiveJob->tapeFile.checksumValue = "1234abcd";
-      archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
+      archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+      archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
       archiveJob->tapeFile.copyNb = 1;
       archiveJob->validate();
       std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
@@ -2789,8 +2763,8 @@ TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
     creationLog.time=0;
     creationLog.username="admin1";
     cta::common::dataStructures::DiskFileInfo diskFileInfo;
-    diskFileInfo.group="group2";
-    diskFileInfo.owner="cms_user";
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
     diskFileInfo.path="path/to/file";
     for (auto i:fileRange) {
       cta::common::dataStructures::RetrieveRequest request;
diff --git a/scheduler/TapeMount.hpp b/scheduler/TapeMount.hpp
index 01dcae8da022e73022237d5ec385d08dfe422c6d..d350afb7dcc3a7acfb9af71b68e2330af822b00f 100644
--- a/scheduler/TapeMount.hpp
+++ b/scheduler/TapeMount.hpp
@@ -21,6 +21,7 @@
 #include "common/dataStructures/MountType.hpp"
 #include "common/dataStructures/DriveStatus.hpp"
 #include "common/optional.hpp"
+#include "common/log/LogContext.hpp"
 #include "tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp"
 
 #include <string>
@@ -96,6 +97,12 @@ namespace cta {
      */
     virtual void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) = 0;
     
+    /**
+     * Report a tape mounted event
+     * @param LogContext
+     */
+    virtual void setTapeMounted(cta::log::LogContext &logContext) const = 0;
+    
     /**
      * Destructor.
      */
diff --git a/scheduler/TapeMountDummy.hpp b/scheduler/TapeMountDummy.hpp
index ca98d766ef6cf51c6d73ca9dbb7b3e7d7d377e9d..85b1153bfe257aedaab75c6e815d4128dc2b5ff7 100644
--- a/scheduler/TapeMountDummy.hpp
+++ b/scheduler/TapeMountDummy.hpp
@@ -58,6 +58,7 @@ class TapeMountDummy: public TapeMount {
 
   void setDriveStatus(cta::common::dataStructures::DriveStatus status) override {}
   void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override {};
+  void setTapeMounted(log::LogContext &logContext) const override {};
 };
 
 }
\ No newline at end of file
diff --git a/scheduler/testingMocks/MockArchiveJob.hpp b/scheduler/testingMocks/MockArchiveJob.hpp
index 03a45db8305410edae9fcb186e15df4ed47754ac..f3d6a5c4a808c1b6048ccbda39a9bd31daaf88c3 100644
--- a/scheduler/testingMocks/MockArchiveJob.hpp
+++ b/scheduler/testingMocks/MockArchiveJob.hpp
@@ -49,13 +49,11 @@ namespace cta {
       auto &  fileReport = *fileReportUP;
       fileReport.archiveFileId = archiveFile.archiveFileID;
       fileReport.blockId = tapeFile.blockId;
-      fileReport.checksumType = tapeFile.checksumType;
-      fileReport.checksumValue = tapeFile.checksumValue;
-      fileReport.compressedSize = tapeFile.compressedSize;
+      fileReport.checksumBlob = tapeFile.checksumBlob;
       fileReport.copyNb = tapeFile.copyNb;
       fileReport.diskFileId = archiveFile.diskFileId;
-      fileReport.diskFileUser = archiveFile.diskFileInfo.owner;
-      fileReport.diskFileGroup = archiveFile.diskFileInfo.group;
+      fileReport.diskFileOwnerUid = archiveFile.diskFileInfo.owner_uid;
+      fileReport.diskFileGid = archiveFile.diskFileInfo.gid;
       fileReport.diskFilePath = archiveFile.diskFileInfo.path;
       fileReport.diskInstance = archiveFile.diskInstance;
       fileReport.fSeq = tapeFile.fSeq;
diff --git a/scheduler/testingMocks/MockRetrieveMount.hpp b/scheduler/testingMocks/MockRetrieveMount.hpp
index 4753487bc702efd67854073720189c1b6b7b84ef..6ffefac6286e89d6f8a074e6f1aee16aa531e5ae 100644
--- a/scheduler/testingMocks/MockRetrieveMount.hpp
+++ b/scheduler/testingMocks/MockRetrieveMount.hpp
@@ -21,6 +21,7 @@
 #include "scheduler/RetrieveMount.hpp"
 #include "scheduler/RetrieveJob.hpp"
 #include "scheduler/testingMocks/MockRetrieveJob.hpp"
+#include "catalogue/DummyCatalogue.hpp"
 #include <memory>
 
 namespace cta {
@@ -28,7 +29,7 @@ namespace cta {
   public:
     int getJobs;
     int completes;
-    MockRetrieveMount(): getJobs(0), completes(0) {}
+    MockRetrieveMount(cta::catalogue::Catalogue &catalogue): RetrieveMount(catalogue), getJobs(0), completes(0) {}
 
     ~MockRetrieveMount() throw() {
     }
@@ -68,6 +69,8 @@ namespace cta {
     
     void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override {};
     
+    void setTapeMounted(log::LogContext &logContext) const override {};
+    
     void flushAsyncSuccessReports(std::queue<std::unique_ptr<cta::RetrieveJob> >& successfulRetrieveJobs, cta::log::LogContext& logContext) override {};
 
   private:
diff --git a/tapeserver/CMakeLists.txt b/tapeserver/CMakeLists.txt
index 3ab3b09995a471741b78a8514dba22cdd36d3023..1cc4f8589ae660837bde40cf85d5145d8fee6ad0 100644
--- a/tapeserver/CMakeLists.txt
+++ b/tapeserver/CMakeLists.txt
@@ -8,6 +8,7 @@ add_subdirectory (daemon)
 add_subdirectory (session)
 # The tape session's threads are in a separate directory (session, but compiled
 # from the previous one to create a single library).
+add_subdirectory (tapelabel)
 
 include_directories (${PROTOBUF3_INCLUDE_DIRS})
 add_executable (cta-taped cta-taped.cpp)
diff --git a/tapeserver/castor/tape/Constants.hpp b/tapeserver/castor/tape/Constants.hpp
index 3e0eba0b86d584087e8455b6c6730c8cd9bddbde..8d3fecd73fd8ab29cbf6a08dc758ef2777bb4b30 100644
--- a/tapeserver/castor/tape/Constants.hpp
+++ b/tapeserver/castor/tape/Constants.hpp
@@ -35,7 +35,7 @@ namespace tape   {
   /**
    * The full path of the TPCONFIG file which is installed on each tape server.
    */
-  const char *const TPCONFIGPATH = "/etc/castor/TPCONFIG";
+  const char *const TPCONFIGPATH = "/etc/cta/TPCONFIG";
 
 } // namespace tape
 } // namespace castor
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp
index 66806fa0f41061ab8e87f57926f87088c540e351..00946f255692412687b12c5a6b993f8d4bd73d54 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp
@@ -544,7 +544,8 @@ const char *castor::tape::tapeserver::daemon::DataTransferSession::
   mountTypeToString(const cta::common::dataStructures::MountType mountType) const throw() {
   switch(mountType) {
   case cta::common::dataStructures::MountType::Retrieve: return "Retrieve";
-  case cta::common::dataStructures::MountType::ArchiveForUser : return "Archive";
+  case cta::common::dataStructures::MountType::ArchiveForUser : return "ArchiveForUser";
+  case cta::common::dataStructures::MountType::ArchiveForRepack : return "ArchiveForRepack";
   case cta::common::dataStructures::MountType::Label: return "Label";
   default                      : return "UNKNOWN";
   }
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
index 1326e57d1c8e7d1db82ea51c1747c0ea4fb2dc11..83184c131c203ff385d655058ee73abe637830e1 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
@@ -71,6 +71,11 @@ using namespace castor::tape::tapeserver::daemon;
 
 namespace unitTests {
 
+const uint32_t DISK_FILE_OWNER_UID = 9751;
+const uint32_t DISK_FILE_GID = 9752;
+const uint32_t DISK_FILE_SOME_USER = 9753;
+const uint32_t DISK_FILE_SOME_GROUP = 9754;
+
 namespace {
 
 /**
@@ -269,7 +274,6 @@ public:
     ASSERT_EQ(mountPolicyComment, group.comment);
 
     const std::string ruleComment = "create requester mount-rule";
-    cta::common::dataStructures::UserIdentity userIdentity;
     catalogue.createRequesterMountRule(s_adminOnAdminHost, mountPolicyName, s_diskInstance, s_userName, ruleComment);
 
     const std::list<common::dataStructures::RequesterMountRule> rules = catalogue.getRequesterMountRules();
@@ -387,8 +391,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayRecall) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // 6) Prepare files for reading by writing them to the mock system
   {
@@ -433,18 +438,16 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayRecall) {
 
       // Create file entry in the archive namespace
       tapeFileWritten.archiveFileId=fseq;
-      tapeFileWritten.checksumType="ADLER32";
-      tapeFileWritten.checksumValue=cta::utils::getAdler32String(data, archiveFileSize);
+      tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::utils::getAdler32(data, archiveFileSize));
       tapeFileWritten.vid=volInfo.vid;
       tapeFileWritten.size=archiveFileSize;
       tapeFileWritten.fSeq=fseq;
       tapeFileWritten.copyNb=1;
-      tapeFileWritten.compressedSize=archiveFileSize; // No compression
       tapeFileWritten.diskInstance = s_diskInstance;
       tapeFileWritten.diskFileId = fseq;
       tapeFileWritten.diskFilePath = remoteFilePath.str();
-      tapeFileWritten.diskFileUser = s_userName;
-      tapeFileWritten.diskFileGroup = "someGroup";
+      tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+      tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
       tapeFileWritten.storageClassName = s_storageClassName;
       tapeFileWritten.tapeDrive = "drive0";
       catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -568,8 +571,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionWrongRecall) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // 6) Prepare files for reading by writing them to the mock system
   {
@@ -597,7 +601,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionWrongRecall) {
       // Write the file to tape
       const uint64_t archiveFileSize = 1000;
       cta::MockArchiveMount mam(catalogue);
-      cta::MockRetrieveMount mrm;
+      cta::MockRetrieveMount mrm(catalogue);
       std::unique_ptr<cta::ArchiveJob> aj(new cta::MockArchiveJob(&mam, catalogue));
       aj->tapeFile.fSeq = fseq;
       aj->archiveFile.archiveFileID = 1000 + fseq;
@@ -614,19 +618,17 @@ TEST_P(DataTransferSessionTest, DataTransferSessionWrongRecall) {
         std::set<cta::catalogue::TapeItemWrittenPointer> tapeFileWrittenSet;
         tapeFileWrittenSet.insert(tapeFileWrittenUP.release());
         tapeFileWritten.archiveFileId=666;
-        tapeFileWritten.checksumType="ADLER32";
-        tapeFileWritten.checksumValue="0xDEADBEEF";
+        tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::checksum::ChecksumBlob::HexToByteArray("0xDEADBEEF"));
         tapeFileWritten.vid=volInfo.vid;
         tapeFileWritten.size=archiveFileSize;
         tapeFileWritten.fSeq=fseq;
         tapeFileWritten.blockId=0;
         tapeFileWritten.copyNb=1;
-        tapeFileWritten.compressedSize=archiveFileSize; // No compression
         tapeFileWritten.diskInstance = s_diskInstance;
         tapeFileWritten.diskFileId = std::to_string(fseq);
         tapeFileWritten.diskFilePath = "/somefile";
-        tapeFileWritten.diskFileUser = s_userName;
-        tapeFileWritten.diskFileGroup = "someGroup";
+        tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+        tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
         tapeFileWritten.storageClassName = s_storageClassName;
         tapeFileWritten.tapeDrive = "drive0";
         catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -639,19 +641,17 @@ TEST_P(DataTransferSessionTest, DataTransferSessionWrongRecall) {
         std::set<cta::catalogue::TapeItemWrittenPointer> tapeFileWrittenSet;
         tapeFileWrittenSet.insert(tapeFileWrittenUP.release());
         tapeFileWritten.archiveFileId=1000 + fseq;
-        tapeFileWritten.checksumType="ADLER32";
-        tapeFileWritten.checksumValue=cta::utils::getAdler32String(data, archiveFileSize);
+        tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::utils::getAdler32(data, archiveFileSize));
         tapeFileWritten.vid=volInfo.vid;
         tapeFileWritten.size=archiveFileSize;
         tapeFileWritten.fSeq=fseq + 1;
         tapeFileWritten.blockId=wf.getBlockId() + 10000;
         tapeFileWritten.copyNb=1;
-        tapeFileWritten.compressedSize=archiveFileSize; // No compression
         tapeFileWritten.diskInstance = s_diskInstance;
         tapeFileWritten.diskFileId = std::to_string(fseq + 1);
         tapeFileWritten.diskFilePath = remoteFilePath.str();
-        tapeFileWritten.diskFileUser = s_userName;
-        tapeFileWritten.diskFileGroup = "someGroup";
+        tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+        tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
         tapeFileWritten.storageClassName = s_storageClassName;
         tapeFileWritten.tapeDrive = "drive0";
         catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -763,8 +763,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionRAORecall) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
 
   int MAX_RECALLS = 50;
   int MAX_BULK_RECALLS = 31;
@@ -816,18 +817,16 @@ TEST_P(DataTransferSessionTest, DataTransferSessionRAORecall) {
 
       // Create file entry in the archive namespace
       tapeFileWritten.archiveFileId=fseq;
-      tapeFileWritten.checksumType="ADLER32";
-      tapeFileWritten.checksumValue=cta::utils::getAdler32String(data, archiveFileSize);
+      tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::utils::getAdler32(data, archiveFileSize));
       tapeFileWritten.vid=volInfo.vid;
       tapeFileWritten.size=archiveFileSize;
       tapeFileWritten.fSeq=fseq;
       tapeFileWritten.copyNb=1;
-      tapeFileWritten.compressedSize=archiveFileSize; // No compression
       tapeFileWritten.diskInstance = s_diskInstance;
       tapeFileWritten.diskFileId = fseq;
       tapeFileWritten.diskFilePath = remoteFilePath.str();
-      tapeFileWritten.diskFileUser = s_userName;
-      tapeFileWritten.diskFileGroup = "someGroup";
+      tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+      tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
       tapeFileWritten.storageClassName = s_storageClassName;
       tapeFileWritten.tapeDrive = "drive0";
       catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -981,8 +980,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionNoSuchDrive) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // 6) Prepare files for reading by writing them to the mock system
   {
@@ -1027,18 +1027,16 @@ TEST_P(DataTransferSessionTest, DataTransferSessionNoSuchDrive) {
 
       // Create file entry in the archive namespace
       tapeFileWritten.archiveFileId=fseq;
-      tapeFileWritten.checksumType="ADLER32";
-      tapeFileWritten.checksumValue=cta::utils::getAdler32String(data, archiveFileSize);
+      tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::utils::getAdler32(data, archiveFileSize));
       tapeFileWritten.vid=volInfo.vid;
       tapeFileWritten.size=archiveFileSize;
       tapeFileWritten.fSeq=fseq;
       tapeFileWritten.copyNb=1;
-      tapeFileWritten.compressedSize=archiveFileSize; // No compression
       tapeFileWritten.diskInstance = s_diskInstance;
       tapeFileWritten.diskFileId = fseq;
       tapeFileWritten.diskFilePath = remoteFilePath.str();
-      tapeFileWritten.diskFileUser = s_userName;
-      tapeFileWritten.diskFileGroup = "someGroup";
+      tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+      tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
       tapeFileWritten.storageClassName = s_storageClassName;
       tapeFileWritten.tapeDrive = "drive0";
       catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -1129,8 +1127,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionFailtoMount) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // 6) Prepare files for reading by writing them to the mock system
   {
@@ -1175,18 +1174,16 @@ TEST_P(DataTransferSessionTest, DataTransferSessionFailtoMount) {
 
       // Create file entry in the archive namespace
       tapeFileWritten.archiveFileId=fseq;
-      tapeFileWritten.checksumType="ADLER32";
-      tapeFileWritten.checksumValue=cta::utils::getAdler32String(data, archiveFileSize);
+      tapeFileWritten.checksumBlob.insert(cta::checksum::ADLER32, cta::utils::getAdler32(data, archiveFileSize));
       tapeFileWritten.vid=volInfo.vid;
       tapeFileWritten.size=archiveFileSize;
       tapeFileWritten.fSeq=fseq;
       tapeFileWritten.copyNb=1;
-      tapeFileWritten.compressedSize=archiveFileSize; // No compression
       tapeFileWritten.diskInstance = s_diskInstance;
       tapeFileWritten.diskFileId = fseq;
       tapeFileWritten.diskFilePath = remoteFilePath.str();
-      tapeFileWritten.diskFileUser = s_userName;
-      tapeFileWritten.diskFileGroup = "someGroup";
+      tapeFileWritten.diskFileOwnerUid = DISK_FILE_SOME_USER;
+      tapeFileWritten.diskFileGid = DISK_FILE_SOME_GROUP;
       tapeFileWritten.storageClassName = s_storageClassName;
       tapeFileWritten.tapeDrive = "drive0";
       catalogue.filesWrittenToTape(tapeFileWrittenSet);
@@ -1287,8 +1284,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayMigration) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // Create the mount criteria
   catalogue.createMountPolicy(requester, "immediateMount", 1000, 0, 1000, 0, 1, "Policy comment");
@@ -1317,8 +1315,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayMigration) {
       remoteFilePaths.push_back(sourceFiles.back()->path());
       // Schedule the archival of the file
       cta::common::dataStructures::ArchiveRequest ar;
-      ar.checksumType="ADLER32";
-      ar.checksumValue=sourceFiles.back()->adler32();
+      ar.checksumBlob.insert(cta::checksum::ADLER32, sourceFiles.back()->adler32());
       ar.storageClass=s_storageClassName;
       ar.srcURL=std::string("file://") + sourceFiles.back()->path();
       ar.requester.name = requester.username;
@@ -1326,8 +1323,8 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayMigration) {
       ar.fileSize = 1000;
       ar.diskFileID = std::to_string(fseq);
       ar.diskFileInfo.path = "y";
-      ar.diskFileInfo.owner = "z";
-      ar.diskFileInfo.group = "g";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
       archiveFileIds.push_back(archiveFileId);
       scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
@@ -1367,7 +1364,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayMigration) {
     auto afi = *(afiiter++);
     auto afs = catalogue.getArchiveFileById(afi);
     ASSERT_EQ(1, afs.tapeFiles.size());
-    ASSERT_EQ(sf->adler32(), afs.checksumValue);
+    cta::checksum::ChecksumBlob checksumBlob;
+    checksumBlob.insert(cta::checksum::ADLER32, sf->adler32());
+    ASSERT_EQ(afs.checksumBlob, checksumBlob);
     ASSERT_EQ(1000, afs.fileSize);
   }
 
@@ -1430,8 +1429,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionMissingFilesMigration) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // Create the mount criteria
   catalogue.createMountPolicy(requester, "immediateMount", 1000, 0, 1000, 0, 1, "Policy comment");
@@ -1460,8 +1460,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionMissingFilesMigration) {
       remoteFilePaths.push_back(sourceFiles.back()->path());
       // Schedule the archival of the file
       cta::common::dataStructures::ArchiveRequest ar;
-      ar.checksumType="ADLER32";
-      ar.checksumValue=sourceFiles.back()->adler32();
+      ar.checksumBlob.insert(cta::checksum::ADLER32, sourceFiles.back()->adler32());
       ar.storageClass=s_storageClassName;
       ar.srcURL=std::string("file://") + sourceFiles.back()->path();
       ar.requester.name = requester.username;
@@ -1470,8 +1469,8 @@ TEST_P(DataTransferSessionTest, DataTransferSessionMissingFilesMigration) {
       ar.diskFileID = "x";
       ar.diskFileID += std::to_string(fseq);
       ar.diskFileInfo.path = "y";
-      ar.diskFileInfo.owner = "z";
-      ar.diskFileInfo.group = "g";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
       archiveFileIds.push_back(archiveFileId);
       scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
@@ -1589,8 +1588,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullMigration) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // Create the mount criteria
   catalogue.createMountPolicy(requester, "immediateMount", 1000, 0, 1000, 0, 1, "Policy comment");
@@ -1620,8 +1620,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullMigration) {
       remoteFilePaths.push_back(sourceFiles.back()->path());
       // Schedule the archival of the file
       cta::common::dataStructures::ArchiveRequest ar;
-      ar.checksumType="ADLER32";
-      ar.checksumValue=sourceFiles.back()->adler32();
+      ar.checksumBlob.insert(cta::checksum::ADLER32, sourceFiles.back()->adler32());
       ar.storageClass=s_storageClassName;
       ar.srcURL=std::string("file://") + sourceFiles.back()->path();
       ar.requester.name = requester.username;
@@ -1629,8 +1628,8 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullMigration) {
       ar.fileSize = 1000;
       ar.diskFileID = std::to_string(fseq);
       ar.diskFileInfo.path = "y";
-      ar.diskFileInfo.owner = "z";
-      ar.diskFileInfo.group = "g";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
       archiveFileIds.push_back(archiveFileId);
       scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
@@ -1674,7 +1673,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullMigration) {
     if (archiveFileCount <= 3) {
       auto afs = catalogue.getArchiveFileById(afi);
       ASSERT_EQ(1, afs.tapeFiles.size());
-      ASSERT_EQ(sf->adler32(), afs.checksumValue);
+      cta::checksum::ChecksumBlob checksumBlob;
+      checksumBlob.insert(cta::checksum::ADLER32, sf->adler32());
+      ASSERT_EQ(afs.checksumBlob, checksumBlob);
       ASSERT_EQ(1000, afs.fileSize);
     } else {
       ASSERT_THROW(catalogue.getArchiveFileById(afi), cta::exception::Exception);
@@ -1746,8 +1747,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
   const std::string tapeComment = "Tape comment";
   bool notDisabled = false;
   bool notFull = false;
+  bool notReadOnly = false;
   catalogue.createTape(s_adminOnAdminHost, s_vid, s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
-    notDisabled, notFull, tapeComment);
+    notDisabled, notFull, notReadOnly, tapeComment);
   
   // Create the mount criteria
   catalogue.createMountPolicy(requester, "immediateMount", 1000, 0, 1000, 0, 1, "Policy comment");
@@ -1778,8 +1780,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
       remoteFilePaths.push_back(sourceFiles.back()->path());
       // Schedule the archival of the file
       cta::common::dataStructures::ArchiveRequest ar;
-      ar.checksumType="ADLER32";
-      ar.checksumValue=sourceFiles.back()->adler32();
+      ar.checksumBlob.insert(cta::checksum::ADLER32, sourceFiles.back()->adler32());
       ar.storageClass=s_storageClassName;
       ar.srcURL=std::string("file://") + sourceFiles.back()->path();
       ar.requester.name = requester.username;
@@ -1787,8 +1788,8 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
       ar.fileSize = 1000;
       ar.diskFileID = std::to_string(fseq);
       ar.diskFileInfo.path = "y";
-      ar.diskFileInfo.owner = "z";
-      ar.diskFileInfo.group = "g";
+      ar.diskFileInfo.owner_uid = DISK_FILE_OWNER_UID;
+      ar.diskFileInfo.gid = DISK_FILE_GID;
       const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
       archiveFileIds.push_back(archiveFileId);
       scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
@@ -1804,7 +1805,7 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
   // We need to create the drive in the registry before being able to put it up.
   scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, logContext);
   scheduler.setDesiredDriveState(s_adminOnAdminHost, driveConfig.unitName, true, false, logContext);
 
   // Create the data transfer session
   DataTransferConfig castorConf;
   castorConf.bufsz = 1024*1024; // 1 MB memory buffers
@@ -1832,7 +1833,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
     if (archiveFileCount <= 3) {
       auto afs = catalogue.getArchiveFileById(afi);
       ASSERT_EQ(1, afs.tapeFiles.size());
-      ASSERT_EQ(sf->adler32(), afs.checksumValue);
+      cta::checksum::ChecksumBlob checksumBlob;
+      checksumBlob.insert(cta::checksum::ADLER32, sf->adler32());
+      ASSERT_EQ(afs.checksumBlob, checksumBlob);
       ASSERT_EQ(1000, afs.fileSize);
     } else {
       ASSERT_THROW(catalogue.getArchiveFileById(afi), cta::exception::Exception);
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DiskWriteTaskTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DiskWriteTaskTest.cpp
index 6ef2ede0c1585cf018fc927f4a392bdf56f1005d..a49fc4bb658bb0803814a71e91df8b7495143b13 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DiskWriteTaskTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DiskWriteTaskTest.cpp
@@ -53,7 +53,7 @@ namespace unitTests{
   
   class TestingRetrieveMount: public cta::RetrieveMount {
   public:
-    TestingRetrieveMount(std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm): RetrieveMount(std::move(dbrm)) {
+    TestingRetrieveMount(cta::catalogue::Catalogue &catalogue, std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm): RetrieveMount(catalogue, std::move(dbrm)) {
     }
   };
   
@@ -103,13 +103,14 @@ namespace unitTests{
     cta::log::LogContext lc(log);
     
     std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm(new TestingDatabaseRetrieveMount());
-    TestingRetrieveMount trm(std::move(dbrm));
+    std::unique_ptr<cta::catalogue::Catalogue> catalogue(new cta::catalogue::DummyCatalogue);
+    TestingRetrieveMount trm(*catalogue, std::move(dbrm));
     MockRecallReportPacker report(&trm,lc);
     RecallMemoryManager mm(10,100,lc);
     cta::disk::RadosStriperPool striperPool;
     DiskFileFactory fileFactory("", 0, striperPool);
     
-    cta::MockRetrieveMount mrm;
+    cta::MockRetrieveMount mrm(*catalogue);
     std::unique_ptr<TestingRetrieveJob> fileToRecall(new TestingRetrieveJob(mrm));
     fileToRecall->retrieveRequest.archiveFileID = 1;
     fileToRecall->selectedCopyNb=1;
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DiskWriteThreadPoolTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DiskWriteThreadPoolTest.cpp
index b3984a5f6aa9db4e09e3f5ed3d11aa4217d50214..6ec2d62b6bfdba5c24dca0400952257fc96577a7 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DiskWriteThreadPoolTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DiskWriteThreadPoolTest.cpp
@@ -30,6 +30,7 @@
 #include "castor/tape/tapeserver/daemon/MemBlock.hpp"
 #include "castor/messages/TapeserverProxyDummy.hpp"
 #include "scheduler/TapeMountDummy.hpp"
+#include "catalogue/DummyCatalogue.hpp"
 #include <gtest/gtest.h>
 
 namespace unitTests{
@@ -46,7 +47,7 @@ namespace unitTests{
   
   class TestingRetrieveMount: public cta::RetrieveMount {
   public:
-    TestingRetrieveMount(std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm): RetrieveMount(std::move(dbrm)) {
+    TestingRetrieveMount(cta::catalogue::Catalogue &catalogue, std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm): RetrieveMount(catalogue, std::move(dbrm)) {
     }
   };
   
@@ -101,7 +102,8 @@ namespace unitTests{
     cta::log::LogContext lc(log);
     
     std::unique_ptr<cta::SchedulerDatabase::RetrieveMount> dbrm(new TestingDatabaseRetrieveMount);
-    TestingRetrieveMount trm(std::move(dbrm));
+    std::unique_ptr<cta::catalogue::Catalogue> catalogue(new cta::catalogue::DummyCatalogue);
+    TestingRetrieveMount trm(*catalogue, std::move(dbrm));
     MockRecallReportPacker report(&trm,lc);    
     
     RecallMemoryManager mm(10,100,lc);
diff --git a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPackerTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPackerTest.cpp
index 9b7f5028611ccf401b645bd07c184a63d9b348f9..f92e086521b7e1089bacfc7a0740c1bf7f6d100f 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPackerTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPackerTest.cpp
@@ -35,7 +35,12 @@ using ::testing::Invoke;
 using namespace castor::tape;
 
 namespace unitTests {
-  
+
+const uint32_t TEST_USER_1  = 9751;
+const uint32_t TEST_GROUP_1 = 9752;
+const uint32_t TEST_USER_2  = 9753;
+const uint32_t TEST_GROUP_2 = 9754;
+
   class castor_tape_tapeserver_daemon_MigrationReportPackerTest: public ::testing::Test {
   public:
     castor_tape_tapeserver_daemon_MigrationReportPackerTest():
@@ -64,26 +69,24 @@ namespace unitTests {
     std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
 
   }; // class castor_tape_tapeserver_daemon_MigrationReportPackerTest
-  
+
   class MockArchiveJobExternalStats: public cta::MockArchiveJob {
   public:
     MockArchiveJobExternalStats(cta::ArchiveMount & am, cta::catalogue::Catalogue & catalogue, 
        int & completes, int &failures):
     MockArchiveJob(&am, catalogue), completesRef(completes), failuresRef(failures) {}
-    
+
     virtual void validate() override {}
     virtual cta::catalogue::TapeItemWrittenPointer validateAndGetTapeFileWritten() override {
       auto fileReportUP=cta::make_unique<cta::catalogue::TapeFileWritten>();
       auto & fileReport = *fileReportUP;
       fileReport.archiveFileId = archiveFile.archiveFileID;
       fileReport.blockId = tapeFile.blockId;
-      fileReport.checksumType = tapeFile.checksumType;
-      fileReport.checksumValue = tapeFile.checksumValue;
-      fileReport.compressedSize = tapeFile.compressedSize;
+      fileReport.checksumBlob = tapeFile.checksumBlob;
       fileReport.copyNb = tapeFile.copyNb;
       fileReport.diskFileId = archiveFile.diskFileId;
-      fileReport.diskFileUser = archiveFile.diskFileInfo.owner;
-      fileReport.diskFileGroup = archiveFile.diskFileInfo.group;
+      fileReport.diskFileOwnerUid = archiveFile.diskFileInfo.owner_uid;
+      fileReport.diskFileGid = archiveFile.diskFileInfo.gid;
       fileReport.diskFilePath = archiveFile.diskFileInfo.path;
       fileReport.diskInstance = archiveFile.diskInstance;
       fileReport.fSeq = tapeFile.fSeq;
@@ -93,12 +96,12 @@ namespace unitTests {
       fileReport.vid = tapeFile.vid;
       return cta::catalogue::TapeItemWrittenPointer(fileReportUP.release());
     }
-   
+
 
     void transferFailed(const std::string& failureReason, cta::log::LogContext& lc) override {
       failuresRef++;
     }
-    
+
     void reportJobSucceeded() override {
       completesRef++;
     }
@@ -107,10 +110,10 @@ namespace unitTests {
     int & completesRef;
     int & failuresRef;
   };
-  
+
   TEST_F(castor_tape_tapeserver_daemon_MigrationReportPackerTest, MigrationReportPackerNominal) {
     cta::MockArchiveMount tam(*m_catalogue);
-    
+
     const std::string vid1 = "VTEST001";
     const std::string vid2 = "VTEST002";
     const std::string mediaType = "media_type";
@@ -123,13 +126,14 @@ namespace unitTests {
     const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
     const bool disabledValue = true;
     const bool fullValue = false;
+    const bool readOnlyValue = false;
     const std::string createTapeComment = "Create tape";
     cta::common::dataStructures::SecurityIdentity admin = cta::common::dataStructures::SecurityIdentity("admin","localhost");
 
     m_catalogue->createLogicalLibrary(admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
     m_catalogue->createTapePool(admin, tapePoolName, vo, 2, true, supply, "Create tape pool");
     m_catalogue->createTape(admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-      disabledValue, fullValue, createTapeComment);
+      disabledValue, fullValue, readOnlyValue, createTapeComment);
 
     cta::common::dataStructures::StorageClass storageClass;
     storageClass.diskInstance = "disk_instance";
@@ -137,7 +141,7 @@ namespace unitTests {
     storageClass.nbCopies = 1;
     storageClass.comment = "Create storage class";
     m_catalogue->createStorageClass(admin, storageClass);
-    
+
     ::testing::InSequence dummy;
     std::unique_ptr<cta::ArchiveJob> job1;
     int job1completes(0), job1failures(0);
@@ -150,20 +154,18 @@ namespace unitTests {
     job1->archiveFile.diskInstance="disk_instance";
     job1->archiveFile.diskFileId="diskFileId1";
     job1->archiveFile.diskFileInfo.path="filePath1";
-    job1->archiveFile.diskFileInfo.owner="testUser1";
-    job1->archiveFile.diskFileInfo.group="testGroup1";
+    job1->archiveFile.diskFileInfo.owner_uid=TEST_USER_1;
+    job1->archiveFile.diskFileInfo.gid=TEST_GROUP_1;
     job1->archiveFile.fileSize=1024;        
-    job1->archiveFile.checksumType="md5";
-    job1->archiveFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
+    job1->archiveFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
     job1->archiveFile.storageClass="storage_class";
     job1->tapeFile.vid="VTEST001";
     job1->tapeFile.fSeq=1;
     job1->tapeFile.blockId=256;
-    job1->tapeFile.compressedSize=768;
+    job1->tapeFile.fileSize=768;
     job1->tapeFile.copyNb=1;
-    job1->tapeFile.checksumType="md5";
-    job1->tapeFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
-    
+    job1->tapeFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
+
     std::unique_ptr<cta::ArchiveJob> job2;
     int job2completes(0), job2failures(0);
     {
@@ -175,20 +177,18 @@ namespace unitTests {
     job2->archiveFile.diskInstance="disk_instance";
     job2->archiveFile.diskFileId="diskFileId2";
     job2->archiveFile.diskFileInfo.path="filePath2";
-    job2->archiveFile.diskFileInfo.owner="testUser2";
-    job2->archiveFile.diskFileInfo.group="testGroup2";
+    job2->archiveFile.diskFileInfo.owner_uid=TEST_USER_2;
+    job2->archiveFile.diskFileInfo.gid=TEST_GROUP_2;
     job2->archiveFile.fileSize=1024;        
-    job2->archiveFile.checksumType="md5";
-    job2->archiveFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
+    job2->archiveFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
     job2->archiveFile.storageClass="storage_class";
     job2->tapeFile.vid="VTEST001";
     job2->tapeFile.fSeq=2;
     job2->tapeFile.blockId=512;
-    job2->tapeFile.compressedSize=768;
+    job2->tapeFile.fileSize=768;
     job2->tapeFile.copyNb=1;
-    job2->tapeFile.checksumType="md5";
-    job2->tapeFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
-    
+    job2->tapeFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
+
     cta::log::StringLogger log("dummy","castor_tape_tapeserver_daemon_MigrationReportPackerNominal",cta::log::DEBUG);
     cta::log::LogContext lc(log);
     tapeserver::daemon::MigrationReportPacker mrp(&tam,lc);
@@ -231,7 +231,7 @@ namespace unitTests {
         new MockArchiveJobExternalStats(tam, *m_catalogue, job3completes, job3failures));
       job3.reset(mockJob.release());
     }
-    
+
     cta::log::StringLogger log("dummy","castor_tape_tapeserver_daemon_MigrationReportPackerFailure",cta::log::DEBUG);
     cta::log::LogContext lc(log);  
     tapeserver::daemon::MigrationReportPacker mrp(&tam,lc);
@@ -258,7 +258,7 @@ namespace unitTests {
 
   TEST_F(castor_tape_tapeserver_daemon_MigrationReportPackerTest, MigrationReportPackerBadFile) {
     cta::MockArchiveMount tam(*m_catalogue);
-    
+
     const std::string vid1 = "VTEST001";
     const std::string vid2 = "VTEST002";
     const std::string mediaType = "media_type";
@@ -273,13 +273,14 @@ namespace unitTests {
     const uint64_t capacityInBytes = (uint64_t)10 * 1000 * 1000 * 1000 * 1000;
     const bool disabledValue = true;
     const bool fullValue = false;
+    const bool readOnlyValue = false;
     const std::string createTapeComment = "Create tape";
     cta::common::dataStructures::SecurityIdentity admin = cta::common::dataStructures::SecurityIdentity("admin","localhost");
 
     m_catalogue->createLogicalLibrary(admin, logicalLibraryName, logicalLibraryIsDisabled, "Create logical library");
     m_catalogue->createTapePool(admin, tapePoolName, vo, nbPartialTapes, isEncrypted, supply, "Create tape pool");
     m_catalogue->createTape(admin, vid1, mediaType, vendor, logicalLibraryName, tapePoolName, capacityInBytes,
-      disabledValue, fullValue, createTapeComment);
+      disabledValue, fullValue, readOnlyValue, createTapeComment);
 
     cta::common::dataStructures::StorageClass storageClass;
     storageClass.diskInstance = "disk_instance";
@@ -287,7 +288,7 @@ namespace unitTests {
     storageClass.nbCopies = 1;
     storageClass.comment = "Create storage class";
     m_catalogue->createStorageClass(admin, storageClass);
-    
+
     ::testing::InSequence dummy;
     std::unique_ptr<cta::ArchiveJob> migratedBigFile;
     int migratedBigFileCompletes(0), migratedBigFileFailures(0);
@@ -315,56 +316,50 @@ namespace unitTests {
     migratedBigFile->archiveFile.diskInstance="disk_instance";
     migratedBigFile->archiveFile.diskFileId="diskFileId2";
     migratedBigFile->archiveFile.diskFileInfo.path="filePath2";
-    migratedBigFile->archiveFile.diskFileInfo.owner="testUser2";
-    migratedBigFile->archiveFile.diskFileInfo.group="testGroup2";
+    migratedBigFile->archiveFile.diskFileInfo.owner_uid=TEST_USER_2;
+    migratedBigFile->archiveFile.diskFileInfo.gid=TEST_GROUP_2;
     migratedBigFile->archiveFile.fileSize=100000;        
-    migratedBigFile->archiveFile.checksumType="md5";
-    migratedBigFile->archiveFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
+    migratedBigFile->archiveFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
     migratedBigFile->archiveFile.storageClass="storage_class";
     migratedBigFile->tapeFile.vid="VTEST001";
     migratedBigFile->tapeFile.fSeq=1;
     migratedBigFile->tapeFile.blockId=256;
-    migratedBigFile->tapeFile.compressedSize=768;
+    migratedBigFile->tapeFile.fileSize=768;
     migratedBigFile->tapeFile.copyNb=1;
-    migratedBigFile->tapeFile.checksumType="md5";
-    migratedBigFile->tapeFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
-    
+    migratedBigFile->tapeFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
+
     migratedFileSmall->archiveFile.archiveFileID=5;
     migratedFileSmall->archiveFile.diskInstance="disk_instance";
     migratedFileSmall->archiveFile.diskFileId="diskFileId3";
     migratedFileSmall->archiveFile.diskFileInfo.path="filePath3";
-    migratedFileSmall->archiveFile.diskFileInfo.owner="testUser2";
-    migratedFileSmall->archiveFile.diskFileInfo.group="testGroup2";
+    migratedFileSmall->archiveFile.diskFileInfo.owner_uid=TEST_USER_2;
+    migratedFileSmall->archiveFile.diskFileInfo.gid=TEST_GROUP_2;
     migratedFileSmall->archiveFile.fileSize=1;        
-    migratedFileSmall->archiveFile.checksumType="md5";
-    migratedFileSmall->archiveFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
+    migratedFileSmall->archiveFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
     migratedFileSmall->archiveFile.storageClass="storage_class";
     migratedFileSmall->tapeFile.vid="VTEST001";
     migratedFileSmall->tapeFile.fSeq=2;
     migratedFileSmall->tapeFile.blockId=512;
-    migratedFileSmall->tapeFile.compressedSize=1;
+    migratedFileSmall->tapeFile.fileSize=1;
     migratedFileSmall->tapeFile.copyNb=1;
-    migratedFileSmall->tapeFile.checksumType="md5";
-    migratedFileSmall->tapeFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
-    
+    migratedFileSmall->tapeFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
+
     migratedNullFile->archiveFile.archiveFileID=6;
     migratedNullFile->archiveFile.diskInstance="disk_instance";
     migratedNullFile->archiveFile.diskFileId="diskFileId4";
     migratedNullFile->archiveFile.diskFileInfo.path="filePath4";
-    migratedNullFile->archiveFile.diskFileInfo.owner="testUser2";
-    migratedNullFile->archiveFile.diskFileInfo.group="testGroup2";
+    migratedNullFile->archiveFile.diskFileInfo.owner_uid=TEST_USER_2;
+    migratedNullFile->archiveFile.diskFileInfo.gid=TEST_GROUP_2;
     migratedNullFile->archiveFile.fileSize=0;        
-    migratedNullFile->archiveFile.checksumType="md5";
-    migratedNullFile->archiveFile.checksumValue="b170288bf1f61b26a648358866f4d6c6";
+    migratedNullFile->archiveFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
     migratedNullFile->archiveFile.storageClass="storage_class";
     migratedNullFile->tapeFile.vid="VTEST001";
     migratedNullFile->tapeFile.fSeq=3;
     migratedNullFile->tapeFile.blockId=768;
-    migratedNullFile->tapeFile.compressedSize=0;
+    migratedNullFile->tapeFile.fileSize=0;
     migratedNullFile->tapeFile.copyNb=1;
-    migratedNullFile->tapeFile.checksumType="md5";
-    migratedFileSmall->tapeFile.checksumValue="b170288bf1f61b26a648358866f4d6c6"; 
-    
+    migratedNullFile->tapeFile.checksumBlob.insert(cta::checksum::MD5, cta::checksum::ChecksumBlob::HexToByteArray("b170288bf1f61b26a648358866f4d6c6"));
+
     cta::log::StringLogger log("dummy","castor_tape_tapeserver_daemon_MigrationReportPackerOneByteFile",cta::log::DEBUG);
     cta::log::LogContext lc(log);  
     tapeserver::daemon::MigrationReportPacker mrp(&tam,lc);
diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
index 39d6285b2659ba40fd9a00e74e7366b5e9c4595f..f58f6a0ad72bf6134f976aac761431a912e62089 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
@@ -236,6 +236,7 @@ void RecallReportPacker::WorkerThread::run(){
   bool endFound = false;
   
   std::list <std::unique_ptr<Report>> reportedSuccessfully;
+  cta::utils::Timer t;
   while(1) {
     std::string debugType;
     std::unique_ptr<Report> rep(m_parent.m_fifo.pop());
@@ -265,8 +266,11 @@ void RecallReportPacker::WorkerThread::run(){
       // m_parent.fullCheckAndFinishAsyncExecute will execute the shared half of the
       // request updates (individual, asynchronous is done in rep->execute(m_parent);
       if (typeid(*rep) == typeid(RecallReportPacker::ReportSuccessful) 
-          && m_parent.m_successfulRetrieveJobs.size() >= m_parent.RECALL_REPORT_PACKER_FLUSH_SIZE)
+          && (m_parent.m_successfulRetrieveJobs.size() >= m_parent.RECALL_REPORT_PACKER_FLUSH_SIZE || t.secs() >= m_parent.RECALL_REPORT_PACKER_FLUSH_TIME)){
+        m_parent.m_lc.log(cta::log::INFO,"m_parent.fullCheckAndFinishAsyncExecute()");
         m_parent.fullCheckAndFinishAsyncExecute();
+        t.reset();
+      }
     } catch(const cta::exception::Exception& e){
       //we get there because to tried to close the connection and it failed
       //either from the catch a few lines above or directly from rep->execute
diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.hpp b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.hpp
index a0b62a0c62ef06348f148294c8de1e00ed53ca8a..057d310377a98ec45f180b985205deee392ad081 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.hpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.hpp
@@ -238,7 +238,12 @@ private:
   /*
    * The limit for successful reports to trigger flush.
    */
-  const unsigned int RECALL_REPORT_PACKER_FLUSH_SIZE = 500;
+  const unsigned int RECALL_REPORT_PACKER_FLUSH_SIZE = 2000;
+  
+  /*
+   * The time limit for successful reports to trigger flush.
+   */
+  const double RECALL_REPORT_PACKER_FLUSH_TIME = 180;
 };
 
 }}}}
diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPackerTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPackerTest.cpp
index 152b2f4831b1d19215b3c774b4c4bcc4a0f80b33..683a813e19c52ab2d8e2e898cf87550676404c8f 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPackerTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPackerTest.cpp
@@ -66,7 +66,8 @@ protected:
   };
 
 TEST_F(castor_tape_tapeserver_daemon_RecallReportPackerTest, RecallReportPackerNominal) {
-  cta::MockRetrieveMount retrieveMount;
+  auto catalogue = cta::catalogue::DummyCatalogue();
+  cta::MockRetrieveMount retrieveMount(catalogue);
 
 
   
@@ -111,7 +112,8 @@ TEST_F(castor_tape_tapeserver_daemon_RecallReportPackerTest, RecallReportPackerN
 }
 
 TEST_F(castor_tape_tapeserver_daemon_RecallReportPackerTest, RecallReportPackerBadBadEnd) {
-  cta::MockRetrieveMount retrieveMount;
+  auto catalogue = cta::catalogue::DummyCatalogue();
+  cta::MockRetrieveMount retrieveMount(catalogue);
 
   ::testing::InSequence dummy;
   std::unique_ptr<cta::RetrieveJob> job1;
diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjectorTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjectorTest.cpp
index 0ee82c988bd61d0cec5a83a4203ba221721804ce..cbcea00193aaa3a43e618c48f52c2305c30e3487 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjectorTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjectorTest.cpp
@@ -147,7 +147,8 @@ namespace unitTests
     RecallMemoryManager mm(50U, 50U, lc);
     castor::tape::tapeserver::drive::FakeDrive drive;
     
-    cta::MockRetrieveMount trm;
+    auto catalogue = cta::catalogue::DummyCatalogue();
+    cta::MockRetrieveMount trm(catalogue);
     trm.createRetrieveJobs(nbJobs);
     //EXPECT_CALL(trm, internalGetNextJob()).Times(nbJobs+1);
     
@@ -207,8 +208,8 @@ namespace unitTests
     cta::log::LogContext lc(log);
     RecallMemoryManager mm(50U, 50U, lc);
     castor::tape::tapeserver::drive::FakeDrive drive;
-    
-    cta::MockRetrieveMount trm;
+    auto catalogue = cta::catalogue::DummyCatalogue();
+    cta::MockRetrieveMount trm(catalogue);
     trm.createRetrieveJobs(0);
     //EXPECT_CALL(trm, internalGetNextJob()).Times(1); //no work: single call to getnextjob
     
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeReadSingleThread.cpp b/tapeserver/castor/tape/tapeserver/daemon/TapeReadSingleThread.cpp
index 2c61233c1bc3abe82afd3a4ccef962ef68c402e5..dd8fc7bfe372878bcafc0f323658a7a10c43c3c5 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TapeReadSingleThread.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TapeReadSingleThread.cpp
@@ -256,6 +256,7 @@ void castor::tape::tapeserver::daemon::TapeReadSingleThread::run() {
         scoped.add("mountTime", m_stats.mountTime);
         m_logContext.log(cta::log::INFO, "Tape mounted and drive ready");
       }
+      m_retrieveMount.setTapeMounted(m_logContext);
       try {
         currentErrorToCount = "Error_tapeEncryptionEnable";
         // We want those scoped params to last for the whole mount.
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp
index f209e4f0df2568daab19f7731b35653012fc47da..d76ec118eb2a19a76ed41e976d51315b32d9f69b 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp
@@ -336,6 +336,7 @@ void castor::tape::tapeserver::daemon::TapeWriteSingleThread::run() {
         scoped.add("mountTime", m_stats.mountTime);
         m_logContext.log(cta::log::INFO, "Tape mounted and drive ready");
       }
+      m_archiveMount.setTapeMounted(m_logContext);
       try {
         currentErrorToCount = "Error_tapeEncryptionEnable";
         // We want those scoped params to last for the whole mount.
diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp
index 1f62ca5b8c77071141fdf6788a74408c7640fb54..ea9af35ed7efb082a6d087066bb38f8c3a51025b 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp
@@ -161,14 +161,8 @@ namespace daemon {
       m_taskStats.filesCount ++;
       // Record the fSeq in the tape session
       session.reportWrittenFSeq(m_archiveJob->tapeFile.fSeq);
-      m_archiveJob->tapeFile.checksumType = "ADLER32";
-      { 
-        std::stringstream cs;
-        cs << "0X" << std::hex << std::noshowbase << std::uppercase 
-            << std::setfill('0') << std::setw(8) << (uint32_t)ckSum;
-        m_archiveJob->tapeFile.checksumValue = cs.str();
-      }
-      m_archiveJob->tapeFile.compressedSize = m_taskStats.dataVolume;
+      m_archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, ckSum);
+      m_archiveJob->tapeFile.fileSize = m_taskStats.dataVolume;
       m_archiveJob->tapeFile.blockId = output->getBlockId();
       reportPacker.reportCompletedJob(std::move(m_archiveJob), lc);
       m_taskStats.waitReportingTime += timer.secs(cta::utils::Timer::resetCounter);
diff --git a/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.hpp b/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.hpp
index 3d4745141e1abede69e05c409e53d30df26afc01..fbf2621a720f35235c6cb47c2e967d5ce8f0f4a8 100644
--- a/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.hpp
+++ b/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.hpp
@@ -22,7 +22,7 @@
  *****************************************************************************/
 #pragma once 
 
-#include "castor/tape/tapeserver/drive/DriveInterface.hpp"
+#include "tapeserver/castor/tape/tapeserver/drive/DriveInterface.hpp"
 
 namespace castor {
 namespace tape {
diff --git a/tapeserver/castor/tape/tapeserver/file/File.hpp b/tapeserver/castor/tape/tapeserver/file/File.hpp
index 0d495fa2751130b21e028dd0e10613a9bff6662f..587cb9c035f4daff047d542ae47cc63d63e9d209 100644
--- a/tapeserver/castor/tape/tapeserver/file/File.hpp
+++ b/tapeserver/castor/tape/tapeserver/file/File.hpp
@@ -23,8 +23,8 @@
 
 #pragma once
 
-#include "castor/tape/tapeserver/file/Structures.hpp"
-#include "castor/tape/tapeserver/daemon/VolumeInfo.hpp"
+#include "tapeserver/castor/tape/tapeserver/file/Structures.hpp"
+#include "tapeserver/castor/tape/tapeserver/daemon/VolumeInfo.hpp"
 #include "common/exception/Exception.hpp"
 #include "scheduler/ArchiveJob.hpp"
 #include "scheduler/RetrieveJob.hpp"
diff --git a/tapeserver/cta-taped.service b/tapeserver/cta-taped.service
index 2740d6c7cde3013e0dee3abba4de1b2a8135b538..1bf041aee53183a06dc80a05df1335406bff197c 100644
--- a/tapeserver/cta-taped.service
+++ b/tapeserver/cta-taped.service
@@ -6,6 +6,7 @@ After=syslog.target network-online.target
 EnvironmentFile=-/etc/sysconfig/cta-taped
 ExecStart=/usr/bin/cta-taped ${CTA_TAPED_OPTIONS}
 LimitCORE=infinity
+OOMScoreAdjust=-1000
 Type=forking
 Restart=no
 
diff --git a/tapeserver/tapelabel/CMakeLists.txt b/tapeserver/tapelabel/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4f32ad6377d6b76723627085328907f3a283dc9f
--- /dev/null
+++ b/tapeserver/tapelabel/CMakeLists.txt
@@ -0,0 +1,49 @@
+# The CERN Tape Archive (CTA) project
+# Copyright (C) 2015  CERN
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+cmake_minimum_required (VERSION 2.6)
+
+add_executable(cta-tape-label
+  TapeLabelCmd.cpp
+  CmdLineTool.cpp
+  TapeLabelCmdLineArgs.cpp
+  TapeLabelCmdMain.cpp)
+
+target_link_libraries (cta-tape-label
+  ctacommon
+  TapeDrive
+  ctamediachanger
+  ctacatalogue
+  SCSI
+)
+
+# TODO: to be removed once we drop the dependencies on taped
+find_package(Protobuf3 REQUIRED)
+set_property (TARGET cta-tape-label APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+if (OCCI_SUPPORT)
+  set_property (TARGET cta-tape-label APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
+endif (OCCI_SUPPORT)
+
+install (TARGETS cta-tape-label DESTINATION /usr/bin)
+install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/cta-tape-label.1cta DESTINATION /usr/share/man/man1)
+
+add_library(ctatapelabelunittests SHARED
+  TapeLabelCmdLineArgs.cpp
+  TapeLabelCmdLineArgsTest.cpp)
+
+set_property(TARGET ctatapelabelunittests PROPERTY SOVERSION "${CTA_SOVERSION}")
+set_property(TARGET ctatapelabelunittests PROPERTY   VERSION "${CTA_LIBVERSION}")
+
+install (TARGETS ctatapelabelunittests DESTINATION usr/${CMAKE_INSTALL_LIBDIR})
diff --git a/tapeserver/tapelabel/CmdLineTool.cpp b/tapeserver/tapelabel/CmdLineTool.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8596d6388003847f42120a28c2bd717119de156e
--- /dev/null
+++ b/tapeserver/tapelabel/CmdLineTool.cpp
@@ -0,0 +1,106 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tapeserver/tapelabel/CmdLineTool.hpp"
+#include "common/exception/CommandLineNotParsed.hpp"
+
+#include <unistd.h>
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+CmdLineTool::CmdLineTool(
+  std::istream &inStream,
+  std::ostream &outStream,
+  std::ostream &errStream) noexcept:
+  m_in(inStream),
+  m_out(outStream),
+  m_err(errStream) {
+}
+
+//------------------------------------------------------------------------------
+// destructor
+//------------------------------------------------------------------------------
+CmdLineTool::~CmdLineTool() noexcept {
+}
+
+//------------------------------------------------------------------------------
+// getUsername
+//------------------------------------------------------------------------------
+std::string CmdLineTool::getUsername() {
+  char buf[256];
+
+  if(getlogin_r(buf, sizeof(buf))) {
+    return "UNKNOWN";
+  } else {
+    return buf;
+  }
+}
+
+//------------------------------------------------------------------------------
+// getHostname
+//------------------------------------------------------------------------------
+std::string CmdLineTool::getHostname() {
+  char buf[256];
+
+  if(gethostname(buf, sizeof(buf))) {
+    return "UNKNOWN";
+  } else {
+    buf[sizeof(buf) - 1] = '\0';
+    return buf;
+  }
+}
+
+//------------------------------------------------------------------------------
+// main
+//------------------------------------------------------------------------------
+int CmdLineTool::main(const int argc, char *const *const argv) {
+  bool cmdLineNotParsed = false;
+  std::string errorMessage;
+
+  try {
+    return exceptionThrowingMain(argc, argv);
+  } catch(exception::CommandLineNotParsed &ue) {
+    errorMessage = ue.getMessage().str();
+    cmdLineNotParsed = true;
+  } catch(exception::Exception &ex) {
+    errorMessage = ex.getMessage().str();
+  } catch(std::exception &se) {
+    errorMessage = se.what();
+  } catch(...) {
+    errorMessage = "An unknown exception was thrown";
+  }
+
+  // Reaching this point means the command has failed, an exception was thrown
+  // and errorMessage has been set accordingly
+
+  m_err << "Aborting: " << errorMessage << std::endl;
+  if(cmdLineNotParsed) {
+    m_err << std::endl;
+    printUsage(m_err);
+  }
+  return 1;
+}
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/CmdLineTool.hpp b/tapeserver/tapelabel/CmdLineTool.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3f2cbf3843dabcf2841db219a6f913a0de047aa8
--- /dev/null
+++ b/tapeserver/tapelabel/CmdLineTool.hpp
@@ -0,0 +1,109 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <istream>
+#include <ostream>
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+/**
+ * Abstract class implementing common code and data structures for a
+ * command-line tool.
+ */
+class CmdLineTool {
+public:
+  /**
+   * Constructor.
+   *
+   * @param inStream Standard input stream.
+   * @param outStream Standard output stream.
+   * @param errStream Standard error stream.
+   */
+  CmdLineTool(std::istream &inStream, std::ostream &outStream, std::ostream &errStream) noexcept;
+
+  /**
+   * Pure-virtual destructor to guarantee this class is abstract.
+   */
+  virtual ~CmdLineTool() noexcept = 0;
+
+  /**
+   * The object's implementation of main() that should be called from the main()
+   * of the program.
+   *
+   * @param argc The number of command-line arguments including the program name.
+   * @param argv The command-line arguments.
+   * @return The exit value of the program.
+   */
+  int main(const int argc, char *const *const argv);
+
+protected:
+
+  /**
+   * An exception throwing version of main().
+   *
+   * @param argc The number of command-line arguments including the program name.
+   * @param argv The command-line arguments.
+   * @return The exit value of the program.
+   */
+  virtual int exceptionThrowingMain(const int argc, char *const *const argv) = 0;
+
+  /**
+   * Prints the usage message of the command-line tool.
+   *
+   * @param os The output stream to which the usage message is to be printed.
+   */
+  virtual void printUsage(std::ostream &os) = 0;
+
+  /**
+   * Standard input stream.
+   */
+  std::istream &m_in;
+
+  /**
+   * Standard output stream.
+   */
+  std::ostream &m_out;
+
+  /**
+   * Standard error stream.
+   */
+  std::ostream &m_err;
+
+  /**
+   * Returns the name of the user running the command-line tool.
+   *
+   * @return The name of the user running the command-line tool.
+   */
+  static std::string getUsername();
+
+  /**
+   * Returns the name of the host on which the command-line tool is running.
+   *
+   * @return The name of the host on which the command-line tool is running.
+   */
+  static std::string getHostname();
+
+}; // class CmdLineTool
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/TapeLabelCmd.cpp b/tapeserver/tapelabel/TapeLabelCmd.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e0f3413c702e0badcfe59dd70cc7d2db23904a3f
--- /dev/null
+++ b/tapeserver/tapelabel/TapeLabelCmd.cpp
@@ -0,0 +1,565 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2019  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "common/Constants.hpp"
+#include "tapeserver/castor/tape/Constants.hpp"
+#include "tapeserver/castor/tape/tapeserver/file/File.hpp"
+#include "tapeserver/castor/tape/tapeserver/file/Structures.hpp"
+#include "tapeserver/tapelabel/TapeLabelCmd.hpp"
+#include "tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp"
+#include "mediachanger/LibrarySlotParser.hpp"
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+TapeLabelCmd::TapeLabelCmd(std::istream &inStream, std::ostream &outStream,
+  std::ostream &errStream, cta::log::StdoutLogger &log,
+  cta::mediachanger::MediaChangerFacade &mc):
+  CmdLineTool(inStream, outStream, errStream),
+  m_log(log),
+  m_encryptionControl(""),
+  m_mc(mc),
+  m_useLbp(true),
+  m_driveSupportLbp(true),
+  m_force(false){
+}
+
+//------------------------------------------------------------------------------
+// destructor
+//------------------------------------------------------------------------------
+TapeLabelCmd::~TapeLabelCmd() noexcept {
+}
+
+//------------------------------------------------------------------------------
+// exceptionThrowingMain
+//------------------------------------------------------------------------------
+int TapeLabelCmd::exceptionThrowingMain(const int argc, char *const *const argv) {
+  const TapeLabelCmdLineArgs cmdLineArgs(argc, argv);
+
+  if(cmdLineArgs.help) {
+    printUsage(m_out);
+    return 0;
+  }
+
+  if (!cmdLineArgs.m_debug) {
+    m_log.setLogMask("WARNING");
+  }
+  
+  if (cmdLineArgs.m_force) {
+    m_force = true;
+  } else {
+    m_force = false;
+  }
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", getUsername()));
+  params.push_back(cta::log::Param("tapeVid", cmdLineArgs.m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel",cmdLineArgs.m_oldLabel));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+  m_log(cta::log::INFO, "Label session started", params);
+  
+  readAndSetConfiguration(getUsername(), cmdLineArgs.m_vid, cmdLineArgs.m_oldLabel);
+   
+  const std::string capabilities("cap_sys_rawio+ep");
+  setProcessCapabilities(capabilities);
+  
+  m_catalogue->checkTapeForLabel(m_vid);
+  
+  std::unique_ptr<castor::tape::tapeserver::drive::DriveInterface> drivePtr = createDrive();
+  castor::tape::tapeserver::drive::DriveInterface &drive = *drivePtr.get();
+  
+  // The label to be written without encryption
+  m_encryptionControl.disable(drive);
+  
+  if (!isDriveSupportLbp(drive)) {
+    m_log(cta::log::WARNING, "Drive does not support LBP", params);
+    m_driveSupportLbp = false;
+  } else {
+    m_driveSupportLbp = true;
+  };
+  
+  mountTape(m_vid);
+  waitUntilTapeLoaded(drive, TAPE_LABEL_UNITREADY_TIMEOUT);
+  
+  int returnCode = 0;
+  if(drive.isWriteProtected()) {
+    m_log(cta::log::ERR, "Cannot label the tape because it is write-protected", params);
+    returnCode = 1;
+  } else {
+    try {
+      rewindDrive(drive);
+      // If the user is trying to label a non-empty tape
+      if(!drive.isTapeBlank()) {
+        if (m_force) {
+          m_log(cta::log::WARNING, "Label a non-empty tape with force option", params);
+          setLbpMode(drive, m_useLbp, m_driveSupportLbp);
+          writeTapeLabel(drive, m_useLbp, m_driveSupportLbp);
+        } else {     
+          if (m_oldLabel.empty()) {
+            m_log(cta::log::WARNING, "Label a non-empty tape without the oldLabel option", params);
+            checkTapeLabel(drive, m_vid); // oldLabel is not set assume it is the same as VID
+            setLbpMode(drive, m_useLbp, m_driveSupportLbp);
+            writeTapeLabel(drive, m_useLbp, m_driveSupportLbp);
+          } else {
+            checkTapeLabel(drive, m_oldLabel);
+            setLbpMode(drive, m_useLbp, m_driveSupportLbp);
+            writeTapeLabel(drive, m_useLbp, m_driveSupportLbp);
+          }
+        }
+      // Else the labeling can go ahead
+      } else {
+        setLbpMode(drive, m_useLbp, m_driveSupportLbp);
+        writeTapeLabel(drive, m_useLbp, m_driveSupportLbp);
+      }
+    } catch(cta::exception::Exception &ne) {
+      params.push_back(cta::log::Param("tapeLabelError", ne.getMessage().str()));
+      m_log(cta::log::ERR, "Label session failed to label the tape", params);
+      returnCode = 1; 
+    }
+  }
+  unloadTape(m_vid, drive);
+  dismountTape(m_vid);
+  drive.disableLogicalBlockProtection();
+  if(!returnCode) {
+    m_catalogue->tapeLabelled(m_vid, m_unitName);
+  }
+  return returnCode;
+}
+
+
+//------------------------------------------------------------------------------
+// isDriveSupportLbp
+//------------------------------------------------------------------------------
+bool TapeLabelCmd::isDriveSupportLbp(
+  castor::tape::tapeserver::drive::DriveInterface &drive) const {
+  castor::tape::tapeserver::drive::deviceInfo devInfo = drive.getDeviceInfo();
+  if (devInfo.isPIsupported) { //drive supports LBP
+    return true;
+  } else {
+    return false;
+  }
+}
+
+//------------------------------------------------------------------------------
+// setLbpMode
+//------------------------------------------------------------------------------
+void TapeLabelCmd::setLbpMode(
+  castor::tape::tapeserver::drive::DriveInterface &drive, const bool useLbp,
+  const bool driveSupportLbp) {
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp",boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp",boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+
+  if(useLbp) {
+    if (driveSupportLbp) {
+      // only crc32c lbp mode is supported
+      drive.enableCRC32CLogicalBlockProtectionReadWrite();
+      m_log(cta::log::INFO, "Label session enabling LBP on drive", params);
+    } else {
+      drive.disableLogicalBlockProtection();
+      m_log(cta::log::WARNING, "Label session disabling LBP on not supported drive", params);
+    }
+  } else {
+    drive.disableLogicalBlockProtection();
+    m_log(cta::log::INFO, "Label session disabling LBP on drive", params);
+  }
+}
+
+//------------------------------------------------------------------------------
+// writeTapeLabel
+//------------------------------------------------------------------------------
+void TapeLabelCmd::writeTapeLabel(
+  castor::tape::tapeserver::drive::DriveInterface &drive, const bool useLbp,
+  const bool driveSupportLbp) {
+  if (useLbp && driveSupportLbp) {
+    writeLabelWithLbpToTape(drive);
+  } else {
+    writeLabelToTape(drive);
+  }
+}
+
+//------------------------------------------------------------------------------
+// checkTapeLabel
+//------------------------------------------------------------------------------
+void TapeLabelCmd::checkTapeLabel(
+  castor::tape::tapeserver::drive::DriveInterface &drive, const std::string &labelToCheck) {
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp",boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp",boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+  m_log(cta::log::INFO, "Label session checking non empty tape", params);
+  
+  if(drive.isTapeBlank()) {
+    cta::exception::Exception ex;
+    ex.getMessage() << "[TapeLabelCmd::checkTapeLabel()] - Tape is blank, "
+                       "cannot proceed with checking the tape";
+    throw ex;
+  }
+  
+  drive.disableLogicalBlockProtection();
+  {
+    castor::tape::tapeFile::VOL1 vol1;
+    drive.readExactBlock((void * )&vol1, sizeof(vol1), "[TapeLabelCmd::checkTapeLabel] - Reading VOL1");
+    switch(vol1.getLBPMethod()) {
+      case castor::tape::SCSI::logicBlockProtectionMethod::CRC32C:
+        if (m_useLbp) {
+          setLbpMode(drive, m_useLbp, m_driveSupportLbp);
+        } else {
+          cta::exception::Exception ex;
+          ex.getMessage() << "[TapeLabelCmd::checkTapeLabel()] - Tape "
+            "labeled with crc32c logical block protection but cta-tape-label "
+            "started without LBP support";
+          throw ex;
+        }
+        break;
+      case castor::tape::SCSI::logicBlockProtectionMethod::ReedSolomon:
+        throw cta::exception::Exception("In TapeLabelCmd::checkTapeLabel(): "
+            "ReedSolomon LBP method not supported");
+      case castor::tape::SCSI::logicBlockProtectionMethod::DoNotUse:
+        drive.disableLogicalBlockProtection();
+        break;
+      default:
+        throw cta::exception::Exception("In TapeLabelCmd::checkTapeLabel(): unknown LBP method");
+    }
+  }
+  // from this point the right LBP mode should be set or not set
+  drive.rewind();
+  {
+    castor::tape::tapeFile::VOL1 vol1;
+    drive.readExactBlock((void *) &vol1, sizeof(vol1), "[TapeLabelCmd::checkTapeLabel()] - Reading VOL1");
+    try {
+      vol1.verify();
+    } catch (std::exception &e) {
+      throw castor::tape::tapeFile::TapeFormatError(e.what());
+    }
+    castor::tape::tapeFile::HeaderChecker::checkVOL1(vol1, labelToCheck); // now we know that we are going to check the correct tape
+  }
+  drive.rewind();
+  params.push_back(cta::log::Param("tapeLabel", labelToCheck));
+  m_log(cta::log::INFO, "Label session successfully checked non empty tape", params);
+}
+
+//------------------------------------------------------------------------------
+// dismountTape
+//------------------------------------------------------------------------------
+void TapeLabelCmd::dismountTape(
+  const std::string &vid) {
+  std::unique_ptr<cta::mediachanger::LibrarySlot> librarySlotPtr;
+  librarySlotPtr.reset(
+    cta::mediachanger::LibrarySlotParser::parse(m_rawLibrarySlot));
+  const cta::mediachanger::LibrarySlot &librarySlot = *librarySlotPtr.get();
+  
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp",boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp",boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("librarySlot", librarySlot.str()));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+
+  try {
+    m_log(cta::log::INFO, "Label session dismounting tape", params);
+    m_mc.dismountTape(vid, librarySlot);
+    const bool dismountWasManual = cta::mediachanger::TAPE_LIBRARY_TYPE_MANUAL ==
+      librarySlot.getLibraryType();
+    if(dismountWasManual) {
+      m_log(cta::log::INFO, "Label session did not dismount tape because media"
+        " changer is manual", params);
+    } else {
+      m_log(cta::log::INFO, "Label session dismounted tape", params);
+    }
+  } catch(cta::exception::Exception &ne) {
+    cta::exception::Exception ex;
+    ex.getMessage() << "Label session failed to dismount tape: " <<
+      ne.getMessage().str();
+    throw ex;
+  }
+}
+
+//------------------------------------------------------------------------------
+// writeLabelWithLbpToTape
+//------------------------------------------------------------------------------
+void TapeLabelCmd::writeLabelWithLbpToTape(
+  castor::tape::tapeserver::drive::DriveInterface &drive) {
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp",boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp",boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+
+  if(!m_useLbp) {
+    m_log(cta::log::WARNING, "LBP mode mismatch. Force labeling with LBP.", params);
+  }
+  m_log(cta::log::INFO, "Label session is writing label with LBP to tape", params);
+  castor::tape::tapeFile::LabelSession ls(drive, m_vid, true);
+  m_log(cta::log::INFO, "Label session has written label with LBP to tape", params);
+}
+
+//------------------------------------------------------------------------------
+// writeLabelToTape
+//------------------------------------------------------------------------------
+void TapeLabelCmd::writeLabelToTape(
+  castor::tape::tapeserver::drive::DriveInterface &drive) {
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", m_vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp",boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp",boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+
+  if(m_useLbp) {
+    m_log(cta::log::WARNING, "LBP mode mismatch. Force labeling without LBP.", params);
+  }
+  m_log(cta::log::INFO, "Label session is writing label to tape", params);
+  castor::tape::tapeFile::LabelSession ls(drive, m_vid, false);
+  m_log(cta::log::INFO, "Label session has written label to tape", params);
+}
+
+//------------------------------------------------------------------------------
+// unloadTape
+//------------------------------------------------------------------------------
+void TapeLabelCmd::unloadTape(
+  const std::string &vid, castor::tape::tapeserver::drive::DriveInterface &drive) {
+  // Decode the configured library slot so we can tell what kind of media
+  // changer (if any) is attached to this drive.
+  std::unique_ptr<cta::mediachanger::LibrarySlot> slot(
+    cta::mediachanger::LibrarySlotParser::parse(m_rawLibrarySlot));
+
+  // Common logging context for every message emitted by this method.
+  std::list<cta::log::Param> params;
+  params.emplace_back("userName", m_userName);
+  params.emplace_back("tapeVid", m_vid);
+  params.emplace_back("tapeOldLabel", m_oldLabel);
+  params.emplace_back("tapeDrive", m_unitName);
+  params.emplace_back("logicalLibrary", m_logicalLibrary);
+  params.emplace_back("useLbp", boolToStr(m_useLbp));
+  params.emplace_back("driveSupportLbp", boolToStr(m_driveSupportLbp));
+  params.emplace_back("force", boolToStr(m_force));
+
+  // We implement the same policy as with the tape sessions:
+  // if the librarySlot parameter is "manual", do nothing.
+  if(slot->getLibraryType() == cta::mediachanger::TAPE_LIBRARY_TYPE_MANUAL) {
+    m_log(cta::log::INFO,
+      "Label session not unloading tape because media changer is manual", params);
+    return;
+  }
+
+  try {
+    m_log(cta::log::INFO, "Label session unloading tape", params);
+    drive.unloadTape();
+    m_log(cta::log::INFO, "Label session unloaded tape", params);
+  } catch(cta::exception::Exception &ne) {
+    // Re-throw with added context about which operation failed.
+    cta::exception::Exception ex;
+    ex.getMessage() << "Label session failed to unload tape: "
+                    << ne.getMessage().str();
+    throw ex;
+  }
+}
+
+//------------------------------------------------------------------------------
+// rewindDrive
+//------------------------------------------------------------------------------
+void TapeLabelCmd::rewindDrive(
+  castor::tape::tapeserver::drive::DriveInterface &drive) {
+  // Common logging context for the rewind messages.
+  std::list<cta::log::Param> params;
+  params.emplace_back("userName", m_userName);
+  params.emplace_back("tapeVid", m_vid);
+  params.emplace_back("tapeOldLabel", m_oldLabel);
+  params.emplace_back("tapeDrive", m_unitName);
+  params.emplace_back("logicalLibrary", m_logicalLibrary);
+  params.emplace_back("useLbp", boolToStr(m_useLbp));
+  params.emplace_back("driveSupportLbp", boolToStr(m_driveSupportLbp));
+  params.emplace_back("force", boolToStr(m_force));
+
+  m_log(cta::log::INFO, "Label session rewinding tape", params);
+  drive.rewind();
+  m_log(cta::log::INFO, "Label session successfully rewound tape", params);
+}
+
+//------------------------------------------------------------------------------
+// setProcessCapabilities
+//------------------------------------------------------------------------------
+void TapeLabelCmd::setProcessCapabilities(
+  const std::string &capabilities) {
+  // Apply the requested capability text to the current process, then record
+  // what was set so the action is traceable in the logs.
+  m_capUtils.setProcText(capabilities);
+
+  std::list<cta::log::Param> params;
+  params.emplace_back("capabilities", capabilities);
+  m_log(cta::log::INFO, "Label session set process capabilities", params);
+}
+
+//------------------------------------------------------------------------------
+// readConfiguration
+//------------------------------------------------------------------------------
+void TapeLabelCmd::readAndSetConfiguration(const std::string &userName,
+  const std::string &vid, const std::string &oldLabel) {
+  m_vid = vid;
+  m_oldLabel = oldLabel;
+  m_userName = userName;
+
+  // Read the tape-drive configuration.  The pre-labeling tool only supports
+  // hosts with exactly one configured drive; anything else is an error.
+  // (The original brace-less "if (...) for (...) {...} else {...}" was legal
+  // but misleadingly indented; the check is now an explicit guard clause.)
+  cta::tape::daemon::Tpconfig tpConfig;
+  tpConfig = cta::tape::daemon::Tpconfig::parseFile(castor::tape::TPCONFIGPATH);
+  const int configuredDrives = tpConfig.size();
+  if(1 != configuredDrives) {
+    cta::exception::Exception ex;
+    ex.getMessage() << "Failed to read configuration: "
+                    << configuredDrives << " drives configured";
+    throw ex;
+  }
+  // Exactly one entry: copy its parameters into the member variables.
+  for(auto &driveConfig: tpConfig) {
+    m_devFilename = driveConfig.second.value().devFilename;
+    m_rawLibrarySlot = driveConfig.second.value().rawLibrarySlot;
+    m_logicalLibrary = driveConfig.second.value().logicalLibrary;
+    m_unitName = driveConfig.second.value().unitName;
+  }
+
+  // Connect to the CTA catalogue using the hard-coded configuration path.
+  const cta::rdbms::Login catalogueLogin = cta::rdbms::Login::parseFile(CATALOGUE_CONFIG_PATH);
+  const uint64_t nbConns = 1;
+  const uint64_t nbArchiveFileListingConns = 0;
+  auto catalogueFactory = cta::catalogue::CatalogueFactoryFactory::create(m_log,
+    catalogueLogin, nbConns, nbArchiveFileListingConns);
+  m_catalogue = catalogueFactory->create();
+
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("catalogueDbType", catalogueLogin.dbTypeToString(catalogueLogin.dbType)));
+  params.push_back(cta::log::Param("catalogueDatabase", catalogueLogin.database));
+  params.push_back(cta::log::Param("catalogueUsername", catalogueLogin.username));
+  params.push_back(cta::log::Param("devFilename", m_devFilename));
+  params.push_back(cta::log::Param("rawLibrarySlot", m_rawLibrarySlot));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("unitName", m_unitName));
+  m_log(cta::log::INFO, "Label session read configuration", params);
+}
+
+//------------------------------------------------------------------------------
+// mountTape
+//------------------------------------------------------------------------------
+void TapeLabelCmd::mountTape(const std::string &vid) {
+  // Decode the configured library slot (one-step smart-pointer init instead
+  // of the previous default-construct-then-reset).
+  std::unique_ptr<cta::mediachanger::LibrarySlot> librarySlotPtr(
+    cta::mediachanger::LibrarySlotParser::parse(m_rawLibrarySlot));
+  const cta::mediachanger::LibrarySlot &librarySlot = *librarySlotPtr;
+
+  std::list<cta::log::Param> params;
+  params.push_back(cta::log::Param("userName", m_userName));
+  params.push_back(cta::log::Param("tapeVid", vid));
+  params.push_back(cta::log::Param("tapeOldLabel", m_oldLabel));
+  params.push_back(cta::log::Param("tapeDrive", m_unitName));
+  params.push_back(cta::log::Param("logicalLibrary", m_logicalLibrary));
+  params.push_back(cta::log::Param("useLbp", boolToStr(m_useLbp)));
+  params.push_back(cta::log::Param("driveSupportLbp", boolToStr(m_driveSupportLbp)));
+  params.push_back(cta::log::Param("librarySlot", librarySlot.str()));
+  params.push_back(cta::log::Param("force", boolToStr(m_force)));
+
+  m_log(cta::log::INFO, "Label session mounting tape", params);
+  // NOTE(review): the call is made unconditionally; presumably the facade
+  // treats a manual library slot as a no-op — confirm before relying on it.
+  m_mc.mountTapeReadWrite(vid, librarySlot);
+  if(cta::mediachanger::TAPE_LIBRARY_TYPE_MANUAL == librarySlot.getLibraryType()) {
+    // Fixed log-message grammar: "did not mounted" -> "did not mount the".
+    m_log(cta::log::INFO, "Label session did not mount the tape because the"
+      " media changer is manual", params);
+  } else {
+    m_log(cta::log::INFO, "Label session mounted tape", params);
+  }
+}
+
+//------------------------------------------------------------------------------
+// createDrive
+//------------------------------------------------------------------------------
+std::unique_ptr<castor::tape::tapeserver::drive::DriveInterface>
+  TapeLabelCmd::createDrive() {
+  // Locate the drive's SCSI device information via the configured symlink.
+  castor::tape::SCSI::DeviceVector dv(m_sysWrapper);
+  castor::tape::SCSI::DeviceInfo driveInfo = dv.findBySymlink(m_devFilename);
+
+  // Instantiate the drive object
+  std::unique_ptr<castor::tape::tapeserver::drive::DriveInterface>
+    drive(castor::tape::tapeserver::drive::createDrive(driveInfo, m_sysWrapper));
+
+  // Use nullptr rather than NULL, consistent with modern C++ usage.
+  if(nullptr == drive.get()) {
+    cta::exception::Exception ex;
+    ex.getMessage() << "Failed to instantiate drive object";
+    throw ex;
+  }
+
+  return drive;
+}
+
+
+//------------------------------------------------------------------------------
+// waitUntilTapeLoaded
+//------------------------------------------------------------------------------
+void TapeLabelCmd::waitUntilTapeLoaded(
+  castor::tape::tapeserver::drive::DriveInterface &drive, const int timeoutSecond) {
+  // Common logging context for the load-wait messages.
+  std::list<cta::log::Param> params;
+  params.emplace_back("userName", m_userName);
+  params.emplace_back("tapeVid", m_vid);
+  params.emplace_back("tapeOldLabel", m_oldLabel);
+  params.emplace_back("tapeDrive", m_unitName);
+  params.emplace_back("logicalLibrary", m_logicalLibrary);
+  params.emplace_back("useLbp", boolToStr(m_useLbp));
+  params.emplace_back("driveSupportLbp", boolToStr(m_driveSupportLbp));
+  params.emplace_back("force", boolToStr(m_force));
+
+  try {
+    m_log(cta::log::INFO, "Label session loading tape", params);
+    drive.waitUntilReady(timeoutSecond);
+    m_log(cta::log::INFO, "Label session loaded tape", params);
+  } catch(cta::exception::Exception &ne) {
+    // Re-throw with added context about which operation failed.
+    cta::exception::Exception ex;
+    ex.getMessage() << "Failed to wait for tape to be loaded: "
+                    << ne.getMessage().str();
+    throw ex;
+  }
+}
+
+//------------------------------------------------------------------------------
+// boolToStr
+//------------------------------------------------------------------------------
+const char *TapeLabelCmd::boolToStr(const bool value) {
+  // Render a boolean as the lowercase strings used by the log parameters.
+  if(value) {
+    return "true";
+  }
+  return "false";
+}
+
+//------------------------------------------------------------------------------
+// printUsage
+//------------------------------------------------------------------------------
+void TapeLabelCmd::printUsage(std::ostream &os) {
+  // Delegate to the argument parser, which owns the usage text.
+  TapeLabelCmdLineArgs::printUsage(os);
+}
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/TapeLabelCmd.hpp b/tapeserver/tapelabel/TapeLabelCmd.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7e535d30da6f6cdf1183091b2df788920930c981
--- /dev/null
+++ b/tapeserver/tapelabel/TapeLabelCmd.hpp
@@ -0,0 +1,305 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "common/log/StdoutLogger.hpp"
+#include "common/log/LogContext.hpp"
+#include "common/processCap/ProcessCap.hpp"
+#include "tapeserver/castor/tape/tapeserver/drive/DriveInterface.hpp"
+#include "tapeserver/castor/tape/tapeserver/drive/DriveGeneric.hpp"
+#include "tapeserver/castor/tape/tapeserver/daemon/EncryptionControl.hpp"
+#include "tapeserver/daemon/Tpconfig.hpp"
+#include "tapeserver/tapelabel/CmdLineTool.hpp"
+#include "catalogue/CatalogueFactoryFactory.hpp"
+#include "mediachanger/MediaChangerFacade.hpp"
+#include <memory>
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+/**
+ * Command-line tool for pre-labeling a CTA tape.
+ */
+class TapeLabelCmd: public CmdLineTool {
+public:
+
+  /**
+   * Constructor.
+   *
+   * @param inStream Standard input stream.
+   * @param outStream Standard output stream.
+   * @param errStream Standard error stream.
+   * @param log The object representing the API of the CTA logging system.
+   * @param mc Interface to the media changer.
+   */
+  TapeLabelCmd(std::istream &inStream, std::ostream &outStream,
+    std::ostream &errStream, cta::log::StdoutLogger &log,
+    cta::mediachanger::MediaChangerFacade &mc);
+
+  /**
+   * Destructor.
+   */
+  ~TapeLabelCmd() noexcept;
+
+private:
+  
+  /**
+   * The object representing the API of the CTA logging system.
+   */
+  cta::log::StdoutLogger  &m_log;
+  
+  /**
+   * Hard-coded path of the catalogue login configuration file.
+   */
+  const std::string CATALOGUE_CONFIG_PATH = "/etc/cta/cta-catalogue.conf";
+  
+  /**
+   * Unique pointer to the catalogue interface.
+   */
+  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
+  
+  /**
+   * Object providing utilities for working with UNIX capabilities.
+   */
+  cta::server::ProcessCap m_capUtils;
+  
+  /**
+   * The system wrapper used to find the device and instantiate the drive object.
+   */
+  castor::tape::System::realWrapper m_sysWrapper;
+  
+  /**
+   * The filename of the device file of the tape drive.
+   */
+  std::string m_devFilename;
+  
+  /**
+   * The slot in the tape library that contains the tape drive (string encoded).
+   */
+  std::string m_rawLibrarySlot;
+  
+  /**
+   * The logical library of the tape drive.
+   */
+  std::string m_logicalLibrary;
+  
+  /**
+   * The unit name of the tape drive.
+   */
+  std::string m_unitName;
+  
+  /**
+   * The name of the user running the command-line tool.
+   */
+  std::string m_userName;
+  
+  /**
+   * The VID of the tape to be pre-labeled.
+   */
+  std::string m_vid;
+  
+  /**
+   * The old label on the tape to be checked when pre-labeling.
+   */
+  std::string m_oldLabel;
+  
+  /** 
+   * Encryption helper object 
+   */
+   castor::tape::tapeserver::daemon::EncryptionControl m_encryptionControl;
+   
+  /**
+   * The object representing the media changer.
+   */
+  cta::mediachanger::MediaChangerFacade &m_mc;
+  
+  /**
+   * Determines whether logical block protection (LBP) is used by the
+   * pre-labeling commands.  Hard-coded at construction time.
+   */
+  const bool m_useLbp;
+  
+  /**
+   * True if the drive supports LBP.
+   */
+  bool m_driveSupportLbp;
+  
+  /**
+   * True to skip label checks on non-blank tapes. 
+   */ 
+  bool m_force;
+  
+  /**
+   * An exception throwing version of main().
+   *
+   * @param argc The number of command-line arguments including the program name.
+   * @param argv The command-line arguments.
+   * @return The exit value of the program.
+   */
+  int exceptionThrowingMain(const int argc, char *const *const argv) override;
+
+  /**
+   * Prints the usage message of the command-line tool.
+   *
+   * @param os The output stream to which the usage message is to be printed.
+   */
+  void printUsage(std::ostream &os) override;
+  
+  /**
+   * Sets internal configuration parameters to be used for labeling.
+   * It reads drive and library parameters from /etc/cta/TPCONFIG and catalogue
+   * login parameters from /etc/cta/cta-catalogue.conf.
+   *
+   * @param userName The name of the user running the command-line tool.
+   * @param vid The VID of the tape to be pre-labeled.
+   * @param oldLabel The old label on the tape to be checked when pre-labeling. Could be empty.
+   */
+  void readAndSetConfiguration(const std::string &userName,
+    const std::string &vid, const std::string &oldLabel);
+  
+  
+  /**
+   * Sets the capabilities of the process and logs the result.
+   *
+   * @param capabilities The string representation of the capabilities.
+   */
+  void setProcessCapabilities(const std::string &capabilities);
+  
+  /**
+   * Returns a Drive object representing the tape drive to be used to label
+   * a tape.
+   *
+   * @return The drive object.
+   */
+  std::unique_ptr<castor::tape::tapeserver::drive::DriveInterface> createDrive();
+  
+  /**
+   * Mounts the tape to be labeled.
+   * @param vid The volume identifier of the tape to be mounted.
+   */
+  void mountTape(const std::string &vid);
+  
+  /**
+   * Waits for the tape to be loaded into the tape drive.
+   *
+   * @param drive Object representing the drive hardware.
+   * @param timeoutSecond The number of seconds to wait for the tape to be
+   * loaded into the tape drive. 
+   */
+  void waitUntilTapeLoaded(castor::tape::tapeserver::drive::DriveInterface &drive,
+    const int timeoutSecond);
+  
+  /**
+   * Writes the label file with logical block protection to the tape.
+   *
+   * This method assumes the tape has been rewound.
+   *
+   * @param drive The tape drive.
+   */
+  void writeLabelWithLbpToTape(castor::tape::tapeserver::drive::DriveInterface &drive);
+  
+  /**
+   * Writes the label file to the tape.
+   *
+   * This method assumes the tape has been rewound.
+   *
+   * @param drive The tape drive.
+   */
+  void writeLabelToTape(castor::tape::tapeserver::drive::DriveInterface &drive);
+  
+  /**
+   * Unloads the specified tape from the specified tape drive.
+   *
+   * @param vid The volume identifier of the tape to be unloaded.  Please note
+   * that the value of this field is only used for logging purposes.
+   * @param drive The tape drive.
+   */
+  void unloadTape(const std::string &vid, castor::tape::tapeserver::drive::DriveInterface &drive);
+  
+  /**
+   * Dismounts the specified tape.
+   *
+   * @param vid The volume identifier of the tape to be dismounted.
+   */
+  void dismountTape(const std::string &vid);
+  
+  /**
+   * Rewinds the specified tape drive.
+   *
+   * @param drive The tape drive.
+   */
+  void rewindDrive(castor::tape::tapeserver::drive::DriveInterface &drive);
+  
+  /**
+   * Checks the specified tape on the specified tape drive.
+   * This method assumes that the drive has the tape and the tape has been rewound. 
+   * It checks the tape label from the VOL1 tape header on the tape against the given
+   * label and throws an exception in case of a label mismatch. This method leaves the
+   * tape rewound.
+   *  
+   * @param drive The tape drive.
+   * @param labelToCheck The label that the tape should be checked against.
+   */
+  void checkTapeLabel(castor::tape::tapeserver::drive::DriveInterface &drive, const std::string &labelToCheck);
+  
+  /**
+   * Writes the label file with or without logical block protection to the tape
+   * depending on the useLbp and driveSupportLbp parameters.
+   *
+   * This method assumes the tape has been rewound.
+   *
+   * @param drive The tape drive.
+   * @param useLbp The configuration parameter for LBP mode.
+   * @param driveSupportLbp The detected parameter for the drive.
+   */
+  void writeTapeLabel(castor::tape::tapeserver::drive::DriveInterface &drive,
+    const bool useLbp, const bool driveSupportLbp);
+  
+  /**
+   * Sets the logical block protection mode on the drive
+   * depending on the useLbp and driveSupportLbp parameters. This method needs to
+   * be used to avoid exceptions in setLbp if the drive does not support LBP (mhvtl).
+   *
+   * @param drive The tape drive.
+   * @param useLbp The configuration parameter for LBP mode.
+   * @param driveSupportLbp The detected parameter for the drive.
+   */
+  void setLbpMode(castor::tape::tapeserver::drive::DriveInterface &drive,
+    const bool useLbp, const bool driveSupportLbp);
+  
+  /**
+   * Detects whether the drive supports logical block protection.
+   *
+   * @param drive The tape drive.
+   * @return The boolean value true if the drive supports LBP or false otherwise.
+   */
+  bool isDriveSupportLbp(castor::tape::tapeserver::drive::DriveInterface &drive) const;
+  
+  /**
+   * Returns the string representation of the specified boolean value.
+   *
+   * @param value The boolean value.
+   * @return The string representation.
+   */
+  const char *boolToStr(const bool value);
+}; // class TapeLabelCmd
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/TapeLabelCmdLineArgs.cpp b/tapeserver/tapelabel/TapeLabelCmdLineArgs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f7b64b7a9fab2a8629ba73551b53f8589cac4fa6
--- /dev/null
+++ b/tapeserver/tapelabel/TapeLabelCmdLineArgs.cpp
@@ -0,0 +1,142 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp"
+#include "common/exception/CommandLineNotParsed.hpp"
+#include "common/utils/utils.hpp"
+#include "common/Constants.hpp"
+
+#include <getopt.h>
+#include <ostream>
+
+#include <string.h>
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+//------------------------------------------------------------------------------
+// constructor
+//------------------------------------------------------------------------------
+TapeLabelCmdLineArgs::TapeLabelCmdLineArgs(const int argc, char *const *const argv):
+  help(false), m_debug(false), m_force(false) {
+
+  static struct option longopts[] = {
+    {"vid", required_argument, NULL, 'v'},
+    {"oldlabel", required_argument, NULL, 'o'},
+    {"debug", no_argument, NULL, 'd'},
+    {"force", no_argument, NULL, 'f'},
+    {"help", no_argument, NULL, 'h'},
+    {NULL, 0, NULL, 0}
+  };
+
+  // Prevent getopt() from printing an error message if it does not recognize
+  // an option character
+  opterr = 0;
+
+  int opt = 0;
+
+  // Leading ':' in the optstring makes getopt_long() report a missing
+  // argument as ':' instead of '?'.
+  while((opt = getopt_long(argc, argv, ":v:o:hdf", longopts, NULL)) != -1) {
+    switch(opt) {
+    case 'v':
+      if (strlen(optarg) > CA_MAXVIDLEN) {
+        exception::CommandLineNotParsed ex;
+        ex.getMessage() << "The -" << (char)opt << " option too big";
+        throw ex;
+      } else {
+        m_vid = std::string(optarg);
+        utils::toUpper(m_vid);
+      }
+      break;
+    case 'o':
+      if (strlen(optarg) > CA_MAXVIDLEN) {
+        exception::CommandLineNotParsed ex;
+        ex.getMessage() << "The -" << (char)opt << " option too big";
+        throw ex;
+      } else {
+        m_oldLabel = std::string(optarg);
+        utils::toUpper(m_oldLabel); // fixed: was indented with a tab
+      }
+      break;
+    case 'h':
+      help = true;
+      break;
+    case 'd':
+      m_debug = true;
+      break;
+    case 'f':
+      m_force = true;
+      break;
+    case ':': // Missing parameter
+      {
+        exception::CommandLineNotParsed ex;
+        ex.getMessage() << "The -" << (char)optopt << " option requires a parameter";
+        throw ex;
+      }
+    case '?': // Unknown option
+      {
+        exception::CommandLineNotParsed ex;
+        if(0 == optopt) {
+          ex.getMessage() << "Unknown command-line option";
+        } else {
+          ex.getMessage() << "Unknown command-line option: -" << (char)optopt;
+        }
+        throw ex;
+      }
+    default:
+      {
+        exception::CommandLineNotParsed ex;
+        ex.getMessage() <<
+          "getopt_long returned the following unknown value: 0x" <<
+          std::hex << (int)opt;
+        throw ex;
+      }
+    } // switch(opt)
+  } // while getopt_long()
+
+  // A VID is mandatory unless the user only asked for help.
+  // (The redundant trailing "if(help) return;" at the end of the original
+  // constructor was dead code and has been removed.)
+  if (m_vid.empty() && !help) {
+    exception::CommandLineNotParsed ex;
+    ex.getMessage() << "--vid/-v VID must be specified";
+    throw ex;
+  }
+}
+
+//------------------------------------------------------------------------------
+// printUsage
+//------------------------------------------------------------------------------
+void TapeLabelCmdLineArgs::printUsage(std::ostream &os) {
+  // One statement per output line; the emitted text is unchanged.
+  os << "Usage:" << std::endl;
+  os << "  cta-tape-label [options] --vid/-v VID" << std::endl;
+  os << "Where:" << std::endl;
+  os << "  -v, --vid        The VID of the tape to be labeled" << std::endl;
+  os << "Options:" << std::endl;
+  os << "  -o, --oldlabel   The vid of the current tape label on the tape if it is not the same as VID" << std::endl;
+  os << "  -h, --help       Print this help message and exit" << std::endl;
+  os << "  -d, --debug      Print more logs for label operations" << std::endl;
+  os << "  -f, --force      Force labeling for not-blank tapes for testing purpose and without label checks. Must only be used manually." << std::endl;
+}
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp b/tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2867efcd3673f6e9540e3ddac92bfe7d23a860ec
--- /dev/null
+++ b/tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp
@@ -0,0 +1,77 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <string>
+
+namespace cta {
+namespace tapeserver {
+namespace tapelabel {
+
+/**
+ * Structure to store the command-line arguments of the command-line tool
+ * named cta-tape-label.
+ */
+struct TapeLabelCmdLineArgs {
+  /**
+   * True if the usage message should be printed.
+   */
+  bool help;
+  
+  /**
+   * The VID of the tape to be pre-labeled.
+   */
+  std::string m_vid;
+  
+  /**
+   * The old label on the tape to be checked when pre-labeling.
+   */
+  std::string m_oldLabel;
+  
+  /**
+   * True to enable verbose output in the command line.
+   * By default only ERR and WARNING messages are printed. 
+   */ 
+  bool m_debug;
+  
+  /**
+   * True to skip label checks on non-blank tapes. 
+   */ 
+  bool m_force;
+
+  /**
+   * Constructor that parses the specified command-line arguments.
+   *
+   * @param argc The number of command-line arguments including the name of the
+   * executable.
+   * @param argv The vector of command-line arguments.
+   */
+  TapeLabelCmdLineArgs(const int argc, char *const *const argv);
+
+  /**
+   * Prints the usage message of the command-line tool.
+   *
+   * @param os The output stream to which the usage message is to be printed.
+   */
+  static void printUsage(std::ostream &os);
+};
+
+} // namespace tapelabel
+} // namespace tapeserver
+} // namespace cta
diff --git a/tapeserver/tapelabel/TapeLabelCmdLineArgsTest.cpp b/tapeserver/tapelabel/TapeLabelCmdLineArgsTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..67f4d729ebfe79494a74aa6aada43bbd950c7f2f
--- /dev/null
+++ b/tapeserver/tapelabel/TapeLabelCmdLineArgsTest.cpp
@@ -0,0 +1,299 @@
+/*
+ * The CERN Tape Archive(CTA) project
+ * Copyright(C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ *(at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "common/exception/Exception.hpp"
+#include "common/exception/CommandLineNotParsed.hpp"
+#include "tapeserver/tapelabel/TapeLabelCmdLineArgs.hpp"
+
+#include <gtest/gtest.h>
+#include <list>
+
+namespace unitTests {
+
+class cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest : public ::testing::Test {
+protected:
+
+  // Owns a heap-allocated argv so each test can build its own command line;
+  // TearDown() releases everything registered in m_argsList.
+  struct Argcv {
+    int argc;
+    char **argv;
+    Argcv(): argc(0), argv(NULL) {
+    }
+  };
+  typedef std::list<Argcv*> ArgcvList;
+  ArgcvList m_argsList;
+
+  /**
+   * Creates a duplicate string using the new operator.
+   */
+  char *dupString(const char *str) {
+    const size_t len = strlen(str);
+    char *duplicate = new char[len + 1];
+    // memcpy of len + 1 bytes copies the terminating '\0' as well, replacing
+    // the previous strncpy + manual termination.
+    memcpy(duplicate, str, len + 1);
+    return duplicate;
+  }
+
+  virtual void SetUp() {
+    // Allow getopt_long to be called again
+    optind = 0;
+  }
+
+  virtual void TearDown() {
+    // Allow getopt_long to be called again
+    optind = 0;
+
+    for(ArgcvList::const_iterator itor = m_argsList.begin();
+      itor != m_argsList.end(); itor++) {
+      for(int i = 0; i < (*itor)->argc; i++) {
+        delete[] (*itor)->argv[i];
+      }
+      delete[] (*itor)->argv;
+      delete *itor;
+    }
+  }
+};
+
+// The tests below exercise TapeLabelCmdLineArgs parsing for each supported
+// option, in both its short and long form, plus the missing-argument cases.
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, help_short) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -h alone: help is set, no VID required.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 2;
+  args->argv = new char *[3];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-h");
+  args->argv[2] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.help);
+  ASSERT_TRUE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, help_long) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --help alone: same behavior as -h.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 2;
+  args->argv = new char *[3];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("--help");
+  args->argv[2] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.help);
+  ASSERT_TRUE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, debug_short) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -d with a VID: the debug flag is set.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 4;
+  args->argv = new char *[5];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("-d");
+  args->argv[4] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.m_debug);
+  ASSERT_FALSE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, debug_long) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --debug with a VID: same behavior as -d.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 4;
+  args->argv = new char *[5];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("--debug");
+  args->argv[4] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.m_debug);
+  ASSERT_FALSE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, force_short) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -f with a VID: the force flag is set.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 4;
+  args->argv = new char *[5];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("-f");
+  args->argv[4] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.m_force);
+  ASSERT_FALSE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, force_long) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --force with a VID: same behavior as -f.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 4;
+  args->argv = new char *[5];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("--force");
+  args->argv[4] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_TRUE(cmdLine.m_force);
+  ASSERT_FALSE(cmdLine.m_vid.empty());
+  ASSERT_TRUE(cmdLine.m_oldLabel.empty());
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, vid_short) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -v VID001: the VID is captured verbatim (already upper-case here).
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 3;
+  args->argv = new char *[4];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_FALSE(cmdLine.help);
+  ASSERT_EQ(std::string("VID001"), cmdLine.m_vid);
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, vid_long) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --vid VID001: same behavior as -v.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 3;
+  args->argv = new char *[4];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("--vid");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_FALSE(cmdLine.help);
+  ASSERT_EQ(std::string("VID001"), cmdLine.m_vid);
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, vid_missed) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --vid without its mandatory argument must be rejected.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 2;
+  args->argv = new char *[3];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("--vid");
+  args->argv[2] = NULL;
+
+  ASSERT_THROW(TapeLabelCmdLineArgs cmdLine(args->argc, args->argv),
+    cta::exception::CommandLineNotParsed);
+}
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, oldVid_short) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -o VID002 alongside -v: the old label is captured separately.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 5;
+  args->argv = new char *[6];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("-o");
+  args->argv[4] = dupString("VID002");
+  args->argv[5] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_FALSE(cmdLine.help);
+  ASSERT_EQ(std::string("VID001"), cmdLine.m_vid);
+  ASSERT_EQ(std::string("VID002"), cmdLine.m_oldLabel);
+}
+
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, oldVid_long) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // --oldlabel VID002: same behavior as -o.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 5;
+  args->argv = new char *[6];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("--oldlabel");
+  args->argv[4] = dupString("VID002");
+  args->argv[5] = NULL;
+
+  TapeLabelCmdLineArgs cmdLine(args->argc, args->argv);
+
+  ASSERT_FALSE(cmdLine.help);
+  ASSERT_EQ(std::string("VID001"), cmdLine.m_vid);
+  ASSERT_EQ(std::string("VID002"), cmdLine.m_oldLabel);
+}
+
+
+TEST_F(cta_tapeserver_tapelabel_TapeLabelCmdLineArgsTest, oldVid_missed) {
+  using namespace cta::tapeserver::tapelabel;
+
+  // -o without its mandatory argument must be rejected.
+  Argcv *args = new Argcv();
+  m_argsList.push_back(args);
+  args->argc = 4;
+  args->argv = new char *[5];
+  args->argv[0] = dupString("cta-tape-label");
+  args->argv[1] = dupString("-v");
+  args->argv[2] = dupString("VID001");
+  args->argv[3] = dupString("-o");
+  args->argv[4] = NULL;
+  
+  ASSERT_THROW(TapeLabelCmdLineArgs cmdLine(args->argc, args->argv),
+    cta::exception::CommandLineNotParsed);
+}
+
+} // namespace unitTests
diff --git a/catalogue/ChecksumValueMismatch.cpp b/tapeserver/tapelabel/TapeLabelCmdMain.cpp
similarity index 62%
rename from catalogue/ChecksumValueMismatch.cpp
rename to tapeserver/tapelabel/TapeLabelCmdMain.cpp
index 12f91a7272e11b4098b2e4324526dc9cc9a4a795..184d21ea28e521d25c79ed652d3d4f8277ed8680 100644
--- a/catalogue/ChecksumValueMismatch.cpp
+++ b/tapeserver/tapelabel/TapeLabelCmdMain.cpp
@@ -16,24 +16,26 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "catalogue/ChecksumValueMismatch.hpp"
+#include "tapeserver/tapelabel/TapeLabelCmd.hpp"
 
-namespace cta {
-namespace catalogue {
-
-
-//------------------------------------------------------------------------------
-// constructor
-//------------------------------------------------------------------------------
-ChecksumValueMismatch::ChecksumValueMismatch(const std::string &context, const bool embedBacktrace):
-  cta::exception::Exception(context, embedBacktrace) {
-}
+#include <iostream>
 
 //------------------------------------------------------------------------------
-// destructor
+// main
 //------------------------------------------------------------------------------
-ChecksumValueMismatch::~ChecksumValueMismatch() {
+int main(const int argc, char *const *const argv) {
+  char buf[256];
+  std::string hostName;
+  if(gethostname(buf, sizeof(buf))) {
+    hostName = "UNKNOWN";
+  } else {
+    buf[sizeof(buf) - 1] = '\0';
+    hostName = buf;
+  }
+  cta::log::StdoutLogger log(hostName, "cta-tape-label");
+  cta::mediachanger::MediaChangerFacade mc(log);
+  
+  cta::tapeserver::tapelabel::TapeLabelCmd cmd(std::cin, std::cout, std::cerr, log, mc);
+  return cmd.main(argc, argv);
 }
 
-} // namespace catalogue
-} // namespace cta
diff --git a/tapeserver/tapelabel/cta-tape-label.1cta b/tapeserver/tapelabel/cta-tape-label.1cta
new file mode 100644
index 0000000000000000000000000000000000000000..dcf407a51be44a0951a47f2014f53cacfb653439
--- /dev/null
+++ b/tapeserver/tapelabel/cta-tape-label.1cta
@@ -0,0 +1,47 @@
+.\" The CERN Tape Archive (CTA) project
+.\" Copyright (C) 2015  CERN
+.\"
+.\" This program is free software: you can redistribute it and/or modify
+.\" it under the terms of the GNU General Public License as published by
+.\" the Free Software Foundation, either version 3 of the License, or
+.\" (at your option) any later version.
+.\"
+.\" This program is distributed in the hope that it will be useful,
+.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
+.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+.\" GNU General Public License for more details.
+.\"
+.\" You should have received a copy of the GNU General Public License
+.\" along with this program.  If not, see <http://www.gnu.org/licenses/>.
+.TH CTA-TAPE-LABEL 1CTA "July 2019" CTA CTA
+.SH NAME
+cta-tape-label \- Pre-label a CTA tape
+.SH SYNOPSIS
+.BI "cta-tape-label [options] --vid/-v VID"
+
+.SH DESCRIPTION
+\fBcta-tape-label\fP is a command-line tool for pre-labelling a CTA tape.
+
+\fBcta-tape-label\fP writes the CTA label to the tape with the specified \fBVID\fP.
+.SH OPTIONS
+.TP
+\fB\-o, \-\-oldlabel
+Volume ID from the tape label if the tape is not blank. Must be set if the label on the tape contains a VID different from the one specified with \fB\-\-vid\fP.
+.TP
+\fB\-h, \-\-help
+Prints the usage message.
+.TP
+\fB\-d, \-\-debug
+Prints more logs for label operations.
+.TP
+\fB\-f, \-\-force
+Force labeling of non-blank tapes, for testing purposes only, skipping all label checks. Must only be used manually.
+.SH RETURN VALUE
+Zero on success and non-zero on failure.
+.SH EXAMPLES
+cta-tape-label --vid I54321 --oldlabel T12345 --debug
+.br
+cta-tape-label --vid L54321 --force
+
+.SH AUTHOR
+\fBCTA\fP Team
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index b0e195cfdac8c1dcc9110a93e4d56cbb269617d7..051ac24e6bbffe8c37c3b6391f42ee2eb7d5c188 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -35,6 +35,7 @@ target_link_libraries(cta-unitTests
   ctainmemorystmtunittests
   ctaobjectstore
   ctaobjectstoreunittests
+  ctardbmsunittests
   ctardbmswrapperunittests
   ctaschedulerunittests
   ctatapeserverdaemonunittests
@@ -42,6 +43,7 @@ target_link_libraries(cta-unitTests
   ctatapeserverfileunittests
   ctatapeserverscsiunittests
   ctadiskunittests
+  ctatapelabelunittests
   gtest
   pthread)
 
@@ -63,7 +65,8 @@ target_link_libraries(cta-rdbmsUnitTests
   ctardbmsunittests
   ctadisk
   gtest
-  pthread)
+  pthread
+  ${PROTOBUF3_LIBRARIES})
 
 if (OCCI_SUPPORT)
   set_property (TARGET cta-rdbmsUnitTests APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
@@ -101,11 +104,14 @@ add_executable(cta-systemTests
   system_tests.cpp
   ${GMOCK_SRC})
 
+set_property (TARGET cta-systemTests APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+
 target_link_libraries(cta-systemTests
   systemTestHelperTests
   cta-tapedSystemTests
   gtest
-  pthread)
+  pthread
+  ${PROTOBUF3_LIBRARIES})
 
 install(TARGETS cta-rdbmsUnitTests cta-unitTests cta-unitTests-multiProcess cta-systemTests DESTINATION usr/bin)
 
diff --git a/tests/TempFile.cpp b/tests/TempFile.cpp
index a55ed032905b546078d00c3aadf9482877a87c1d..5b21edbc9555d000f0beb480a671b9d356297189 100644
--- a/tests/TempFile.cpp
+++ b/tests/TempFile.cpp
@@ -57,14 +57,14 @@ void TempFile::randomFill(size_t size) {
   out.write(buff.get(), size);
 }
 
-std::string TempFile::adler32() {
+uint32_t TempFile::adler32() {
   struct ::stat fileStat;
   cta::exception::Errnum::throwOnMinusOne(::stat(m_path.c_str(), &fileStat),
       "In TempFile::adler32(): failed to stat file.");
   std::unique_ptr<char[] > buff(new char[fileStat.st_size]);
   std::ifstream in(m_path, std::ios::in | std::ios::binary);
   in.read(buff.get(), fileStat.st_size);
-  return cta::utils::getAdler32String((uint8_t*)buff.get(), fileStat.st_size);
+  return cta::utils::getAdler32((uint8_t*)buff.get(), fileStat.st_size);
 }
 
 void TempFile::stringFill(const std::string& string) {
@@ -77,4 +77,4 @@ void TempFile::stringAppend(const std::string& string) {
   out << string;
 }
   
-}
\ No newline at end of file
+}
diff --git a/tests/TempFile.hpp b/tests/TempFile.hpp
index b23daa6d5cfcc2473334fee738d3474b1d46e7ef..7f4fc14306b7df7aef158d41aaa389ec3e066f05 100644
--- a/tests/TempFile.hpp
+++ b/tests/TempFile.hpp
@@ -31,11 +31,11 @@ public:
   TempFile(const std::string& path);
   std::string path();
   void randomFill(size_t size);
-  std::string adler32();
+  uint32_t adler32();
   void stringFill(const std::string &string);
   void stringAppend(const std::string &string);
   ~TempFile();
 private:
   std::string m_path;
 };
-}
\ No newline at end of file
+}
diff --git a/upgrade_db/CMakeLists.txt b/upgrade_db/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..baa29e2ab7bd05b3ec09654de7122d967a4a9137
--- /dev/null
+++ b/upgrade_db/CMakeLists.txt
@@ -0,0 +1,29 @@
+# The CERN Tape Archive (CTA) project
+# Copyright 2019 CERN
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+cmake_minimum_required (VERSION 2.6)
+
+find_package(xrootd REQUIRED)
+find_package(Protobuf3 REQUIRED)
+
+include_directories(${XRD_SSI_PB_DIR}/include ${XROOTD_INCLUDE_DIR} ${XROOTD_INCLUDE_DIR}/private)
+
+# Upgrade DB tool
+add_executable(cta-upgrade-db UpgradeDB.cpp)
+target_link_libraries(cta-upgrade-db ctacatalogue)
+set_property(TARGET cta-upgrade-db APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
+
+install(TARGETS cta-upgrade-db DESTINATION usr/bin)
diff --git a/upgrade_db/UpgradeDB.cpp b/upgrade_db/UpgradeDB.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d53f38227972200ad868f42e30462078cf57611e
--- /dev/null
+++ b/upgrade_db/UpgradeDB.cpp
@@ -0,0 +1,333 @@
+/*!
+ * @project        The CERN Tape Archive (CTA)
+ * @brief          Migration tool to upgrade Oracle DB
+ * @copyright      Copyright 2019 CERN
+ * @license        This program is free software: you can redistribute it and/or modify
+ *                 it under the terms of the GNU General Public License as published by
+ *                 the Free Software Foundation, either version 3 of the License, or
+ *                 (at your option) any later version.
+ *
+ *                 This program is distributed in the hope that it will be useful,
+ *                 but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *                 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *                 GNU General Public License for more details.
+ *
+ *                 You should have received a copy of the GNU General Public License
+ *                 along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <iostream>
+#include <sstream>
+#include <vector>
+
+#include <XrdSsiPbConfig.hpp>
+#include <common/exception/Exception.hpp>
+#include <common/checksum/ChecksumBlob.hpp>
+#include <migration/gRPC/OracleDbConn.hpp>
+
+namespace cta {
+namespace migration {
+
+//! DB Connection Pool
+std::unique_ptr<rdbms::ConnPool> OracleDbConn::m_connPool;
+
+//! Mapping of users to uid
+const std::map<std::string,uint32_t> UidMap = {
+  { "atlas003",  10763 },
+  { "cmsrobot", 109701 },
+  { "mdavis",    71761 },
+  { "ctaops",    98119 }
+};
+
+//! Mapping of groups to gid
+const std::map<std::string,uint32_t> GidMap = {
+  { "zp",     1307 },
+  { "def-cg", 2766 },
+  { "si",     1077 },
+  { "it",     2763 }
+};
+
+
+class UpgradeDB {
+public:
+  UpgradeDB(const std::string &configfile);
+
+  void renameLogical();
+  void undoRenameLogical();
+  void addUidGid();
+  void removeUidGid();
+  void addChecksumBlob();
+  void populateChecksumBlob();
+  void removeChecksumBlob();
+  void addAdler32();
+  void removeAdler32();
+
+private:
+  void populateAdler32();
+
+  OracleDbConn m_ctadb;         //!< Oracle database for CTA Catalogue
+  unsigned int m_max_depth;     //!< Maximum directory tree depth to import
+  unsigned int m_cur_depth;     //!< Current directory tree depth
+  unsigned int m_batch_size;    //!< Number of records to fetch from the DB at a time
+};
+
+
+
+UpgradeDB::UpgradeDB(const std::string &configfile) {
+  // Parse configuration file
+  XrdSsiPb::Config config(configfile);
+
+  auto dbconn        = config.getOptionValueStr("cta.db_login");
+  auto max_num_conns = config.getOptionValueInt("cta.max_num_connections");
+  auto batch_size    = config.getOptionValueInt("cta.batch_size");
+
+  // Connect to Oracle
+  if(!dbconn.first) {
+    throw std::runtime_error("cta.db_login must be specified in the config file in the form oracle:user/password@TNS");
+  }
+  m_ctadb.connect(dbconn.second, max_num_conns.first ? max_num_conns.second : 1);
+
+  // Set parameters and defaults
+  m_batch_size = batch_size.first ? batch_size.second : 1000;
+}
+
+
+void UpgradeDB::renameLogical() {
+  std::cerr << "Renaming column COMPRESSED_SIZE_IN_BYTES to LOGICAL_SIZE_IN_BYTES in TAPE_FILE table...";
+  m_ctadb.execute("ALTER TABLE TAPE_FILE RENAME COLUMN COMPRESSED_SIZE_IN_BYTES TO LOGICAL_SIZE_IN_BYTES");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::undoRenameLogical() {
+  std::cerr << "Renaming column LOGICAL_SIZE_IN_BYTES to COMPRESSED_SIZE_IN_BYTES in TAPE_FILE table...";
+  m_ctadb.execute("ALTER TABLE TAPE_FILE RENAME COLUMN LOGICAL_SIZE_IN_BYTES TO COMPRESSED_SIZE_IN_BYTES");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::addUidGid() {
+  std::cerr << "Adding DISK_FILE_UID and DISK_FILE_GID columns to ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE ADD ("
+    "DISK_FILE_UID NUMERIC(20, 0),"
+    "DISK_FILE_GID NUMERIC(20, 0))");
+  std::cerr << "done." << std::endl;
+
+  // Update UIDs
+  std::cerr << "Populating DISK_FILE_UID";
+  for(auto it = UidMap.begin(); it != UidMap.end(); ++it) {
+    std::cerr << "...";
+    std::string sql("UPDATE ARCHIVE_FILE SET DISK_FILE_UID=");
+    sql += std::to_string(it->second) + " WHERE DISK_FILE_USER='" + it->first + "'";
+    m_ctadb.execute(sql);
+  }
+  std::cerr << "done." << std::endl;
+
+  // Update GIDs
+  std::cerr << "Populating DISK_FILE_GID";
+  for(auto it = GidMap.begin(); it != GidMap.end(); ++it) {
+    std::cerr << "...";
+    std::string sql("UPDATE ARCHIVE_FILE SET DISK_FILE_GID=");
+    sql += std::to_string(it->second) + " WHERE DISK_FILE_GROUP='" + it->first + "'";
+    m_ctadb.execute(sql);
+  }
+  std::cerr << "done." << std::endl;
+
+  std::cerr << "Adding constraints to ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE MODIFY (DISK_FILE_UID NUMERIC(20, 0) CONSTRAINT ARCHIVE_FILE_DFUID_NN NOT NULL)");
+  std::cerr << "...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE MODIFY (DISK_FILE_GID NUMERIC(20, 0) CONSTRAINT ARCHIVE_FILE_DFGID_NN NOT NULL)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::removeUidGid() {
+  std::cerr << "Removing DISK_FILE_UID and DISK_FILE_GID columns from ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE DROP (DISK_FILE_UID, DISK_FILE_GID)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::addChecksumBlob() {
+  std::cerr << "Adding column CHECKSUM_BLOB in ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE ADD (CHECKSUM_BLOB RAW(200))");
+  std::cerr << "done." << std::endl;
+
+  std::cerr << "Initialising CHECKSUM_BLOB to invalid values...";
+  std::string CASTOR = checksum::ChecksumBlob::ByteArrayToHex("CASTOR");
+  m_ctadb.execute("UPDATE ARCHIVE_FILE SET CHECKSUM_BLOB = '" + CASTOR + "'");
+  std::cerr << "done." << std::endl;
+
+#if 0
+  // We should only be dealing with ADLER32 checksums
+  m_ctadb.query("SELECT CHECKSUM_TYPE, COUNT(*) AS CNT FROM ARCHIVE_FILE GROUP BY CHECKSUM_TYPE");
+  if(!m_ctadb.isQueryEmpty()) {
+    auto checksumType = m_ctadb.getResultColumnString("CHECKSUM_TYPE");
+    std::cerr << "Updating " << m_ctadb.getResultColumnString("CNT") << " checksums of type " << checksumType << "...";
+    if(checksumType != "ADLER32") throw std::runtime_error("Checksum type is not ADLER32, aborting");
+    populateChecksumBlob();
+    std::cerr << "done." << std::endl;
+  }
+#endif
+
+  std::cerr << "Adding constraint to ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE MODIFY (CHECKSUM_BLOB RAW(200) CONSTRAINT ARCHIVE_FILE_CB1_NN NOT NULL)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::removeChecksumBlob() {
+  std::cerr << "Removing column CHECKSUM_BLOB from ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE DROP (CHECKSUM_BLOB)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::populateChecksumBlob() {
+  m_ctadb.query("SELECT ARCHIVE_FILE_ID, CHECKSUM_VALUE FROM ARCHIVE_FILE");
+
+  // Get the list of checksums
+  while(!m_ctadb.isQueryEmpty()) {
+    using namespace checksum;
+
+    auto archiveFileId = m_ctadb.getResultColumnString("ARCHIVE_FILE_ID");
+    auto checksumValue = m_ctadb.getResultColumnString("CHECKSUM_VALUE");
+    ChecksumBlob csb(ADLER32, ChecksumBlob::HexToByteArray(checksumValue));
+
+    std::string blob_str;
+    std::string inv_blob_str(ChecksumBlob::ByteArrayToHex(csb.serialize()));
+    for(unsigned int i = 0; i < inv_blob_str.length(); i += 2) {
+      blob_str = inv_blob_str.substr(i,2) + blob_str;
+    }
+    std::string sql("UPDATE ARCHIVE_FILE SET CHECKSUM_BLOB = hextoraw('");
+    sql += blob_str + "') WHERE ARCHIVE_FILE_ID = " + archiveFileId;
+    // this should be a separate DB object
+    m_ctadb.execute(sql);
+
+    if(!m_ctadb.nextRow()) break;
+  }
+
+  // Validate checksums
+  m_ctadb.query("SELECT CHECKSUM_VALUE, CHECKSUM_BLOB FROM ARCHIVE_FILE");
+  while(!m_ctadb.isQueryEmpty()) {
+    using namespace checksum;
+
+    auto checksumValue = m_ctadb.getResultColumnString("CHECKSUM_VALUE");
+    auto checksumBlob  = m_ctadb.getResultColumnBlob("CHECKSUM_BLOB");
+    ChecksumBlob csb1(ADLER32, ChecksumBlob::HexToByteArray(checksumValue));
+    ChecksumBlob csb2;
+    csb2.deserialize(checksumBlob);
+    csb2.validate(csb1);
+
+    if(!m_ctadb.nextRow()) break;
+  }
+}
+
+void UpgradeDB::addAdler32() {
+  std::cerr << "Adding column CHECKSUM_ADLER32 in ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE ADD (CHECKSUM_ADLER32 NUMERIC(20,0))");
+  std::cerr << "done." << std::endl;
+
+  // We should only be dealing with ADLER32 checksums
+  m_ctadb.query("SELECT CHECKSUM_TYPE, COUNT(*) AS CNT FROM ARCHIVE_FILE GROUP BY CHECKSUM_TYPE");
+  if(!m_ctadb.isQueryEmpty()) {
+    auto checksumType = m_ctadb.getResultColumnString("CHECKSUM_TYPE");
+    std::cerr << "Updating " << m_ctadb.getResultColumnString("CNT") << " checksums of type " << checksumType << "...";
+    if(checksumType != "ADLER32") throw std::runtime_error("Checksum type is not ADLER32, aborting");
+    populateAdler32();
+    std::cerr << "done." << std::endl;
+  }
+
+  std::cerr << "Adding constraint to ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE MODIFY (CHECKSUM_ADLER32 NUMERIC(20,0) CONSTRAINT ARCHIVE_FILE_CB2_NN NOT NULL)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::removeAdler32() {
+  std::cerr << "Removing column CHECKSUM_ADLER32 from ARCHIVE_FILE table...";
+  m_ctadb.execute("ALTER TABLE ARCHIVE_FILE DROP (CHECKSUM_ADLER32)");
+  std::cerr << "done." << std::endl;
+}
+
+void UpgradeDB::populateAdler32() {
+  std::string sql("UPDATE ARCHIVE_FILE SET CHECKSUM_ADLER32=TO_NUMBER(SUBSTR(CHECKSUM_VALUE, 3, 8), 'XXXXXXXX')");
+  m_ctadb.execute(sql);
+
+  // Validate checksums
+  std::cerr << "validating checksums...";
+  m_ctadb.query("SELECT CHECKSUM_VALUE, CHECKSUM_ADLER32 FROM ARCHIVE_FILE");
+  while(!m_ctadb.isQueryEmpty()) {
+    using namespace checksum;
+
+    auto checksumValue = m_ctadb.getResultColumnString("CHECKSUM_VALUE");
+    auto adler32str = m_ctadb.getResultColumnString("CHECKSUM_ADLER32");
+    uint32_t checksumAdler32 = strtoul(adler32str.c_str(), 0, 10);
+    ChecksumBlob csb1(ADLER32, ChecksumBlob::HexToByteArray(checksumValue));
+    ChecksumBlob csb2(ADLER32, checksumAdler32);
+    csb2.validate(csb1);
+
+    if(!m_ctadb.nextRow()) break;
+  }
+}
+
+}} // namespace cta::migration
+
+
+void throwUsage(const std::string &program, const std::string &error_txt)
+{
+  std::stringstream help;
+
+  help << program << ": " << error_txt << std::endl
+       << "Usage: " << program << " [--config <config_file>] [--rename-logical] [--undo-rename-logical] [--add-uid-gid] [--remove-uid-gid] [--add-checksum-blob] [--remove-checksum-blob] [--add-adler32] [--remove-adler32]";
+
+  throw std::runtime_error(help.str());
+}
+
+
+int main(int argc, const char* argv[])
+{
+  std::string configfile = "/etc/cta/cta-catalogue-upgrade.conf";
+
+  bool renameLogical = false;
+  bool undoRenameLogical = false;
+  bool addUidGid = false;
+  bool removeUidGid = false;
+  bool addChecksumBlob = false;
+  bool populateChecksumBlob = false;
+  bool removeChecksumBlob = false;
+  bool addAdler32 = false;
+  bool removeAdler32 = false;
+
+  try {
+    // Parse options
+    if(argc < 2) throwUsage(argv[0], "");
+    for(auto i = 1; i < argc; ++i) {
+      std::string option(argv[i]);
+
+           if(option == "--config" && argc > ++i) configfile = argv[i];
+      else if(option == "--rename-logical") renameLogical = true;
+      else if(option == "--undo-rename-logical") undoRenameLogical = true;
+      else if(option == "--add-uid-gid") addUidGid = true;
+      else if(option == "--remove-uid-gid") removeUidGid = true;
+      else if(option == "--add-checksum-blob") addChecksumBlob = true;
+      //else if(option == "--populate-checksum-blob") populateChecksumBlob = true;
+      else if(option == "--remove-checksum-blob") removeChecksumBlob = true;
+      else if(option == "--add-adler32") addAdler32 = true;
+      else if(option == "--remove-adler32") removeAdler32 = true;
+      else throwUsage(argv[0], "invalid option " + option);
+    }
+
+    // Process options
+    cta::migration::UpgradeDB ctaDb(configfile);
+
+    if(renameLogical) ctaDb.renameLogical();
+    if(undoRenameLogical) ctaDb.undoRenameLogical();
+    if(addUidGid) ctaDb.addUidGid();
+    if(removeUidGid) ctaDb.removeUidGid();
+    if(addChecksumBlob) ctaDb.addChecksumBlob();
+    if(populateChecksumBlob) ctaDb.populateChecksumBlob();
+    if(removeChecksumBlob) ctaDb.removeChecksumBlob();
+    if(addAdler32) ctaDb.addAdler32();
+    if(removeAdler32) ctaDb.removeAdler32();
+  } catch(cta::exception::Exception &ex) {
+    std::cerr << ex.getMessage().str() << std::endl;
+  } catch(std::runtime_error &ex) {
+    std::cerr << ex.what() << std::endl;
+    return -1;
+  }
+  return 0;
+}
diff --git a/upgrade_db/cta-catalogue-upgrade.conf b/upgrade_db/cta-catalogue-upgrade.conf
new file mode 100644
index 0000000000000000000000000000000000000000..24e76fec4ffcb384a0c1347b2b4d13be8ef1576e
--- /dev/null
+++ b/upgrade_db/cta-catalogue-upgrade.conf
@@ -0,0 +1,4 @@
+#
+# Configuration for CTA database update tool
+#
+cta.db_login               oracle:<USER>/<PASSWORD>@<DATABASE>
diff --git a/xroot_plugins/XrdCtaArchiveFileLs.hpp b/xroot_plugins/XrdCtaArchiveFileLs.hpp
index addab744d9c7dab1907d38bfbee48fe3f696609d..b917f835e6589f9beabfb3eb26101fc10817a4fb 100644
--- a/xroot_plugins/XrdCtaArchiveFileLs.hpp
+++ b/xroot_plugins/XrdCtaArchiveFileLs.hpp
@@ -20,6 +20,7 @@
 
 #include <xroot_plugins/XrdCtaStream.hpp>
 #include <xroot_plugins/XrdSsiCtaRequestMessage.hpp>
+#include <common/checksum/ChecksumBlobSerDeser.hpp>
 
 
 namespace cta { namespace xrd {
@@ -70,16 +71,16 @@ ArchiveFileLsStream::ArchiveFileLsStream(const RequestMessage &requestMsg,
 
     // Get the search criteria from the optional options
 
-    m_searchCriteria.archiveFileId  = requestMsg.getOptional(OptionUInt64::ARCHIVE_FILE_ID, &has_any);
-    m_searchCriteria.tapeFileCopyNb = requestMsg.getOptional(OptionUInt64::COPY_NUMBER,     &has_any);
-    m_searchCriteria.diskFileId     = requestMsg.getOptional(OptionString::DISKID,          &has_any);
-    m_searchCriteria.vid            = requestMsg.getOptional(OptionString::VID,             &has_any);
-    m_searchCriteria.tapePool       = requestMsg.getOptional(OptionString::TAPE_POOL,       &has_any);
-    m_searchCriteria.diskFileUser   = requestMsg.getOptional(OptionString::OWNER,           &has_any);
-    m_searchCriteria.diskFileGroup  = requestMsg.getOptional(OptionString::GROUP,           &has_any);
-    m_searchCriteria.storageClass   = requestMsg.getOptional(OptionString::STORAGE_CLASS,   &has_any);
-    m_searchCriteria.diskFilePath   = requestMsg.getOptional(OptionString::PATH,            &has_any);
-    m_searchCriteria.diskInstance   = requestMsg.getOptional(OptionString::INSTANCE,        &has_any);
+    m_searchCriteria.archiveFileId    = requestMsg.getOptional(OptionUInt64::ARCHIVE_FILE_ID, &has_any);
+    m_searchCriteria.tapeFileCopyNb   = requestMsg.getOptional(OptionUInt64::COPY_NUMBER,     &has_any);
+    m_searchCriteria.diskFileId       = requestMsg.getOptional(OptionString::DISKID,          &has_any);
+    m_searchCriteria.vid              = requestMsg.getOptional(OptionString::VID,             &has_any);
+    m_searchCriteria.tapePool         = requestMsg.getOptional(OptionString::TAPE_POOL,       &has_any);
+    m_searchCriteria.diskFileOwnerUid = requestMsg.getOptional(OptionUInt64::OWNER_UID,       &has_any);
+    m_searchCriteria.diskFileGid      = requestMsg.getOptional(OptionUInt64::GID,             &has_any);
+    m_searchCriteria.storageClass     = requestMsg.getOptional(OptionString::STORAGE_CLASS,   &has_any);
+    m_searchCriteria.diskFilePath     = requestMsg.getOptional(OptionString::PATH,            &has_any);
+    m_searchCriteria.diskInstance     = requestMsg.getOptional(OptionString::INSTANCE,        &has_any);
 
     if(!has_any) {
       throw cta::exception::UserError("Must specify at least one search option, or --all");
@@ -119,11 +120,10 @@ int ArchiveFileLsStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
       af->set_disk_instance(archiveFile.diskInstance);
       af->set_disk_id(archiveFile.diskFileId);
       af->set_size(archiveFile.fileSize);
-      af->mutable_cs()->set_type(archiveFile.checksumType);
-      af->mutable_cs()->set_value(archiveFile.checksumValue);
+      checksum::ChecksumBlobToProtobuf(archiveFile.checksumBlob, *(af->mutable_csb()));
       af->set_storage_class(archiveFile.storageClass);
-      af->mutable_df()->set_owner(archiveFile.diskFileInfo.owner);
-      af->mutable_df()->set_group(archiveFile.diskFileInfo.group);
+      af->mutable_df()->mutable_owner_id()->set_uid(archiveFile.diskFileInfo.owner_uid);
+      af->mutable_df()->mutable_owner_id()->set_gid(archiveFile.diskFileInfo.gid);
       af->mutable_df()->set_path(archiveFile.diskFileInfo.path);
       af->set_creation_time(archiveFile.creationTime);
 
diff --git a/xroot_plugins/XrdCtaListPendingQueue.hpp b/xroot_plugins/XrdCtaListPendingQueue.hpp
index d06dc4721817c9cc3cc12cd59b618f86be91aae1..27bf8dace784a4348f9203018993a208ef06ee5d 100644
--- a/xroot_plugins/XrdCtaListPendingQueue.hpp
+++ b/xroot_plugins/XrdCtaListPendingQueue.hpp
@@ -153,13 +153,14 @@ bool ListPendingQueueStream<OStoreDB::ArchiveQueueItor_t>::pushRecord(XrdSsiPb::
   af->set_disk_instance(job.instanceName);
   af->set_disk_id(job.request.diskFileID);
   af->set_size(job.request.fileSize);
-  af->mutable_cs()->set_type(job.request.checksumType);
-  af->mutable_cs()->set_value(job.request.checksumValue);         
   af->set_storage_class(job.request.storageClass);
-  af->mutable_df()->set_owner(job.request.requester.name);
-  af->mutable_df()->set_group(job.request.requester.group);
+  af->mutable_df()->mutable_owner_id()->set_uid(job.request.diskFileInfo.owner_uid);
+  af->mutable_df()->mutable_owner_id()->set_gid(job.request.diskFileInfo.gid);
   af->mutable_df()->set_path(job.request.diskFileInfo.path);
 
+  // Checksum array
+  checksum::ChecksumBlobToProtobuf(job.request.checksumBlob, *(af->mutable_csb()));
+
   return streambuf->Push(record);
 }
 
@@ -213,10 +214,9 @@ bool ListPendingQueueStream<OStoreDB::RetrieveQueueItor_t>::pushRecord(XrdSsiPb:
     // Archive file
     auto af = record.mutable_lpr_item()->mutable_af();
     af->set_archive_id(job.request.archiveFileID);
-    //af->set_size(tape_it->second.second.compressedSize);
     af->set_size(job.fileSize);
-    af->mutable_df()->set_owner(job.request.requester.name);
-    af->mutable_df()->set_group(job.request.requester.group);
+    af->mutable_df()->mutable_owner_id()->set_uid(job.request.diskFileInfo.owner_uid);
+    af->mutable_df()->mutable_owner_id()->set_gid(job.request.diskFileInfo.gid);
     af->mutable_df()->set_path(job.request.diskFileInfo.path);
 
     // Tape file
@@ -225,6 +225,9 @@ bool ListPendingQueueStream<OStoreDB::RetrieveQueueItor_t>::pushRecord(XrdSsiPb:
     tf->set_f_seq(tape_it->second.second.fSeq);
     tf->set_block_id(tape_it->second.second.blockId);
 
+    // Checksum array
+    checksum::ChecksumBlobToProtobuf(tape_it->second.second.checksumBlob, *(af->mutable_csb()));
+
     is_buffer_full = streambuf->Push(record);
   }
 
diff --git a/xroot_plugins/XrdCtaShowQueues.hpp b/xroot_plugins/XrdCtaShowQueues.hpp
index 5237847b6f3e73f104c4ebd105411e3ef56634e2..f2a86590bff844ca74d135124b0440c931463192 100644
--- a/xroot_plugins/XrdCtaShowQueues.hpp
+++ b/xroot_plugins/XrdCtaShowQueues.hpp
@@ -77,6 +77,7 @@ int ShowQueuesStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
     auto  sq_item = record.mutable_sq_item();
 
     switch(sq.mountType) {
+      case common::dataStructures::MountType::ArchiveForRepack:
       case common::dataStructures::MountType::ArchiveForUser:
         sq_item->set_priority(sq.mountPolicy.archivePriority);
         sq_item->set_min_age(sq.mountPolicy.archiveMinRequestAge);
@@ -109,6 +110,7 @@ int ShowQueuesStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
     sq_item->set_full_tapes(sq.fullTapes);
     sq_item->set_empty_tapes(sq.emptyTapes);
     sq_item->set_disabled_tapes(sq.disabledTapes);
+    sq_item->set_rdonly_tapes(sq.readOnlyTapes);
     sq_item->set_writable_tapes(sq.writableTapes);
     if (sq.sleepForSpaceInfo) {
       sq_item->set_sleeping_for_space(true);
diff --git a/xroot_plugins/XrdCtaTapeLs.hpp b/xroot_plugins/XrdCtaTapeLs.hpp
index 1fd378df91a9811983777400e810593094c3155f..06e4157da7bdaea32162b9f828921d362842e2e1 100644
--- a/xroot_plugins/XrdCtaTapeLs.hpp
+++ b/xroot_plugins/XrdCtaTapeLs.hpp
@@ -72,6 +72,7 @@ TapeLsStream::TapeLsStream(const RequestMessage &requestMsg, cta::catalogue::Cat
     // Get the search criteria from the optional options
 
     searchCriteria.disabled        = requestMsg.getOptional(OptionBoolean::DISABLED,       &has_any);
+    searchCriteria.readOnly        = requestMsg.getOptional(OptionBoolean::READ_ONLY,      &has_any);
     searchCriteria.full            = requestMsg.getOptional(OptionBoolean::FULL,           &has_any);
     searchCriteria.capacityInBytes = requestMsg.getOptional(OptionUInt64::CAPACITY,        &has_any);
     searchCriteria.logicalLibrary  = requestMsg.getOptional(OptionString::LOGICAL_LIBRARY, &has_any);
@@ -108,6 +109,10 @@ int TapeLsStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
     tape_item->set_last_fseq(tape.lastFSeq);
     tape_item->set_full(tape.full);
     tape_item->set_disabled(tape.disabled);
+    tape_item->set_rdonly(tape.readOnly);
+    tape_item->set_from_castor(tape.isFromCastor);
+    tape_item->set_read_mount_count(tape.readMountCount);
+    tape_item->set_write_mount_count(tape.writeMountCount);
 
     if(tape.labelLog) {
       ::cta::common::TapeLog * labelLog = tape_item->mutable_label_log();
@@ -132,6 +137,7 @@ int TapeLsStream::fillBuffer(XrdSsiPb::OStreamBuffer<Data> *streambuf) {
     lastModificationLog->set_username(tape.lastModificationLog.username);
     lastModificationLog->set_host(tape.lastModificationLog.host);
     lastModificationLog->set_time(tape.lastModificationLog.time);
+    tape_item->set_comment(tape.comment);
     
     is_buffer_full = streambuf->Push(record);
   }
diff --git a/xroot_plugins/XrdSsiCtaRequestMessage.cpp b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
index 686bccb0b3ae379f23de45e13ec8004ce383e731..86d27f6bdd99bf5e8e570605014457bbb6ddf6d3 100644
--- a/xroot_plugins/XrdSsiCtaRequestMessage.cpp
+++ b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
@@ -327,9 +327,9 @@ void RequestMessage::processCREATE(const cta::eos::Notification &notification, c
    checkIsNotEmptyString(notification.cli().user().groupname(), "notification.cli.user.groupname");
 
    // Unpack message
-   cta::common::dataStructures::UserIdentity originator;
-   originator.name  = notification.cli().user().username();
-   originator.group = notification.cli().user().groupname();
+   cta::common::dataStructures::RequesterIdentity requester;
+   requester.name  = notification.cli().user().username();
+   requester.group = notification.cli().user().groupname();
 
    const auto storageClassItor = notification.file().xattr().find("CTA_StorageClass");
    if(notification.file().xattr().end() == storageClassItor) {
@@ -342,7 +342,7 @@ void RequestMessage::processCREATE(const cta::eos::Notification &notification, c
 
    cta::utils::Timer t;
 
-   const uint64_t archiveFileId = m_scheduler.checkAndGetNextArchiveFileId(m_cliIdentity.username, storageClass, originator, m_lc);
+   const uint64_t archiveFileId = m_scheduler.checkAndGetNextArchiveFileId(m_cliIdentity.username, storageClass, requester, m_lc);
 
    // Create a log entry
    cta::log::ScopedParamContainer params(m_lc);
@@ -369,47 +369,32 @@ void RequestMessage::processCLOSEW(const cta::eos::Notification &notification, c
    // Validate received protobuf
    checkIsNotEmptyString(notification.cli().user().username(),    "notification.cli.user.username");
    checkIsNotEmptyString(notification.cli().user().groupname(),   "notification.cli.user.groupname");
-   checkIsNotEmptyString(notification.file().owner().username(),  "notification.file.owner.username");
-   checkIsNotEmptyString(notification.file().owner().groupname(), "notification.file.owner.groupname");
    checkIsNotEmptyString(notification.file().lpath(),             "notification.file.lpath");
    checkIsNotEmptyString(notification.wf().instance().url(),      "notification.wf.instance.url");
    checkIsNotEmptyString(notification.transport().report_url(),   "notification.transport.report_url");
 
    // Unpack message
-   cta::common::dataStructures::UserIdentity originator;
-   originator.name    = notification.cli().user().username();
-   originator.group   = notification.cli().user().groupname();
-
-   cta::common::dataStructures::DiskFileInfo diskFileInfo;
-   diskFileInfo.owner = notification.file().owner().username();
-   diskFileInfo.group = notification.file().owner().groupname();
-   diskFileInfo.path  = notification.file().lpath();
-
-   std::string checksumtype(notification.file().cks().type());
-   if(checksumtype == "adler") checksumtype = "ADLER32";   // replace this with an enum!
-
-   std::string checksumvalue("0X" + notification.file().cks().value());
-   cta::utils::toUpper(checksumvalue);    // replace this with a number!
-
    const auto storageClassItor = notification.file().xattr().find("CTA_StorageClass");
    if(notification.file().xattr().end() == storageClassItor) {
      throw PbException(std::string(__FUNCTION__) + ": Failed to find the extended attribute named CTA_StorageClass");
    }
 
    cta::common::dataStructures::ArchiveRequest request;
-   request.checksumType         = checksumtype;
-   request.checksumValue        = checksumvalue;
-   request.diskFileInfo         = diskFileInfo;
-   request.diskFileID           = std::to_string(notification.file().fid());
-   request.fileSize             = notification.file().size();
-   request.requester            = originator;
-   request.srcURL               = notification.wf().instance().url();
-   request.storageClass         = storageClassItor->second;
-   request.archiveReportURL     = notification.transport().report_url();
-   request.archiveErrorReportURL = notification.transport().error_report_url();
-   request.creationLog.host     = m_cliIdentity.host;
-   request.creationLog.username = m_cliIdentity.username;
-   request.creationLog.time     = time(nullptr);
+   checksum::ProtobufToChecksumBlob(notification.file().csb(), request.checksumBlob);
+   request.diskFileInfo.owner_uid = notification.file().owner().uid();
+   request.diskFileInfo.gid       = notification.file().owner().gid();
+   request.diskFileInfo.path      = notification.file().lpath();
+   request.diskFileID             = std::to_string(notification.file().fid());
+   request.fileSize               = notification.file().size();
+   request.requester.name         = notification.cli().user().username();
+   request.requester.group        = notification.cli().user().groupname();
+   request.srcURL                 = notification.wf().instance().url();
+   request.storageClass           = storageClassItor->second;
+   request.archiveReportURL       = notification.transport().report_url();
+   request.archiveErrorReportURL  = notification.transport().error_report_url();
+   request.creationLog.host       = m_cliIdentity.host;
+   request.creationLog.username   = m_cliIdentity.username;
+   request.creationLog.time       = time(nullptr);
 
    // CTA Archive ID is an EOS extended attribute, i.e. it is stored as a string, which
    // must be converted to a valid uint64_t
@@ -446,30 +431,21 @@ void RequestMessage::processPREPARE(const cta::eos::Notification &notification,
    // Validate received protobuf
    checkIsNotEmptyString(notification.cli().user().username(),    "notification.cli.user.username");
    checkIsNotEmptyString(notification.cli().user().groupname(),   "notification.cli.user.groupname");
-   checkIsNotEmptyString(notification.file().owner().username(),  "notification.file.owner.username");
-   checkIsNotEmptyString(notification.file().owner().groupname(), "notification.file.owner.groupname");
    checkIsNotEmptyString(notification.file().lpath(),             "notification.file.lpath");
    checkIsNotEmptyString(notification.transport().dst_url(),      "notification.transport.dst_url");
 
    // Unpack message
-   cta::common::dataStructures::UserIdentity originator;
-   originator.name              = notification.cli().user().username();
-   originator.group             = notification.cli().user().groupname();
-
-   cta::common::dataStructures::DiskFileInfo diskFileInfo;
-   diskFileInfo.owner           = notification.file().owner().username();
-   diskFileInfo.group           = notification.file().owner().groupname();
-   diskFileInfo.path            = notification.file().lpath();
-
    cta::common::dataStructures::RetrieveRequest request;
-   request.requester            = originator;
-   request.dstURL               = notification.transport().dst_url();
-   request.errorReportURL       = notification.transport().error_report_url();
-   request.diskFileInfo         = diskFileInfo;
-   request.creationLog.host     = m_cliIdentity.host;
-   request.creationLog.username = m_cliIdentity.username;
-   request.creationLog.time     = time(nullptr);
-
+   request.requester.name         = notification.cli().user().username();
+   request.requester.group        = notification.cli().user().groupname();
+   request.dstURL                 = notification.transport().dst_url();
+   request.errorReportURL         = notification.transport().error_report_url();
+   request.diskFileInfo.owner_uid = notification.file().owner().uid();
+   request.diskFileInfo.gid       = notification.file().owner().gid();
+   request.diskFileInfo.path      = notification.file().lpath();
+   request.creationLog.host       = m_cliIdentity.host;
+   request.creationLog.username   = m_cliIdentity.username;
+   request.creationLog.time       = time(nullptr);
 
    // CTA Archive ID is an EOS extended attribute, i.e. it is stored as a string, which must be
    // converted to a valid uint64_t
@@ -515,13 +491,9 @@ void RequestMessage::processABORT_PREPARE(const cta::eos::Notification &notifica
    checkIsNotEmptyString(notification.cli().user().groupname(),   "notification.cli.user.groupname");
 
    // Unpack message
-   cta::common::dataStructures::UserIdentity originator;
-   originator.name   = notification.cli().user().username();
-   originator.group  = notification.cli().user().groupname();
-
    cta::common::dataStructures::DeleteArchiveRequest request;
-   request.requester = originator;
-
+   request.requester.name   = notification.cli().user().username();
+   request.requester.group  = notification.cli().user().groupname();
 
    // CTA Archive ID is an EOS extended attribute, i.e. it is stored as a string, which must be
    // converted to a valid uint64_t
@@ -560,12 +532,9 @@ void RequestMessage::processDELETE(const cta::eos::Notification &notification, c
    checkIsNotEmptyString(notification.cli().user().groupname(),   "notification.cli.user.groupname");
 
    // Unpack message
-   cta::common::dataStructures::UserIdentity originator;
-   originator.name          = notification.cli().user().username();
-   originator.group         = notification.cli().user().groupname();
-
    cta::common::dataStructures::DeleteArchiveRequest request;
-   request.requester        = originator;
+   request.requester.name    = notification.cli().user().username();
+   request.requester.group   = notification.cli().user().groupname();
 
    // CTA Archive ID is an EOS extended attribute, i.e. it is stored as a string, which
    // must be converted to a valid uint64_t
@@ -1114,10 +1083,41 @@ void RequestMessage::processRepack_Add(cta::xrd::Response &response)
    }
    
    auto buff = getOptional(OptionString::BUFFERURL);
-   if (buff)
+   if (buff){
+     //The buffer is provided by the user
      bufferURL = buff.value();
-   else
-     throw cta::exception::UserError("Must specify the buffer URL using --bufferurl option.");
+   }
+   else {
+     //Buffer is not provided by the user, try to get the one from the configuration file
+     if(m_repackBufferURL){
+       bufferURL = m_repackBufferURL.value();
+     } else {
+       //Buffer is neither provided by the user, neither provided by the frontend configuration file, exception
+       throw cta::exception::UserError("Must specify the buffer URL using --bufferurl option or using the frontend configuration file.");
+     }
+   }
+   
+   typedef common::dataStructures::MountPolicy MountPolicy;
+   MountPolicy mountPolicy = MountPolicy::s_defaultMountPolicyForRepack;
+   
+   auto mountPolicyProvidedByUserOpt = getOptional(OptionString::MOUNT_POLICY);
+   if(mountPolicyProvidedByUserOpt){
+     //The user specified a mount policy name for this repack request
+     std::string mountPolicyProvidedByUser = mountPolicyProvidedByUserOpt.value();
+     //Get the mountpolicy from the catalogue
+     typedef std::list<common::dataStructures::MountPolicy> MountPolicyList;
+     MountPolicyList mountPolicies = m_catalogue.getMountPolicies();
+     MountPolicyList::const_iterator repackMountPolicyItor = std::find_if(mountPolicies.begin(),mountPolicies.end(),[mountPolicyProvidedByUser](const common::dataStructures::MountPolicy & mp){
+       return mp.name == mountPolicyProvidedByUser;
+     });
+     if(repackMountPolicyItor != mountPolicies.end()){
+       //The mount policy exists
+       mountPolicy = *repackMountPolicyItor;
+     } else {
+       //The mount policy does not exist, throw a user error
+       throw cta::exception::UserError("The mount policy name provided does not match any existing mount policy.");
+     }
+   }
 
    // Expand, repack, or both ?
    cta::common::dataStructures::RepackInfo::Type type;
@@ -1134,7 +1134,7 @@ void RequestMessage::processRepack_Add(cta::xrd::Response &response)
 
    // Process each item in the list
    for(auto it = vid_list.begin(); it != vid_list.end(); ++it) {
-      m_scheduler.queueRepack(m_cliIdentity, *it, bufferURL,  type, m_lc);
+      m_scheduler.queueRepack(m_cliIdentity, *it, bufferURL,  type, mountPolicy , m_lc);
    }
 
    response.set_type(cta::xrd::Response::RSP_SUCCESS);
@@ -1334,9 +1334,10 @@ void RequestMessage::processTape_Add(cta::xrd::Response &response)
    auto &capacity       = getRequired(OptionUInt64::CAPACITY);
    auto &disabled       = getRequired(OptionBoolean::DISABLED);
    auto &full           = getRequired(OptionBoolean::FULL);
+   auto &readOnly       = getRequired(OptionBoolean::READ_ONLY);
    auto  comment        = getOptional(OptionString::COMMENT);
 
-   m_catalogue.createTape(m_cliIdentity, vid, mediaType, vendor, logicallibrary, tapepool, capacity, disabled, full, comment ? comment.value() : "-");
+   m_catalogue.createTape(m_cliIdentity, vid, mediaType, vendor, logicallibrary, tapepool, capacity, disabled, full, readOnly, comment ? comment.value() : "-");
 
    response.set_type(cta::xrd::Response::RSP_SUCCESS);
 }
@@ -1357,6 +1358,7 @@ void RequestMessage::processTape_Ch(cta::xrd::Response &response)
    auto  encryptionkey  = getOptional(OptionString::ENCRYPTION_KEY);
    auto  disabled       = getOptional(OptionBoolean::DISABLED);
    auto  full           = getOptional(OptionBoolean::FULL);
+   auto  readOnly       = getOptional(OptionBoolean::READ_ONLY);
 
    if(mediaType) {
       m_catalogue.modifyTapeMediaType(m_cliIdentity, vid, mediaType.value());
@@ -1385,6 +1387,9 @@ void RequestMessage::processTape_Ch(cta::xrd::Response &response)
    if(full) {
       m_catalogue.setTapeFull(m_cliIdentity, vid, full.value());
    }
+   if(readOnly) {
+      m_catalogue.setTapeReadOnly(m_cliIdentity, vid, readOnly.value());
+   }
 
    response.set_type(cta::xrd::Response::RSP_SUCCESS);
 }
diff --git a/xroot_plugins/XrdSsiCtaRequestMessage.hpp b/xroot_plugins/XrdSsiCtaRequestMessage.hpp
index da2f5b324b8d33997776062d08419cd2c11aea89..fb4774aeefa69221f73cd978c7d97c8e5fd6da96 100644
--- a/xroot_plugins/XrdSsiCtaRequestMessage.hpp
+++ b/xroot_plugins/XrdSsiCtaRequestMessage.hpp
@@ -36,6 +36,7 @@ public:
     m_scheddb  (service->getSchedDb()),
     m_catalogue(service->getCatalogue()),
     m_scheduler(service->getScheduler()),
+    m_repackBufferURL(service->getRepackBufferURL()),	    
     m_lc       (service->getLogContext()) {
       m_cliIdentity.username = client.name;
       m_cliIdentity.host     = client.host;
@@ -268,6 +269,7 @@ private:
   cta::OStoreDBWithAgent                               &m_scheddb;            //!< Reference to CTA ObjectStore
   cta::catalogue::Catalogue                            &m_catalogue;          //!< Reference to CTA Catalogue
   cta::Scheduler                                       &m_scheduler;          //!< Reference to CTA Scheduler
+  optional<std::string>				        m_repackBufferURL;    //!< Repack buffer URL
   cta::log::LogContext                                  m_lc;                 //!< CTA Log Context
   std::map<cta::admin::OptionBoolean::Key, bool>        m_option_bool;        //!< Boolean options
   std::map<cta::admin::OptionUInt64::Key, uint64_t>     m_option_uint64;      //!< UInt64 options
diff --git a/xroot_plugins/XrdSsiCtaServiceProvider.cpp b/xroot_plugins/XrdSsiCtaServiceProvider.cpp
index f490969036fcacb7fcfaefbabc7f24ed04a5988e..e8729af8e79509d7af0a654fb7d908341c125f48 100644
--- a/xroot_plugins/XrdSsiCtaServiceProvider.cpp
+++ b/xroot_plugins/XrdSsiCtaServiceProvider.cpp
@@ -147,6 +147,12 @@ void XrdSsiCtaServiceProvider::ExceptionThrowingInit(XrdSsiLogger *logP, XrdSsiC
    } catch (std::bad_cast &) {
       // If not, never mind
    }
+   
+   //Initialize the repack buffer URL
+   auto repackBufferURLConf = config.getOptionValueStr("cta.repack.repack_buffer_url");
+   if(repackBufferURLConf.first){
+     m_repackBufferURL = repackBufferURLConf.second;
+   }
   
    // Start the heartbeat thread for the agent object. The thread is guaranteed to have started before we call the unique_ptr deleter
    auto aht = new cta::objectstore::AgentHeartbeatThread(m_backendPopulator->getAgentReference(), *m_backend, *m_log);
diff --git a/xroot_plugins/XrdSsiCtaServiceProvider.hpp b/xroot_plugins/XrdSsiCtaServiceProvider.hpp
index 34ba96f57d0590694e725d2a78e62b4e8a35c4bc..4f200a2082bb2fc6aa51cb1677f471677f8c4c55 100644
--- a/xroot_plugins/XrdSsiCtaServiceProvider.hpp
+++ b/xroot_plugins/XrdSsiCtaServiceProvider.hpp
@@ -103,6 +103,8 @@ public:
     * Get the log context for this Service
     */
    cta::log::LogContext getLogContext() const { return cta::log::LogContext(*m_log); }
+   
+   cta::optional<std::string> getRepackBufferURL() const { return m_repackBufferURL; }
 
 private:
    /*!
@@ -138,6 +140,7 @@ private:
    std::unique_ptr<cta::catalogue::Catalogue>          m_catalogue;           //!< CTA catalogue of tapes and tape files
    std::unique_ptr<cta::Scheduler>                     m_scheduler;           //!< The scheduler
    std::unique_ptr<cta::log::Logger>                   m_log;                 //!< The logger
+   cta::optional<std::string>		               m_repackBufferURL;     //!< The repack buffer URL
    UniquePtrAgentHeartbeatThread                       m_agentHeartbeat;      //!< Agent heartbeat thread
 
    static constexpr const char* const LOG_SUFFIX = "XrdSsiCtaServiceProvider";    //!< Identifier for log messages
diff --git a/xroot_plugins/cta-frontend-xrootd.conf b/xroot_plugins/cta-frontend-xrootd.conf
index 25159c3b7727f6d02b2d177545716d8905a0234f..b0585903132e1daad5264ec13e1823d63deca515 100644
--- a/xroot_plugins/cta-frontend-xrootd.conf
+++ b/xroot_plugins/cta-frontend-xrootd.conf
@@ -17,6 +17,9 @@ cta.log.url file:/var/log/cta/cta-frontend.log
 # CTA XRootD SSI/Protobuf log level
 cta.log.ssi warning
 
+# CTA Repack buffer URL
+# cta.repack.repack_buffer_url root://ctaeos//eos/ctaeos/repack
+
 #
 # XRootD/SSI options
 #
diff --git a/xroot_plugins/cta-frontend.service b/xroot_plugins/cta-frontend.service
index e8cad2eb794c106c0b204f9c47b87a68512eb5d5..5cec9f5024f3b9aaaaa56cb107fe827adb7b8900 100644
--- a/xroot_plugins/cta-frontend.service
+++ b/xroot_plugins/cta-frontend.service
@@ -10,6 +10,7 @@ Restart=always
 RestartSec=10
 StartLimitInterval=0
 StartLimitBurst=0
+OOMScoreAdjust=-1000
 LimitCORE=infinity
 LimitNOFILE=65536