diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1ec800525b8b0410e459a46ae84ac7dac09e72d8..d405430df82c78e7f60aa81fdf9fbeeed8fbaea6 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,6 +13,9 @@ stages:
 
 variables:
   XROOTD_VERSION: 4
+  SCHED_TYPE: "objectstore"
+  SCHED_OPT: ""
+  SCHED_VERSION: ""
 
 before_script:
   - export CTA_BUILD_ID=${CI_PIPELINE_ID}git${CI_COMMIT_SHA:0:8}
@@ -31,6 +34,13 @@ before_script:
       cp -f continuousintegration/docker/ctafrontend/cc7/versionlock-xrootd5.list continuousintegration/docker/ctafrontend/cc7/etc/yum/pluginconf.d/versionlock.list;
     else echo "Using XRootD version 4";
     fi
+  - if [ "${SCHED_TYPE}" = "objectstore" ]; then
+      SCHED_TYPE="";
+    else
+      echo "Using specified scheduler database type $SCHED_TYPE";
+      SCHED_OPT="-DCTA_USE_$(echo ${SCHED_TYPE} | tr '[:lower:]' '[:upper:]'):Bool=true";
+      SCHED_VERSION=$(echo ${SCHED_TYPE} | cut -c 1-3);
+    fi
 
 cta_srpm:
   stage: build:srpm
@@ -41,7 +51,7 @@ cta_srpm:
     - source /opt/rh/devtoolset-8/enable
     - mkdir build_srpm
     - cd build_srpm
-    - CTA_VERSION=${XROOTD_VERSION} cmake3 -DPackageOnly:Bool=true -DVCS_VERSION=${CTA_BUILD_ID} ..
+    - CTA_VERSION=${XROOTD_VERSION}${SCHED_VERSION} cmake3 -DPackageOnly:Bool=true -DVCS_VERSION=${CTA_BUILD_ID} ${SCHED_OPT} ..
     - make cta_srpm
 
   artifacts:
@@ -70,7 +80,12 @@ cta_rpm:
     - yum-builddep --nogpgcheck -y build_srpm/RPM/SRPMS/*
     - mkdir build_rpm
     - cd build_rpm
-    - CTA_VERSION=${XROOTD_VERSION} cmake3 -DVCS_VERSION=${CTA_BUILD_ID} ..
+    - if [ "${SCHED_TYPE}" = "pgsched" ]; then
+        echo "Skipping unit tests during cta_rpm because failed tests are expected with the development version of pgsched";
+        CTA_VERSION=${XROOTD_VERSION}${SCHED_VERSION} cmake3 -DVCS_VERSION=${CTA_BUILD_ID} -DSKIP_UNIT_TESTS:STRING=1 ${SCHED_OPT} ..;
+      else
+        CTA_VERSION=${XROOTD_VERSION}${SCHED_VERSION} cmake3 -DVCS_VERSION=${CTA_BUILD_ID} ${SCHED_OPT} ..;
+      fi;
     - make cta_rpm
 
   artifacts:
@@ -101,7 +116,7 @@ cta_tagged_rpm:
     - yum-builddep --nogpgcheck -y build_srpm/RPM/SRPMS/*
     - mkdir build_tagged_rpm
     - cd build_tagged_rpm
-    - CTA_VERSION=${TAG_VERSION} cmake3 -DVCS_VERSION=${TAG_RELEASE} ..
+    - CTA_VERSION=${TAG_VERSION} cmake3 -DVCS_VERSION=${TAG_RELEASE} ${SCHED_OPT} ..
     - make cta_rpm
 
   artifacts:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ef4113313086809a1cb4fc1e18fdca8ad0716cfa..2526b929f87e7400b11eaa0dc46d976ba3100cfa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,6 +33,11 @@ set(CMAKE_CXX_FLAGS "-fPIC -pedantic -Wall -Wextra -Werror -Wno-unused-parameter
 
 set (BUILD_CMDLINE_ONLY FALSE)
 
+# By default the objectstore based scheduler is used.
+# A postgres scheduler may be enabled and the objectstore one disabled by
+# giving -DCTA_USE_PGSCHED:Bool=true on the cmake command line.
+set (CTA_USE_PGSCHED FALSE CACHE BOOL "Build with the postgres scheduler")
+
 execute_process(
   COMMAND ${CMAKE_C_COMPILER} -dumpversion
   OUTPUT_VARIABLE GCC_VERSION)
@@ -55,6 +60,15 @@ if (ENABLE_STDOUT_LOGGING)
   add_definitions (-DSTDOUT_LOGGING)
 endif ()
 
+if (CTA_USE_PGSCHED)
+  add_definitions (-DCTA_PGSCHED)
+  set(RPM_USINGOBJECTSTORE "0" CACHE STRING "" FORCE)
+  set(RPM_SCHEDOPT "-DCTA_USE_PGSCHED:Bool=true" CACHE STRING "" FORCE)
+else ()
+  set(RPM_USINGOBJECTSTORE "1" CACHE STRING "" FORCE)
+  set(RPM_SCHEDOPT "%{nil}" CACHE STRING "" FORCE)
+endif ()
+
 # Generate the compilation variables, if needed
 if (NOT DEFINED SKIP_UNIT_TESTS)
   message (STATUS "Setting SKIP_UNIT_TESTS to the value of 0")
@@ -132,7 +146,11 @@ ELSE(DEFINED PackageOnly)
   add_subdirectory(common)
   add_subdirectory(disk)
   add_subdirectory(mediachanger)
-  add_subdirectory(objectstore)
+
+  if(NOT CTA_USE_PGSCHED)
+    add_subdirectory(objectstore)
+  endif()
+
   add_subdirectory(python)
   add_subdirectory(rdbms)
   add_subdirectory(scheduler)
diff --git a/ReleaseNotes.md b/ReleaseNotes.md
index fa90a05f6f8042b6bbf3a1dfb371da7fbd928f58..b139b22f1f1be85a5b72708cfcc5f5561f62573e 100644
--- a/ReleaseNotes.md
+++ b/ReleaseNotes.md
@@ -41,6 +41,7 @@ The following manual pages have been updated:
 
 ### Continuous Integration
 ### Building and Packaging
+- cta/CTA#1229 - Introduce build of a new taped, using new type of SchedulerDatabase
 
 # v4.7.7-1
 
diff --git a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
index 46f3bf0d760c44d870bc9d7d38953064dca8f73d..23be958c544ea3522cb42d7e23943490676e1554 100755
--- a/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
+++ b/continuousintegration/docker/ctafrontend/cc7/opt/run/bin/init.sh
@@ -32,10 +32,18 @@ if [ ! -e /etc/buildtreeRunner ]; then
   yum-config-manager --enable ceph
 
   # install needed packages
-  yum -y install cta-objectstore-tools mt-st mtx lsscsi sg3_utils cta-catalogueutils ceph-common oracle-instantclient19.3-sqlplus oracle-instantclient-tnsnames.ora
+  # if we are using the postgres scheduler cta-objectstore-tools will not be available:
+  #   cta-objectstore-tools requires cta-lib, which will create /etc/cta/cta-catalogue.conf.example
+  #   the directory /etc/cta is relied on to exist below, so install cta-lib explicitly
+  yum -y install cta-objectstore-tools cta-lib mt-st mtx lsscsi sg3_utils cta-catalogueutils ceph-common oracle-instantclient19.3-sqlplus oracle-instantclient-tnsnames.ora
   yum clean packages
 fi
 
+pgsched=0
+if rpm -q --qf '%{VERSION}' cta-lib-common | grep -Eq '^[0-9]*pgs'; then
+  pgsched=1
+fi
+
 echo "Using this configuration for library:"
 /opt/run/bin/init_library.sh
 cat /tmp/library-rc.sh
@@ -45,7 +53,9 @@ echo "Configuring objectstore:"
 /opt/run/bin/init_objectstore.sh
 . /tmp/objectstore-rc.sh
 
-if [ "$KEEP_OBJECTSTORE" == "0" ]; then
+if [ $pgsched -ne 0 ]; then
+  echo "Detected that we are using the postgres scheduler. For now ignore the objectstore initialize step and proceed."
+elif [ "$KEEP_OBJECTSTORE" == "0" ]; then
   echo "Wiping objectstore"
   if [ "$OBJECTSTORETYPE" == "file" ]; then
     rm -fr $OBJECTSTOREURL
diff --git a/continuousintegration/orchestration/create_instance.sh b/continuousintegration/orchestration/create_instance.sh
index ed94d6bd47de6814326293bed769c329e201f4b7..1c643513168633179382fd4525bf53b53e88cade 100755
--- a/continuousintegration/orchestration/create_instance.sh
+++ b/continuousintegration/orchestration/create_instance.sh
@@ -197,9 +197,9 @@ else
 fi
 
 if [ $keepobjectstore == 1 ] ; then
-    echo "objecstore content will be kept"
+    echo "objectstore content (if used) will be kept"
 else
-    echo "objectstore content will be wiped"
+    echo "objectstore content (if used) will be wiped"
 fi
 
 
diff --git a/cta-release/CMakeLists.txt b/cta-release/CMakeLists.txt
index cc3e29394268c835837950297d3fd5c900dd7767..0604266ff730bbdaace0f411b64be2e3f90f71e0 100644
--- a/cta-release/CMakeLists.txt
+++ b/cta-release/CMakeLists.txt
@@ -56,12 +56,23 @@ safedl("https://yum.oracle.com/RPM-GPG-KEY-oracle-ol${OSV}"
 )
 
 # generate versionlock file
+
+if(NOT CTA_USE_PGSCHED)
 execute_process(
     COMMAND grep "%package" ${CMAKE_CURRENT_SOURCE_DIR}/../cta.spec.in
     COMMAND awk "{print $3}"
     COMMAND grep -v "cta-release"
     OUTPUT_VARIABLE RESULT OUTPUT_STRIP_TRAILING_WHITESPACE
     )
+else()
+execute_process(
+    COMMAND grep "%package" ${CMAKE_CURRENT_SOURCE_DIR}/../cta.spec.in
+    COMMAND awk "{print $3}"
+    COMMAND grep -v "cta-objectstore-tools"
+    COMMAND grep -v "cta-release"
+    OUTPUT_VARIABLE RESULT OUTPUT_STRIP_TRAILING_WHITESPACE
+    )
+endif()
 
 string(REPLACE "\n" ";" RESULTS ${RESULT})
 
diff --git a/cta.spec.in b/cta.spec.in
index 287ddfd4941526e74279c09b4e28563542b0dda0..d772ed9e1bacb94eff5f3e2ce5958ec5f311a6e1 100644
--- a/cta.spec.in
+++ b/cta.spec.in
@@ -7,6 +7,9 @@
 %define ctaVersion @CTA_VERSION@
 %define ctaRelease @CTA_RELEASE@
 
+%define usingObjectstore @RPM_USINGOBJECTSTORE@
+%define schedOpt @RPM_SCHEDOPT@
+
 # Neutral packaging (for srpm)
 #-----------------------------
 %if 0%{?neutralpackage:1} > 0
@@ -77,7 +80,7 @@ The CTA project is the CERN Tape Archive system.
 mkdir -p build
 cd build
 # The cmake step does the selection between client/server compilation or just client
-CTA_VERSION=%{ctaVersion} cmake3 .. -DCOMPILE_PACKAGING:STRING=0 -DVCS_VERSION=%{ctaRelease}
+CTA_VERSION=%{ctaVersion} cmake3 .. -DCOMPILE_PACKAGING:STRING=0 -DVCS_VERSION=%{ctaRelease} %{schedOpt}
 # Workaround for the inability of cmake to handle properly the dependencies to generated code.
 %{__make} -s %{_smp_mflags} -k || true
 %{__make} -s %{_smp_mflags}
@@ -261,6 +264,9 @@ Shared libraries required to access the CTA catalogue
 Summary: CERN Tape Archive libraries
 Group: Application/CTA
 Requires: librados2 = %{radosVersion}
+%if "%{usingObjectstore}" == "0"
+Requires: postgresql-libs
+%endif
 Requires: xrootd-client-libs >= %{xrootdVersion}
 # Explicity require protobuf3 to avoid clash with eos-protobuf3
 Requires: protobuf3
@@ -272,7 +278,9 @@ The shared libraries
 %defattr(0755,root,root,-)
 #TODO: merge util and common
 %{_libdir}/libctascheduler.so*
+%if "%{usingObjectstore}" != "0"
 %{_libdir}/libctaobjectstore.so*
+%endif
 %{_libdir}/libctamediachanger.so*
 %{_libdir}/libctamessages.so*
 %{_libdir}/libctamessagesutils.so*
@@ -315,7 +323,9 @@ Unit tests and system tests with virtual tape drives
 %{_libdir}/libctainmemorycatalogueunittests.so*
 %{_libdir}/libctainmemoryconnunittests.so*
 %{_libdir}/libctainmemorystmtunittests.so*
+%if "%{usingObjectstore}" != "0"
 %{_libdir}/libctaobjectstoreunittests.so*
+%endif
 %{_libdir}/libctardbmsunittests.so*
 %{_libdir}/libctardbmswrapperunittests.so*
 %{_libdir}/libctaschedulerunittests.so*
@@ -345,6 +355,7 @@ Command-line tool for testing immutable files
 %defattr(0755,root,root,-)
 %{_bindir}/cta-immutable-file-test
 
+%if "%{usingObjectstore}" != "0"
 %package -n cta-objectstore-tools
 Summary: CERN Tape Archive: object store tools
 Group: Application/CTA
@@ -360,6 +371,7 @@ Tools allowing initialization and inspection of the object store.
 %attr(0755,root,root) %{_bindir}/cta-objectstore-dereference-removed-queues
 %attr(0755,root,root) %{_bindir}/cta-objectstore-collect-orphaned-object
 %attr(0755,root,root) %{_bindir}/cta-objectstore-create-missing-repack-index
+%endif
 
 #cta-systemtests installs libraries so we need ldconfig.
 %post -n cta-systemtests -p /sbin/ldconfig
diff --git a/disk/CMakeLists.txt b/disk/CMakeLists.txt
index f8ba8d534a79fa3d4caceb6e21ce449ac1b5d379..c0d64f75e5af287360146d3d340b2acda6229b67 100644
--- a/disk/CMakeLists.txt
+++ b/disk/CMakeLists.txt
@@ -15,6 +15,7 @@
 
 cmake_minimum_required (VERSION 3.17)
 
+find_package (librados2 REQUIRED)
 find_package (xrootd REQUIRED)
 find_package (xrootdclient REQUIRED)
 
diff --git a/frontend-grpc/CMakeLists.txt b/frontend-grpc/CMakeLists.txt
index 74f783b3bafa420bce14f86350808437d2895807..33f62c0d4da224f5d1d00e424403245bb525e90e 100644
--- a/frontend-grpc/CMakeLists.txt
+++ b/frontend-grpc/CMakeLists.txt
@@ -54,11 +54,16 @@ include_directories(${CMAKE_BINARY_DIR}/frontend-grpc ${CMAKE_BINARY_DIR}/eos_ct
 
 add_executable(cta-frontend-grpc Main.cpp FrontendGRpcSvc.cpp ${ProtoSourcesCtaFrontend} ${ProtoGrpcSourcesCtaFrontend})
 
-target_link_libraries(cta-frontend-grpc ${PROTOBUF3_LIBRARIES} ${GRPC_GRPC++_LIBRARY}
-        ctascheduler ctacommon ctaobjectstore ctacatalogue)
+if(CTA_USE_PGSCHED)
+  target_link_libraries(cta-frontend-grpc ${PROTOBUF3_LIBRARIES} ${GRPC_GRPC++_LIBRARY}
+          ctascheduler ctacommon ctacatalogue)
+else()
+  target_link_libraries(cta-frontend-grpc ${PROTOBUF3_LIBRARIES} ${GRPC_GRPC++_LIBRARY}
+          ctascheduler ctacommon ctaobjectstore ctacatalogue)
+endif()
 set_property(TARGET cta-frontend-grpc APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
 
 install(TARGETS cta-frontend-grpc DESTINATION usr/bin)
 install (FILES cta-frontend-grpc.service DESTINATION etc/systemd/system)
 install (FILES cta-frontend-grpc.sysconfig DESTINATION /etc/sysconfig RENAME cta-frontend-grpc)
-install (FILES cta-frontend-grpc.1cta DESTINATION /usr/share/man/man1)
\ No newline at end of file
+install (FILES cta-frontend-grpc.1cta DESTINATION /usr/share/man/man1)
diff --git a/frontend-grpc/Main.cpp b/frontend-grpc/Main.cpp
index 4405580bf8bf208c4990a2318b2b854e3571bd65..2b2bbe47d6ce44371fd18cfa27c90eeb4a13c968 100644
--- a/frontend-grpc/Main.cpp
+++ b/frontend-grpc/Main.cpp
@@ -24,9 +24,14 @@
 #include "common/log/StdoutLogger.hpp"
 #include "common/log/Logger.hpp"
 #include "common/log/LogLevel.hpp"
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp"
+#else
 #include "scheduler/OStoreDB/OStoreDBInit.hpp"
+#endif
 
 #include <getopt.h>
+#include <fstream>
 
 using namespace cta;
 using namespace cta::common;
diff --git a/scheduler/ArchiveMount.cpp b/scheduler/ArchiveMount.cpp
index c1d43594851de1f93d52ceb1c35892b420a3b8d8..ff2a3fd84204bfd9ee1676ba3a76cf8c37d49d18 100644
--- a/scheduler/ArchiveMount.cpp
+++ b/scheduler/ArchiveMount.cpp
@@ -18,7 +18,6 @@
 #include <iostream>
 
 #include "common/exception/NoSuchObject.hpp"
-#include "objectstore/Backend.hpp"
 #include "scheduler/ArchiveMount.hpp"
 
 //------------------------------------------------------------------------------
diff --git a/scheduler/CMakeLists.txt b/scheduler/CMakeLists.txt
index edb019ce928c2d5dc9ba001a7485999b12d28994..d128e88ae732d8b2afc073fb7c796b2d61e84fc2 100644
--- a/scheduler/CMakeLists.txt
+++ b/scheduler/CMakeLists.txt
@@ -25,10 +25,6 @@ set (CTA_SCHEDULER_SRC_FILES
   LogicalLibrary.cpp
   MountType.cpp
   MountType.cpp
-  OStoreDB/MemQueues.cpp
-  OStoreDB/OStoreDB.cpp
-  OStoreDB/OStoreDBWithAgent.cpp
-  OStoreDB/QueueItor.cpp
   PositioningMethod.cpp
   RepackReportThread.cpp
   RepackRequest.cpp
@@ -41,6 +37,27 @@ set (CTA_SCHEDULER_SRC_FILES
   SchedulingInfos.cpp
   TapeMount.cpp)
 
+if(NOT CTA_USE_PGSCHED)
+  set (CTA_SCHEDULER_SRC_FILES ${CTA_SCHEDULER_SRC_FILES}
+    OStoreDB/MemQueues.cpp
+    OStoreDB/OStoreDB.cpp
+    OStoreDB/OStoreDBWithAgent.cpp
+    OStoreDB/QueueItor.cpp)
+else()
+  set (CTA_SCHEDULER_SRC_FILES ${CTA_SCHEDULER_SRC_FILES}
+    PostgresSchedDB/PostgresSchedDB.cpp
+    PostgresSchedDB/ArchiveMount.cpp
+    PostgresSchedDB/ArchiveJob.cpp
+    PostgresSchedDB/ArchiveJobQueueItor.cpp
+    PostgresSchedDB/RetrieveMount.cpp
+    PostgresSchedDB/RetrieveJob.cpp
+    PostgresSchedDB/RetrieveJobQueueItor.cpp
+    PostgresSchedDB/RepackRequestPromotionStatistics.cpp
+    PostgresSchedDB/RepackRequest.cpp
+    PostgresSchedDB/RepackReportBatch.cpp
+    PostgresSchedDB/TapeMountDecisionInfo.cpp)
+endif()
+
 find_package(Protobuf3 REQUIRED)
 include_directories (${PROTOBUF3_INCLUDE_DIRS})
 add_library (ctascheduler SHARED
@@ -50,13 +67,25 @@ set_property(TARGET ctascheduler PROPERTY   VERSION "${CTA_LIBVERSION}")
 
 install (TARGETS ctascheduler DESTINATION usr/${CMAKE_INSTALL_LIBDIR})
 
-target_link_libraries (ctascheduler ctacommon ctaobjectstore ${PROTOBUF3_LIBRARIES} ctadisk)
+if(NOT CTA_USE_PGSCHED)
+  target_link_libraries (ctascheduler ctacommon ctaobjectstore ${PROTOBUF3_LIBRARIES} ctadisk)
+else()
+  target_link_libraries (ctascheduler ctacommon ${PROTOBUF3_LIBRARIES} ctadisk)
+endif()
 
-add_library (ctaschedulerunittests SHARED
-  SchedulerDatabaseFactory.cpp
-  SchedulerDatabaseTest.cpp
-  SchedulerTest.cpp
-  OStoreDB/OStoreDBTest.cpp)
+if(NOT CTA_USE_PGSCHED)
+  add_library (ctaschedulerunittests SHARED
+    SchedulerDatabaseFactory.cpp
+    SchedulerDatabaseTest.cpp
+    SchedulerTest.cpp
+    OStoreDB/OStoreDBTest.cpp)
+else()
+  add_library (ctaschedulerunittests SHARED
+    SchedulerDatabaseFactory.cpp
+    SchedulerDatabaseTest.cpp
+    GenericSchedulerTest.cpp
+    PostgresSchedDB/PostgresSchedDBTest.cpp)
+endif()
 set_property(TARGET ctaschedulerunittests PROPERTY SOVERSION "${CTA_SOVERSION}")
 set_property(TARGET ctaschedulerunittests PROPERTY   VERSION "${CTA_LIBVERSION}")
 
diff --git a/scheduler/GenericSchedulerTest.cpp b/scheduler/GenericSchedulerTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..153941620041ba934ff9cf8a6684e20e88b1a76e
--- /dev/null
+++ b/scheduler/GenericSchedulerTest.cpp
@@ -0,0 +1,2959 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include <gtest/gtest.h>
+
+#include <bits/unique_ptr.h>
+#include <exception>
+#include <memory>
+#include <utility>
+
+#include "catalogue/InMemoryCatalogue.hpp"
+#include "catalogue/SchemaCreatingSqliteCatalogue.hpp"
+#include "common/dataStructures/JobQueueType.hpp"
+#include "common/exception/NoSuchObject.hpp"
+#include "common/log/DummyLogger.hpp"
+#include "common/Timer.hpp"
+#include "scheduler/ArchiveMount.hpp"
+#include "scheduler/LogicalLibrary.hpp"
+#include "scheduler/RetrieveMount.hpp"
+#include "scheduler/Scheduler.hpp"
+#include "scheduler/SchedulerDatabase.hpp"
+#include "scheduler/SchedulerDatabaseFactory.hpp"
+#include "scheduler/TapeMount.hpp"
+#include "tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.hpp"
+#include "tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.hpp"
+#include "tests/TempDirectory.hpp"
+#include "tests/TempFile.hpp"
+#include "tests/TestsCompileTimeSwitches.hpp"
+
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp"
+#endif
+
+#ifdef STDOUT_LOGGING
+#include "common/log/StdoutLogger.hpp"
+#endif
+
+namespace unitTests {
+
+const uint32_t CMS_USER = 9751;
+const uint32_t GROUP_2  = 9752;
+const uint32_t PUBLIC_OWNER_UID = 9753;
+const uint32_t PUBLIC_GID = 9754;
+
+namespace {
+
+/**
+ * This structure is used to parameterize scheduler tests.
+ */
+struct SchedulerTestParam {
+  cta::SchedulerDatabaseFactory &dbFactory;
+
+  SchedulerTestParam(
+    cta::SchedulerDatabaseFactory &dbFactory):
+    dbFactory(dbFactory) {
+ }
+}; // struct SchedulerTestParam
+
+}
+
+/**
+ * The scheduler test is a parameterized test.  It takes a pair of name server
+ * and scheduler database factories as a parameter.
+ */
+class SchedulerTest: public ::testing::TestWithParam<SchedulerTestParam> {
+public:
+
+  SchedulerTest(): m_dummyLog("dummy", "dummy") {
+  }
+
+  class FailedToGetCatalogue: public std::exception {
+  public:
+    const char *what() const noexcept {
+      return "Failed to get catalogue";
+    }
+  };
+
+  class FailedToGetScheduler: public std::exception {
+  public:
+    const char *what() const noexcept {
+      return "Failed to get scheduler";
+    }
+  };
+
+  class FailedToGetSchedulerDB: public std::exception {
+  public:
+    const char *what() const noexcept {
+      return "Failed to get scheduler db.";
+    }
+  };
+
+  virtual void SetUp() {
+    using namespace cta;
+
+    // We take a direct reference to the member, as the C++ compiler requires the function to be already defined if called implicitly
+    const auto &factory = GetParam().dbFactory;
+    const uint64_t nbConns = 1;
+    const uint64_t nbArchiveFileListingConns = 1;
+    //m_catalogue = std::make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns);
+    m_catalogue = std::make_unique<catalogue::InMemoryCatalogue>(m_dummyLog, nbConns, nbArchiveFileListingConns);
+    // Get the SchedulerDatabase from the factory
+    auto sdb = std::move(factory.create(m_catalogue));
+    // We don't check the specific type of the SchedulerDatabase as we intend to be generic
+    m_db = std::move(sdb);
+    m_scheduler = std::make_unique<Scheduler>(*m_catalogue, *m_db, s_minFilesToWarrantAMount, s_minBytesToWarrantAMount);
+  }
+
+  virtual void TearDown() {
+    m_scheduler.reset();
+    m_catalogue.reset();
+    m_db.reset();
+  }
+
+  cta::catalogue::Catalogue &getCatalogue() {
+    cta::catalogue::Catalogue *const ptr = m_catalogue.get();
+    if(nullptr == ptr) {
+      throw FailedToGetCatalogue();
+    }
+    return *ptr;
+  }
+
+  cta::Scheduler &getScheduler() {
+    cta::Scheduler *const ptr = m_scheduler.get();
+    if(nullptr == ptr) {
+      throw FailedToGetScheduler();
+    }
+    return *ptr;
+  }
+
+  cta::SchedulerDatabase &getSchedulerDB() {
+    cta::SchedulerDatabase *const ptr = m_db.get();
+    if(nullptr == ptr) {
+      throw FailedToGetSchedulerDB();
+    }
+    return *ptr;
+  }
+
+  void setupDefaultCatalogue() {
+    using namespace cta;
+    auto & catalogue=getCatalogue();
+
+    const std::string mountPolicyName = s_mountPolicyName;
+    const uint64_t archivePriority = s_archivePriority;
+    const uint64_t minArchiveRequestAge = s_minArchiveRequestAge;
+    const uint64_t retrievePriority = s_retrievePriority;
+    const uint64_t minRetrieveRequestAge = s_minRetrieveRequestAge;
+    const std::string mountPolicyComment = "create mount group";
+
+    catalogue::CreateMountPolicyAttributes mountPolicy;
+    mountPolicy.name = mountPolicyName;
+    mountPolicy.archivePriority = archivePriority;
+    mountPolicy.minArchiveRequestAge = minArchiveRequestAge;
+    mountPolicy.retrievePriority = retrievePriority;
+    mountPolicy.minRetrieveRequestAge = minRetrieveRequestAge;
+    mountPolicy.comment = mountPolicyComment;
+
+    ASSERT_TRUE(catalogue.getMountPolicies().empty());
+
+    catalogue.createMountPolicy(
+      s_adminOnAdminHost,
+      mountPolicy);
+
+    const std::list<common::dataStructures::MountPolicy> groups = catalogue.getMountPolicies();
+    ASSERT_EQ(1, groups.size());
+    const common::dataStructures::MountPolicy group = groups.front();
+    ASSERT_EQ(mountPolicyName, group.name);
+    ASSERT_EQ(archivePriority, group.archivePriority);
+    ASSERT_EQ(minArchiveRequestAge, group.archiveMinRequestAge);
+    ASSERT_EQ(retrievePriority, group.retrievePriority);
+    ASSERT_EQ(minRetrieveRequestAge, group.retrieveMinRequestAge);
+    ASSERT_EQ(mountPolicyComment, group.comment);
+
+    m_catalogue->createDiskInstance(s_adminOnAdminHost, s_diskInstance, "comment");
+
+    const std::string ruleComment = "create requester mount-rule";
+    catalogue.createRequesterMountRule(s_adminOnAdminHost, mountPolicyName, s_diskInstance, s_userName, ruleComment);
+
+    const std::list<common::dataStructures::RequesterMountRule> rules = catalogue.getRequesterMountRules();
+    ASSERT_EQ(1, rules.size());
+
+    const common::dataStructures::RequesterMountRule rule = rules.front();
+
+    ASSERT_EQ(s_userName, rule.name);
+    ASSERT_EQ(mountPolicyName, rule.mountPolicy);
+    ASSERT_EQ(ruleComment, rule.comment);
+    ASSERT_EQ(s_adminOnAdminHost.username, rule.creationLog.username);
+    ASSERT_EQ(s_adminOnAdminHost.host, rule.creationLog.host);
+    ASSERT_EQ(rule.creationLog, rule.lastModificationLog);
+
+    cta::common::dataStructures::VirtualOrganization vo;
+    vo.name = s_vo;
+    vo.comment = "comment";
+    vo.writeMaxDrives = 1;
+    vo.readMaxDrives = 1;
+    vo.maxFileSize = 0;
+    vo.diskInstanceName = s_diskInstance;
+    m_catalogue->createVirtualOrganization(s_adminOnAdminHost,vo);
+
+    common::dataStructures::StorageClass storageClass;
+    storageClass.name = s_storageClassName;
+    storageClass.nbCopies = 1;
+    storageClass.vo.name = vo.name;
+    storageClass.comment = "create storage class";
+    m_catalogue->createStorageClass(s_adminOnAdminHost, storageClass);
+
+    const uint16_t nbPartialTapes = 1;
+    const std::string tapePoolComment = "Tape-pool comment";
+    const bool tapePoolEncryption = false;
+    const std::optional<std::string> tapePoolSupply("value for the supply pool mechanism");
+    catalogue.createTapePool(s_adminOnAdminHost, s_tapePoolName, vo.name, nbPartialTapes, tapePoolEncryption, tapePoolSupply,
+      tapePoolComment);
+    const uint32_t copyNb = 1;
+    const std::string archiveRouteComment = "Archive-route comment";
+    catalogue.createArchiveRoute(s_adminOnAdminHost, s_storageClassName, copyNb, s_tapePoolName,
+      archiveRouteComment);
+
+    cta::catalogue::MediaType mediaType;
+    mediaType.name = s_mediaType;
+    mediaType.capacityInBytes = s_mediaTypeCapacityInBytes;
+    mediaType.cartridge = "cartridge";
+    mediaType.comment = "comment";
+    catalogue.createMediaType(s_adminOnAdminHost, mediaType);
+
+    const std::string driveName = "tape_drive";
+    const auto tapeDrive = getDefaultTapeDrive(driveName);
+    catalogue.createTapeDrive(tapeDrive);
+    const std::string driveName2 = "drive0";
+    const auto tapeDrive2 = getDefaultTapeDrive(driveName2);
+    catalogue.createTapeDrive(tapeDrive2);
+  }
+
+  cta::catalogue::CreateTapeAttributes getDefaultTape() {
+    using namespace cta;
+
+    catalogue::CreateTapeAttributes tape;
+    tape.vid = s_vid;
+    tape.mediaType = s_mediaType;
+    tape.vendor = s_vendor;
+    tape.logicalLibraryName = s_libraryName;
+    tape.tapePoolName = s_tapePoolName;
+    tape.full = false;
+    tape.state = common::dataStructures::Tape::ACTIVE;
+    tape.comment = "Comment";
+
+    return tape;
+  }
+
+  cta::common::dataStructures::TapeDrive getDefaultTapeDrive(const std::string &driveName) {
+    cta::common::dataStructures::TapeDrive tapeDrive;
+    tapeDrive.driveName = driveName;
+    tapeDrive.host = "admin_host";
+    tapeDrive.logicalLibrary = "VLSTK10";
+    tapeDrive.mountType = cta::common::dataStructures::MountType::NoMount;
+    tapeDrive.driveStatus = cta::common::dataStructures::DriveStatus::Up;
+    tapeDrive.desiredUp = false;
+    tapeDrive.desiredForceDown = false;
+    tapeDrive.diskSystemName = "dummyDiskSystemName";
+    tapeDrive.reservedBytes = 694498291384;
+    tapeDrive.reservationSessionId = 0;
+    return tapeDrive;
+  }
+
+private:
+
+  // Prevent copying
+  SchedulerTest(const SchedulerTest &) = delete;
+
+  // Prevent assignment
+  SchedulerTest & operator= (const SchedulerTest &) = delete;
+
+  cta::log::DummyLogger m_dummyLog;
+  std::unique_ptr<cta::SchedulerDatabase> m_db;
+  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
+  std::unique_ptr<cta::Scheduler> m_scheduler;
+
+protected:
+
+  // Default parameters for storage classes, etc...
+  const std::string s_userName = "user_name";
+  const std::string s_diskInstance = "disk_instance";
+  const std::string s_storageClassName = "TestStorageClass";
+  const cta::common::dataStructures::SecurityIdentity s_adminOnAdminHost = { "admin1", "host1" };
+  const std::string s_tapePoolName = "TapePool";
+  const std::string s_libraryName = "TestLogicalLibrary";
+  const std::string s_vid = "TestVid";
+  const std::string s_mediaType = "TestMediaType";
+  const std::string s_vendor = "TestVendor";
+  const std::string s_mountPolicyName = "mount_group";
+  const std::string s_repackMountPolicyName = "repack_mount_group";
+  const bool s_defaultRepackDisabledTapeFlag = false;
+  const bool s_defaultRepackNoRecall = false;
+  const uint64_t s_minFilesToWarrantAMount = 5;
+  const uint64_t s_minBytesToWarrantAMount = 2*1000*1000;
+  const uint64_t s_archivePriority = 1;
+  const uint64_t s_minArchiveRequestAge = 2;
+  const uint64_t s_retrievePriority = 3;
+  const uint64_t s_minRetrieveRequestAge = 4;
+  const uint64_t s_mediaTypeCapacityInBytes = 10;
+  const std::string s_vo = "vo";
+  //TempFile m_tempSqliteFile;
+
+}; // class SchedulerTest
+
+TEST_P(SchedulerTest, archive_to_new_file) {
+  using namespace cta;
+
+  setupDefaultCatalogue();
+  Scheduler &scheduler = getScheduler();
+
+  cta::common::dataStructures::EntryLog creationLog;
+  creationLog.host="host2";
+  creationLog.time=0;
+  creationLog.username="admin1";
+  cta::common::dataStructures::DiskFileInfo diskFileInfo;
+  diskFileInfo.gid=GROUP_2;
+  diskFileInfo.owner_uid=CMS_USER;
+  diskFileInfo.path="path/to/file";
+  cta::common::dataStructures::ArchiveRequest request;
+  request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
+  request.creationLog=creationLog;
+  request.diskFileInfo=diskFileInfo;
+  request.diskFileID="diskFileID";
+  request.fileSize=100*1000*1000;
+  cta::common::dataStructures::RequesterIdentity requester;
+  requester.name = s_userName;
+  requester.group = "userGroup";
+  request.requester = requester;
+  request.srcURL="srcURL";
+  request.storageClass=s_storageClassName;
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+  const uint64_t archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass,
+      request.requester, lc);
+  scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  {
+    auto rqsts = scheduler.getPendingArchiveJobs(lc);
+    ASSERT_EQ(1, rqsts.size());
+    auto poolItor = rqsts.cbegin();
+    ASSERT_FALSE(poolItor == rqsts.cend());
+    const std::string pool = poolItor->first;
+    ASSERT_TRUE(s_tapePoolName == pool);
+    auto poolRqsts = poolItor->second;
+    ASSERT_EQ(1, poolRqsts.size());
+    std::set<std::string> remoteFiles;
+    std::set<std::string> archiveFiles;
+    for(auto rqstItor = poolRqsts.cbegin();
+      rqstItor != poolRqsts.cend(); rqstItor++) {
+      remoteFiles.insert(rqstItor->request.diskFileInfo.path);
+    }
+    ASSERT_EQ(1, remoteFiles.size());
+    ASSERT_FALSE(remoteFiles.find(request.diskFileInfo.path) == remoteFiles.end());
+  }
+}
+
+
+// End-to-end scheduler life-cycle test: queue an archive request, mount a
+// tape and emulate a successful transfer, report the success back to the
+// disk system, then queue a retrieve for the same file and emulate a
+// successful retrieve mount and transfer.
+TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the file in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  bool found=false;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      if (req.archiveFileID == archiveFileId)
+        found = true;
+    }
+  }
+  ASSERT_TRUE(found);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = true;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  // Drive name used for both the archive and the retrieve mounts below.
+  const std::string driveName = "tape_drive";
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    //Test that no mount is available when a logical library is disabled
+    ASSERT_EQ(nullptr, mount.get());
+    catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,false);
+    //continue our test
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    // Fill in the tape-file details a real tape session would produce, then
+    // report the job batch as successfully transferred to tape.
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    // The queue should now be empty: the only job was transferred.
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_NE(0, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    // Queue a retrieve request for the file that was just archived and reported.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve("disk_instance", request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Check that the retrieve request is queued
+  {
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    // We expect 1 tape with queued jobs
+    ASSERT_EQ(1, rqsts.size());
+    // We expect the queue to contain 1 job
+    ASSERT_EQ(1, rqsts.cbegin()->second.size());
+    // We expect the job to be single copy
+    auto & job = rqsts.cbegin()->second.back();
+    ASSERT_EQ(1, job.tapeCopies.size());
+    // We expect the copy to be on the provided tape.
+    ASSERT_TRUE(s_vid == job.tapeCopies.cbegin()->first);
+    // Check the remote target
+    ASSERT_EQ("dstURL", job.request.dstURL);
+    // Check the archive file ID
+    ASSERT_EQ(archiveFileId, job.request.archiveFileID);
+
+    // Check that we can retrieve jobs by VID
+
+    // Get the vid from the above job and submit a separate request for the same vid
+    auto vid = rqsts.begin()->second.back().tapeCopies.begin()->first;
+    auto rqsts_vid = scheduler.getPendingRetrieveJobs(vid, lc);
+    // same tests as above
+    ASSERT_EQ(1, rqsts_vid.size());
+    auto &job_vid = rqsts_vid.back();
+    ASSERT_EQ(1, job_vid.tapeCopies.size());
+    ASSERT_TRUE(s_vid == job_vid.tapeCopies.cbegin()->first);
+    ASSERT_EQ("dstURL", job_vid.request.dstURL);
+    ASSERT_EQ(archiveFileId, job_vid.request.archiveFileID);
+  }
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+    auto jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(1, jobBatch.size());
+    retrieveJob.reset(jobBatch.front().release());
+    ASSERT_NE(nullptr, retrieveJob.get());
+    // Mark the retrieve successful and flush; afterwards the queue must be empty.
+    retrieveJob->asyncSetSuccessful();
+    std::queue<std::unique_ptr<cta::RetrieveJob> > jobQueue;
+    jobQueue.push(std::move(retrieveJob));
+    retrieveMount->flushAsyncSuccessReports(jobQueue, lc);
+    jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, jobBatch.size());
+  }
+}
+
+// Same end-to-end archive -> report -> retrieve life cycle as
+// archive_report_and_retrieve_new_file, except that a custom mount policy
+// ("custom_mount_policy") is created in the catalogue and attached to the
+// retrieve request before it is queued.
+TEST_P(SchedulerTest, archive_report_and_retrieve_new_file_with_specific_mount_policy) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the file in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  bool found=false;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      if (req.archiveFileID == archiveFileId)
+        found = true;
+    }
+  }
+  ASSERT_TRUE(found);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = true;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  // Drive name used for both the archive and the retrieve mounts below.
+  const std::string driveName = "tape_drive";
+
+  // Consistency fix: use the driveName constant instead of repeating the
+  // "tape_drive" literal (matches the sibling test above).
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    //Test that no mount is available when a logical library is disabled
+    ASSERT_EQ(nullptr, mount.get());
+    catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,false);
+    //continue our test
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    // Fill in the tape-file details a real tape session would produce, then
+    // report the job batch as successfully transferred to tape.
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_NE(0, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    //create custom mount policy for retrieve
+    catalogue::CreateMountPolicyAttributes mountPolicy;
+    mountPolicy.name = "custom_mount_policy";
+    mountPolicy.archivePriority = s_archivePriority;
+    mountPolicy.minArchiveRequestAge = s_minArchiveRequestAge;
+    mountPolicy.retrievePriority = s_retrievePriority;
+    mountPolicy.minRetrieveRequestAge = s_minRetrieveRequestAge;
+    mountPolicy.comment = "custom mount policy";
+
+    catalogue.createMountPolicy(s_adminOnAdminHost, mountPolicy);
+  }
+
+  {
+    //queue retrieve, attaching the custom mount policy created above
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    request.mountPolicy = "custom_mount_policy";
+    scheduler.queueRetrieve("disk_instance", request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Check that the retrieve request is queued
+  {
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    // We expect 1 tape with queued jobs
+    ASSERT_EQ(1, rqsts.size());
+    // We expect the queue to contain 1 job
+    ASSERT_EQ(1, rqsts.cbegin()->second.size());
+    // We expect the job to be single copy
+    auto & job = rqsts.cbegin()->second.back();
+    ASSERT_EQ(1, job.tapeCopies.size());
+    // We expect the copy to be on the provided tape.
+    ASSERT_TRUE(s_vid == job.tapeCopies.cbegin()->first);
+    // Check the remote target
+    ASSERT_EQ("dstURL", job.request.dstURL);
+    // Check the archive file ID
+    ASSERT_EQ(archiveFileId, job.request.archiveFileID);
+
+    // Check that we can retrieve jobs by VID
+
+    // Get the vid from the above job and submit a separate request for the same vid
+    auto vid = rqsts.begin()->second.back().tapeCopies.begin()->first;
+    auto rqsts_vid = scheduler.getPendingRetrieveJobs(vid, lc);
+    // same tests as above
+    ASSERT_EQ(1, rqsts_vid.size());
+    auto &job_vid = rqsts_vid.back();
+    ASSERT_EQ(1, job_vid.tapeCopies.size());
+    ASSERT_TRUE(s_vid == job_vid.tapeCopies.cbegin()->first);
+    ASSERT_EQ("dstURL", job_vid.request.dstURL);
+    ASSERT_EQ(archiveFileId, job_vid.request.archiveFileID);
+  }
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+    auto jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(1, jobBatch.size());
+    retrieveJob.reset(jobBatch.front().release());
+    ASSERT_NE(nullptr, retrieveJob.get());
+    // Mark the retrieve successful and flush; afterwards the queue must be empty.
+    retrieveJob->asyncSetSuccessful();
+    std::queue<std::unique_ptr<cta::RetrieveJob> > jobQueue;
+    jobQueue.push(std::move(retrieveJob));
+    retrieveMount->flushAsyncSuccessReports(jobQueue, lc);
+    jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, jobBatch.size());
+  }
+}
+
+TEST_P(SchedulerTest, archive_report_and_retrieve_new_dual_copy_file) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  // Setup catalogue for dual tape copies
+  const std::string tapePool1Name = "tape_pool_1";
+  const std::string tapePool2Name = "tape_pool_2";
+  const std::string dualCopyStorageClassName = "dual_copy";
+  {
+    using namespace cta;
+
+    const std::string mountPolicyName = s_mountPolicyName;
+    const uint64_t archivePriority = s_archivePriority;
+    const uint64_t minArchiveRequestAge = s_minArchiveRequestAge;
+    const uint64_t retrievePriority = s_retrievePriority;
+    const uint64_t minRetrieveRequestAge = s_minRetrieveRequestAge;
+    const std::string mountPolicyComment = "create mount group";
+
+    catalogue::CreateMountPolicyAttributes mountPolicy;
+    mountPolicy.name = mountPolicyName;
+    mountPolicy.archivePriority = archivePriority;
+    mountPolicy.minArchiveRequestAge = minArchiveRequestAge;
+    mountPolicy.retrievePriority = retrievePriority;
+    mountPolicy.minRetrieveRequestAge = minRetrieveRequestAge;
+    mountPolicy.comment = mountPolicyComment;
+
+    ASSERT_TRUE(catalogue.getMountPolicies().empty());
+
+    catalogue.createMountPolicy(
+      s_adminOnAdminHost,
+      mountPolicy);
+
+    const std::list<common::dataStructures::MountPolicy> groups = catalogue.getMountPolicies();
+    ASSERT_EQ(1, groups.size());
+    const common::dataStructures::MountPolicy group = groups.front();
+    ASSERT_EQ(mountPolicyName, group.name);
+    ASSERT_EQ(archivePriority, group.archivePriority);
+    ASSERT_EQ(minArchiveRequestAge, group.archiveMinRequestAge);
+    ASSERT_EQ(retrievePriority, group.retrievePriority);
+    ASSERT_EQ(minRetrieveRequestAge, group.retrieveMinRequestAge);
+    ASSERT_EQ(mountPolicyComment, group.comment);
+
+    cta::common::dataStructures::DiskInstance di;
+    di.name = s_diskInstance;
+    di.comment = "comment";
+    catalogue.createDiskInstance(s_adminOnAdminHost, di.name, di.comment);
+
+    const std::string ruleComment = "create requester mount-rule";
+    catalogue.createRequesterMountRule(s_adminOnAdminHost, mountPolicyName, di.name, s_userName, ruleComment);
+
+    const std::list<common::dataStructures::RequesterMountRule> rules = catalogue.getRequesterMountRules();
+    ASSERT_EQ(1, rules.size());
+
+    const common::dataStructures::RequesterMountRule rule = rules.front();
+
+    ASSERT_EQ(s_userName, rule.name);
+    ASSERT_EQ(mountPolicyName, rule.mountPolicy);
+    ASSERT_EQ(ruleComment, rule.comment);
+    ASSERT_EQ(s_adminOnAdminHost.username, rule.creationLog.username);
+    ASSERT_EQ(s_adminOnAdminHost.host, rule.creationLog.host);
+    ASSERT_EQ(rule.creationLog, rule.lastModificationLog);
+
+    cta::common::dataStructures::VirtualOrganization vo;
+    vo.name = s_vo;
+    vo.comment = "comment";
+    vo.writeMaxDrives = 1;
+    vo.readMaxDrives = 1;
+    vo.maxFileSize = 0;
+    vo.diskInstanceName = s_diskInstance;
+    catalogue.createVirtualOrganization(s_adminOnAdminHost,vo);
+
+    common::dataStructures::StorageClass storageClass;
+    storageClass.name = dualCopyStorageClassName;
+    storageClass.nbCopies = 2;
+    storageClass.vo.name = vo.name;
+    storageClass.comment = "create dual copy storage class";
+    catalogue.createStorageClass(s_adminOnAdminHost, storageClass);
+
+    const uint16_t nbPartialTapes = 1;
+    const std::string tapePool1Comment = "Tape-pool for copy number 1";
+    const std::string tapePool2Comment = "Tape-pool for copy number 2";
+    const bool tapePoolEncryption = false;
+    const std::optional<std::string> tapePoolSupply("value for the supply pool mechanism");
+    catalogue.createTapePool(s_adminOnAdminHost, tapePool1Name, vo.name, nbPartialTapes, tapePoolEncryption,
+      tapePoolSupply, tapePool1Comment);
+    catalogue.createTapePool(s_adminOnAdminHost, tapePool2Name, vo.name, nbPartialTapes, tapePoolEncryption,
+      tapePoolSupply, tapePool2Comment);
+
+    // One archive route per tape copy of the dual-copy storage class.
+    const std::string archiveRoute1Comment = "Archive-route for copy number 1";
+    const std::string archiveRoute2Comment = "Archive-route for copy number 2";
+    const uint32_t archiveRoute1CopyNb = 1;
+    const uint32_t archiveRoute2CopyNb = 2;
+    catalogue.createArchiveRoute(s_adminOnAdminHost, dualCopyStorageClassName, archiveRoute1CopyNb, tapePool1Name,
+      archiveRoute1Comment);
+    // Bug fix: the route for copy number 2 previously reused
+    // archiveRoute1Comment, leaving archiveRoute2Comment unused.
+    catalogue.createArchiveRoute(s_adminOnAdminHost, dualCopyStorageClassName, archiveRoute2CopyNb, tapePool2Name,
+      archiveRoute2Comment);
+
+    cta::catalogue::MediaType mediaType;
+    mediaType.name = s_mediaType;
+    mediaType.capacityInBytes = s_mediaTypeCapacityInBytes;
+    mediaType.cartridge = "cartridge";
+    mediaType.comment = "comment";
+    catalogue.createMediaType(s_adminOnAdminHost, mediaType);
+
+    const std::string driveName = "tape_drive";
+    const auto tapeDrive = getDefaultTapeDrive(driveName);
+    catalogue.createTapeDrive(tapeDrive);
+    const std::string driveName2 = "drive0";
+    const auto tapeDrive2 = getDefaultTapeDrive(driveName2);
+    catalogue.createTapeDrive(tapeDrive2);
+  }
+
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=dualCopyStorageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the file in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  bool found=false;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      if (req.archiveFileID == archiveFileId)
+        found = true;
+    }
+  }
+  ASSERT_TRUE(found);
+
+  // Create the environment for the migration of copy 1 to happen (library +
+  // tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = true;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  const std::string copy1TapeVid = "copy_1_tape";
+  {
+    using namespace cta;
+
+    catalogue::CreateTapeAttributes tape;
+    tape.vid = copy1TapeVid;
+    tape.mediaType = s_mediaType;
+    tape.vendor = s_vendor;
+    tape.logicalLibraryName = s_libraryName;
+    tape.tapePoolName = tapePool1Name;
+    tape.full = false;
+    tape.state = common::dataStructures::Tape::ACTIVE;
+    tape.comment = "Comment";
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(copy1TapeVid, driveName);
+
+  // Archive copy 1 to tape
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    //Test that no mount is available when a logical library is disabled
+    ASSERT_EQ(nullptr, mount.get());
+    catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,false);
+    //continue our test
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ(tapePool1Name, mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ(copy1TapeVid, mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Check that there are no jobs to report because only 1 copy of a dual copy
+    // file has been archived
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_EQ(0, jobsToReport.size());
+  }
+
+  // Create the environment for the migration of copy 2 to happen (library +
+  // tape)
+  catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,true);
+  const std::string copy2TapeVid = "copy_2_tape";
+  {
+    using namespace cta;
+
+    catalogue::CreateTapeAttributes tape;
+    tape.vid = copy2TapeVid;
+    tape.mediaType = s_mediaType;
+    tape.vendor = s_vendor;
+    tape.logicalLibraryName = s_libraryName;
+    tape.tapePoolName = tapePool2Name;
+    tape.full = false;
+    tape.state = common::dataStructures::Tape::ACTIVE;
+    tape.comment = "Comment";
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  catalogue.tapeLabelled(copy2TapeVid, driveName);
+
+  // Archive copy 2 to tape
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    //Test that no mount is available when a logical library is disabled
+    ASSERT_EQ(nullptr, mount.get());
+    catalogue.setLogicalLibraryDisabled(s_adminOnAdminHost,s_libraryName,false);
+    //continue our test
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ(tapePool2Name, mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ(copy2TapeVid, mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 2;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_NE(0, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  // Check that there are now two tape copies in the catalogue
+  {
+    common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    std::optional<std::string> activity;
+    const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
+      catalogue.prepareToRetrieveFile(s_diskInstance, archiveFileId, requester, activity, lc);
+    ASSERT_EQ(2, queueCriteria.archiveFile.tapeFiles.size());
+
+    std::map<uint8_t, common::dataStructures::TapeFile> copyNbToTape;
+    for (auto &tapeFile: queueCriteria.archiveFile.tapeFiles) {
+      if(copyNbToTape.end() != copyNbToTape.find(tapeFile.copyNb)) {
+        FAIL() << "Duplicate copyNb: vid=" << tapeFile.vid << " copyNb=" << (uint32_t)(tapeFile.copyNb);
+      }
+      copyNbToTape[tapeFile.copyNb] = tapeFile;
+    }
+
+    {
+      const auto tapeItor = copyNbToTape.find(1);
+      ASSERT_NE(copyNbToTape.end(), tapeItor);
+
+      const auto tapeFile = tapeItor->second;
+      ASSERT_EQ(copy1TapeVid, tapeFile.vid);
+      ASSERT_EQ(1, tapeFile.copyNb);
+    }
+
+    {
+      const auto tapeItor = copyNbToTape.find(2);
+      ASSERT_NE(copyNbToTape.end(), tapeItor);
+
+      const auto tapeFile = tapeItor->second;
+      ASSERT_EQ(copy2TapeVid, tapeFile.vid);
+      ASSERT_EQ(2, tapeFile.copyNb);
+    }
+  }
+
+  // Queue the retrieve request
+  {
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve("disk_instance", request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Check that the retrieve request is queued
+  {
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    // We expect 1 tape with queued jobs
+    ASSERT_EQ(1, rqsts.size());
+    // We expect the queue to contain 1 job
+    ASSERT_EQ(1, rqsts.cbegin()->second.size());
+    // We expect the job to be single copy
+    auto & job = rqsts.cbegin()->second.back();
+    ASSERT_EQ(1, job.tapeCopies.size());
+    // Check the remote target
+    ASSERT_EQ("dstURL", job.request.dstURL);
+    // Check the archive file ID
+    ASSERT_EQ(archiveFileId, job.request.archiveFileID);
+
+    // Check that we can retrieve jobs by VID
+    // Get the vid from the above job and submit a separate request for the same vid
+    auto vid = rqsts.begin()->second.back().tapeCopies.begin()->first;
+    auto rqsts_vid = scheduler.getPendingRetrieveJobs(vid, lc);
+    // same tests as above
+    ASSERT_EQ(1, rqsts_vid.size());
+    auto &job_vid = rqsts_vid.back();
+    ASSERT_EQ(1, job_vid.tapeCopies.size());
+    ASSERT_EQ("dstURL", job_vid.request.dstURL);
+    ASSERT_EQ(archiveFileId, job_vid.request.archiveFileID);
+  }
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+    auto jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(1, jobBatch.size());
+    retrieveJob.reset(jobBatch.front().release());
+    ASSERT_NE(nullptr, retrieveJob.get());
+    retrieveJob->asyncSetSuccessful();
+    std::queue<std::unique_ptr<cta::RetrieveJob> > jobQueue;
+    jobQueue.push(std::move(retrieveJob));
+    retrieveMount->flushAsyncSuccessReports(jobQueue, lc);
+    jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, jobBatch.size());
+  }
+}
+
+// archive_and_retrieve_failure:
+// Archive a single file successfully, then queue a retrieve for it and fail
+// every retrieve transfer attempt.  The retrieve job is expected to be retried
+// three times per mount over two mounts, then queued for reporting (to the
+// "null:" error report URL) and finally moved to the failed-jobs queue.
+TEST_P(SchedulerTest, archive_and_retrieve_failure) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the file in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  bool found=false;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      if (req.archiveFileID == archiveFileId)
+        found = true;
+    }
+  }
+  ASSERT_TRUE(found);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    // "TapePool" / "TestVid" are the pool and vid set up by getDefaultTape() /
+    // setupDefaultCatalogue() — presumably matching s_vid; TODO confirm against fixture.
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    // Fill in the tape file metadata a real tape session would produce, then
+    // report the job as successfully transferred.
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    // The archive queue should now be empty.
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_NE(0, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    // Queue the retrieve request; errorReportURL "null:" gives the failure a
+    // reporting destination once all transfer retries are exhausted.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.errorReportURL="null:";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve("disk_instance", request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Try mounting the tape twice
+  for(int mountPass = 0; mountPass < 2; ++mountPass)
+  {
+    // Check that the retrieve request is queued
+    {
+      auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+      // We expect 1 tape with queued jobs
+      ASSERT_EQ(1, rqsts.size());
+      // We expect the queue to contain 1 job
+      ASSERT_EQ(1, rqsts.cbegin()->second.size());
+      // We expect the job to be single copy
+      auto & job = rqsts.cbegin()->second.back();
+      ASSERT_EQ(1, job.tapeCopies.size());
+      // We expect the copy to be on the provided tape.
+      ASSERT_TRUE(s_vid == job.tapeCopies.cbegin()->first);
+      // Check the remote target
+      ASSERT_EQ("dstURL", job.request.dstURL);
+      // Check the archive file ID
+      ASSERT_EQ(archiveFileId, job.request.archiveFileID);
+
+      // Check that we can retrieve jobs by VID
+
+      // Get the vid from the above job and submit a separate request for the same vid
+      auto vid = rqsts.begin()->second.back().tapeCopies.begin()->first;
+      auto rqsts_vid = scheduler.getPendingRetrieveJobs(vid, lc);
+      // same tests as above
+      ASSERT_EQ(1, rqsts_vid.size());
+      auto &job_vid = rqsts_vid.back();
+      ASSERT_EQ(1, job_vid.tapeCopies.size());
+      ASSERT_TRUE(s_vid == job_vid.tapeCopies.cbegin()->first);
+      ASSERT_EQ("dstURL", job_vid.request.dstURL);
+      ASSERT_EQ(archiveFileId, job_vid.request.archiveFileID);
+    }
+
+    {
+      // Emulate a tape server by asking for a mount and then a file
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+      std::unique_ptr<cta::RetrieveMount> retrieveMount;
+      retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+      ASSERT_NE(nullptr, retrieveMount.get());
+      // The file should be retried three times
+      for(int i = 0; i < 3; ++i)
+      {
+        std::list<std::unique_ptr<cta::RetrieveJob>> retrieveJobList = retrieveMount->getNextJobBatch(1,1,lc);
+        if (!retrieveJobList.front().get()) {
+          // Debugger breakpoint anchor for the empty-batch case.
+          int __attribute__((__unused__)) debugI=i;
+        }
+        ASSERT_NE(0, retrieveJobList.size());
+        // Validate we got the right file
+        ASSERT_EQ(archiveFileId, retrieveJobList.front()->archiveFile.archiveFileID);
+        retrieveJobList.front()->transferFailed("Retrieve failed (mount " + std::to_string(mountPass) +
+                                                ", attempt " + std::to_string(i) + ")", lc);
+      }
+      // Then the request should be gone
+      ASSERT_EQ(0, retrieveMount->getNextJobBatch(1,1,lc).size());
+    } // end of retries
+  } // end of pass
+
+  {
+    // We expect the retrieve queue to be empty
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    ASSERT_EQ(0, rqsts.size());
+    // The failed queue should be empty
+    auto retrieveJobFailedList = scheduler.getNextRetrieveJobsFailedBatch(10,lc);
+    ASSERT_EQ(0, retrieveJobFailedList.size());
+    // Emulate the reporter process
+    auto jobsToReport = scheduler.getNextRetrieveJobsToReportBatch(10, lc);
+    ASSERT_EQ(1, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportRetrieveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextRetrieveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    // There should be one failed job
+    auto retrieveJobFailedList = scheduler.getNextRetrieveJobsFailedBatch(10,lc);
+    ASSERT_EQ(1, retrieveJobFailedList.size());
+  }
+}
+
+// archive_and_retrieve_report_failure:
+// Same scenario as archive_and_retrieve_failure up to the exhausted retrieve
+// retries, but here the failure REPORT itself is also failed twice.  After the
+// second report failure the job must leave the to-report queue and appear on
+// the failed-jobs queue.
+TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the file in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  bool found=false;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      if (req.archiveFileID == archiveFileId)
+        found = true;
+    }
+  }
+  ASSERT_TRUE(found);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    mount->setDriveStatus(cta::common::dataStructures::DriveStatus::Starting);
+    auto & osdb=getSchedulerDB();
+    auto mi=osdb.getMountInfo(lc);
+    ASSERT_EQ(1, mi->existingOrNextMounts.size());
+    ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
+    ASSERT_EQ("TestVid", mi->existingOrNextMounts.front().vid);
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    // Fill in the tape file metadata a real tape session would produce, then
+    // report the job as successfully transferred.
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    std::queue<std::unique_ptr<cta::SchedulerDatabase::ArchiveJob>> failedToReportArchiveJobs;
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    // The archive queue should now be empty.
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+    ASSERT_NE(0, jobsToReport.size());
+    disk::DiskReporterFactory factory;
+    log::TimingList timings;
+    utils::Timer t;
+    scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    // Queue the retrieve request; errorReportURL "null:" gives the failure a
+    // reporting destination once all transfer retries are exhausted.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.errorReportURL="null:";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve("disk_instance", request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Try mounting the tape twice
+  for(int mountPass = 0; mountPass < 2; ++mountPass)
+  {
+    // Check that the retrieve request is queued
+    {
+      auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+      // We expect 1 tape with queued jobs
+      ASSERT_EQ(1, rqsts.size());
+      // We expect the queue to contain 1 job
+      ASSERT_EQ(1, rqsts.cbegin()->second.size());
+      // We expect the job to be single copy
+      auto & job = rqsts.cbegin()->second.back();
+      ASSERT_EQ(1, job.tapeCopies.size());
+      // We expect the copy to be on the provided tape.
+      ASSERT_TRUE(s_vid == job.tapeCopies.cbegin()->first);
+      // Check the remote target
+      ASSERT_EQ("dstURL", job.request.dstURL);
+      // Check the archive file ID
+      ASSERT_EQ(archiveFileId, job.request.archiveFileID);
+
+      // Check that we can retrieve jobs by VID
+
+      // Get the vid from the above job and submit a separate request for the same vid
+      auto vid = rqsts.begin()->second.back().tapeCopies.begin()->first;
+      auto rqsts_vid = scheduler.getPendingRetrieveJobs(vid, lc);
+      // same tests as above
+      ASSERT_EQ(1, rqsts_vid.size());
+      auto &job_vid = rqsts_vid.back();
+      ASSERT_EQ(1, job_vid.tapeCopies.size());
+      ASSERT_TRUE(s_vid == job_vid.tapeCopies.cbegin()->first);
+      ASSERT_EQ("dstURL", job_vid.request.dstURL);
+      ASSERT_EQ(archiveFileId, job_vid.request.archiveFileID);
+    }
+
+    {
+      // Emulate a tape server by asking for a mount and then a file
+      std::unique_ptr<cta::TapeMount> mount;
+      mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+      std::unique_ptr<cta::RetrieveMount> retrieveMount;
+      retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+      ASSERT_NE(nullptr, retrieveMount.get());
+      // The file should be retried three times
+      for(int i = 0; i < 3; ++i)
+      {
+        std::list<std::unique_ptr<cta::RetrieveJob>> retrieveJobList = retrieveMount->getNextJobBatch(1,1,lc);
+        if (!retrieveJobList.front().get()) {
+          // Debugger breakpoint anchor for the empty-batch case.
+          int __attribute__((__unused__)) debugI=i;
+        }
+        ASSERT_NE(0, retrieveJobList.size());
+        // Validate we got the right file
+        ASSERT_EQ(archiveFileId, retrieveJobList.front()->archiveFile.archiveFileID);
+        retrieveJobList.front()->transferFailed("Retrieve failed (mount " + std::to_string(mountPass) +
+                                                ", attempt " + std::to_string(i) + ")", lc);
+      }
+      // Then the request should be gone
+      ASSERT_EQ(0, retrieveMount->getNextJobBatch(1,1,lc).size());
+    } // end of retries
+  } // end of pass
+
+  {
+    // We expect the retrieve queue to be empty
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    ASSERT_EQ(0, rqsts.size());
+    // The failed queue should be empty
+    auto retrieveJobFailedList = scheduler.getNextRetrieveJobsFailedBatch(10,lc);
+    ASSERT_EQ(0, retrieveJobFailedList.size());
+    // The failure should be on the jobs to report queue
+    auto retrieveJobToReportList = scheduler.getNextRetrieveJobsToReportBatch(10,lc);
+    ASSERT_EQ(1, retrieveJobToReportList.size());
+    // Fail the report
+    retrieveJobToReportList.front()->reportFailed("Report failed once", lc);
+    // Job should still be on the report queue
+    retrieveJobToReportList = scheduler.getNextRetrieveJobsToReportBatch(10,lc);
+    ASSERT_EQ(1, retrieveJobToReportList.size());
+    // Fail the report again
+    retrieveJobToReportList.front()->reportFailed("Report failed twice", lc);
+    // Job should be gone from the report queue
+    retrieveJobToReportList = scheduler.getNextRetrieveJobsToReportBatch(10,lc);
+    ASSERT_EQ(0, retrieveJobToReportList.size());
+  }
+
+  {
+    // There should be one failed job
+    auto retrieveJobFailedList = scheduler.getNextRetrieveJobsFailedBatch(10,lc);
+    ASSERT_EQ(1, retrieveJobFailedList.size());
+  }
+}
+
+// retry_archive_until_max_reached:
+// Queue one archive request, then fail the transfer on every attempt.  The
+// loop below expects exactly two attempts (the retry maximum exercised here —
+// presumably the mount policy configured by setupDefaultCatalogue(); TODO
+// confirm), after which the request must no longer be offered to the mount.
+TEST_P(SchedulerTest, retry_archive_until_max_reached) {
+  using namespace cta;
+
+  setupDefaultCatalogue();
+
+  auto &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  uint64_t archiveFileId;
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    // "null:" gives the final failure a reporting destination.
+    request.archiveErrorReportURL="null:";
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // Emulate a tape server by asking for a mount and then a file
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    // The file should be retried twice
+    for (int i=0; i<=1; i++) {
+      std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobList = archiveMount->getNextJobBatch(1,1,lc);
+      if (!archiveJobList.front().get()) {
+        // Debugger breakpoint anchor for the empty-batch case.
+        int __attribute__((__unused__)) debugI=i;
+      }
+      ASSERT_NE(0, archiveJobList.size());
+      // Validate we got the right file
+      ASSERT_EQ(archiveFileId, archiveJobList.front()->archiveFile.archiveFileID);
+      archiveJobList.front()->transferFailed("Archive failed", lc);
+    }
+    // Then the request should be gone
+    ASSERT_EQ(0, archiveMount->getNextJobBatch(1,1,lc).size());
+  }
+}
+
+// retrieve_non_existing_file:
+// Queueing a retrieve for an archive file ID that is not in the catalogue
+// must throw cta::exception::Exception rather than enqueue anything.
+TEST_P(SchedulerTest, retrieve_non_existing_file) {
+  using namespace cta;
+
+  setupDefaultCatalogue();
+
+  Scheduler &scheduler = getScheduler();
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+
+  {
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    // 12345 was never issued by checkAndGetNextArchiveFileId, so the
+    // catalogue lookup inside queueRetrieve is expected to fail.
+    request.archiveFileID = 12345;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    ASSERT_THROW(scheduler.queueRetrieve("disk_instance", request, lc), cta::exception::Exception);
+  }
+}
+
+// showqueues:
+// Queue a single archive request and check that the scheduler's queue/mount
+// summary view reports exactly one queue.
+TEST_P(SchedulerTest, showqueues) {
+  using namespace cta;
+
+  setupDefaultCatalogue();
+
+  Scheduler &scheduler = getScheduler();
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+
+  // Only used to queue the request; suppress the unused-variable warning.
+  uint64_t archiveFileId __attribute__((unused));
+  {
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // get the queues from scheduler
+  auto queuesSummary = scheduler.getQueuesAndMountSummaries(lc);
+  ASSERT_EQ(1, queuesSummary.size());
+}
+
+// repack:
+// Exercises repack request lifecycle at queueing level:
+//  - queueing is refused (UserError) for a tape that is not full,
+//  - queueing is refused for a vid that does not exist,
+//  - a queued repack can be listed, looked up by vid, and cancelled,
+//  - a re-queued repack can be promoted to the ToExpand state.
+TEST_P(SchedulerTest, repack) {
+  using namespace cta;
+  unitTests::TempDirectory tempDirectory;
+  setupDefaultCatalogue();
+
+  Scheduler &scheduler = getScheduler();
+  cta::catalogue::Catalogue& catalogue = getCatalogue();
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+
+  typedef cta::common::dataStructures::RepackInfo RepackInfo;
+  typedef cta::common::dataStructures::RepackInfo::Status Status;
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+
+  common::dataStructures::SecurityIdentity cliId;
+  cliId.host = "host";
+  cliId.username = s_userName;
+  std::string tape1 = "Tape";
+
+  {
+    auto tape = getDefaultTape();
+    tape.vid = tape1;
+    catalogue.createTape(cliId, tape);
+  }
+
+  //The queueing of a repack request should fail if the tape to repack is not full
+  cta::SchedulerDatabase::QueueRepackRequest qrr(tape1,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,
+    common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag,s_defaultRepackNoRecall);
+  ASSERT_THROW(scheduler.queueRepack(cliId, qrr, lc),cta::exception::UserError);
+  //The queueing of a repack request in a vid that does not exist should throw an exception
+  qrr.m_vid = "NOT_EXIST";
+  ASSERT_THROW(scheduler.queueRepack(cliId, qrr, lc),cta::exception::UserError);
+
+  // Mark the tape full so the repack request becomes acceptable.
+  catalogue.setTapeFull(cliId,tape1,true);
+
+  // Create and then cancel repack
+  qrr.m_vid = tape1;
+  scheduler.queueRepack(cliId, qrr, lc);
+  {
+    auto repacks = scheduler.getRepacks();
+    ASSERT_EQ(1, repacks.size());
+    auto repack = scheduler.getRepack(repacks.front().vid);
+    ASSERT_EQ(tape1, repack.vid);
+  }
+  scheduler.cancelRepack(cliId, tape1, lc);
+  ASSERT_EQ(0, scheduler.getRepacks().size());
+  // Recreate a repack and get it moved to ToExpand
+  std::string tape2 = "Tape2";
+  {
+    // Note: two tapes are created here — one with the default vid and one
+    // with vid tape2, the latter already marked full.
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+    tape.vid = tape2;
+    tape.full = true;
+    catalogue.createTape(cliId, tape);
+  }
+  qrr.m_vid = tape2;
+  scheduler.queueRepack(cliId, qrr, lc);
+  {
+    auto repacks = scheduler.getRepacks();
+    ASSERT_EQ(1, repacks.size());
+    auto repack = scheduler.getRepack(repacks.front().vid);
+    ASSERT_EQ(tape2, repack.vid);
+  }
+  scheduler.promoteRepackRequestsToToExpand(lc);
+  {
+    auto repacks = scheduler.getRepacks();
+    // The single pending repack must now be in the ToExpand state.
+    ASSERT_EQ(1, std::count_if(repacks.begin(), repacks.end(), [](RepackInfo &r){ return r.status == Status::ToExpand; }));
+    ASSERT_EQ(1, repacks.size());
+  }
+}
+
+// Verifies that getNextRepackRequestToExpand() hands back promoted repack
+// requests in queueing (FIFO) order, flips their status from Pending to
+// ToExpand, preserves the per-request repack type, and returns nullptr once
+// the queue is exhausted.
+TEST_P(SchedulerTest, getNextRepackRequestToExpand) {
+  using namespace cta;
+  unitTests::TempDirectory tempDirectory;
+
+  setupDefaultCatalogue();
+
+  Scheduler &scheduler = getScheduler();
+  catalogue::Catalogue& catalogue = getCatalogue();
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+
+  common::dataStructures::SecurityIdentity cliId;
+  cliId.host = "host";
+  cliId.username = s_userName;
+  std::string tape1 = "Tape";
+  {
+    auto tape = getDefaultTape();
+    tape.vid = tape1;
+    tape.full = true;
+    catalogue.createTape(cliId, tape);
+  }
+
+  //Queue the first repack request
+  cta::SchedulerDatabase::QueueRepackRequest qrr(tape1,"file://"+tempDirectory.path(),common::dataStructures::RepackInfo::Type::MoveOnly,
+    common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag,s_defaultRepackNoRecall);
+  scheduler.queueRepack(cliId, qrr, lc);
+
+  std::string tape2 = "Tape2";
+
+  {
+    auto tape = getDefaultTape();
+    tape.vid = tape2;
+    tape.full = true;
+    catalogue.createTape(cliId, tape);
+  }
+
+  //Queue the second repack request (with a different type so retrieval
+  //order can be distinguished below)
+  qrr.m_vid = tape2;
+  qrr.m_repackType = common::dataStructures::RepackInfo::Type::AddCopiesOnly;
+  scheduler.queueRepack(cliId,qrr,lc);
+
+  //Test the repack request queued has status Pending
+  ASSERT_EQ(scheduler.getRepack(tape1).status,common::dataStructures::RepackInfo::Status::Pending);
+  ASSERT_EQ(scheduler.getRepack(tape2).status,common::dataStructures::RepackInfo::Status::Pending);
+
+  //Change the repack request status to ToExpand
+  scheduler.promoteRepackRequestsToToExpand(lc);
+
+  //Test the getNextRepackRequestToExpand method that is supposed to retrieve the previously first inserted request
+  auto repackRequestToExpand1 = scheduler.getNextRepackRequestToExpand();
+  //Check vid
+  ASSERT_EQ(repackRequestToExpand1.get()->getRepackInfo().vid,tape1);
+  //Check status changed from Pending to ToExpand
+  ASSERT_EQ(repackRequestToExpand1.get()->getRepackInfo().status,common::dataStructures::RepackInfo::Status::ToExpand);
+  ASSERT_EQ(repackRequestToExpand1.get()->getRepackInfo().type,common::dataStructures::RepackInfo::Type::MoveOnly);
+
+  //Test the getNextRepackRequestToExpand method that is supposed to retrieve the previously second inserted request
+  auto repackRequestToExpand2 = scheduler.getNextRepackRequestToExpand();
+
+  //Check vid
+  ASSERT_EQ(repackRequestToExpand2.get()->getRepackInfo().vid,tape2);
+  //Check status changed from Pending to ToExpand
+  ASSERT_EQ(repackRequestToExpand2.get()->getRepackInfo().status,common::dataStructures::RepackInfo::Status::ToExpand);
+  ASSERT_EQ(repackRequestToExpand2.get()->getRepackInfo().type,common::dataStructures::RepackInfo::Type::AddCopiesOnly);
+
+  //Both requests consumed: the queue must now be empty
+  auto nullRepackRequest = scheduler.getNextRepackRequestToExpand();
+  ASSERT_EQ(nullRepackRequest,nullptr);
+}
+
+// NOTE(review): the tests below are placeholders — ASSERT_EQ(0,1) makes each
+// one fail unconditionally. Presumably they are not yet implemented for this
+// scheduler-database backend (the CI skips unit tests for the development
+// pgsched backend) — TODO confirm, then implement or replace with GTEST_SKIP().
+TEST_P(SchedulerTest, expandRepackRequest) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestRetrieveFailed) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestArchiveSuccess) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestArchiveFailed) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestDisabledTape) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestBrokenTape) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, noMountIsTriggeredWhenTapeIsDisabled) {
+  ASSERT_EQ(0,1);
+}
+
+// End-to-end scenario: archive 10 files (one per tape), report them to the
+// disk system, queue 10 retrieves tagged with activities "A" and "B", then
+// check that successive retrieve mounts interleave the activities in the
+// weight-predicted order. The DISABLED_ prefix means GoogleTest registers but
+// does not run this test — presumably the activity-based scheduling it covers
+// is not active for this backend; TODO confirm why it is disabled.
+TEST_P(SchedulerTest, DISABLED_archiveReportMultipleAndQueueRetrievesWithActivities) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  // We want to virtually archive files on 10 different tapes that will be asked for by different activities.
+  // Activity A will have a weight of .4, B 0.3, and this allows partially predicting the mount order for them:
+  // (A or B) (the other) A B A B A (A or B) (the other) A.
+  // We hence need to create files on 10 different tapes and recall them with the respective activities.
+  std::map<size_t, uint64_t> archiveFileIds;
+  // Generates a list of 10 numbers from 0 to 9
+  const uint8_t NUMBER_OF_FILES = 10;
+  for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+    // Queue several archive requests.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    diskFileInfo.path += std::to_string(i);
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.diskFileID += std::to_string(i);
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileIds[i] = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileIds[i], s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Check that we have the files in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  std::map<size_t, bool> found;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      for (auto i = 0; i < NUMBER_OF_FILES; i++)
+        if (req.archiveFileID == archiveFileIds.at(i))
+          found[i] = true;
+    }
+  }
+  for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+    ASSERT_NO_THROW(found.at(i));
+    ASSERT_TRUE(found.at(i));
+  }
+
+  // Create the environment for the migrations to happen (library + tapes)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+  const std::string driveName = "tape_drive";
+  // One tape per file: vids are s_vid0 .. s_vid9
+  for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+    auto tape = getDefaultTape();
+    std::string vid = s_vid + std::to_string(i);
+    tape.vid = vid;
+    catalogue.createTape(s_adminOnAdminHost, tape);
+    catalogue.tapeLabelled(vid, driveName);
+  }
+
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+      (void) i; // ignore unused variable
+      mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+      auto & osdb=getSchedulerDB();
+      auto mi=osdb.getMountInfo(lc);
+      ASSERT_EQ(1, mi->existingOrNextMounts.size());
+      ASSERT_EQ("TapePool", mi->existingOrNextMounts.front().tapePool);
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+      std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+      ASSERT_NE(nullptr, archiveJobBatch.front().get());
+      ASSERT_EQ(1, archiveJobBatch.size());
+      std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+      archiveJob->tapeFile.blockId = 1;
+      archiveJob->tapeFile.fSeq = 1;
+      archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+      archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+      archiveJob->tapeFile.copyNb = 1;
+      archiveJob->validate();
+      std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+      std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+      std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+      sDBarchiveJobBatch.emplace(std::move(archiveJob));
+      archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, failedToReportArchiveJobs, lc);
+      // Mark the tape full so we get one file per tape.
+      archiveMount->setTapeFull();
+      archiveMount->complete();
+    }
+  }
+
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    // The jobs get reported by tape, so we need to report 10*1 file (one per tape).
+    for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+      auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+      ASSERT_EQ(1, jobsToReport.size());
+      disk::DiskReporterFactory factory;
+      log::TimingList timings;
+      utils::Timer t;
+      scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    }
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+
+  {
+    // Queue one retrieve per archived file; the first 6 use activity "A",
+    // the remaining 4 use activity "B".
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+      cta::common::dataStructures::RetrieveRequest request;
+      request.archiveFileID = archiveFileIds.at(i);
+      request.creationLog = creationLog;
+      request.diskFileInfo = diskFileInfo;
+      request.dstURL = "dstURL";
+      request.requester.name = s_userName;
+      request.requester.group = "userGroup";
+      if (i < 6)
+        request.activity = "A";
+      else
+        request.activity = "B";
+      scheduler.queueRetrieve(s_diskInstance, request, lc);
+    }
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Check that the retrieve requests are queued
+  {
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    // We expect 10 tape with queued jobs
+    ASSERT_EQ(10, rqsts.size());
+    // We expect each queue to contain 1 job
+    for (auto & q: rqsts) {
+      ASSERT_EQ(1, q.second.size());
+      // We expect the job to be single copy
+      auto & job = q.second.back();
+      ASSERT_EQ(1, job.tapeCopies.size());
+      // Check the remote target
+      ASSERT_EQ("dstURL", job.request.dstURL);
+    }
+    // We expect each tape to be seen
+    for (auto i = 0; i < NUMBER_OF_FILES; i++) {
+      ASSERT_NO_THROW(rqsts.at(s_vid + std::to_string(i)));
+    }
+  }
+
+
+  enum ExpectedActivity {
+    Unknown,
+    A,
+    B
+  };
+
+  // Positions marked Unknown are the ones the weights do not fully determine
+  // (see the ordering comment at the top of the test).
+  std::vector<ExpectedActivity> expectedActivities = { Unknown, Unknown, A, B, A, B, A, Unknown, Unknown, A};
+  size_t i=0;
+  for (auto ea: expectedActivities) {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    std::string drive="drive";
+    drive += std::to_string(++i);
+    mount.reset(scheduler.getNextMount(s_libraryName, drive, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    ASSERT_TRUE((bool)mount.get()->getActivity());
+    if (ea != Unknown) {
+      std::string expectedActivity(ea==A?"A":"B"), activity(mount.get()->getActivity().value());
+      ASSERT_EQ(expectedActivity, activity);
+    }
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+    auto jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(1, jobBatch.size());
+    retrieveJob.reset(jobBatch.front().release());
+    ASSERT_NE(nullptr, retrieveJob.get());
+    retrieveJob->asyncSetSuccessful();
+    std::queue<std::unique_ptr<cta::RetrieveJob> > jobQueue;
+    jobQueue.push(std::move(retrieveJob));
+    retrieveMount->flushAsyncSuccessReports(jobQueue, lc);
+    jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, jobBatch.size());
+  }
+}
+
+// NOTE(review): placeholders — each fails unconditionally via ASSERT_EQ(0,1).
+// Presumably not yet implemented for this scheduler-database backend — TODO
+// confirm, then implement or replace with GTEST_SKIP().
+TEST_P(SchedulerTest, expandRepackRequestAddCopiesOnly) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestShouldFailIfArchiveRouteMissing) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestMoveAndAddCopies){
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, cancelRepackRequest) {
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, getNextMountEmptyArchiveForRepackIfNbFilesQueuedIsLessThan2TimesMinFilesWarrantAMount) {
+  ASSERT_EQ(0,1);
+}
+
+// Verifies that getNextMount() returns nullptr while the target tape is in
+// the BROKEN or DISABLED state, and returns a mount again once the tape is
+// back to ACTIVE — first for a queued archive request, then (after running
+// the archive mount to completion) for a queued retrieve request.
+TEST_P(SchedulerTest, getNextMountBrokenOrDisabledTapeShouldNotReturnAMount) {
+  //Queue 2 archive requests in two different logical libraries
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+
+  auto tape = getDefaultTape();
+  {
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+  }
+
+  uint64_t archiveFileId;
+
+  // Queue an archive request.
+  cta::common::dataStructures::EntryLog creationLog;
+  creationLog.host="host2";
+  creationLog.time=0;
+  creationLog.username="admin1";
+  cta::common::dataStructures::DiskFileInfo diskFileInfo;
+  diskFileInfo.gid=GROUP_2;
+  diskFileInfo.owner_uid=CMS_USER;
+  diskFileInfo.path="path/to/file";
+  cta::common::dataStructures::ArchiveRequest request;
+  request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+  request.creationLog=creationLog;
+  request.diskFileInfo=diskFileInfo;
+  request.diskFileID="diskFileID";
+  request.fileSize=100*1000*1000;
+  cta::common::dataStructures::RequesterIdentity requester;
+  requester.name = s_userName;
+  requester.group = "userGroup";
+  request.requester = requester;
+  request.srcURL="srcURL";
+  request.storageClass=s_storageClassName;
+  archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+  scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  // Archive side: no mount while BROKEN or DISABLED, a mount when ACTIVE
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::BROKEN,std::string("Test"));
+  ASSERT_EQ(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::ACTIVE,std::nullopt);
+  ASSERT_NE(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::DISABLED,std::string("Test"));
+  ASSERT_EQ(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::ACTIVE,std::nullopt);
+  ASSERT_NE(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+
+  {
+    // Run the archive mount to completion so a retrieve can be queued for the
+    // file afterwards.
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  //Queue a retrieve request for the archived file
+  {
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve(s_diskInstance, request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+  // Retrieve side: same BROKEN / DISABLED / ACTIVE checks
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::BROKEN,std::string("Test"));
+  ASSERT_EQ(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::ACTIVE,std::nullopt);
+  ASSERT_NE(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::DISABLED,std::string("Test"));
+  ASSERT_EQ(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+  catalogue.modifyTapeState(s_adminOnAdminHost,tape.vid,common::dataStructures::Tape::ACTIVE,std::nullopt);
+  ASSERT_NE(nullptr,scheduler.getNextMount(s_libraryName, driveName, lc));
+}
+
+// NOTE(review): placeholder — fails unconditionally via ASSERT_EQ(0,1).
+// Presumably not yet implemented for this backend — TODO confirm.
+TEST_P(SchedulerTest, repackRetrieveRequestsFailToFetchDiskSystem){
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, getSchedulingInformations) {
+  //Queue 2 archive requests in two different logical libraries
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+
+  setupDefaultCatalogue();
+  catalogue.deleteTapeDrive("drive0");  // It's not needed
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  {
+    auto tape = getDefaultTape();
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(s_vid, driveName);
+
+  {
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+  }
+
+  uint64_t archiveFileId;
+
+  // Queue an archive request.
+  cta::common::dataStructures::EntryLog creationLog;
+  creationLog.host="host2";
+  creationLog.time=0;
+  creationLog.username="admin1";
+  cta::common::dataStructures::DiskFileInfo diskFileInfo;
+  diskFileInfo.gid=GROUP_2;
+  diskFileInfo.owner_uid=CMS_USER;
+  diskFileInfo.path="path/to/file";
+  cta::common::dataStructures::ArchiveRequest request;
+  request.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+  request.creationLog=creationLog;
+  request.diskFileInfo=diskFileInfo;
+  request.diskFileID="diskFileID";
+  request.fileSize=100*1000*1000;
+  cta::common::dataStructures::RequesterIdentity requester;
+  requester.name = s_userName;
+  requester.group = "userGroup";
+  request.requester = requester;
+  request.srcURL="srcURL";
+  request.storageClass=s_storageClassName;
+  archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+  scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  {
+    auto schedulerInformations = scheduler.getSchedulingInformations(lc);
+    ASSERT_FALSE(schedulerInformations.empty());
+
+    auto & schedulerInfo = schedulerInformations.front();
+    ASSERT_EQ(s_libraryName,schedulerInfo.getLogicalLibraryName());
+    const auto & potentialMounts = schedulerInfo.getPotentialMounts();
+
+    ASSERT_FALSE(potentialMounts.empty());
+    const auto & potentialMount = potentialMounts.front();
+
+    ASSERT_EQ(request.fileSize,potentialMount.bytesQueued);
+    ASSERT_EQ(0,potentialMount.capacityInBytes);
+    ASSERT_EQ("",potentialMount.diskSystemSleptFor);
+    ASSERT_EQ(1,potentialMount.filesQueued);
+    ASSERT_EQ(0,potentialMount.mountCount);
+    ASSERT_EQ(s_minArchiveRequestAge,potentialMount.minRequestAge);
+    ASSERT_EQ(s_archivePriority,potentialMount.priority);
+    ASSERT_EQ(0,potentialMount.ratioOfMountQuotaUsed);
+    ASSERT_EQ(0,potentialMount.sleepTime);
+    ASSERT_FALSE(potentialMount.sleepingMount);
+    ASSERT_EQ(s_tapePoolName,potentialMount.tapePool);
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser,potentialMount.type);
+  }
+
+  {
+    std::unique_ptr<cta::TapeMount> mount;
+    mount.reset(scheduler.getNextMount(s_libraryName, driveName, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    std::unique_ptr<cta::ArchiveMount> archiveMount;
+    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+    ASSERT_NE(nullptr, archiveMount.get());
+    std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_NE(nullptr, archiveJobBatch.front().get());
+    std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+    archiveJob->tapeFile.blockId = 1;
+    archiveJob->tapeFile.fSeq = 1;
+    archiveJob->tapeFile.checksumBlob.insert(cta::checksum::ADLER32, 0x1234abcd);
+    archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
+    archiveJob->tapeFile.copyNb = 1;
+    archiveJob->validate();
+    std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+    std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+    std::queue<std::unique_ptr <cta::SchedulerDatabase::ArchiveJob >> failedToReportArchiveJobs;
+    sDBarchiveJobBatch.emplace(std::move(archiveJob));
+    archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems,failedToReportArchiveJobs, lc);
+    archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, archiveJobBatch.size());
+    archiveMount->complete();
+  }
+
+  ASSERT_TRUE(scheduler.getSchedulingInformations(lc).empty());
+
+  //Queue a retrieve request for the archived file
+  {
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file";
+    cta::common::dataStructures::RetrieveRequest request;
+    request.archiveFileID = archiveFileId;
+    request.creationLog = creationLog;
+    request.diskFileInfo = diskFileInfo;
+    request.dstURL = "dstURL";
+    request.requester.name = s_userName;
+    request.requester.group = "userGroup";
+    scheduler.queueRetrieve(s_diskInstance, request, lc);
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  {
+    auto schedulerInformations = scheduler.getSchedulingInformations(lc);
+    ASSERT_FALSE(schedulerInformations.empty());
+
+    auto & schedulerInfo = schedulerInformations.front();
+    ASSERT_EQ(s_libraryName,schedulerInfo.getLogicalLibraryName());
+    const auto & potentialMounts = schedulerInfo.getPotentialMounts();
+
+    ASSERT_FALSE(potentialMounts.empty());
+    const auto & potentialMount = potentialMounts.front();
+
+    ASSERT_EQ(request.fileSize,potentialMount.bytesQueued);
+    ASSERT_EQ(s_mediaTypeCapacityInBytes,potentialMount.capacityInBytes);
+    ASSERT_EQ("",potentialMount.diskSystemSleptFor);
+    ASSERT_EQ(1,potentialMount.filesQueued);
+    ASSERT_EQ(0,potentialMount.mountCount);
+    ASSERT_EQ(s_minRetrieveRequestAge,potentialMount.minRequestAge);
+    ASSERT_EQ(s_retrievePriority,potentialMount.priority);
+    ASSERT_EQ(0,potentialMount.ratioOfMountQuotaUsed);
+    ASSERT_EQ(0,potentialMount.sleepTime);
+    ASSERT_FALSE(potentialMount.sleepingMount);
+    ASSERT_EQ(s_tapePoolName,potentialMount.tapePool);
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve,potentialMount.type);
+    ASSERT_EQ(s_libraryName,potentialMount.logicalLibrary);
+    ASSERT_EQ(s_vid,potentialMount.vid);
+    ASSERT_EQ(s_vo,potentialMount.vo);
+  }
+  //Now let's queue an Archive request with a high priority
+  //Modify the mount policy to have an equality between all values
+  catalogue.modifyMountPolicyArchiveMinRequestAge(s_adminOnAdminHost,s_mountPolicyName,1);
+  catalogue.modifyMountPolicyArchivePriority(s_adminOnAdminHost,s_mountPolicyName,1);
+  catalogue.modifyMountPolicyRetrieveMinRequestAge(s_adminOnAdminHost,s_mountPolicyName,1);
+  catalogue.modifyMountPolicyRetrievePriority(s_adminOnAdminHost,s_mountPolicyName,1);
+
+  {
+    auto schedulerInformations = scheduler.getSchedulingInformations(lc);
+    ASSERT_FALSE(schedulerInformations.empty());
+
+    // Queue an archive request.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.gid=GROUP_2;
+    diskFileInfo.owner_uid=CMS_USER;
+    diskFileInfo.path="path/to/file2";
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumBlob.insert(cta::checksum::ADLER32, 0xabcd1234);
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.fileSize=200*1000*1000;
+    cta::common::dataStructures::RequesterIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL2";
+    request.storageClass=s_storageClassName;
+    uint64_t archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  {
+    auto schedulingInfos = scheduler.getSchedulingInformations(lc);
+    ASSERT_FALSE(schedulingInfos.empty());
+    //We have only one logical library
+    EXPECT_EQ(1,schedulingInfos.size());
+    const auto & schedulingInfo = schedulingInfos.front();
+    //We have two potential mounts
+    auto potentialMounts = schedulingInfo.getPotentialMounts();
+    ASSERT_EQ(2,potentialMounts.size());
+    //The first mount should be an Archive and the second one a Retrieve as Archive is more prior than the Retrieve
+    auto & firstMount = potentialMounts.front();
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser,firstMount.type);
+    potentialMounts.pop_front();
+    auto & secondMount = potentialMounts.front();
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve,secondMount.type);
+  }
+
+  //Change the mount policies to have a Retrieve priority higher than the Archive priority
+  catalogue.modifyMountPolicyRetrievePriority(s_adminOnAdminHost,s_mountPolicyName,10);
+
+  {
+    auto schedulingInfos = scheduler.getSchedulingInformations(lc);
+    ASSERT_FALSE(schedulingInfos.empty());
+    //We have only one logical library
+    EXPECT_EQ(1,schedulingInfos.size());
+    const auto & schedulingInfo = schedulingInfos.front();
+    //We have two potential mounts
+    auto potentialMounts = schedulingInfo.getPotentialMounts();
+    ASSERT_EQ(2,potentialMounts.size());
+    //The first mount should now be a Retrieve and the second one an Archive, as the Retrieve priority was raised above the Archive priority
+    auto & firstMount = potentialMounts.front();
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve,firstMount.type);
+    potentialMounts.pop_front();
+    auto & secondMount = potentialMounts.front();
+    ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser,secondMount.type);
+  }
+}
+
+TEST_P(SchedulerTest, expandRepackRequestShouldThrowIfUseBufferNotRecallButNoDirectoryCreated){
+  using namespace cta;
+  unitTests::TempDirectory tempDirectory;
+
+  auto &catalogue = getCatalogue();
+  auto &scheduler = getScheduler();
+
+  setupDefaultCatalogue();
+  catalogue.createDiskInstance({"user", "host"}, "diskInstance", "no comment");
+  catalogue.createDiskInstanceSpace({"user", "host"}, "diskInstanceSpace", "diskInstance", "eos:ctaeos:default", 10, "no comment");
+  catalogue.createDiskSystem({"user", "host"}, "repackBuffer", "diskInstance", "diskInstanceSpace", tempDirectory.path(), 10L*1000*1000*1000, 15*60, "no comment");
+
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  cta::common::dataStructures::SecurityIdentity admin;
+  admin.username = "admin_user_name";
+  admin.host = "admin_host";
+
+  //Create a logical library in the catalogue
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(admin, s_libraryName, libraryIsDisabled, "Create logical library");
+
+  {
+    auto tape = getDefaultTape();
+    tape.full = true;
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  //Create a storage class in the catalogue
+  common::dataStructures::StorageClass storageClass;
+  storageClass.name = s_storageClassName;
+  storageClass.nbCopies = 2;
+  storageClass.comment = "Create storage class";
+  const std::string tapeDrive = "tape_drive";
+  const uint64_t nbArchiveFilesPerTape = 10;
+  const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
+
+  //Simulate the writing of 10 files per tape in the catalogue
+  std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  checksum::ChecksumBlob checksumBlob;
+  checksumBlob.insert(cta::checksum::ADLER32, "1234");
+  {
+    uint64_t archiveFileId = 1;
+    for(uint64_t j = 1; j <= nbArchiveFilesPerTape; ++j) {
+      std::ostringstream diskFileId;
+      diskFileId << (12345677 + archiveFileId);
+      std::ostringstream diskFilePath;
+      diskFilePath << "/public_dir/public_file_"<<j;
+      auto fileWrittenUP=std::make_unique<cta::catalogue::TapeFileWritten>();
+      auto & fileWritten = *fileWrittenUP;
+      fileWritten.archiveFileId = archiveFileId++;
+      fileWritten.diskInstance = s_diskInstance;
+      fileWritten.diskFileId = diskFileId.str();
+
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
+      fileWritten.size = archiveFileSize;
+      fileWritten.checksumBlob = checksumBlob;
+      fileWritten.storageClassName = s_storageClassName;
+      fileWritten.vid = s_vid;
+      fileWritten.fSeq = j;
+      fileWritten.blockId = j * 100;
+      fileWritten.copyNb = 1;
+      fileWritten.tapeDrive = tapeDrive;
+      tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
+    }
+    //update the DB tape
+    catalogue.filesWrittenToTape(tapeFilesWrittenCopy1);
+    tapeFilesWrittenCopy1.clear();
+  }
+
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  bool noRecall = true;
+
+  cta::SchedulerDatabase::QueueRepackRequest qrr(s_vid,"file://DOES_NOT_EXIST",common::dataStructures::RepackInfo::Type::MoveOnly,
+  common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag,noRecall);
+  scheduler.queueRepack(admin,qrr, lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  scheduler.promoteRepackRequestsToToExpand(lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  auto repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+  log::TimingList tl;
+  utils::Timer t;
+  ASSERT_THROW(scheduler.expandRepackRequest(repackRequestToExpand,tl,t,lc),cta::ExpandRepackRequestException);
+}
+
+TEST_P(SchedulerTest, expandRepackRequestShouldNotThrowIfTapeDisabledButNoRecallFlagProvided){
+  using namespace cta;
+  unitTests::TempDirectory tempDirectory;
+
+  auto &catalogue = getCatalogue();
+  auto &scheduler = getScheduler();
+
+  setupDefaultCatalogue();
+  catalogue.createDiskInstance({"user", "host"}, "diskInstance", "no comment");
+  catalogue.createDiskInstanceSpace({"user", "host"}, "diskInstanceSpace", "diskInstance", "eos:ctaeos:default", 10, "no comment");
+  catalogue.createDiskSystem({"user", "host"}, "repackBuffer", "diskInstance", "diskInstanceSpace",tempDirectory.path(), 10L*1000*1000*1000, 15*60, "no comment");
+
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+
+  cta::common::dataStructures::SecurityIdentity admin;
+  admin.username = "admin_user_name";
+  admin.host = "admin_host";
+
+  //Create a logical library in the catalogue
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(admin, s_libraryName, libraryIsDisabled, "Create logical library");
+
+  {
+    auto tape = getDefaultTape();
+    tape.full = true;
+    catalogue.createTape(s_adminOnAdminHost, tape);
+  }
+
+  //Create a storage class in the catalogue
+  common::dataStructures::StorageClass storageClass;
+  storageClass.name = s_storageClassName;
+  storageClass.nbCopies = 2;
+  storageClass.comment = "Create storage class";
+  const std::string tapeDrive = "tape_drive";
+  const uint64_t nbArchiveFilesPerTape = 10;
+  const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
+
+  //Simulate the writing of 10 files per tape in the catalogue
+  std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
+  checksum::ChecksumBlob checksumBlob;
+  checksumBlob.insert(cta::checksum::ADLER32, "1234");
+  {
+    uint64_t archiveFileId = 1;
+    for(uint64_t j = 1; j <= nbArchiveFilesPerTape; ++j) {
+      std::ostringstream diskFileId;
+      diskFileId << (12345677 + archiveFileId);
+      std::ostringstream diskFilePath;
+      diskFilePath << "/public_dir/public_file_"<<j;
+      auto fileWrittenUP=std::make_unique<cta::catalogue::TapeFileWritten>();
+      auto & fileWritten = *fileWrittenUP;
+      fileWritten.archiveFileId = archiveFileId++;
+      fileWritten.diskInstance = s_diskInstance;
+      fileWritten.diskFileId = diskFileId.str();
+
+      fileWritten.diskFileOwnerUid = PUBLIC_OWNER_UID;
+      fileWritten.diskFileGid = PUBLIC_GID;
+      fileWritten.size = archiveFileSize;
+      fileWritten.checksumBlob = checksumBlob;
+      fileWritten.storageClassName = s_storageClassName;
+      fileWritten.vid = s_vid;
+      fileWritten.fSeq = j;
+      fileWritten.blockId = j * 100;
+      fileWritten.copyNb = 1;
+      fileWritten.tapeDrive = tapeDrive;
+      tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
+    }
+    //update the DB tape
+    catalogue.filesWrittenToTape(tapeFilesWrittenCopy1);
+    tapeFilesWrittenCopy1.clear();
+  }
+
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  bool noRecall = true;
+  std::string pathRepackBuffer = "file://"+tempDirectory.path();
+  tempDirectory.append("/"+s_vid);
+  tempDirectory.mkdir();
+  cta::SchedulerDatabase::QueueRepackRequest qrr(s_vid,pathRepackBuffer,common::dataStructures::RepackInfo::Type::MoveOnly,
+  common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,s_defaultRepackDisabledTapeFlag,noRecall);
+  scheduler.queueRepack(admin,qrr, lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  scheduler.promoteRepackRequestsToToExpand(lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  auto repackRequestToExpand = scheduler.getNextRepackRequestToExpand();
+  log::TimingList tl;
+  utils::Timer t;
+  ASSERT_NO_THROW(scheduler.expandRepackRequest(repackRequestToExpand,tl,t,lc));
+}
+
+TEST_P(SchedulerTest, archiveMaxDrivesVoInFlightChangeScheduleMount){
+  using namespace cta;
+
+  setupDefaultCatalogue();
+  Scheduler &scheduler = getScheduler();
+  auto & catalogue = getCatalogue();
+  cta::common::dataStructures::EntryLog creationLog;
+  creationLog.host="host2";
+  creationLog.time=0;
+  creationLog.username="admin1";
+  cta::common::dataStructures::DiskFileInfo diskFileInfo;
+  diskFileInfo.gid=GROUP_2;
+  diskFileInfo.owner_uid=CMS_USER;
+  diskFileInfo.path="path/to/file";
+  cta::common::dataStructures::ArchiveRequest request;
+  request.checksumBlob.insert(cta::checksum::ADLER32, "1111");
+  request.creationLog=creationLog;
+  request.diskFileInfo=diskFileInfo;
+  request.diskFileID="diskFileID";
+  request.fileSize=100*1000*1000;
+  cta::common::dataStructures::RequesterIdentity requester;
+  requester.name = s_userName;
+  requester.group = "userGroup";
+  request.requester = requester;
+  request.srcURL="srcURL";
+  request.storageClass=s_storageClassName;
+
+  // Create the environment for the migration to happen (library + tape)
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+
+  auto tape = getDefaultTape();
+  catalogue.createTape(s_adminOnAdminHost, tape);
+
+  const std::string driveName = "tape_drive";
+
+  catalogue.tapeLabelled(s_vid, driveName);
+
+
+  log::DummyLogger dl("", "");
+  log::LogContext lc(dl);
+  const uint64_t archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass,
+      request.requester, lc);
+  scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
+  scheduler.waitSchedulerDbSubthreadsComplete();
+
+  catalogue.modifyVirtualOrganizationWriteMaxDrives(s_adminOnAdminHost,s_vo,0);
+
+  {
+    // Emulate a tape server
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    bool nextMount = scheduler.getNextMountDryRun(s_libraryName, driveName, lc);
+    //nextMount should be false as the VO write max drives is 0
+    ASSERT_FALSE(nextMount);
+    catalogue.modifyVirtualOrganizationWriteMaxDrives(s_adminOnAdminHost,s_vo,1);
+    //Reset the VO write max drives to a positive number should give a new mount
+    nextMount = scheduler.getNextMountDryRun(s_libraryName,driveName,lc);
+    ASSERT_TRUE(nextMount);
+  }
+}
+
+TEST_P(SchedulerTest, retrieveMaxDrivesVoInFlightChangeScheduleMount)
+{
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, retrieveArchiveAllTypesMaxDrivesVoInFlightChangeScheduleMount)
+{
+  ASSERT_EQ(0,1);
+}
+
+TEST_P(SchedulerTest, getQueuesAndMountSummariesTest)
+{
+  ASSERT_EQ(0,1);
+}
+
+//This test tests what is described in the use case ticket
+// high priority Archive job not scheduled when Repack is running : https://gitlab.cern.ch/cta/operations/-/issues/150
+TEST_P(SchedulerTest, getNextMountWithArchiveForUserAndArchiveForRepackShouldReturnBothMountsArchiveMinRequestAge){
+  ASSERT_EQ(0,1);
+}
+
+#ifdef CTA_PGSCHED
+static cta::PostgresSchedDBFactory PostgresSchedDBFactoryStatic;
+
+INSTANTIATE_TEST_CASE_P(PostgresSchedulerDBPlusMockGenericSchedulerTest, SchedulerTest,
+  ::testing::Values(SchedulerTestParam(PostgresSchedDBFactoryStatic)));
+#else
+#error Generic SchedulerTest not configured for current scheduler type
+#endif
+} // namespace unitTests
diff --git a/scheduler/OStoreDB/OStoreDB.hpp b/scheduler/OStoreDB/OStoreDB.hpp
index f9abfced07565e7e98d8f2202bf0c3078137116f..f9b61f346d0f86442c0559b898eeaa3751de6ae9 100644
--- a/scheduler/OStoreDB/OStoreDB.hpp
+++ b/scheduler/OStoreDB/OStoreDB.hpp
@@ -665,8 +665,6 @@ class OStoreDB: public SchedulerDatabase {
 
   void requeueRetrieveJobs(std::list<cta::SchedulerDatabase::RetrieveJob *> &jobs, log::LogContext& logContext) override;
 
-  CTA_GENERATE_EXCEPTION_CLASS(NoRepackReportBatchFound);
-
  private:
   const size_t c_repackArchiveReportBatchSize = 10000;
   const size_t c_repackRetrieveReportBatchSize = 10000;
diff --git a/scheduler/OStoreDB/OStoreDBFactory.hpp b/scheduler/OStoreDB/OStoreDBFactory.hpp
index f6e20076199dd96afed396c784eda7f47e003ec4..834b35db4c236d45c654a14034231769a5d93408 100644
--- a/scheduler/OStoreDB/OStoreDBFactory.hpp
+++ b/scheduler/OStoreDB/OStoreDBFactory.hpp
@@ -44,8 +44,10 @@ namespace objectstore {
  * and test recovery.
  */
 
-class OStoreDBWrapperInterface: public SchedulerDatabase {
+class OStoreDBWrapperInterface: public SchedulerDatabaseDecorator {
 public:
+  OStoreDBWrapperInterface(SchedulerDatabase &db) : SchedulerDatabaseDecorator(db) {}
+
   virtual objectstore::Backend & getBackend() = 0;
   virtual objectstore::AgentReference & getAgentReference() = 0;
   virtual cta::OStoreDB & getOstoreDB() = 0;
@@ -97,183 +99,6 @@ public:
 
   cta::OStoreDB& getOstoreDB() override { return m_OStoreDB; }
 
-  void waitSubthreadsComplete() override {
-    m_OStoreDB.waitSubthreadsComplete();
-  }
-
-  void ping() override {
-    m_OStoreDB.ping();
-  }
-
-  std::string queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest& request, const cta::common::dataStructures::ArchiveFileQueueCriteriaAndFileId& criteria, log::LogContext &logContext) override {
-    return m_OStoreDB.queueArchive(instanceName, request, criteria, logContext);
-  }
-
-  void deleteRetrieveRequest(const common::dataStructures::SecurityIdentity& cliIdentity, const std::string& remoteFile) override {
-    m_OStoreDB.deleteRetrieveRequest(cliIdentity, remoteFile);
-  }
-
-  std::list<cta::common::dataStructures::RetrieveJob> getRetrieveJobs(const std::string& tapePoolName) const override {
-    return m_OStoreDB.getRetrieveJobs(tapePoolName);
-  }
-
-  std::map<std::string, std::list<common::dataStructures::RetrieveJob> > getRetrieveJobs() const override {
-    return m_OStoreDB.getRetrieveJobs();
-  }
-
-  std::map<std::string, std::list<common::dataStructures::ArchiveJob> > getArchiveJobs() const override {
-    return m_OStoreDB.getArchiveJobs();
-  }
-
-  std::list<cta::common::dataStructures::ArchiveJob> getArchiveJobs(const std::string& tapePoolName) const override {
-    return m_OStoreDB.getArchiveJobs(tapePoolName);
-  }
-
-  std::unique_ptr<IArchiveJobQueueItor> getArchiveJobQueueItor(const std::string &tapePoolName,
-    common::dataStructures::JobQueueType queueType) const override {
-    return m_OStoreDB.getArchiveJobQueueItor(tapePoolName, queueType);
-  }
-
-  std::unique_ptr<IRetrieveJobQueueItor> getRetrieveJobQueueItor(const std::string &vid,
-    common::dataStructures::JobQueueType queueType) const override {
-    return m_OStoreDB.getRetrieveJobQueueItor(vid, queueType);
-  }
-
-  std::map<std::string, std::list<RetrieveRequestDump> > getRetrieveRequests() const override {
-    return m_OStoreDB.getRetrieveRequests();
-  }
-
-  std::list<std::unique_ptr<ArchiveJob>> getNextArchiveJobsToReportBatch(uint64_t filesRequested, log::LogContext &lc) override {
-    return m_OStoreDB.getNextArchiveJobsToReportBatch(filesRequested, lc);
-  }
-
-  JobsFailedSummary getArchiveJobsFailedSummary(log::LogContext &lc) override {
-    return m_OStoreDB.getArchiveJobsFailedSummary(lc);
-  }
-
-  std::list<std::unique_ptr<RetrieveJob>> getNextRetrieveJobsToReportBatch(uint64_t filesRequested, log::LogContext &lc) override {
-    return m_OStoreDB.getNextRetrieveJobsToReportBatch(filesRequested, lc);
-  }
-
-  std::list<std::unique_ptr<RetrieveJob>> getNextRetrieveJobsFailedBatch(uint64_t filesRequested, log::LogContext &lc) override {
-    return m_OStoreDB.getNextRetrieveJobsFailedBatch(filesRequested, lc);
-  }
-
-  std::unique_ptr<RepackReportBatch> getNextRepackReportBatch(log::LogContext& lc) override {
-    return m_OStoreDB.getNextRepackReportBatch(lc);
-  }
-
-  std::unique_ptr<RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc) override {
-    return m_OStoreDB.getNextSuccessfulRetrieveRepackReportBatch(lc);
-  }
-
-  std::unique_ptr<RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc) override {
-    return m_OStoreDB.getNextSuccessfulArchiveRepackReportBatch(lc);
-  }
-
-  std::unique_ptr<RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext& lc) override {
-    return m_OStoreDB.getNextFailedRetrieveRepackReportBatch(lc);
-  }
-
-  std::unique_ptr<RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext& lc) override {
-    return m_OStoreDB.getNextFailedArchiveRepackReportBatch(lc);
-  }
-
-  std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> getRepackReportBatches(log::LogContext &lc) override {
-    return m_OStoreDB.getRepackReportBatches(lc);
-  }
-
-  JobsFailedSummary getRetrieveJobsFailedSummary(log::LogContext &lc) override {
-    return m_OStoreDB.getRetrieveJobsFailedSummary(lc);
-  }
-
-  void setArchiveJobBatchReported(std::list<cta::SchedulerDatabase::ArchiveJob*>& jobsBatch, log::TimingList & timingList,
-      utils::Timer & t, log::LogContext& lc) override {
-    m_OStoreDB.setArchiveJobBatchReported(jobsBatch, timingList, t, lc);
-  }
-
-  void setRetrieveJobBatchReportedToUser(std::list<cta::SchedulerDatabase::RetrieveJob*>& jobsBatch, log::TimingList & timingList,
-      utils::Timer & t, log::LogContext& lc) override {
-    m_OStoreDB.setRetrieveJobBatchReportedToUser(jobsBatch, timingList, t, lc);
-  }
-
-  std::list<RetrieveRequestDump> getRetrieveRequestsByVid(const std::string& vid) const override {
-    return m_OStoreDB.getRetrieveRequestsByVid(vid);
-  }
-
-  std::list<RetrieveRequestDump> getRetrieveRequestsByRequester(const std::string& requester) const override {
-    return m_OStoreDB.getRetrieveRequestsByRequester(requester);
-  }
-
-
-  std::unique_ptr<TapeMountDecisionInfo> getMountInfo(log::LogContext& logContext) override {
-    return m_OStoreDB.getMountInfo(logContext);
-  }
-
-  void trimEmptyQueues(log::LogContext& lc) override {
-    m_OStoreDB.trimEmptyQueues(lc);
-  }
-
-  std::unique_ptr<TapeMountDecisionInfo> getMountInfoNoLock(PurposeGetMountInfo purpose, log::LogContext& logContext) override {
-    return m_OStoreDB.getMountInfoNoLock(purpose,logContext);
-  }
-
-  std::list<RetrieveQueueStatistics> getRetrieveQueueStatistics(const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria,
-          const std::set<std::string> & vidsToConsider) override {
-    return m_OStoreDB.getRetrieveQueueStatistics(criteria, vidsToConsider);
-  }
-
-  SchedulerDatabase::RetrieveRequestInfo queueRetrieve(common::dataStructures::RetrieveRequest& rqst,
-    const common::dataStructures::RetrieveFileQueueCriteria &criteria, const std::optional<std::string> diskSystemName,
-    log::LogContext &logContext) override {
-    return m_OStoreDB.queueRetrieve(rqst, criteria, diskSystemName, logContext);
-  }
-
-  void cancelArchive(const common::dataStructures::DeleteArchiveRequest& request, log::LogContext & lc) override {
-    m_OStoreDB.cancelArchive(request,lc);
-  }
-
-  void cancelRetrieve(const std::string& instanceName, const cta::common::dataStructures::CancelRetrieveRequest& rqst,
-    log::LogContext& lc) override {
-    m_OStoreDB.cancelRetrieve(instanceName, rqst, lc);
-  }
-
-  void deleteFailed(const std::string &objectId, log::LogContext & lc) override {
-    m_OStoreDB.deleteFailed(objectId, lc);
-  }
-
-  std::string queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext& lc) override {
-    return m_OStoreDB.queueRepack(repackRequest, lc);
-  }
-
-  std::list<common::dataStructures::RepackInfo> getRepackInfo() override {
-    return m_OStoreDB.getRepackInfo();
-  }
-
-  common::dataStructures::RepackInfo getRepackInfo(const std::string& vid) override {
-    return m_OStoreDB.getRepackInfo(vid);
-  }
-
-  void cancelRepack(const std::string& vid, log::LogContext & lc) override {
-    m_OStoreDB.cancelRepack(vid, lc);
-  }
-
-  std::unique_ptr<RepackRequestStatistics> getRepackStatistics() override {
-    return m_OStoreDB.getRepackStatistics();
-  }
-
-  std::unique_ptr<RepackRequestStatistics> getRepackStatisticsNoLock() override {
-    return m_OStoreDB.getRepackStatisticsNoLock();
-  }
-
-  std::unique_ptr<RepackRequest> getNextRepackJobToExpand() override {
-    return m_OStoreDB.getNextRepackJobToExpand();
-  }
-
-  void requeueRetrieveJobs(std::list<cta::SchedulerDatabase::RetrieveJob *> &jobs, log::LogContext& logContext) override {
-    return m_OStoreDB.requeueRetrieveJobs(jobs, logContext);
-  }
-
 private:
   std::unique_ptr <cta::log::Logger> m_logger;
   std::unique_ptr <cta::objectstore::Backend> m_backend;
@@ -285,6 +110,7 @@ private:
 template <>
 OStoreDBWrapper<cta::objectstore::BackendVFS>::OStoreDBWrapper(
         const std::string &context, std::unique_ptr<cta::catalogue::Catalogue> & catalogue, const std::string &URL) :
+OStoreDBWrapperInterface(m_OStoreDB),
 m_logger(new cta::log::DummyLogger("", "")), m_backend(new cta::objectstore::BackendVFS()),
 m_catalogue(catalogue),
 m_OStoreDB(*m_backend, *m_catalogue, *m_logger),
diff --git a/scheduler/PostgresSchedDB/ArchiveJob.cpp b/scheduler/PostgresSchedDB/ArchiveJob.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..13e167f067c7646b01bf18e55d4e13f1a409811e
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveJob.cpp
@@ -0,0 +1,43 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "ArchiveJob.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::ArchiveJob::ArchiveJob()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveJob::failTransfer(const std::string & failureReason, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveJob::failReport(const std::string & failureReason, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveJob::bumpUpTapeFileCount(uint64_t newFileCount)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/ArchiveJob.hpp b/scheduler/PostgresSchedDB/ArchiveJob.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c68e9971778a05347a3a75bd826db503659c47c
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveJob.hpp
@@ -0,0 +1,45 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+
+#include <list>
+#include <memory>
+#include <optional>
+#include <string>
+#include <cstdint>
+#include <time.h>
+
+namespace cta {
+
+class PostgresSchedDB::ArchiveJob : public SchedulerDatabase::ArchiveJob {
+ public:
+
+   ArchiveJob();
+
+   void failTransfer(const std::string & failureReason, log::LogContext & lc) override;
+
+   void failReport(const std::string & failureReason, log::LogContext & lc) override;
+
+   void bumpUpTapeFileCount(uint64_t newFileCount) override;
+
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/ArchiveJobQueueItor.cpp b/scheduler/PostgresSchedDB/ArchiveJobQueueItor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e71043f9ff4961dc627a8199b85bb40d597d1ef6
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveJobQueueItor.cpp
@@ -0,0 +1,48 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "ArchiveJobQueueItor.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::ArchiveJobQueueItor::ArchiveJobQueueItor()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const std::string &PostgresSchedDB::ArchiveJobQueueItor::qid() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+bool PostgresSchedDB::ArchiveJobQueueItor::end() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveJobQueueItor::operator++()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const common::dataStructures::ArchiveJob &PostgresSchedDB::ArchiveJobQueueItor::operator*() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/ArchiveJobQueueItor.hpp b/scheduler/PostgresSchedDB/ArchiveJobQueueItor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f1a32b14907028c6787d426c9c478514f3c1af94
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveJobQueueItor.hpp
@@ -0,0 +1,41 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/dataStructures/ArchiveJob.hpp"
+
+#include <string>
+
+namespace cta {
+
+class PostgresSchedDB::ArchiveJobQueueItor : public SchedulerDatabase::IArchiveJobQueueItor {
+ public:
+
+   ArchiveJobQueueItor();
+
+   const std::string &qid() const override;
+
+   bool end() const override;
+
+   void operator++() override;
+
+   const common::dataStructures::ArchiveJob &operator*() const override;
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/ArchiveMount.cpp b/scheduler/PostgresSchedDB/ArchiveMount.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d1aeab41b265b61d41d53994359bdc7d3339743a
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveMount.cpp
@@ -0,0 +1,56 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "ArchiveMount.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::ArchiveMount::ArchiveMount()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const SchedulerDatabase::ArchiveMount::MountInfo & PostgresSchedDB::ArchiveMount::getMountInfo()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob>> PostgresSchedDB::ArchiveMount::getNextJobBatch(uint64_t filesRequested,
+      uint64_t bytesRequested, log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveMount::setDriveStatus(common::dataStructures::DriveStatus status, common::dataStructures::MountType mountType,
+                                time_t completionTime, const std::optional<std::string>& reason)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveMount::setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ArchiveMount::setJobBatchTransferred(
+      std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob>> & jobsBatch, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/ArchiveMount.hpp b/scheduler/PostgresSchedDB/ArchiveMount.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cc1ef4f305b012bf1bc70b5cf6af21eb811129b4
--- /dev/null
+++ b/scheduler/PostgresSchedDB/ArchiveMount.hpp
@@ -0,0 +1,53 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+#include "common/dataStructures/DriveState.hpp"
+#include "common/dataStructures/MountType.hpp"
+
+#include <list>
+#include <memory>
+#include <optional>
+#include <cstdint>
+#include <time.h>
+
+namespace cta {
+
+class PostgresSchedDB::ArchiveMount : public SchedulerDatabase::ArchiveMount {
+ public:
+
+   ArchiveMount();
+
+   const MountInfo & getMountInfo() override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob>> getNextJobBatch(uint64_t filesRequested,
+      uint64_t bytesRequested, log::LogContext& logContext) override;
+
+   void setDriveStatus(common::dataStructures::DriveStatus status, common::dataStructures::MountType mountType,
+                                time_t completionTime, const std::optional<std::string>& reason = std::nullopt) override;
+
+   void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override;
+
+   void setJobBatchTransferred(
+      std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob>> & jobsBatch, log::LogContext & lc) override;
+
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDB.cpp b/scheduler/PostgresSchedDB/PostgresSchedDB.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e83fe49718ddb727ea75b8ed346d77b82be84ef
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDB.cpp
@@ -0,0 +1,268 @@
+/*
+ * @project	 The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license	 This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "PostgresSchedDB.hpp"
+#include "scheduler/Scheduler.hpp"
+#include "scheduler/LogicalLibrary.hpp"
+#include "scheduler/RetrieveJob.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::PostgresSchedDB(void *pgstuff, catalogue::Catalogue & catalogue, log::Logger &logger)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+PostgresSchedDB::~PostgresSchedDB() throw()
+{
+}
+
+void PostgresSchedDB::waitSubthreadsComplete()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::ping()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::string PostgresSchedDB::queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest &request,
+    const cta::common::dataStructures::ArchiveFileQueueCriteriaAndFileId &criteria, log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::map<std::string, std::list<common::dataStructures::ArchiveJob>> PostgresSchedDB::getArchiveJobs() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<cta::common::dataStructures::ArchiveJob> PostgresSchedDB::getArchiveJobs(const std::string& tapePoolName) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::IArchiveJobQueueItor> PostgresSchedDB::getArchiveJobQueueItor(const std::string &tapePoolName,
+    common::dataStructures::JobQueueType queueType) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > PostgresSchedDB::getNextArchiveJobsToReportBatch(uint64_t filesRequested,
+     log::LogContext & logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+SchedulerDatabase::JobsFailedSummary PostgresSchedDB::getArchiveJobsFailedSummary(log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::setArchiveJobBatchReported(std::list<SchedulerDatabase::ArchiveJob*> & jobsBatch,
+     log::TimingList & timingList, utils::Timer & t, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<SchedulerDatabase::RetrieveQueueStatistics> PostgresSchedDB::getRetrieveQueueStatistics(
+    const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria, const std::set<std::string>& vidsToConsider)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+SchedulerDatabase::RetrieveRequestInfo PostgresSchedDB::queueRetrieve(cta::common::dataStructures::RetrieveRequest& rqst,
+    const cta::common::dataStructures::RetrieveFileQueueCriteria &criteria, const std::optional<std::string> diskSystemName,
+    log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::cancelRetrieve(const std::string& instanceName, const cta::common::dataStructures::CancelRetrieveRequest& rqst,
+    log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::map<std::string, std::list<RetrieveRequestDump> > PostgresSchedDB::getRetrieveRequests() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<RetrieveRequestDump> PostgresSchedDB::getRetrieveRequestsByVid(const std::string& vid) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<RetrieveRequestDump> PostgresSchedDB::getRetrieveRequestsByRequester(const std::string& vid) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::deleteRetrieveRequest(const common::dataStructures::SecurityIdentity& requester,
+    const std::string& remoteFile)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::cancelArchive(const common::dataStructures::DeleteArchiveRequest& request, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::deleteFailed(const std::string &objectId, log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::map<std::string, std::list<common::dataStructures::RetrieveJob>> PostgresSchedDB::getRetrieveJobs() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<cta::common::dataStructures::RetrieveJob> PostgresSchedDB::getRetrieveJobs(const std::string &vid) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::IRetrieveJobQueueItor> PostgresSchedDB::getRetrieveJobQueueItor(const std::string &vid,
+    common::dataStructures::JobQueueType queueType) const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::string PostgresSchedDB::queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<common::dataStructures::RepackInfo> PostgresSchedDB::getRepackInfo()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+common::dataStructures::RepackInfo PostgresSchedDB::getRepackInfo(const std::string& vid)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::cancelRepack(const std::string& vid, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackRequestStatistics> PostgresSchedDB::getRepackStatistics()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackRequestStatistics> PostgresSchedDB::getRepackStatisticsNoLock()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackRequest> PostgresSchedDB::getNextRepackJobToExpand()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> PostgresSchedDB::getNextRetrieveJobsToReportBatch(
+    uint64_t filesRequested, log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> PostgresSchedDB::getNextRetrieveJobsFailedBatch(
+    uint64_t filesRequested, log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackReportBatch> PostgresSchedDB::getNextRepackReportBatch(log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackReportBatch> PostgresSchedDB::getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackReportBatch> PostgresSchedDB::getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackReportBatch> PostgresSchedDB::getNextFailedRetrieveRepackReportBatch(log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RepackReportBatch> PostgresSchedDB::getNextFailedArchiveRepackReportBatch(log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> PostgresSchedDB::getRepackReportBatches(log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::setRetrieveJobBatchReportedToUser(std::list<SchedulerDatabase::RetrieveJob*> & jobsBatch,
+     log::TimingList & timingList, utils::Timer & t, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+SchedulerDatabase::JobsFailedSummary PostgresSchedDB::getRetrieveJobsFailedSummary(log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> PostgresSchedDB::getMountInfo(log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::trimEmptyQueues(log::LogContext& lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> PostgresSchedDB::getMountInfoNoLock(PurposeGetMountInfo purpose,
+    log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::requeueRetrieveJobs(std::list<SchedulerDatabase::RetrieveJob *> &jobs, log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::setThreadNumber(uint64_t threadNumber, const std::optional<size_t> &stackSize) {
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::setBottomHalfQueueSize(uint64_t tasksNumber) {
+   throw cta::exception::Exception("Not implemented");
+}
+
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDB.hpp b/scheduler/PostgresSchedDB/PostgresSchedDB.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1b7542b3672636f11c3247702e5fac7c579533a5
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDB.hpp
@@ -0,0 +1,166 @@
+/*
+ * @project	 The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license	 This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "catalogue/Catalogue.hpp"
+#include "common/dataStructures/ArchiveJob.hpp"
+#include "common/dataStructures/ArchiveRequest.hpp"
+#include "common/dataStructures/ArchiveFileQueueCriteriaAndFileId.hpp"
+#include "common/dataStructures/CancelRetrieveRequest.hpp"
+#include "common/dataStructures/DeleteArchiveRequest.hpp"
+#include "common/dataStructures/JobQueueType.hpp"
+#include "common/dataStructures/LabelFormat.hpp"
+#include "common/dataStructures/RepackInfo.hpp"
+#include "common/dataStructures/RetrieveFileQueueCriteria.hpp"
+#include "common/dataStructures/RetrieveJob.hpp"
+#include "common/dataStructures/RetrieveRequest.hpp"
+#include "common/dataStructures/SecurityIdentity.hpp"
+#include "common/log/Logger.hpp"
+#include "scheduler/SchedulerDatabase.hpp"
+#include "scheduler/RetrieveJob.hpp"
+
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+#include <cstdint>
+
+namespace cta {
+
+class PostgresSchedDB: public SchedulerDatabase {
+ public:
+   PostgresSchedDB(void *pgstuff, catalogue::Catalogue & catalogue, log::Logger &logger);
+   virtual ~PostgresSchedDB() throw();
+
+   class ArchiveMount;
+   class ArchiveJob;
+   class ArchiveJobQueueItor;
+   class RetrieveMount;
+   class RetrieveJob;
+   class RetrieveJobQueueItor;
+   class RepackRequestPromotionStatistics;
+   class RepackRequest;
+   class RepackReportBatch;
+   class TapeMountDecisionInfo;
+
+   void waitSubthreadsComplete() override;
+
+   void ping() override;
+
+   std::string queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest &request,
+     const cta::common::dataStructures::ArchiveFileQueueCriteriaAndFileId &criteria, log::LogContext &logContext) override;
+
+   std::map<std::string, std::list<common::dataStructures::ArchiveJob>> getArchiveJobs() const override;
+
+   std::list<cta::common::dataStructures::ArchiveJob> getArchiveJobs(const std::string& tapePoolName) const override;
+
+   std::unique_ptr<IArchiveJobQueueItor> getArchiveJobQueueItor(const std::string &tapePoolName,
+     common::dataStructures::JobQueueType queueType) const override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::ArchiveJob> > getNextArchiveJobsToReportBatch(uint64_t filesRequested,
+     log::LogContext & logContext) override;
+
+   JobsFailedSummary getArchiveJobsFailedSummary(log::LogContext &logContext) override;
+
+   void setArchiveJobBatchReported(std::list<SchedulerDatabase::ArchiveJob*> & jobsBatch,
+     log::TimingList & timingList, utils::Timer & t, log::LogContext & lc) override;
+
+   std::list<RetrieveQueueStatistics> getRetrieveQueueStatistics(
+     const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria, const std::set<std::string>& vidsToConsider) override;
+
+   SchedulerDatabase::RetrieveRequestInfo queueRetrieve(cta::common::dataStructures::RetrieveRequest& rqst,
+     const cta::common::dataStructures::RetrieveFileQueueCriteria &criteria, const std::optional<std::string> diskSystemName,
+     log::LogContext &logContext) override;
+
+   void cancelRetrieve(const std::string& instanceName, const cta::common::dataStructures::CancelRetrieveRequest& rqst,
+     log::LogContext& lc) override;
+
+   std::map<std::string, std::list<RetrieveRequestDump> > getRetrieveRequests() const override;
+
+   std::list<RetrieveRequestDump> getRetrieveRequestsByVid(const std::string& vid) const override;
+
+   std::list<RetrieveRequestDump> getRetrieveRequestsByRequester(const std::string& vid) const override;
+
+   void deleteRetrieveRequest(const common::dataStructures::SecurityIdentity& requester,
+     const std::string& remoteFile) override;
+
+   virtual void cancelArchive(const common::dataStructures::DeleteArchiveRequest& request, log::LogContext & lc) override;
+
+   virtual void deleteFailed(const std::string &objectId, log::LogContext &lc) override;
+
+   std::map<std::string, std::list<common::dataStructures::RetrieveJob>> getRetrieveJobs() const override;
+
+   std::list<cta::common::dataStructures::RetrieveJob> getRetrieveJobs(const std::string &vid) const override;
+
+   std::unique_ptr<IRetrieveJobQueueItor> getRetrieveJobQueueItor(const std::string &vid,
+    common::dataStructures::JobQueueType queueType) const override;
+
+   std::string queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext &logContext) override;
+
+   std::list<common::dataStructures::RepackInfo> getRepackInfo() override;
+
+   common::dataStructures::RepackInfo getRepackInfo(const std::string& vid) override;
+
+   void cancelRepack(const std::string& vid, log::LogContext & lc) override;
+
+   std::unique_ptr<RepackRequestStatistics> getRepackStatistics() override;
+
+   std::unique_ptr<RepackRequestStatistics> getRepackStatisticsNoLock() override;
+
+   std::unique_ptr<SchedulerDatabase::RepackRequest> getNextRepackJobToExpand() override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> getNextRetrieveJobsToReportBatch(
+     uint64_t filesRequested, log::LogContext &logContext) override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> getNextRetrieveJobsFailedBatch(
+     uint64_t filesRequested, log::LogContext &logContext) override;
+
+   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextRepackReportBatch(log::LogContext& lc) override;
+
+   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc) override;
+
+   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc) override;
+
+   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext& lc) override;
+
+   std::unique_ptr<SchedulerDatabase::RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext &lc) override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> getRepackReportBatches(log::LogContext &lc) override;
+
+   void setRetrieveJobBatchReportedToUser(std::list<SchedulerDatabase::RetrieveJob*> & jobsBatch,
+     log::TimingList & timingList, utils::Timer & t, log::LogContext & lc) override;
+
+   JobsFailedSummary getRetrieveJobsFailedSummary(log::LogContext &logContext) override;
+
+   std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> getMountInfo(log::LogContext& logContext) override;
+
+   void trimEmptyQueues(log::LogContext& lc) override;
+
+   std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> getMountInfoNoLock(PurposeGetMountInfo purpose,
+     log::LogContext& logContext) override;
+
+   void requeueRetrieveJobs(std::list<SchedulerDatabase::RetrieveJob *> &jobs, log::LogContext& logContext) override;
+
+   // these are not in the base class but are needed by XrdSsiCtaServiceProvider
+   void setThreadNumber(uint64_t threadNumber, const std::optional<size_t> &stackSize = std::nullopt);
+   void setBottomHalfQueueSize(uint64_t tasksNumber);
+
+};
+
+}  // namespace cta
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp b/scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..46411d4c939055bbb6f2f740bce7ea918e3eff78
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp
@@ -0,0 +1,74 @@
+/*
+ * @project	 The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license	 This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "scheduler/SchedulerDatabaseFactory.hpp"
+#include "scheduler/PostgresSchedDB/PostgresSchedDB.hpp"
+#include "common/dataStructures/SecurityIdentity.hpp"
+#include "scheduler/LogicalLibrary.hpp"
+#include "scheduler/RetrieveRequestDump.hpp"
+#include "common/log/DummyLogger.hpp"
+#include "catalogue/DummyCatalogue.hpp"
+#include <memory>
+
+namespace cta {
+
+class PostgresSchedDBWrapper: public SchedulerDatabaseDecorator {
+public:
+  PostgresSchedDBWrapper(const std::string &context, std::unique_ptr<cta::catalogue::Catalogue>& catalogue, const std::string &URL = "") :
+    SchedulerDatabaseDecorator(m_PostgresSchedDB), m_logger(new cta::log::DummyLogger("", "")), m_catalogue(catalogue),
+    m_PostgresSchedDB(nullptr, *m_catalogue, *m_logger) {}
+
+  ~PostgresSchedDBWrapper() throw () {}
+
+private:
+  std::unique_ptr <cta::log::Logger> m_logger;
+  std::unique_ptr <cta::catalogue::Catalogue> & m_catalogue;
+  cta::PostgresSchedDB m_PostgresSchedDB;
+};
+
+/**
+ * A concrete implementation of a scheduler database factory that creates
+ * PostgresSchedDB-backed scheduler database objects.
+ */
+class PostgresSchedDBFactory: public SchedulerDatabaseFactory {
+public:
+  /**
+   * Constructor
+   */
+  PostgresSchedDBFactory(const std::string & URL = ""): m_URL(URL) {}
+
+  /**
+   * Destructor.
+   */
+  ~PostgresSchedDBFactory() throw() {}
+
+  /**
+   * Returns a newly created scheduler database object.
+   *
+   * @return A newly created scheduler database object.
+   */
+  std::unique_ptr<SchedulerDatabase> create(std::unique_ptr<cta::catalogue::Catalogue>& catalogue) const {
+    return std::unique_ptr<SchedulerDatabase>(new PostgresSchedDBWrapper("UnitTest", catalogue, m_URL));
+  }
+
+  private:
+    std::string m_URL;
+}; // class PostgresSchedDBFactory
+
+} // namespace cta
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp b/scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..386a41b570a8f1f09b051cc8bcab0dd408e8be47
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp
@@ -0,0 +1,58 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/Logger.hpp"
+#include "catalogue/Catalogue.hpp"
+
+#include <memory>
+#include <string>
+
+namespace cta {
+
+class PostgresSchedDBGC {
+public:
+
+  PostgresSchedDBGC(void *pgstuff, catalogue::Catalogue& catalogue) { }
+  void runOnePass(log::LogContext & lc) { }
+};
+
+class PostgresSchedDBInit
+{
+public:
+  PostgresSchedDBInit(const std::string& client_process, const std::string& db_conn_str, log::Logger& log,
+    bool leaveNonEmptyAgentsBehind = false)
+  {
+  }
+
+  std::unique_ptr<PostgresSchedDB> getSchedDB(catalogue::Catalogue& catalogue, log::Logger& log) {
+    return std::make_unique<PostgresSchedDB>(nullptr, catalogue, log);
+  }
+
+  PostgresSchedDBGC getGarbageCollector(catalogue::Catalogue& catalogue) {
+    return PostgresSchedDBGC(nullptr, catalogue);
+  }
+
+private:
+};
+
+typedef PostgresSchedDBInit      SchedulerDBInit_t;
+typedef PostgresSchedDB          SchedulerDB_t;
+
+} // namespace cta
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDBTest.cpp b/scheduler/PostgresSchedDB/PostgresSchedDBTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bf24c8acab8bab8b3a8a113c7e5a38a867bcce8f
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDBTest.cpp
@@ -0,0 +1,124 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include <limits>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "catalogue/InMemoryCatalogue.hpp"
+#include "common/exception/Exception.hpp"
+#include "common/log/Logger.hpp"
+#include "common/log/StringLogger.hpp"
+#include "PostgresSchedDB.hpp"
+#include "PostgresSchedDBFactory.hpp"
+#include "PostgresSchedDBTest.hpp"
+
+namespace unitTests {
+
+/**
+ * This structure is used to parameterize PostgresSchedDB database tests.
+ */
+struct PostgresSchedDBTestParams {
+  cta::SchedulerDatabaseFactory &dbFactory;
+
+  explicit PostgresSchedDBTestParams(cta::SchedulerDatabaseFactory *dbFactory) :
+    dbFactory(*dbFactory) {}
+};  // struct PostgresSchedDBTestParams
+
+
+/**
+ * The PostgresSchedDB database test is a parameterized test.  It takes an
+ * PostgresSchedDB database factory as a parameter.
+ */
+class PostgresSchedDBTest: public
+  ::testing::TestWithParam<PostgresSchedDBTestParams> {
+ public:
+  PostgresSchedDBTest() throw() {
+  }
+
+  class FailedToGetDatabase: public std::exception {
+   public:
+    const char *what() const throw() {
+      return "Failed to get scheduler database";
+    }
+  };
+
+  virtual void SetUp() {
+    // We do a deep reference to the member as the C++ compiler requires the function to be
+    // already defined if called implicitly.
+    const auto &factory = GetParam().dbFactory;
+    m_catalogue = std::make_unique<cta::catalogue::DummyCatalogue>();
+    // Get the PostgresSched DB from the factory.
+    auto psdb = std::move(factory.create(m_catalogue));
+    // Make sure the type of the SchedulerDatabase is correct (it should be a PostgresSchedDBWrapper).
+    dynamic_cast<cta::PostgresSchedDBWrapper *> (psdb.get());
+    // We know the cast will not fail, so we can safely do it (otherwise we could leak memory).
+    m_db.reset(dynamic_cast<cta::PostgresSchedDBWrapper *> (psdb.release()));
+  }
+
+  virtual void TearDown() {
+    m_db.reset();
+    m_catalogue.reset();
+  }
+
+  cta::PostgresSchedDBWrapper &getDb() {
+    cta::PostgresSchedDBWrapper *const ptr = m_db.get();
+    if (nullptr == ptr) {
+      throw FailedToGetDatabase();
+    }
+    return *ptr;
+  }
+
+  static const std::string s_systemHost;
+  static const std::string s_adminHost;
+  static const std::string s_userHost;
+
+  static const std::string s_system;
+  static const std::string s_admin;
+  static const std::string s_user;
+
+  static const cta::common::dataStructures::SecurityIdentity s_systemOnSystemHost;
+
+  static const cta::common::dataStructures::SecurityIdentity s_adminOnAdminHost;
+  static const cta::common::dataStructures::SecurityIdentity s_adminOnUserHost;
+
+  static const cta::common::dataStructures::SecurityIdentity s_userOnAdminHost;
+  static const cta::common::dataStructures::SecurityIdentity s_userOnUserHost;
+
+ private:
+  // Prevent copying
+  PostgresSchedDBTest(const PostgresSchedDBTest &) = delete;
+
+  // Prevent assignment
+  PostgresSchedDBTest & operator= (const PostgresSchedDBTest &) = delete;
+
+  std::unique_ptr<cta::PostgresSchedDBWrapper> m_db;
+
+  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
+};  // class PostgresSchedDBTest
+
+TEST_P(PostgresSchedDBTest, getBatchArchiveJob) {
+  ASSERT_EQ(0,1);
+}
+
+static cta::PostgresSchedDBFactory PostgresSchedDBFactoryStatic;
+INSTANTIATE_TEST_CASE_P(PostgresSchedDBTest, PostgresSchedDBTest,
+    ::testing::Values(PostgresSchedDBTestParams(&PostgresSchedDBFactoryStatic)));
+
+}  // namespace unitTests
diff --git a/scheduler/PostgresSchedDB/PostgresSchedDBTest.hpp b/scheduler/PostgresSchedDB/PostgresSchedDBTest.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0eeb7e6c21de6b1488f15c4946aa2285ff8eb157
--- /dev/null
+++ b/scheduler/PostgresSchedDB/PostgresSchedDBTest.hpp
@@ -0,0 +1,24 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+namespace unitTests {
+
+}  // namespace unitTests
diff --git a/scheduler/PostgresSchedDB/RepackReportBatch.cpp b/scheduler/PostgresSchedDB/RepackReportBatch.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..face963b40be78d0283ca367599c0cc6dbf52d57
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackReportBatch.cpp
@@ -0,0 +1,33 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RepackReportBatch.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RepackReportBatch::RepackReportBatch()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackReportBatch::report(log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RepackReportBatch.hpp b/scheduler/PostgresSchedDB/RepackReportBatch.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e67a5faea0102609a2a8724413259aed18bf0f82
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackReportBatch.hpp
@@ -0,0 +1,33 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+
+namespace cta {
+
+class PostgresSchedDB::RepackReportBatch : public SchedulerDatabase::RepackReportBatch {
+ public:
+
+   RepackReportBatch();
+
+   void report(log::LogContext & lc) override;
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RepackRequest.cpp b/scheduler/PostgresSchedDB/RepackRequest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e3d5d82b37bbe45f141756761e25acb02e7d4020
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackRequest.cpp
@@ -0,0 +1,66 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RepackRequest.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RepackRequest::RepackRequest()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::setLastExpandedFSeq(uint64_t fseq)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+uint64_t PostgresSchedDB::RepackRequest::addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests,
+      cta::common::dataStructures::ArchiveRoute::FullMap & archiveRoutesMap, uint64_t maxFSeqLowBound,
+      const uint64_t maxAddedFSeq, const TotalStatsFiles &totalStatsFiles, disk::DiskSystemList diskSystemList,
+      log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::expandDone()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::fail()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::requeueInToExpandQueue(log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::setExpandStartedAndChangeStatus()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RepackRequest::fillLastExpandedFSeqAndTotalStatsFile(uint64_t &fSeq, TotalStatsFiles &totalStatsFiles)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RepackRequest.hpp b/scheduler/PostgresSchedDB/RepackRequest.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c7917b17151e6e1eaf2e7189804d702ef72e9314
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackRequest.hpp
@@ -0,0 +1,54 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/dataStructures/ArchiveRoute.hpp"
+#include "common/log/LogContext.hpp"
+#include "disk/DiskSystem.hpp"
+
+#include <list>
+#include <cstdint>
+
+namespace cta {
+
+class PostgresSchedDB::RepackRequest : public SchedulerDatabase::RepackRequest {
+ public:
+
+   RepackRequest();
+
+   void setLastExpandedFSeq(uint64_t fseq) override;
+
+   uint64_t addSubrequestsAndUpdateStats(std::list<Subrequest>& repackSubrequests,
+      cta::common::dataStructures::ArchiveRoute::FullMap & archiveRoutesMap, uint64_t maxFSeqLowBound,
+      const uint64_t maxAddedFSeq, const TotalStatsFiles &totalStatsFiles, disk::DiskSystemList diskSystemList,
+      log::LogContext & lc) override;
+
+   void expandDone() override;
+
+   void fail() override;
+
+   void requeueInToExpandQueue(log::LogContext &lc) override;
+
+   void setExpandStartedAndChangeStatus() override;
+
+   void fillLastExpandedFSeqAndTotalStatsFile(uint64_t &fSeq, TotalStatsFiles &totalStatsFiles) override;
+
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.cpp b/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0e9495f5810b60ef6820ae5a4fc212d97940a07
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.cpp
@@ -0,0 +1,34 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RepackRequestPromotionStatistics.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RepackRequestPromotionStatistics::RepackRequestPromotionStatistics()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+SchedulerDatabase::RepackRequestStatistics::PromotionToToExpandResult PostgresSchedDB::RepackRequestPromotionStatistics::promotePendingRequestsForExpansion(size_t requestCount,
+      log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.hpp b/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d4a57d70699a018bfbccdb1866ab096b7398fd62
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RepackRequestPromotionStatistics.hpp
@@ -0,0 +1,36 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+
+#include <cstddef>
+
+namespace cta {
+
+class PostgresSchedDB::RepackRequestPromotionStatistics : public SchedulerDatabase::RepackRequestStatistics {
+ public:
+
+   RepackRequestPromotionStatistics();
+
+   PromotionToToExpandResult promotePendingRequestsForExpansion(size_t requestCount,
+      log::LogContext &lc) override;
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveJob.cpp b/scheduler/PostgresSchedDB/RetrieveJob.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0629259ac2406a8e1803f07407d5bf58bb956f8e
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveJob.cpp
@@ -0,0 +1,53 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RetrieveJob.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RetrieveJob::RetrieveJob()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJob::asyncSetSuccessful()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJob::failTransfer(const std::string &failureReason, log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJob::failReport(const std::string &failureReason, log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJob::abort(const std::string &abortReason, log::LogContext &lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJob::fail()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveJob.hpp b/scheduler/PostgresSchedDB/RetrieveJob.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8cefb8c3c8b9e88d0de97f767801cf0e5876eeae
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveJob.hpp
@@ -0,0 +1,43 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+
+#include <string>
+
+namespace cta {
+
+class PostgresSchedDB::RetrieveJob : public SchedulerDatabase::RetrieveJob {
+ public:
+
+   RetrieveJob();
+
+   void asyncSetSuccessful() override;
+
+   void failTransfer(const std::string &failureReason, log::LogContext &lc) override;
+
+   void failReport(const std::string &failureReason, log::LogContext &lc) override;
+
+   void abort(const std::string &abortReason, log::LogContext &lc) override;
+
+   void fail() override;
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveJobQueueItor.cpp b/scheduler/PostgresSchedDB/RetrieveJobQueueItor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..37c69c743362b5df6db042a68841b37387f4c3af
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveJobQueueItor.cpp
@@ -0,0 +1,48 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RetrieveJobQueueItor.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RetrieveJobQueueItor::RetrieveJobQueueItor()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const std::string &PostgresSchedDB::RetrieveJobQueueItor::qid() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+bool PostgresSchedDB::RetrieveJobQueueItor::end() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveJobQueueItor::operator++()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const common::dataStructures::RetrieveJob &PostgresSchedDB::RetrieveJobQueueItor::operator*() const
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveJobQueueItor.hpp b/scheduler/PostgresSchedDB/RetrieveJobQueueItor.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f381910d3caeaf7059b00db73caf1e11f5f0502c
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveJobQueueItor.hpp
@@ -0,0 +1,42 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/dataStructures/RetrieveJob.hpp"
+
+#include <string>
+
+namespace cta {
+
+class PostgresSchedDB::RetrieveJobQueueItor : public SchedulerDatabase::IRetrieveJobQueueItor {
+ public:
+
+   RetrieveJobQueueItor();
+
+   const std::string &qid() const override;
+
+   bool end() const override;
+
+   void operator++() override;
+
+   const common::dataStructures::RetrieveJob &operator*() const override;
+
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveMount.cpp b/scheduler/PostgresSchedDB/RetrieveMount.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..613e9cabd171dfacb19bc9348902816be093ad32
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveMount.cpp
@@ -0,0 +1,83 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "RetrieveMount.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::RetrieveMount::RetrieveMount()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+const SchedulerDatabase::RetrieveMount::MountInfo & PostgresSchedDB::RetrieveMount::getMountInfo()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> PostgresSchedDB::RetrieveMount::getNextJobBatch(uint64_t filesRequested,
+     uint64_t bytesRequested, log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+bool PostgresSchedDB::RetrieveMount::reserveDiskSpace(const cta::DiskSpaceReservationRequest &request,
+      const std::string &externalFreeDiskSpaceScript, log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+bool PostgresSchedDB::RetrieveMount::testReserveDiskSpace(const cta::DiskSpaceReservationRequest &request,
+      const std::string &externalFreeDiskSpaceScript, log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::requeueJobBatch(std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>>& jobBatch,
+      log::LogContext& logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::setDriveStatus(common::dataStructures::DriveStatus status, common::dataStructures::MountType mountType,
+                                time_t completionTime, const std::optional<std::string> & reason)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::flushAsyncSuccessReports(std::list<SchedulerDatabase::RetrieveJob *> & jobsBatch, log::LogContext & lc)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::addDiskSystemToSkip(const DiskSystemToSkip &diskSystemToSkip)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+void PostgresSchedDB::RetrieveMount::putQueueToSleep(const std::string &diskSystemName, const uint64_t sleepTime, log::LogContext &logContext)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/RetrieveMount.hpp b/scheduler/PostgresSchedDB/RetrieveMount.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c00e37bd8f2c52ec0afe8452a0762a2b6d73027
--- /dev/null
+++ b/scheduler/PostgresSchedDB/RetrieveMount.hpp
@@ -0,0 +1,66 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/log/LogContext.hpp"
+#include "common/dataStructures/DriveState.hpp"
+#include "common/dataStructures/MountType.hpp"
+#include "common/dataStructures/DiskSpaceReservationRequest.hpp"
+
+#include <list>
+#include <memory>
+#include <optional>
+#include <string>
+#include <cstdint>
+#include <ctime>
+
+namespace cta {
+
+class PostgresSchedDB::RetrieveMount : public SchedulerDatabase::RetrieveMount {
+ public:
+
+   RetrieveMount();
+
+   const MountInfo & getMountInfo() override;
+
+   std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>> getNextJobBatch(uint64_t filesRequested,
+     uint64_t bytesRequested, log::LogContext& logContext) override;
+
+   bool reserveDiskSpace(const cta::DiskSpaceReservationRequest &request,
+      const std::string &externalFreeDiskSpaceScript, log::LogContext& logContext) override;
+
+   bool testReserveDiskSpace(const cta::DiskSpaceReservationRequest &request,
+      const std::string &externalFreeDiskSpaceScript, log::LogContext& logContext) override;
+
+   void requeueJobBatch(std::list<std::unique_ptr<SchedulerDatabase::RetrieveJob>>& jobBatch,
+      log::LogContext& logContext) override;
+
+   void setDriveStatus(common::dataStructures::DriveStatus status, common::dataStructures::MountType mountType,
+                                time_t completionTime, const std::optional<std::string> & reason = std::nullopt) override;
+
+   void setTapeSessionStats(const castor::tape::tapeserver::daemon::TapeSessionStats &stats) override;
+
+   void flushAsyncSuccessReports(std::list<SchedulerDatabase::RetrieveJob *> & jobsBatch, log::LogContext & lc) override;
+
+   void addDiskSystemToSkip(const DiskSystemToSkip &diskSystemToSkip) override;
+
+   void putQueueToSleep(const std::string &diskSystemName, const uint64_t sleepTime, log::LogContext &logContext) override;
+};
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/TapeMountDecisionInfo.cpp b/scheduler/PostgresSchedDB/TapeMountDecisionInfo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e50ccea9be289387d5327ad91bddc656e797025
--- /dev/null
+++ b/scheduler/PostgresSchedDB/TapeMountDecisionInfo.cpp
@@ -0,0 +1,53 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#include "TapeMountDecisionInfo.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta {
+
+PostgresSchedDB::TapeMountDecisionInfo::TapeMountDecisionInfo()
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::ArchiveMount> PostgresSchedDB::TapeMountDecisionInfo::createArchiveMount(
+      common::dataStructures::MountType mountType,
+      const catalogue::TapeForWriting & tape, const std::string& driveName,
+      const std::string & logicalLibrary, const std::string & hostName,
+      const std::string& vo, const std::string& mediaType,
+      const std::string& vendor,
+      const uint64_t capacityInBytes,
+      const std::optional<std::string> &activity,
+      cta::common::dataStructures::Label::Format labelFormat)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+std::unique_ptr<SchedulerDatabase::RetrieveMount> PostgresSchedDB::TapeMountDecisionInfo::createRetrieveMount(const std::string & vid,
+      const std::string & tapePool, const std::string& driveName,
+      const std::string& logicalLibrary, const std::string& hostName,
+      const std::string& vo, const std::string& mediaType,
+      const std::string& vendor,
+      const uint64_t capacityInBytes,
+      const std::optional<std::string> &activity,
+      cta::common::dataStructures::Label::Format labelFormat)
+{
+   throw cta::exception::Exception("Not implemented");
+}
+
+} //namespace cta
diff --git a/scheduler/PostgresSchedDB/TapeMountDecisionInfo.hpp b/scheduler/PostgresSchedDB/TapeMountDecisionInfo.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..16329c2a62a12450ac92163a941693942dc6acb8
--- /dev/null
+++ b/scheduler/PostgresSchedDB/TapeMountDecisionInfo.hpp
@@ -0,0 +1,57 @@
+/*
+ * @project      The CERN Tape Archive (CTA)
+ * @copyright    Copyright © 2022 CERN
+ * @license      This program is free software, distributed under the terms of the GNU General Public
+ *               Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". You can
+ *               redistribute it and/or modify it under the terms of the GPL Version 3, or (at your
+ *               option) any later version.
+ *
+ *               This program is distributed in the hope that it will be useful, but WITHOUT ANY
+ *               WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+ *               PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ *               In applying this licence, CERN does not waive the privileges and immunities
+ *               granted to it by virtue of its status as an Intergovernmental Organization or
+ *               submit itself to any jurisdiction.
+ */
+
+#pragma once
+
+#include "PostgresSchedDB.hpp"
+#include "common/dataStructures/LabelFormat.hpp"
+#include "common/dataStructures/MountType.hpp"
+
+#include <memory>
+#include <optional>
+#include <string>
+#include <cstdint>
+#include <time.h>
+
+namespace cta {
+
+class PostgresSchedDB::TapeMountDecisionInfo : public SchedulerDatabase::TapeMountDecisionInfo {
+ public:
+
+   TapeMountDecisionInfo();
+
+   std::unique_ptr<SchedulerDatabase::ArchiveMount> createArchiveMount(
+      common::dataStructures::MountType mountType,
+      const catalogue::TapeForWriting & tape, const std::string& driveName,
+      const std::string & logicalLibrary, const std::string & hostName,
+      const std::string& vo, const std::string& mediaType,
+      const std::string& vendor,
+      const uint64_t capacityInBytes,
+      const std::optional<std::string> &activity,
+      cta::common::dataStructures::Label::Format labelFormat) override;
+
+   std::unique_ptr<SchedulerDatabase::RetrieveMount> createRetrieveMount(const std::string & vid,
+      const std::string & tapePool, const std::string& driveName,
+      const std::string& logicalLibrary, const std::string& hostName,
+      const std::string& vo, const std::string& mediaType,
+      const std::string& vendor,
+      const uint64_t capacityInBytes,
+      const std::optional<std::string> &activity,
+      cta::common::dataStructures::Label::Format labelFormat) override;
+};
+
+} //namespace cta
diff --git a/scheduler/RepackRequestManager.cpp b/scheduler/RepackRequestManager.cpp
index 110fc200cb8d83f02ebd9d671ccbad015b06f15c..707754f282165ba385c8168e02159ef206a18367 100644
--- a/scheduler/RepackRequestManager.cpp
+++ b/scheduler/RepackRequestManager.cpp
@@ -16,7 +16,6 @@
  */
 
 #include "common/exception/NoSuchObject.hpp"
-#include "OStoreDB/OStoreDB.hpp"
 #include "RepackReportThread.hpp"
 #include "RepackRequestManager.hpp"
 #include "Scheduler.hpp"
diff --git a/scheduler/RetrieveMount.cpp b/scheduler/RetrieveMount.cpp
index 02b1221d20ea1453bec7c2e115fa5d84b9490ef8..881593342e0364a390968c4b99e9773bbf51da91 100644
--- a/scheduler/RetrieveMount.cpp
+++ b/scheduler/RetrieveMount.cpp
@@ -19,7 +19,6 @@
 #include "common/log/TimingList.hpp"
 #include "common/Timer.hpp"
 #include "disk/DiskSystem.hpp"
-#include "objectstore/Backend.hpp"
 #include "scheduler/RetrieveMount.hpp"
 
 #include <numeric>
diff --git a/scheduler/Scheduler.cpp b/scheduler/Scheduler.cpp
index 11521e0d77d2d7567a18f9cce3422bce80259b3a..8e16af5800ffb1a28d6537bbcd5cddbced935aaa 100644
--- a/scheduler/Scheduler.cpp
+++ b/scheduler/Scheduler.cpp
@@ -25,8 +25,6 @@
 #include "disk/DiskFileImplementations.hpp"
 #include "disk/RadosStriperPool.hpp"
 #include "DiskReportRunner.hpp"
-#include "objectstore/RepackRequest.hpp"
-#include "OStoreDB/OStoreDB.hpp"
 #include "RetrieveMount.hpp"
 #include "RetrieveRequestDump.hpp"
 #include "Scheduler.hpp"
@@ -730,7 +728,7 @@ Scheduler::RepackReportBatch Scheduler::getNextSuccessfulRetrieveRepackReportBat
   Scheduler::RepackReportBatch ret;
   try{
     ret.m_DbBatch.reset(m_db.getNextSuccessfulRetrieveRepackReportBatch(lc).release());
-  } catch (OStoreDB::NoRepackReportBatchFound &){
+  } catch (SchedulerDatabase::NoRepackReportBatchFound &){
     ret.m_DbBatch = nullptr;
   }
   return ret;
@@ -743,7 +741,7 @@ Scheduler::RepackReportBatch Scheduler::getNextFailedRetrieveRepackReportBatch(l
   Scheduler::RepackReportBatch ret;
   try{
     ret.m_DbBatch.reset(m_db.getNextFailedRetrieveRepackReportBatch(lc).release());
-  } catch (OStoreDB::NoRepackReportBatchFound &){
+  } catch (SchedulerDatabase::NoRepackReportBatchFound &){
     ret.m_DbBatch = nullptr;
   }
   return ret;
@@ -756,7 +754,7 @@ Scheduler::RepackReportBatch Scheduler::getNextSuccessfulArchiveRepackReportBatc
   Scheduler::RepackReportBatch ret;
   try{
     ret.m_DbBatch.reset(m_db.getNextSuccessfulArchiveRepackReportBatch(lc).release());
-  } catch (OStoreDB::NoRepackReportBatchFound &){
+  } catch (SchedulerDatabase::NoRepackReportBatchFound &){
     ret.m_DbBatch = nullptr;
   }
   return ret;
@@ -769,7 +767,7 @@ Scheduler::RepackReportBatch Scheduler::getNextFailedArchiveRepackReportBatch(lo
   Scheduler::RepackReportBatch ret;
   try{
     ret.m_DbBatch.reset(m_db.getNextFailedArchiveRepackReportBatch(lc).release());
-  } catch (OStoreDB::NoRepackReportBatchFound &){
+  } catch (SchedulerDatabase::NoRepackReportBatchFound &){
     ret.m_DbBatch = nullptr;
   }
   return ret;
diff --git a/scheduler/Scheduler.hpp b/scheduler/Scheduler.hpp
index c273d178425ff5cb6d8dc408be42541d7e9493c3..80ca96dccc279422f52dbd91f7e34b14d07b926a 100644
--- a/scheduler/Scheduler.hpp
+++ b/scheduler/Scheduler.hpp
@@ -46,8 +46,6 @@
 #include "scheduler/TapeMount.hpp"
 #include "scheduler/SchedulerDatabase.hpp"
 #include "scheduler/RepackRequest.hpp"
-#include "objectstore/RetrieveRequest.hpp"
-#include "objectstore/ArchiveRequest.hpp"
 
 #include "tapeserver/daemon/TapedConfiguration.hpp"
 
diff --git a/scheduler/SchedulerDatabase.hpp b/scheduler/SchedulerDatabase.hpp
index 0eb68b44370476019615cd9dbb5841d993b22d93..8addbc2a6b568eebbd1df09b7258dfd8982540ae 100644
--- a/scheduler/SchedulerDatabase.hpp
+++ b/scheduler/SchedulerDatabase.hpp
@@ -96,6 +96,7 @@ namespace cta {
 class SchedulerDatabase {
  public:
   CTA_GENERATE_EXCEPTION_CLASS(DriveAlreadyExistsException);
+  CTA_GENERATE_EXCEPTION_CLASS(NoRepackReportBatchFound);
   /**
    * Destructor.
    */
diff --git a/scheduler/SchedulerDatabaseFactory.hpp b/scheduler/SchedulerDatabaseFactory.hpp
index 0e21196b5574b9c148d2bf6733f15173836eb170..d47f3919a5e28bdc072f53d7703d04115fb99e32 100644
--- a/scheduler/SchedulerDatabaseFactory.hpp
+++ b/scheduler/SchedulerDatabaseFactory.hpp
@@ -18,13 +18,13 @@
 #pragma once
 
 #include <memory>
+#include "scheduler/SchedulerDatabase.hpp"
+#include "common/dataStructures/SecurityIdentity.hpp"
+#include "scheduler/RetrieveRequestDump.hpp"
 #include "catalogue/Catalogue.hpp"
 
 namespace cta {
 
-// Forward declarations
-class SchedulerDatabase;
-
 /**
  * Asbtract class specifying the interface to a factory of scheduler database
  * objects.
@@ -46,4 +46,195 @@ public:
 
 }; // class SchedulerDatabaseFactory
 
+/**
+ * Base of a wrapper class. This follows the decorator structural pattern. Wrappers are used by
+ * SchedulerDatabase specialisations, in conjunction with the factory class above, to provide
+ * test classes for unit tests.
+ */
+class SchedulerDatabaseDecorator : public SchedulerDatabase {
+public:
+
+  SchedulerDatabaseDecorator(SchedulerDatabase &db) : m_SchedDB(&db) { }
+
+  void waitSubthreadsComplete() override {
+    m_SchedDB->waitSubthreadsComplete();
+  }
+
+  void ping() override {
+    m_SchedDB->ping();
+  }
+
+  std::string queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest& request, const cta::common::dataStructures::ArchiveFileQueueCriteriaAndFileId& criteria, log::LogContext &logContext) override {
+    return m_SchedDB->queueArchive(instanceName, request, criteria, logContext);
+  }
+
+  void deleteRetrieveRequest(const common::dataStructures::SecurityIdentity& cliIdentity, const std::string& remoteFile) override {
+    m_SchedDB->deleteRetrieveRequest(cliIdentity, remoteFile);
+  }
+
+  std::list<cta::common::dataStructures::RetrieveJob> getRetrieveJobs(const std::string& tapePoolName) const override {
+    return m_SchedDB->getRetrieveJobs(tapePoolName);
+  }
+
+  std::map<std::string, std::list<common::dataStructures::RetrieveJob> > getRetrieveJobs() const override {
+    return m_SchedDB->getRetrieveJobs();
+  }
+
+  std::map<std::string, std::list<common::dataStructures::ArchiveJob> > getArchiveJobs() const override {
+    return m_SchedDB->getArchiveJobs();
+  }
+
+  std::list<cta::common::dataStructures::ArchiveJob> getArchiveJobs(const std::string& tapePoolName) const override {
+    return m_SchedDB->getArchiveJobs(tapePoolName);
+  }
+
+  std::unique_ptr<IArchiveJobQueueItor> getArchiveJobQueueItor(const std::string &tapePoolName,
+    common::dataStructures::JobQueueType queueType) const override {
+    return m_SchedDB->getArchiveJobQueueItor(tapePoolName, queueType);
+  }
+
+  std::unique_ptr<IRetrieveJobQueueItor> getRetrieveJobQueueItor(const std::string &vid,
+    common::dataStructures::JobQueueType queueType) const override {
+    return m_SchedDB->getRetrieveJobQueueItor(vid, queueType);
+  }
+
+  std::map<std::string, std::list<RetrieveRequestDump> > getRetrieveRequests() const override {
+    return m_SchedDB->getRetrieveRequests();
+  }
+
+  std::list<std::unique_ptr<ArchiveJob>> getNextArchiveJobsToReportBatch(uint64_t filesRequested, log::LogContext &lc) override {
+    return m_SchedDB->getNextArchiveJobsToReportBatch(filesRequested, lc);
+  }
+
+  JobsFailedSummary getArchiveJobsFailedSummary(log::LogContext &lc) override {
+    return m_SchedDB->getArchiveJobsFailedSummary(lc);
+  }
+
+  std::list<std::unique_ptr<RetrieveJob>> getNextRetrieveJobsToReportBatch(uint64_t filesRequested, log::LogContext &lc) override {
+    return m_SchedDB->getNextRetrieveJobsToReportBatch(filesRequested, lc);
+  }
+
+  std::list<std::unique_ptr<RetrieveJob>> getNextRetrieveJobsFailedBatch(uint64_t filesRequested, log::LogContext &lc) override {
+    return m_SchedDB->getNextRetrieveJobsFailedBatch(filesRequested, lc);
+  }
+
+  std::unique_ptr<RepackReportBatch> getNextRepackReportBatch(log::LogContext& lc) override {
+    return m_SchedDB->getNextRepackReportBatch(lc);
+  }
+
+  std::unique_ptr<RepackReportBatch> getNextSuccessfulRetrieveRepackReportBatch(log::LogContext& lc) override {
+    return m_SchedDB->getNextSuccessfulRetrieveRepackReportBatch(lc);
+  }
+
+  std::unique_ptr<RepackReportBatch> getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc) override {
+    return m_SchedDB->getNextSuccessfulArchiveRepackReportBatch(lc);
+  }
+
+  std::unique_ptr<RepackReportBatch> getNextFailedRetrieveRepackReportBatch(log::LogContext& lc) override {
+    return m_SchedDB->getNextFailedRetrieveRepackReportBatch(lc);
+  }
+
+  std::unique_ptr<RepackReportBatch> getNextFailedArchiveRepackReportBatch(log::LogContext& lc) override {
+    return m_SchedDB->getNextFailedArchiveRepackReportBatch(lc);
+  }
+
+  std::list<std::unique_ptr<SchedulerDatabase::RepackReportBatch>> getRepackReportBatches(log::LogContext &lc) override {
+    return m_SchedDB->getRepackReportBatches(lc);
+  }
+
+  JobsFailedSummary getRetrieveJobsFailedSummary(log::LogContext &lc) override {
+    return m_SchedDB->getRetrieveJobsFailedSummary(lc);
+  }
+
+  void setArchiveJobBatchReported(std::list<cta::SchedulerDatabase::ArchiveJob*>& jobsBatch, log::TimingList & timingList,
+      utils::Timer & t, log::LogContext& lc) override {
+    m_SchedDB->setArchiveJobBatchReported(jobsBatch, timingList, t, lc);
+  }
+
+  void setRetrieveJobBatchReportedToUser(std::list<cta::SchedulerDatabase::RetrieveJob*>& jobsBatch, log::TimingList & timingList,
+      utils::Timer & t, log::LogContext& lc) override {
+    m_SchedDB->setRetrieveJobBatchReportedToUser(jobsBatch, timingList, t, lc);
+  }
+
+  std::list<RetrieveRequestDump> getRetrieveRequestsByVid(const std::string& vid) const override {
+    return m_SchedDB->getRetrieveRequestsByVid(vid);
+  }
+
+  std::list<RetrieveRequestDump> getRetrieveRequestsByRequester(const std::string& requester) const override {
+    return m_SchedDB->getRetrieveRequestsByRequester(requester);
+  }
+
+  std::unique_ptr<TapeMountDecisionInfo> getMountInfo(log::LogContext& logContext) override {
+    return m_SchedDB->getMountInfo(logContext);
+  }
+
+  void trimEmptyQueues(log::LogContext& lc) override {
+    m_SchedDB->trimEmptyQueues(lc);
+  }
+
+  std::unique_ptr<TapeMountDecisionInfo> getMountInfoNoLock(PurposeGetMountInfo purpose, log::LogContext& logContext) override {
+    return m_SchedDB->getMountInfoNoLock(purpose,logContext);
+  }
+
+  std::list<RetrieveQueueStatistics> getRetrieveQueueStatistics(const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria,
+          const std::set<std::string> & vidsToConsider) override {
+    return m_SchedDB->getRetrieveQueueStatistics(criteria, vidsToConsider);
+  }
+
+  SchedulerDatabase::RetrieveRequestInfo queueRetrieve(common::dataStructures::RetrieveRequest& rqst,
+    const common::dataStructures::RetrieveFileQueueCriteria &criteria, const std::optional<std::string> diskSystemName,
+    log::LogContext &logContext) override {
+    return m_SchedDB->queueRetrieve(rqst, criteria, diskSystemName, logContext);
+  }
+
+  void cancelArchive(const common::dataStructures::DeleteArchiveRequest& request, log::LogContext & lc) override {
+    m_SchedDB->cancelArchive(request,lc);
+  }
+
+  void cancelRetrieve(const std::string& instanceName, const cta::common::dataStructures::CancelRetrieveRequest& rqst,
+    log::LogContext& lc) override {
+    m_SchedDB->cancelRetrieve(instanceName, rqst, lc);
+  }
+
+  void deleteFailed(const std::string &objectId, log::LogContext & lc) override {
+    m_SchedDB->deleteFailed(objectId, lc);
+  }
+
+  std::string queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext& lc) override {
+    return m_SchedDB->queueRepack(repackRequest, lc);
+  }
+
+  std::list<common::dataStructures::RepackInfo> getRepackInfo() override {
+    return m_SchedDB->getRepackInfo();
+  }
+
+  common::dataStructures::RepackInfo getRepackInfo(const std::string& vid) override {
+    return m_SchedDB->getRepackInfo(vid);
+  }
+
+  void cancelRepack(const std::string& vid, log::LogContext & lc) override {
+    m_SchedDB->cancelRepack(vid, lc);
+  }
+
+  std::unique_ptr<RepackRequestStatistics> getRepackStatistics() override {
+    return m_SchedDB->getRepackStatistics();
+  }
+
+  std::unique_ptr<RepackRequestStatistics> getRepackStatisticsNoLock() override {
+    return m_SchedDB->getRepackStatisticsNoLock();
+  }
+
+  std::unique_ptr<RepackRequest> getNextRepackJobToExpand() override {
+    return m_SchedDB->getNextRepackJobToExpand();
+  }
+
+  void requeueRetrieveJobs(std::list<cta::SchedulerDatabase::RetrieveJob *> &jobs, log::LogContext& logContext) override {
+    return m_SchedDB->requeueRetrieveJobs(jobs, logContext);
+  }
+
+protected:
+  cta::SchedulerDatabase *m_SchedDB;
+
+}; // class SchedulerDatabaseDecorator
+
 } // namespace cta
diff --git a/scheduler/SchedulerDatabaseTest.cpp b/scheduler/SchedulerDatabaseTest.cpp
index 3c992b18de2d1ea9e4acac9a6638c49d99645946..37fd6df6ed70d0030c0257c6d81fab9d7ce7ce3a 100644
--- a/scheduler/SchedulerDatabaseTest.cpp
+++ b/scheduler/SchedulerDatabaseTest.cpp
@@ -15,15 +15,21 @@
  *               submit itself to any jurisdiction.
  */
 
-#include "objectstore/BackendRadosTestSwitch.hpp"
 #include "tests/TestsCompileTimeSwitches.hpp"
 #include "scheduler/SchedulerDatabase.hpp"
 #include "scheduler/SchedulerDatabaseFactory.hpp"
 #include "common/dataStructures/SecurityIdentity.hpp"
 #include "catalogue/InMemoryCatalogue.hpp"
+#include "common/log/DummyLogger.hpp"
+
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp"
+#else
+#include "objectstore/BackendRadosTestSwitch.hpp"
 #include "OStoreDB/OStoreDBFactory.hpp"
 #include "objectstore/BackendRados.hpp"
-#include "common/log/DummyLogger.hpp"
+#endif
+
 #ifdef STDOUT_LOGGING
 #include "common/log/StdoutLogger.hpp"
 #endif
@@ -32,6 +38,7 @@
 #include <gtest/gtest.h>
 #include <algorithm>
 #include <uuid/uuid.h>
+#include <future>
 
 namespace unitTests {
 
@@ -904,6 +911,11 @@ INSTANTIATE_TEST_CASE_P(MockSchedulerDatabaseTest, SchedulerDatabaseTest,
   ::testing::Values(SchedulerDatabaseTestParam(mockDbFactory)));
 #endif
 
+#ifdef CTA_PGSCHED
+static cta::PostgresSchedDBFactory PostgresSchedDBFactoryStatic;
+INSTANTIATE_TEST_CASE_P(PostgresSchedDBSchedulerDatabaseTest, SchedulerDatabaseTest,
+  ::testing::Values(SchedulerDatabaseTestParam(PostgresSchedDBFactoryStatic)));
+#else
 #define TEST_VFS
 #ifdef TEST_VFS
 static cta::OStoreDBFactory<cta::objectstore::BackendVFS> OStoreDBFactoryVFS;
@@ -918,5 +930,6 @@ static cta::OStoreDBFactory<cta::objectstore::BackendRados> OStoreDBFactoryRados
 INSTANTIATE_TEST_CASE_P(OStoreSchedulerDatabaseTestRados, SchedulerDatabaseTest,
   ::testing::Values(SchedulerDatabaseTestParam(OStoreDBFactoryRados)));
 #endif
+#endif
 
 } // namespace unitTests
diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
index 941c1c3fd9204372866e8d7aff363fc5a9abb22c..ce1d6dec90966e9dee28390474d439a4d4afe102 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSessionTest.cpp
@@ -41,19 +41,21 @@
 #include "common/threading/Thread.hpp"
 #include "common/utils/utils.hpp"
 #include "mediachanger/MediaChangerFacade.hpp"
-//#include "smc_struct.h"
-//#include "scheduler/DummyScheduler.hpp"
-#include "scheduler/OStoreDB/OStoreDBFactory.hpp"
 #include "scheduler/MountType.hpp"
-//#include "nameserver/NameServer.hpp"
 #include "scheduler/Scheduler.hpp"
 #include "scheduler/testingMocks/MockRetrieveMount.hpp"
 #include "scheduler/testingMocks/MockArchiveJob.hpp"
 #include "scheduler/testingMocks/MockArchiveMount.hpp"
 #include "tests/TempFile.hpp"
-#include "objectstore/BackendRadosTestSwitch.hpp"
 #include "CleanerSession.hpp"
 
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBFactory.hpp"
+#else
+#include "scheduler/OStoreDB/OStoreDBFactory.hpp"
+#include "objectstore/BackendRadosTestSwitch.hpp"
+#endif
+
 #ifdef STDOUT_LOGGING
 #include "common/log/StdoutLogger.hpp"
 #else
@@ -3409,10 +3411,21 @@ TEST_P(DataTransferSessionTest, CleanerSessionFailsShouldPutTheDriveDown) {
 #undef TEST_MOCK_DB
 #ifdef TEST_MOCK_DB
 static cta::MockSchedulerDatabaseFactory mockDbFactory;
+#ifdef CTA_PGSCHED
+INSTANTIATE_TEST_CASE_P(MockSchedulerTest, GenericSchedulerTest,
+  ::testing::Values(SchedulerTestParam(mockDbFactory)));
+#else
 INSTANTIATE_TEST_CASE_P(MockSchedulerTest, SchedulerTest,
   ::testing::Values(SchedulerTestParam(mockDbFactory)));
 #endif
+#endif
 
+#ifdef CTA_PGSCHED
+static cta::PostgresSchedDBFactory PostgresSchedDBFactoryStatic;
+
+INSTANTIATE_TEST_CASE_P(PostgresSchedDBPlusMockSchedulerTest, DataTransferSessionTest,
+                        ::testing::Values(DataTransferSessionTestParam(PostgresSchedDBFactoryStatic)));
+#else
 #define TEST_VFS
 #ifdef TEST_VFS
 static cta::OStoreDBFactory<cta::objectstore::BackendVFS> OStoreDBFactoryVFS;
@@ -3427,5 +3440,6 @@ static cta::OStoreDBFactory<cta::objectstore::BackendRados> OStoreDBFactoryRados
 INSTANTIATE_TEST_CASE_P(OStoreDBPlusMockSchedulerTestRados, DataTransferSessionTest,
   ::testing::Values(DataTransferSessionTestParam(OStoreDBFactoryRados)));
 #endif
+#endif
 
 } // namespace unitTest
diff --git a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
index 043d9e2227fceda684d7073b455609a0301e3ad7..641419945fbf1344c2909b2a254a5bea366e4d09 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp
@@ -25,7 +25,6 @@
 #include "catalogue/TapeFileWritten.hpp"
 #include "common/exception/NoSuchObject.hpp"
 #include "common/utils/utils.hpp"
-#include "objectstore/Backend.hpp"
 
 using cta::log::LogContext;
 using cta::log::Param;
diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
index 142c22eab641653fe9333e468a6e81f288478f09..6a389a24a9636c11f9612f2457ffb8ed9663ad72 100644
--- a/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
+++ b/tapeserver/castor/tape/tapeserver/daemon/RecallReportPacker.cpp
@@ -20,7 +20,6 @@
 #include "common/exception/NoSuchObject.hpp"
 #include "common/log/Logger.hpp"
 #include "common/utils/utils.hpp"
-#include "objectstore/Backend.hpp"
 
 #include <signal.h>
 #include <iostream>
diff --git a/tapeserver/daemon/DriveHandler.cpp b/tapeserver/daemon/DriveHandler.cpp
index 7d945b20941f4e3d26499eb7a6a92997ec699624..7a57ee79c01099b9cadf4bc5e6fc92db95615aca 100644
--- a/tapeserver/daemon/DriveHandler.cpp
+++ b/tapeserver/daemon/DriveHandler.cpp
@@ -22,8 +22,12 @@
 #include "DriveHandler.hpp"
 #include "DriveHandlerProxy.hpp"
 #include "rdbms/Login.hpp"
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp"
+#else
 #include "scheduler/OStoreDB/OStoreDBInit.hpp"
 #include "scheduler/OStoreDB/OStoreDBWithAgent.hpp"
+#endif
 #include "tapeserver/castor/tape/tapeserver/daemon/CleanerSession.hpp"
 #include "tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.hpp"
 #include "tapeserver/castor/tape/tapeserver/daemon/Session.hpp"
diff --git a/tapeserver/daemon/MaintenanceHandler.cpp b/tapeserver/daemon/MaintenanceHandler.cpp
index f49f0468f376cf1c310ce6819178cde9aab02ee6..04ee94f12358cb4cc34609ec2012fab4105ddce6 100644
--- a/tapeserver/daemon/MaintenanceHandler.cpp
+++ b/tapeserver/daemon/MaintenanceHandler.cpp
@@ -20,7 +20,11 @@
 #include "catalogue/Catalogue.hpp"
 #include "catalogue/CatalogueFactoryFactory.hpp"
 #include "scheduler/Scheduler.hpp"
+#ifdef CTA_PGSCHED
+#include "scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp"
+#else
 #include "scheduler/OStoreDB/OStoreDBInit.hpp"
+#endif
 #include "rdbms/Login.hpp"
 #include "scheduler/DiskReportRunner.hpp"
 #include "scheduler/RepackRequestManager.hpp"
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index b714fb1299d2310a3a061499f5fc207e72b3d6cb..6fa2ddfe202e479f55993b2893c86438efed02f2 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -22,7 +22,7 @@ add_executable(cta-unitTests
   unit_tests.cpp
   ${GMOCK_SRC})
 
-target_link_libraries(cta-unitTests
+set (CTA_UNIT_TEST_LIBS
   ctacataloguecmdlineunittests
   ctacommon
   ctacommonunittests
@@ -32,8 +32,6 @@ target_link_libraries(cta-unitTests
   ctainmemorycatalogueunittests
   ctainmemoryconnunittests
   ctainmemorystmtunittests
-  ctaobjectstore
-  ctaobjectstoreunittests
   ctardbmsunittests
   ctardbmswrapperunittests
   ctaschedulerunittests
@@ -45,8 +43,15 @@ target_link_libraries(cta-unitTests
   ctatapelabelunittests
   gtest
   pthread
-  ctatapeserverraounittests
-)
+  ctatapeserverraounittests)
+
+if(NOT CTA_USE_PGSCHED)
+  set (CTA_UNIT_TEST_LIBS ${CTA_UNIT_TEST_LIBS}
+    ctaobjectstore
+    ctaobjectstoreunittests)
+endif()
+
+target_link_libraries(cta-unitTests ${CTA_UNIT_TEST_LIBS})
 
 set_property (TARGET cta-unitTests APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
 if (OCCI_SUPPORT)
diff --git a/xroot_plugins/CMakeLists.txt b/xroot_plugins/CMakeLists.txt
index 47a5d823862e0ccf830e6bb269acb9aaa6a4b8b1..419df1c6cdc8dcae5f730bf78a020580608e7e6b 100644
--- a/xroot_plugins/CMakeLists.txt
+++ b/xroot_plugins/CMakeLists.txt
@@ -37,8 +37,13 @@ include_directories(${CMAKE_BINARY_DIR}/eos_cta ${PROTOBUF3_INCLUDE_DIRS})
 add_library(XrdSsiCta MODULE XrdSsiCtaServiceProvider.cpp XrdSsiCtaRequestProc.cpp XrdSsiCtaRequestMessage.cpp
                              ../cmdline/CtaAdminCmdParse.cpp
                              GrpcClient.cpp GrpcEndpoint.cpp)
-target_link_libraries(XrdSsiCta ${XROOTD_XRDSSI_LIB} XrdSsiLib XrdSsiPbEosCta ctascheduler ctacommon ctaobjectstore ctacatalogue
-                      EosCtaGrpc ${GRPC_LIBRARY} ${GRPC_GRPC++_LIBRARY})
+if(CTA_USE_PGSCHED)
+  target_link_libraries(XrdSsiCta ${XROOTD_XRDSSI_LIB} XrdSsiLib XrdSsiPbEosCta ctascheduler ctacommon ctacatalogue
+                        EosCtaGrpc ${GRPC_LIBRARY} ${GRPC_GRPC++_LIBRARY})
+else()
+  target_link_libraries(XrdSsiCta ${XROOTD_XRDSSI_LIB} XrdSsiLib XrdSsiPbEosCta ctascheduler ctacommon ctaobjectstore ctacatalogue
+                        EosCtaGrpc ${GRPC_LIBRARY} ${GRPC_GRPC++_LIBRARY})
+endif()
 set_property (TARGET XrdSsiCta APPEND PROPERTY INSTALL_RPATH ${PROTOBUF3_RPATH})
 if (OCCI_SUPPORT)
   set_property (TARGET XrdSsiCta APPEND PROPERTY INSTALL_RPATH ${ORACLE-INSTANTCLIENT_RPATH})
diff --git a/xroot_plugins/XrdSsiCtaServiceProvider.hpp b/xroot_plugins/XrdSsiCtaServiceProvider.hpp
index 3316d47ae3862af457c8fc9d261bb3a7392d869a..5384c38fbe4191d30cdfc6a61f7e2b11e053235a 100644
--- a/xroot_plugins/XrdSsiCtaServiceProvider.hpp
+++ b/xroot_plugins/XrdSsiCtaServiceProvider.hpp
@@ -24,7 +24,11 @@
 #include <xroot_plugins/Namespace.hpp>
 #include <XrdSsiPbLog.hpp>
 #include <scheduler/Scheduler.hpp>
+#ifdef CTA_PGSCHED
+#include <scheduler/PostgresSchedDB/PostgresSchedDBInit.hpp>
+#else
 #include <scheduler/OStoreDB/OStoreDBInit.hpp>
+#endif
 
 /*!
  * Global pointer to the Service Provider object.