diff --git a/catalogue/Catalogue.hpp b/catalogue/Catalogue.hpp
index e3caf2ed1cfc999cfdf530ce55b08cdfd141006c..872da4f7b521f99038e63986b938bbb6154c6297 100644
--- a/catalogue/Catalogue.hpp
+++ b/catalogue/Catalogue.hpp
@@ -175,6 +175,8 @@ public:
    * @param user The user for whom the file is to be retrieved.  This will be
    * used by the Catalogue to determine the mount policy to be used when
    * retrieving the file.
+   * @param activity The activity under which the user wants to start the retrieve.
+   * The call will fail if the activity is set and unknown.
    * @param lc The log context.
    *
    * @return The information required to queue the associated retrieve request(s).
@@ -183,6 +185,7 @@ public:
     const std::string &diskInstanceName,
     const uint64_t archiveFileId,
     const common::dataStructures::UserIdentity &user,
+    const optional<std::string> & activity,
     log::LogContext &lc) = 0;
 
   /**
@@ -481,6 +484,13 @@ public:
   virtual void modifyMountPolicyMaxDrivesAllowed(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const uint64_t maxDrivesAllowed) = 0;
   virtual void modifyMountPolicyComment(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const std::string &comment) = 0;
 
+  virtual void createActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity,
+    double weight, const std::string & comment) = 0;
+  virtual void modifyActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity,
+    double weight, const std::string & comment) = 0;
+  virtual void deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity) = 0;
+  virtual std::list<common::dataStructures::ActivitiesFairShareWeights> getActivitiesFairShareWeights() const = 0;
+  
   /**
    * Returns the specified archive files.  Please note that the list of files
    * is ordered by archive file ID.
@@ -594,5 +604,8 @@ public:
 
 }; // class Catalogue
 
+CTA_GENERATE_USER_EXCEPTION_CLASS(UserSpecifiedAnEmptyStringActivity);
+CTA_GENERATE_USER_EXCEPTION_CLASS(UserSpecifiedAnOutOfRangeActivityWeight);
+
 } // namespace catalogue
 } // namespace cta
diff --git a/catalogue/CatalogueRetryWrapper.hpp b/catalogue/CatalogueRetryWrapper.hpp
index 65d98eaf85da745baeeed07f636dc4d6bd5f9943..e627748b1ce024bd83f4541d3bff21baec3a8829 100644
--- a/catalogue/CatalogueRetryWrapper.hpp
+++ b/catalogue/CatalogueRetryWrapper.hpp
@@ -89,8 +89,8 @@ public:
     return retryOnLostConnection(m_log, [&]{return m_catalogue->tapeMountedForArchive(vid, drive);}, m_maxTriesToConnect);
   }
 
-  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string &diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity &user, log::LogContext &lc) override {
-    return retryOnLostConnection(m_log, [&]{return m_catalogue->prepareToRetrieveFile(diskInstanceName, archiveFileId, user, lc);}, m_maxTriesToConnect);
+  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->prepareToRetrieveFile(diskInstanceName, archiveFileId, user, activity, lc);}, m_maxTriesToConnect);
   }
 
   void tapeMountedForRetrieve(const std::string &vid, const std::string &drive) override {
@@ -344,6 +344,23 @@ public:
   void modifyMountPolicyComment(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const std::string &comment) override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->modifyMountPolicyComment(admin, name, comment);}, m_maxTriesToConnect);
   }
+  
+  void createActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity, double weight, const std::string & comment) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->createActivitiesFairShareWeight(admin, diskInstanceName, activity, weight, comment);}, m_maxTriesToConnect);
+  }
+  
+  void modifyActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity, double weight, const std::string & comment) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->modifyActivitiesFairShareWeight(admin, diskInstanceName, activity, weight, comment);}, m_maxTriesToConnect);
+  }
+  
+  void deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity) override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->deleteActivitiesFairShareWeight(admin, diskInstanceName, activity);}, m_maxTriesToConnect);
+  }
+  
+  std::list<common::dataStructures::ActivitiesFairShareWeights> getActivitiesFairShareWeights() const override {
+    return retryOnLostConnection(m_log, [&]{return m_catalogue->getActivitiesFairShareWeights();}, m_maxTriesToConnect);
+  }
+
 
   ArchiveFileItor getArchiveFilesItor(const TapeFileSearchCriteria &searchCriteria = TapeFileSearchCriteria()) const override {
     return retryOnLostConnection(m_log, [&]{return m_catalogue->getArchiveFilesItor(searchCriteria);}, m_maxTriesToConnect);
diff --git a/catalogue/CatalogueTest.cpp b/catalogue/CatalogueTest.cpp
index 7e98967beab39de7ede5f95c38fcf0d5b27c7309..e1989f6bd4cedeb83e617cbf61c4bc48ef41956a 100644
--- a/catalogue/CatalogueTest.cpp
+++ b/catalogue/CatalogueTest.cpp
@@ -7138,7 +7138,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   userIdentity.name = requesterName;
   userIdentity.group = "group";
   const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-    m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, dummyLc);
+    m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
 
   ASSERT_EQ(2, queueCriteria.archiveFile.tapeFiles.size());
   ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
@@ -7146,7 +7146,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId) {
   ASSERT_EQ(maxDrivesAllowed, queueCriteria.mountPolicy.maxDrivesAllowed);
 
   // Check that the diskInstanceName mismatch detection works
-  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName2, archiveFileId, userIdentity, dummyLc),
+  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName2, archiveFileId, userIdentity, cta::nullopt, dummyLc),
     exception::UserError);
 }
 
@@ -7401,7 +7401,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
 
   {
     const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, dummyLc);
+      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
 
     ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
     ASSERT_EQ(minArchiveRequestAge, queueCriteria.mountPolicy.archiveMinRequestAge);
@@ -7435,7 +7435,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
 
   {
     const common::dataStructures::RetrieveFileQueueCriteria queueCriteria =
-      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, dummyLc);
+      m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc);
 
     ASSERT_EQ(archivePriority, queueCriteria.mountPolicy.archivePriority);
     ASSERT_EQ(minArchiveRequestAge, queueCriteria.mountPolicy.archiveMinRequestAge);
@@ -7456,7 +7456,7 @@ TEST_P(cta_catalogue_CatalogueTest, prepareToRetrieveFileUsingArchiveFileId_disa
 
   m_catalogue->setTapeDisabled(m_admin, vid2, true);
 
-  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, dummyLc),
+  ASSERT_THROW(m_catalogue->prepareToRetrieveFile(diskInstanceName1, archiveFileId, userIdentity, cta::nullopt, dummyLc),
     exception::UserError);
 }
 
@@ -12151,6 +12151,87 @@ TEST_P(cta_catalogue_CatalogueTest, reclaimTape_full_lastFSeq_1_one_tape_file_su
   }
 }
 
+TEST_P(cta_catalogue_CatalogueTest, createModifyDeleteActivityWeight) {
+  using namespace cta;
+
+  const std::string diskInstanceName = "ExperimentEOS";
+  const std::string activity1 = "Reco";  
+  const std::string activity2 = "Grid";
+  const double weight1 = 0.654;
+  const double weight2 = 0.456;
+  const std::string comment = "No comment.";
+
+  m_catalogue->createActivitiesFairShareWeight(m_admin, diskInstanceName, activity1, weight1, comment);
+      
+  const auto activitiesList = m_catalogue->getActivitiesFairShareWeights();
+      
+  ASSERT_EQ(1, activitiesList.size());
+  ASSERT_EQ(1, activitiesList.front().activitiesWeights.size());
+  ASSERT_NO_THROW(activitiesList.front().activitiesWeights.at(activity1));
+  ASSERT_EQ(weight1, activitiesList.front().activitiesWeights.at(activity1));
+
+  m_catalogue->createActivitiesFairShareWeight(m_admin, diskInstanceName, activity2, weight2, comment);
+  
+  const auto activitiesList2 = m_catalogue->getActivitiesFairShareWeights();
+  
+  ASSERT_EQ(1, activitiesList2.size());
+  ASSERT_EQ(2, activitiesList2.front().activitiesWeights.size());
+  ASSERT_NO_THROW(activitiesList2.front().activitiesWeights.at(activity1));
+  ASSERT_EQ(weight1, activitiesList2.front().activitiesWeights.at(activity1));
+  ASSERT_NO_THROW(activitiesList2.front().activitiesWeights.at(activity2));
+  ASSERT_EQ(weight2, activitiesList2.front().activitiesWeights.at(activity2));
+  
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "NoSuchInstance", activity2, weight2, comment), cta::exception::UserError);
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, diskInstanceName, "NoSuchActivity", weight2, comment), cta::exception::UserError);
+  
+  ASSERT_NO_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, diskInstanceName, activity1, weight2, comment));
+  ASSERT_NO_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, diskInstanceName, activity2, weight1, comment));
+  
+  
+  const auto activitiesList3 = m_catalogue->getActivitiesFairShareWeights();
+  
+  ASSERT_EQ(1, activitiesList3.size());
+  ASSERT_EQ(2, activitiesList3.front().activitiesWeights.size());
+  ASSERT_NO_THROW(activitiesList3.front().activitiesWeights.at(activity1));
+  ASSERT_EQ(weight2, activitiesList3.front().activitiesWeights.at(activity1));
+  ASSERT_NO_THROW(activitiesList3.front().activitiesWeights.at(activity2));
+  ASSERT_EQ(weight1, activitiesList3.front().activitiesWeights.at(activity2));
+  
+  ASSERT_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, "NoSuchInstance", activity2), cta::exception::UserError);
+  ASSERT_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, diskInstanceName, "NoSuchActivity"), cta::exception::UserError);
+  
+  ASSERT_NO_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, diskInstanceName, activity1));
+  
+  const auto activitiesList4 = m_catalogue->getActivitiesFairShareWeights();
+      
+  ASSERT_EQ(1, activitiesList4.size());
+  ASSERT_EQ(1, activitiesList4.front().activitiesWeights.size());
+  ASSERT_NO_THROW(activitiesList4.front().activitiesWeights.at(activity2));
+  ASSERT_EQ(weight1, activitiesList4.front().activitiesWeights.at(activity2));
+  
+  ASSERT_NO_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, diskInstanceName, activity2));
+  
+  ASSERT_EQ(0, m_catalogue->getActivitiesFairShareWeights().size());
+}
+
+TEST_P(cta_catalogue_CatalogueTest, activitiesDataValidation) {
+  using namespace cta;
+  ASSERT_THROW(m_catalogue->createActivitiesFairShareWeight(m_admin, "", "Activity", 0.1, "No comment."), catalogue::UserSpecifiedAnEmptyStringDiskInstanceName);
+  ASSERT_THROW(m_catalogue->createActivitiesFairShareWeight(m_admin, "DiskInstance", "", 0.1, "No comment."), catalogue::UserSpecifiedAnEmptyStringActivity);
+  ASSERT_THROW(m_catalogue->createActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 0.0, "No comment."), catalogue::UserSpecifiedAnOutOfRangeActivityWeight);
+  ASSERT_THROW(m_catalogue->createActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 1.1, "No comment."), catalogue::UserSpecifiedAnOutOfRangeActivityWeight);
+  ASSERT_THROW(m_catalogue->createActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 0.1, ""), catalogue::UserSpecifiedAnEmptyStringComment);
+  
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "", "Activity", 0.1, "No comment."), catalogue::UserSpecifiedAnEmptyStringDiskInstanceName);
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "DiskInstance", "", 0.1, "No comment."), catalogue::UserSpecifiedAnEmptyStringActivity);
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 0.0, "No comment."), catalogue::UserSpecifiedAnOutOfRangeActivityWeight);
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 1.1, "No comment."), catalogue::UserSpecifiedAnOutOfRangeActivityWeight);
+  ASSERT_THROW(m_catalogue->modifyActivitiesFairShareWeight(m_admin, "DiskInstance", "Activity", 0.1, ""), catalogue::UserSpecifiedAnEmptyStringComment);
+  
+  ASSERT_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, "", "Activity"), catalogue::UserSpecifiedAnEmptyStringDiskInstanceName);
+  ASSERT_THROW(m_catalogue->deleteActivitiesFairShareWeight(m_admin, "DiskInstance", ""), catalogue::UserSpecifiedAnEmptyStringActivity);
+}
+
 TEST_P(cta_catalogue_CatalogueTest, ping) {
   using namespace cta;
 
diff --git a/catalogue/DropSchemaCmd.cpp b/catalogue/DropSchemaCmd.cpp
index a73dd2f84110d16f701dc55b8ed5b36c41bc601d..38072cc9f326a2e4111428bb2cf43ce4fb15ba5c 100644
--- a/catalogue/DropSchemaCmd.cpp
+++ b/catalogue/DropSchemaCmd.cpp
@@ -146,7 +146,8 @@ void DropSchemaCmd::dropSqliteCatalogueSchema(rdbms::Conn &conn) {
       "STORAGE_CLASS_ID",
       "TAPE_POOL",
       "LOGICAL_LIBRARY",
-      "MOUNT_POLICY"};
+      "MOUNT_POLICY",
+      "ACTIVITIES_WEIGHTS"};
     dropDatabaseTables(conn, tablesToDrop);
   } catch(exception::Exception &ex) {
     throw exception::Exception(std::string(__FUNCTION__) + " failed: " + ex.getMessage().str());
@@ -173,7 +174,8 @@ void DropSchemaCmd::dropMysqlCatalogueSchema(rdbms::Conn &conn) {
       "STORAGE_CLASS_ID",
       "TAPE_POOL",
       "LOGICAL_LIBRARY",
-      "MOUNT_POLICY"};
+      "MOUNT_POLICY",
+      "ACTIVITIES_WEIGHTS"};
     dropDatabaseTables(conn, tablesToDrop);
 
     std::list<std::string> triggersToDrop = {
@@ -237,7 +239,8 @@ void DropSchemaCmd::dropOracleCatalogueSchema(rdbms::Conn &conn) {
       "STORAGE_CLASS",
       "TAPE_POOL",
       "LOGICAL_LIBRARY",
-      "MOUNT_POLICY"
+      "MOUNT_POLICY",
+      "ACTIVITIES_WEIGHTS"
     };
 
     dropDatabaseTables(conn, tablesToDrop);
@@ -268,7 +271,8 @@ void DropSchemaCmd::dropPostgresCatalogueSchema(rdbms::Conn &conn) {
       "STORAGE_CLASS",
       "TAPE_POOL",
       "LOGICAL_LIBRARY",
-      "MOUNT_POLICY"
+      "MOUNT_POLICY",
+      "ACTIVITIES_WEIGHTS"
     };
 
     dropDatabaseTables(conn, tablesToDrop);
diff --git a/catalogue/DummyCatalogue.hpp b/catalogue/DummyCatalogue.hpp
index 5e83c60901025a45c9803006f015e233617a0311..a6980d3ca149d33bcab23a741c5a29048d08ef7b 100644
--- a/catalogue/DummyCatalogue.hpp
+++ b/catalogue/DummyCatalogue.hpp
@@ -34,6 +34,7 @@ public:
   DummyCatalogue() {}
   virtual ~DummyCatalogue() { }
 
+  void createActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity, double weight, const std::string & comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createAdminUser(const common::dataStructures::SecurityIdentity& admin, const std::string& username, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createArchiveRoute(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& storageClassName, const uint32_t copyNb, const std::string& tapePoolName, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createLogicalLibrary(const common::dataStructures::SecurityIdentity& admin, const std::string& name, const bool isDisabled, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
@@ -43,6 +44,7 @@ public:
   void createStorageClass(const common::dataStructures::SecurityIdentity& admin, const common::dataStructures::StorageClass& storageClass) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createTape(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const std::string &mediaType, const std::string &vendor, const std::string& logicalLibraryName, const std::string& tapePoolName, const uint64_t capacityInBytes, const bool disabled, const bool full, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void createTapePool(const common::dataStructures::SecurityIdentity& admin, const std::string& name, const std::string & vo, const uint64_t nbPartialTapes, const bool encryptionValue, const cta::optional<std::string> &supply, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteAdminUser(const std::string& username) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteArchiveFile(const std::string& instanceName, const uint64_t archiveFileId, log::LogContext &lc) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteArchiveRoute(const std::string& diskInstanceName, const std::string& storageClassName, const uint32_t copyNb) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
@@ -54,6 +56,7 @@ public:
   void deleteTape(const std::string& vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void deleteTapePool(const std::string& name) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void filesWrittenToTape(const std::set<TapeItemWrittenPointer>& event) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  std::list<common::dataStructures::ActivitiesFairShareWeights> getActivitiesFairShareWeights() const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   std::list<common::dataStructures::AdminUser> getAdminUsers() const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   common::dataStructures::ArchiveFile getArchiveFileById(const uint64_t id) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   ArchiveFileItor getArchiveFilesItor(const TapeFileSearchCriteria& searchCriteria) const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
@@ -72,6 +75,7 @@ public:
   common::dataStructures::VidToTapeMap getAllTapes() const override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   std::list<TapeForWriting> getTapesForWriting(const std::string& logicalLibraryName) const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   bool isAdmin(const common::dataStructures::SecurityIdentity& admin) const { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  void modifyActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity, double weight, const std::string & comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void modifyAdminUserComment(const common::dataStructures::SecurityIdentity& admin, const std::string& username, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void modifyArchiveRouteComment(const common::dataStructures::SecurityIdentity& admin, const std::string& instanceName, const std::string& storageClassName, const uint32_t copyNb, const std::string& comment) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void modifyArchiveRouteTapePoolName(const common::dataStructures::SecurityIdentity& admin, const std::string& instanceName, const std::string& storageClassName, const uint32_t copyNb, const std::string& tapePoolName) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
@@ -105,7 +109,7 @@ public:
   uint64_t checkAndGetNextArchiveFileId(const std::string &diskInstanceName, const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   common::dataStructures::ArchiveFileQueueCriteria getArchiveFileQueueCriteria(const std::string &diskInstanceName,
     const std::string &storageClassName, const common::dataStructures::UserIdentity &user) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
-  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& instanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity& user, log::LogContext &lc) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
+  common::dataStructures::RetrieveFileQueueCriteria prepareToRetrieveFile(const std::string& diskInstanceName, const uint64_t archiveFileId, const common::dataStructures::UserIdentity& user, const optional<std::string>& activity, log::LogContext& lc) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void reclaimTape(const common::dataStructures::SecurityIdentity& admin, const std::string& vid) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void setTapeDisabled(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const bool disabledValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
   void setTapeFull(const common::dataStructures::SecurityIdentity& admin, const std::string& vid, const bool fullValue) override { throw exception::Exception(std::string("In ")+__PRETTY_FUNCTION__+": not implemented"); }
diff --git a/catalogue/RdbmsCatalogue.cpp b/catalogue/RdbmsCatalogue.cpp
index fc5424be8cfd910d457a65f3fc8d045932936a58..766de6b418976d9fb2982c035c0a031e62bcd00a 100644
--- a/catalogue/RdbmsCatalogue.cpp
+++ b/catalogue/RdbmsCatalogue.cpp
@@ -69,8 +69,8 @@ RdbmsCatalogue::RdbmsCatalogue(
   m_groupMountPolicyCache(10),
   m_userMountPolicyCache(10),
   m_expectedNbArchiveRoutesCache(10),
-  m_isAdminCache(10) {
-}
+  m_isAdminCache(10),
+  m_activitiesFairShareWeights(10) {}
 
 //------------------------------------------------------------------------------
 // destructor
@@ -3974,6 +3974,223 @@ void RdbmsCatalogue::modifyMountPolicyComment(const common::dataStructures::Secu
   }
 }
 
+//------------------------------------------------------------------------------
+// createActivitiesFairShareWeight
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::createActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, 
+    const std::string& diskInstanceName, const std::string& activity, double weight, const std::string & comment) {
+  try {
+    if (diskInstanceName.empty()) {
+      throw UserSpecifiedAnEmptyStringDiskInstanceName("Cannot create activity weight because the disk instance name is"
+        " an empty string");
+    }
+    
+    if (activity.empty()) {
+      throw UserSpecifiedAnEmptyStringActivity("Cannot create activity weight because the activity name is"
+        " an empty string");
+    }
+    
+    if (weight <= 0 || weight > 1) {
+      throw UserSpecifiedAnOutOfRangeActivityWeight("Cannot create activity because the activity weight is out of ]0, 1] range.");
+    }
+    
+    if (comment.empty()) {
+      throw UserSpecifiedAnEmptyStringComment("Cannot create activity weight because the comment is"
+        " an empty string");
+    }
+    
+    const time_t now = time(nullptr);
+    const char *const sql =
+      "INSERT INTO ACTIVITIES_WEIGHTS ("
+        "DISK_INSTANCE_NAME,"
+        "ACTIVITY,"
+        "WEIGHT,"
+    
+        "USER_COMMENT,"
+
+        "CREATION_LOG_USER_NAME,"
+        "CREATION_LOG_HOST_NAME,"
+        "CREATION_LOG_TIME,"
+
+        "LAST_UPDATE_USER_NAME,"
+        "LAST_UPDATE_HOST_NAME,"
+        "LAST_UPDATE_TIME)"
+    
+      "VALUES ("
+        ":DISK_INSTANCE_NAME,"
+        ":ACTIVITY,"
+        ":WEIGHT,"
+
+        ":USER_COMMENT,"
+
+        ":CREATION_LOG_USER_NAME,"
+        ":CREATION_LOG_HOST_NAME,"
+        ":CREATION_LOG_TIME,"
+
+        ":LAST_UPDATE_USER_NAME,"
+        ":LAST_UPDATE_HOST_NAME,"
+        ":LAST_UPDATE_TIME)";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
+    stmt.bindString(":ACTIVITY", activity);
+    stmt.bindString(":WEIGHT", std::to_string(weight));
+    
+    stmt.bindString(":USER_COMMENT", comment);
+
+    stmt.bindString(":CREATION_LOG_USER_NAME", admin.username);
+    stmt.bindString(":CREATION_LOG_HOST_NAME", admin.host);
+    stmt.bindUint64(":CREATION_LOG_TIME", now);
+
+    stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username);
+    stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host);
+    stmt.bindUint64(":LAST_UPDATE_TIME", now);
+    
+    stmt.executeNonQuery();
+
+    conn.commit();
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// modifyActivitiesFairShareWeight
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::modifyActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity, double weight, const std::string& comment) {
+  try {
+    if (diskInstanceName.empty()) {
+      throw UserSpecifiedAnEmptyStringDiskInstanceName("Cannot modify activity weight because the disk instance name is"
+        " an empty string");
+    }
+    
+    if (activity.empty()) {
+      throw UserSpecifiedAnEmptyStringActivity("Cannot modify activity weight because the activity name is"
+        " an empty string");
+    }
+    
+    if (weight <= 0 || weight > 1) {
+      throw UserSpecifiedAnOutOfRangeActivityWeight("Cannot modify activity because the activity weight is out of ]0, 1] range.");
+    }
+    
+    if (comment.empty()) {
+      throw UserSpecifiedAnEmptyStringComment("Cannot modify activity weight because the comment is"
+        " an empty string");
+    }
+    
+    const time_t now = time(nullptr);
+    const char *const sql =
+      "UPDATE ACTIVITIES_WEIGHTS SET "
+        "WEIGHT = :WEIGHT,"
+        "LAST_UPDATE_USER_NAME = :LAST_UPDATE_USER_NAME,"
+        "LAST_UPDATE_HOST_NAME = :LAST_UPDATE_HOST_NAME,"
+        "LAST_UPDATE_TIME = :LAST_UPDATE_TIME,"
+        "USER_COMMENT = :USER_COMMENT "
+      "WHERE "
+        "DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND "
+        "ACTIVITY = :ACTIVITY";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
+    stmt.bindString(":ACTIVITY", activity);
+    stmt.bindString(":WEIGHT", std::to_string(weight));
+    
+    stmt.bindString(":USER_COMMENT", comment);
+    stmt.bindString(":LAST_UPDATE_USER_NAME", admin.username);
+    stmt.bindString(":LAST_UPDATE_HOST_NAME", admin.host);
+    stmt.bindUint64(":LAST_UPDATE_TIME", now);
+    stmt.executeNonQuery();
+
+    if(0 == stmt.getNbAffectedRows()) {
+      throw exception::UserError(std::string("Cannot modify activity fair share weight ") + activity + " because it does not exist");
+    }
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+
+//------------------------------------------------------------------------------
+// deleteActivitiesFairShareWeight
+//------------------------------------------------------------------------------
+void RdbmsCatalogue::deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity& admin, const std::string& diskInstanceName, const std::string& activity) {
+  try {
+    if (diskInstanceName.empty()) {
+      throw UserSpecifiedAnEmptyStringDiskInstanceName("Cannot delete activity weight because the disk instance name is"
+        " an empty string");
+    }
+    
+    if (activity.empty()) {
+      throw UserSpecifiedAnEmptyStringActivity("Cannot delete activity weight because the activity name is"
+        " an empty string");
+    }
+    
+    const char *const sql = "DELETE FROM ACTIVITIES_WEIGHTS WHERE DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME AND ACTIVITY = :ACTIVITY";
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
+    stmt.bindString(":ACTIVITY", activity);
+    stmt.executeNonQuery();
+
+    if(0 == stmt.getNbAffectedRows()) {
+      throw exception::UserError(std::string("Cannot delete activity weight ") + activity + " because it does not exist");
+    }
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// getActivitiesFairShareWeights
+//------------------------------------------------------------------------------
+std::list<common::dataStructures::ActivitiesFairShareWeights> RdbmsCatalogue::getActivitiesFairShareWeights() const {
+  try {
+    std::string sql =
+      "SELECT "
+        "ACTIVITIES_WEIGHTS.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
+        "ACTIVITIES_WEIGHTS.ACTIVITY AS ACTIVITY,"
+        "ACTIVITIES_WEIGHTS.WEIGHT AS WEIGHT "
+      "FROM "
+        "ACTIVITIES_WEIGHTS";
+
+    auto conn = m_connPool.getConn();
+    auto stmt = conn.createStmt(sql);
+    auto rset = stmt.executeQuery();
+
+    std::map<std::string, common::dataStructures::ActivitiesFairShareWeights> activitiesMap;
+    while(rset.next()) {
+      common::dataStructures::ActivitiesFairShareWeights * activity;
+      auto diskInstanceName = rset.columnString("DISK_INSTANCE_NAME");
+      try {
+        activity = & activitiesMap.at(diskInstanceName);
+      } catch (const std::out_of_range &) {
+        activity = & activitiesMap[diskInstanceName];
+        activity->diskInstance = diskInstanceName;
+      }
+      activity->setWeightFromString(rset.columnString("ACTIVITY"), rset.columnString("WEIGHT"));
+    }
+    std::list<common::dataStructures::ActivitiesFairShareWeights> ret;
+    for (auto & dia: activitiesMap) {
+      ret.push_back(dia.second);
+    }
+    return ret;
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 //------------------------------------------------------------------------------
 // insertArchiveFile
 //------------------------------------------------------------------------------
@@ -4694,55 +4911,60 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsCatalogue::prepareToRetri
   const std::string &diskInstanceName,
   const uint64_t archiveFileId,
   const common::dataStructures::UserIdentity &user,
+  const optional<std::string>& activity,
   log::LogContext &lc) {
   try {
     cta::utils::Timer t;
-    auto conn = m_connPool.getConn();
-    const auto getConnTime = t.secs(utils::Timer::resetCounter);
-    auto archiveFile = getArchiveFileToRetrieveByArchiveFileId(conn, archiveFileId);
-    const auto getArchiveFileTime = t.secs(utils::Timer::resetCounter);
-    if(nullptr == archiveFile.get()) {
-      exception::UserError ex;
-      ex.getMessage() << "No tape files available for archive file with archive file ID " << archiveFileId;
-      throw ex;
-    }
+    common::dataStructures::RetrieveFileQueueCriteria criteria;
+    {
+      auto conn = m_connPool.getConn();
+      const auto getConnTime = t.secs(utils::Timer::resetCounter);
+      auto archiveFile = getArchiveFileToRetrieveByArchiveFileId(conn, archiveFileId);
+      const auto getArchiveFileTime = t.secs(utils::Timer::resetCounter);
+      if(nullptr == archiveFile.get()) {
+        exception::UserError ex;
+        ex.getMessage() << "No tape files available for archive file with archive file ID " << archiveFileId;
+        throw ex;
+      }
 
-    if(diskInstanceName != archiveFile->diskInstance) {
-      exception::UserError ue;
-      ue.getMessage() << "Cannot retrieve file because the disk instance of the request does not match that of the"
-        " archived file: archiveFileId=" << archiveFileId << " path=" << archiveFile->diskFileInfo.path <<
-        " requestDiskInstance=" << diskInstanceName << " archiveFileDiskInstance=" << archiveFile->diskInstance;
-      throw ue;
-    }
+      if(diskInstanceName != archiveFile->diskInstance) {
+        exception::UserError ue;
+        ue.getMessage() << "Cannot retrieve file because the disk instance of the request does not match that of the"
+          " archived file: archiveFileId=" << archiveFileId << " path=" << archiveFile->diskFileInfo.path <<
+          " requestDiskInstance=" << diskInstanceName << " archiveFileDiskInstance=" << archiveFile->diskInstance;
+        throw ue;
+      }
 
-    t.reset();
-    const RequesterAndGroupMountPolicies mountPolicies = getMountPolicies(conn, diskInstanceName, user.name,
-      user.group);
-     const auto getMountPoliciesTime = t.secs(utils::Timer::resetCounter);
+      t.reset();
+      const RequesterAndGroupMountPolicies mountPolicies = getMountPolicies(conn, diskInstanceName, user.name,
+        user.group);
+      const auto getMountPoliciesTime = t.secs(utils::Timer::resetCounter);
+
+      log::ScopedParamContainer spc(lc);
+      spc.add("getConnTime", getConnTime)
+         .add("getArchiveFileTime", getArchiveFileTime)
+         .add("getMountPoliciesTime", getMountPoliciesTime);
+      lc.log(log::INFO, "Catalogue::prepareToRetrieve internal timings");
+
+      // Requester mount policies overrule requester group mount policies
+      common::dataStructures::MountPolicy mountPolicy;
+      if(!mountPolicies.requesterMountPolicies.empty()) {
+        mountPolicy = mountPolicies.requesterMountPolicies.front();
+      } else if(!mountPolicies.requesterGroupMountPolicies.empty()) {
+        mountPolicy = mountPolicies.requesterGroupMountPolicies.front();
+      } else {
+        exception::UserError ue;
+        ue.getMessage() << "Cannot retrieve file because there are no mount rules for the requester or their group:" <<
+          " archiveFileId=" << archiveFileId << " path=" << archiveFile->diskFileInfo.path << " requester=" <<
+          diskInstanceName << ":" << user.name << ":" << user.group;
+        throw ue;
+      }
 
-    log::ScopedParamContainer spc(lc);
-    spc.add("getConnTime", getConnTime)
-       .add("getArchiveFileTime", getArchiveFileTime)
-       .add("getMountPoliciesTime", getMountPoliciesTime);
-    lc.log(log::INFO, "Catalogue::prepareToRetrieve internal timings");
 
-    // Requester mount policies overrule requester group mount policies
-    common::dataStructures::MountPolicy mountPolicy;
-    if(!mountPolicies.requesterMountPolicies.empty()) {
-      mountPolicy = mountPolicies.requesterMountPolicies.front();
-    } else if(!mountPolicies.requesterGroupMountPolicies.empty()) {
-      mountPolicy = mountPolicies.requesterGroupMountPolicies.front();
-    } else {
-      exception::UserError ue;
-      ue.getMessage() << "Cannot retrieve file because there are no mount rules for the requester or their group:" <<
-        " archiveFileId=" << archiveFileId << " path=" << archiveFile->diskFileInfo.path << " requester=" <<
-        diskInstanceName << ":" << user.name << ":" << user.group;
-      throw ue;
+      criteria.archiveFile = *archiveFile;
+      criteria.mountPolicy = mountPolicy;
     }
-
-    common::dataStructures::RetrieveFileQueueCriteria criteria;
-    criteria.archiveFile = *archiveFile;
-    criteria.mountPolicy = mountPolicy;
+    criteria.activitiesFairShareWeight = getCachedActivitiesWeights(diskInstanceName);
     return criteria;
   } catch(exception::UserError &) {
     throw;
@@ -5277,6 +5499,58 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
   }
 }
 
+//------------------------------------------------------------------------------
+// getCachedActivitiesWeights
+//------------------------------------------------------------------------------
+common::dataStructures::ActivitiesFairShareWeights 
+RdbmsCatalogue::getCachedActivitiesWeights(const std::string& diskInstance) const {
+  try {
+    auto getNonCachedValue = [&] {
+      auto conn = m_connPool.getConn();
+      return getActivitiesWeights(conn, diskInstance);
+    };
+    return m_activitiesFairShareWeights.getCachedValue(diskInstance, getNonCachedValue);
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
+//------------------------------------------------------------------------------
+// getActivitiesWeights
+//------------------------------------------------------------------------------
+common::dataStructures::ActivitiesFairShareWeights 
+RdbmsCatalogue::getActivitiesWeights(rdbms::Conn& conn, const std::string& diskInstanceName) const {
+  try {
+    const char *const sql =
+      "SELECT "
+        "ACTIVITIES_WEIGHTS.ACTIVITY AS ACTIVITY,"
+        "ACTIVITIES_WEIGHTS.WEIGHT AS WEIGHT "
+      "FROM "
+        "ACTIVITIES_WEIGHTS "
+      "WHERE "
+        "ACTIVITIES_WEIGHTS.DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME";
+    auto stmt = conn.createStmt(sql);
+    stmt.bindString(":DISK_INSTANCE_NAME", diskInstanceName);
+    auto rset = stmt.executeQuery();
+    common::dataStructures::ActivitiesFairShareWeights afsw;
+    afsw.diskInstance = diskInstanceName;
+    while (rset.next()) {
+      // The weight is a string encoded double with values in [0, 1], like in FTS.
+      // All the checks are performed in setWeightFromString().
+      afsw.setWeightFromString(rset.columnString("ACTIVITY"), rset.columnString("WEIGHT"));
+    }
+    return afsw;
+  } catch(exception::UserError &) {
+    throw;
+  } catch(exception::Exception &ex) {
+    ex.getMessage().str(std::string(__FUNCTION__) + ": " + ex.getMessage().str());
+    throw;
+  }
+}
+
 //------------------------------------------------------------------------------
 // getArchiveFileByDiskFileId
 //------------------------------------------------------------------------------
diff --git a/catalogue/RdbmsCatalogue.hpp b/catalogue/RdbmsCatalogue.hpp
index 584074885cb595fa34ef76c0b16ac1de35f02e9a..0609fd0e4db319fe46757fb2bc29372e22975ca9 100644
--- a/catalogue/RdbmsCatalogue.hpp
+++ b/catalogue/RdbmsCatalogue.hpp
@@ -170,6 +170,8 @@ public:
    * @param user The user for whom the file is to be retrieved.  This will be
    * used by the Catalogue to determine the mount policy to be used when
    * retrieving the file.
+   * @param activity The activity under which the user wants to start the retrieve.
+   * The call will fail if the activity is set and unknown.
    * @param lc The log context.
    *
    * @return The information required to queue the associated retrieve request(s).
@@ -178,6 +180,7 @@ public:
     const std::string &diskInstanceName,
     const uint64_t archiveFileId,
     const common::dataStructures::UserIdentity &user,
+    const optional<std::string> & activity,
     log::LogContext &lc) override;
 
   /**
@@ -494,7 +497,15 @@ public:
   void modifyMountPolicyRetrieveMinRequestAge(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const uint64_t minRetrieveRequestAge) override;
   void modifyMountPolicyMaxDrivesAllowed(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const uint64_t maxDrivesAllowed) override;
   void modifyMountPolicyComment(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const std::string &comment) override;
+  
+  void createActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity,
+    double weight, const std::string & comment) override;
+  void modifyActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity,
+    double weight, const std::string & comment) override;
+  void deleteActivitiesFairShareWeight(const common::dataStructures::SecurityIdentity &admin, const std::string & diskInstanceName, const std::string & activity) override;
+  std::list<common::dataStructures::ActivitiesFairShareWeights> getActivitiesFairShareWeights() const override;
 
+  
   /**
    * Throws a UserError exception if the specified searchCriteria is not valid
    * due to a user error.
@@ -1080,6 +1091,25 @@ protected:
     rdbms::Conn &conn,
     const uint64_t archiveFileId) const;
 
+  /**
+   * Returns a cached version of the (possibly empty) activities to weight map
+   * for the given disk instance.
+   * @param diskInstance
+   * @return activities to weight map (ActivitiesFairShareWeights)
+   */
+  common::dataStructures::ActivitiesFairShareWeights getCachedActivitiesWeights(
+    const std::string &diskInstanceName) const;
+  
+  /**
+   * Returns the (possibly empty) activities to weight map for the given disk instance.
+   * @param conn The database connection.
+   * @param diskInstance
+   * @return activities to weight map (ActivitiesFairShareWeights)
+   */
+  common::dataStructures::ActivitiesFairShareWeights getActivitiesWeights(
+    rdbms::Conn &conn,
+    const std::string &diskInstanceName) const;  
+  
   /**
    * Returns the specified archive file.   A nullptr pointer is returned if
    * there is no corresponding row in the ARCHIVE_FILE table.  Please note that
@@ -1270,6 +1300,11 @@ protected:
    * Cached version of isAdmin() results.
    */
   mutable TimeBasedCache<common::dataStructures::SecurityIdentity, bool> m_isAdminCache;
+  
+  /**
+   * Cached version of the activities to weight maps.
+   */
+  mutable TimeBasedCache<std::string, common::dataStructures::ActivitiesFairShareWeights> m_activitiesFairShareWeights;
 
 }; // class RdbmsCatalogue
 
diff --git a/catalogue/common_catalogue_schema.sql b/catalogue/common_catalogue_schema.sql
index a5f2a3b80170a6d71f2d5ad4de64a8fe835c2d84..5b9e24abf730e06bdecb754986ac591426485095 100644
--- a/catalogue/common_catalogue_schema.sql
+++ b/catalogue/common_catalogue_schema.sql
@@ -197,6 +197,33 @@ CREATE TABLE TAPE_FILE(
 CREATE INDEX TAPE_FILE_VID_IDX ON TAPE_FILE(VID);
 CREATE INDEX TAPE_FILE_ARCHIVE_FILE_ID_IDX ON TAPE_FILE(ARCHIVE_FILE_ID);
 CREATE INDEX TAPE_FILE_SBV_SBF_IDX ON TAPE_FILE(SUPERSEDED_BY_VID, SUPERSEDED_BY_FSEQ);
+CREATE TABLE ACTIVITIES_WEIGHTS (
+  DISK_INSTANCE_NAME       VARCHAR(100),
+  ACTIVITY                 VARCHAR(100),
+  WEIGHT                   VARCHAR(100),
+  USER_COMMENT             VARCHAR(1000)   CONSTRAINT ACTIV_WEIGHTS_UC_NN   NOT NULL,
+  CREATION_LOG_USER_NAME   VARCHAR(100)    CONSTRAINT ACTIV_WEIGHTS_CLUN_NN NOT NULL,
+  CREATION_LOG_HOST_NAME   VARCHAR(100)    CONSTRAINT ACTIV_WEIGHTS_CLHN_NN NOT NULL,
+  CREATION_LOG_TIME        NUMERIC(20, 0)  CONSTRAINT ACTIV_WEIGHTS_CLT_NN  NOT NULL,
+  LAST_UPDATE_USER_NAME    VARCHAR(100)    CONSTRAINT ACTIV_WEIGHTS_LUUN_NN NOT NULL,
+  LAST_UPDATE_HOST_NAME    VARCHAR(100)    CONSTRAINT ACTIV_WEIGHTS_LUHN_NN NOT NULL,
+  LAST_UPDATE_TIME         NUMERIC(20, 0)  CONSTRAINT ACTIV_WEIGHTS_LUT_NN  NOT NULL);
+CREATE TABLE USAGESTATS (
+  GID                     NUMERIC(6)      DEFAULT 0 CONSTRAINT NN_USAGESTATS_GID NOT NULL,
+  TIMESTAMP               NUMERIC(20, 0)  DEFAULT 0 CONSTRAINT NN_USAGESTATS_TS NOT NULL,
+  MAXFILEID               NUMERIC(20, 0),
+  FILECOUNT               NUMERIC(20, 0),
+  FILESIZE                NUMERIC(20, 0),
+  SEGCOUNT                NUMERIC(20, 0),
+  SEGSIZE                 NUMERIC(20, 0),
+  SEG2COUNT               NUMERIC(20, 0),
+  SEG2SIZE                NUMERIC(20, 0),
+  CONSTRAINT PK_USAGESTATS_GID_TS PRIMARY KEY (GID, TIMESTAMP)
+);
+CREATE TABLE EXPERIMENTS (
+ NAME                     VARCHAR(20),
+ GID                      NUMERIC(6, 0) CONSTRAINT EXPERIMENTS_GID_PK PRIMARY KEY
+);
 INSERT INTO CTA_CATALOGUE(
   SCHEMA_VERSION_MAJOR,
   SCHEMA_VERSION_MINOR)
diff --git a/catalogue/oracle_catalogue_usage_stats.sql b/catalogue/oracle_catalogue_usage_stats.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f138e20b393df3b8c18f18a4e47212ea97104786
--- /dev/null
+++ b/catalogue/oracle_catalogue_usage_stats.sql
@@ -0,0 +1,142 @@
+/*****************************************************************************
+ *              oracle_catalogue_usage_stats.sql
+ *
+ * This file is part of the Castor/CTA project.
+ * See http://cern.ch/castor and http://cern.ch/eoscta
+ * Copyright (C) 2019  CERN
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 3
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * This script adds the necessary PL/SQL code to an existing CTA Catalogue
+ * schema in order to support the daily usage statistics gathering.
+ *
+ * This script should be ported to the other supported DBs in a future time.
+ *
+ * @author Castor Dev team, castor-dev@cern.ch
+ *****************************************************************************/
+
+-- This table will be used to safely store the legacy CASTOR usage statistics
+CREATE TABLE CastorUsageStats (
+  gid NUMBER(6) DEFAULT 0 CONSTRAINT NN_CastorUsageStats_gid NOT NULL,
+  timestamp NUMBER  DEFAULT 0 CONSTRAINT NN_CastorUsageStats_ts NOT NULL,
+  maxFileId INTEGER, fileCount INTEGER, fileSize INTEGER,
+  segCount INTEGER, segSize INTEGER, segCompressedSize INTEGER,
+  seg2Count INTEGER, seg2Size INTEGER, seg2CompressedSize INTEGER
+);
+
+/* Get current time as a time_t (Unix time) */
+CREATE OR REPLACE FUNCTION getTime RETURN NUMBER IS
+  epoch            TIMESTAMP WITH TIME ZONE;
+  now              TIMESTAMP WITH TIME ZONE;
+  interval         INTERVAL DAY(9) TO SECOND;
+  interval_days    NUMBER;
+  interval_hours   NUMBER;
+  interval_minutes NUMBER;
+  interval_seconds NUMBER;
+BEGIN
+  epoch := TO_TIMESTAMP_TZ('01-JAN-1970 00:00:00 00:00',
+    'DD-MON-YYYY HH24:MI:SS TZH:TZM');
+  now := SYSTIMESTAMP AT TIME ZONE '00:00';
+  interval         := now - epoch;
+  interval_days    := EXTRACT(DAY    FROM (interval));
+  interval_hours   := EXTRACT(HOUR   FROM (interval));
+  interval_minutes := EXTRACT(MINUTE FROM (interval));
+  interval_seconds := EXTRACT(SECOND FROM (interval));
+
+  RETURN interval_days * 24 * 60 * 60 + interval_hours * 60 * 60 +
+    interval_minutes * 60 + interval_seconds;
+END;
+/
+
+-- Helper procedure to insert/accumulate statistics in the UsageStats table
+CREATE OR REPLACE PROCEDURE insertNSStats(inGid IN INTEGER, inTimestamp IN NUMBER, inMaxFileId IN INTEGER,
+                                          inFileCount IN INTEGER, inFileSize IN INTEGER,
+                                          inSegCount IN INTEGER, inSegSize IN INTEGER,
+                                          inSeg2Count IN INTEGER, inSeg2Size IN INTEGER) AS
+  CONSTRAINT_VIOLATED EXCEPTION;
+  PRAGMA EXCEPTION_INIT(CONSTRAINT_VIOLATED, -1);
+BEGIN
+  INSERT INTO UsageStats (gid, timestamp, maxFileId, fileCount, fileSize,
+                          segCount, segSize, seg2Count, seg2Size)
+    VALUES (inGid, inTimestamp, inMaxFileId, inFileCount, inFileSize,
+            inSegCount, inSegSize, inSeg2Count, inSeg2Size);
+EXCEPTION WHEN CONSTRAINT_VIOLATED THEN
+  UPDATE UsageStats SET
+    maxFileId = CASE WHEN inMaxFileId > maxFileId THEN inMaxFileId ELSE maxFileId END,
+    fileCount = fileCount + inFileCount,
+    fileSize = fileSize + inFileSize,
+    segCount = segCount + inSegCount,
+    segSize = segSize + inSegSize,
+    seg2Count = seg2Count + inSeg2Count,
+    seg2Size = seg2Size + inSeg2Size
+  WHERE gid = inGid AND timestamp = inTimestamp;
+END;
+/
+
+-- This procedure is run as a database job to generate statistics from the namespace
+-- Taken as is from CASTOR, cf. https://gitlab.cern.ch/castor/CASTOR/tree/master/ns/oracleTrailer.sql
+CREATE OR REPLACE PROCEDURE gatherCatalogueStats AS
+  varTimestamp NUMBER := trunc(getTime());
+BEGIN
+  -- File-level statistics
+  FOR g IN (SELECT disk_file_gid, MAX(archive_file_id) maxId,
+                   COUNT(*) fileCount, SUM(size_in_bytes) fileSize
+              FROM Archive_File
+             WHERE creation_time < varTimestamp
+             GROUP BY disk_file_gid) LOOP
+    insertNSStats(g.disk_file_gid, varTimestamp, g.maxId, g.fileCount, g.fileSize, 0, 0, 0, 0);
+  END LOOP;
+  COMMIT;
+  -- Tape-level statistics
+  FOR g IN (SELECT disk_file_gid, copy_nb, SUM(size_in_bytes) segSize, COUNT(*) segCount
+              FROM Tape_File, Archive_File
+             WHERE Tape_File.archive_file_id = Archive_File.archive_file_id
+               AND Archive_File.creation_time < varTimestamp
+             GROUP BY disk_file_gid, copy_nb) LOOP
+    IF g.copy_nb = 1 THEN
+      insertNSStats(g.disk_file_gid, varTimestamp, 0, 0, 0, g.segCount, g.segSize, 0, 0);
+    ELSE
+      insertNSStats(g.disk_file_gid, varTimestamp, 0, 0, 0, 0, 0, g.segCount, g.segSize);
+    END IF;
+  END LOOP;
+  COMMIT;
+  -- Also compute totals
+  INSERT INTO UsageStats (gid, timestamp, maxFileId, fileCount, fileSize,
+                          segCount, segSize, seg2Count, seg2Size)
+    (SELECT -1, varTimestamp, MAX(maxFileId), SUM(fileCount), SUM(fileSize),
+            SUM(segCount), SUM(segSize), SUM(seg2Count), SUM(seg2Size)
+       FROM UsageStats
+      WHERE timestamp = varTimestamp);
+  COMMIT;
+END;
+/
+
+/* Database job for the statistics */
+BEGIN
+  -- Remove database jobs before recreating them
+  FOR j IN (SELECT job_name FROM user_scheduler_jobs
+             WHERE job_name = 'STATSJOB')
+  LOOP
+    DBMS_SCHEDULER.DROP_JOB(j.job_name, TRUE);
+  END LOOP;
+
+  -- Create a db job to be run every day executing the gatherNSStats procedure
+  DBMS_SCHEDULER.CREATE_JOB(
+      JOB_NAME        => 'StatsJob',
+      JOB_TYPE        => 'PLSQL_BLOCK',
+      JOB_ACTION      => 'BEGIN gatherCatalogueStats(); END;',
+      START_DATE      => SYSDATE,
+      REPEAT_INTERVAL => 'FREQ=DAILY; INTERVAL=1',
+      ENABLED         => TRUE,
+      COMMENTS        => 'Gathering of catalogue usage statistics');
+END;
+/
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index cac4940c7d4f676c836f2ce81f6b685080f50330..5c0965ce33438e166a913155209838590cc863b8 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -28,6 +28,7 @@ include_directories (${XROOTD_INCLUDE_DIR})
 set_source_files_properties(CRC.cpp PROPERTIES COMPILE_FLAGS -O2)
 
 set (COMMON_LIB_SRC_FILES
+  dataStructures/ActivitiesFairShareWeights.cpp
   dataStructures/AdminUser.cpp
   dataStructures/ArchiveFile.cpp
   dataStructures/ArchiveFileQueueCriteria.cpp
diff --git a/common/dataStructures/ActivitiesFairShareWeights.cpp b/common/dataStructures/ActivitiesFairShareWeights.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..765fceff15e9c5dbd301464e30dbefd24231845b
--- /dev/null
+++ b/common/dataStructures/ActivitiesFairShareWeights.cpp
@@ -0,0 +1,41 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "ActivitiesFairShareWeights.hpp"
+#include "common/exception/Exception.hpp"
+
+namespace cta { namespace common { namespace dataStructures {
+
+void ActivitiesFairShareWeights::setWeightFromDouble(const std::string & activity, double weight) {
+  if (weight < 0 || weight > 1)
+    throw cta::exception::Exception("In ActivitiesFairShareWeights::setWeightFromDouble(): weight out of range.");
+  activitiesWeights[activity] = weight;
+}
+
+void ActivitiesFairShareWeights::setWeightFromString(const std::string& activity, const std::string& sweight) {
+  if (sweight.empty())
+    throw cta::exception::Exception("In ActivitiesFairShareWeights::setWeightFromString() empty string.");
+  size_t pos;
+  double weight = std::stod(sweight, &pos);
+  if (pos != sweight.size())
+    throw cta::exception::Exception("In ActivitiesFairShareWeights::setWeightFromString(): bad format: garbage at the end of string.");
+  setWeightFromDouble(activity, weight);
+}
+
+
+}}} // namespace cta::common::dataStructures.
diff --git a/common/dataStructures/ActivitiesFairShareWeights.hpp b/common/dataStructures/ActivitiesFairShareWeights.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..737f534ed212b885950c3ebb4e6d1c96eed12929
--- /dev/null
+++ b/common/dataStructures/ActivitiesFairShareWeights.hpp
@@ -0,0 +1,35 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <string>
+#include <map>
+
+namespace cta { namespace common { namespace dataStructures {
+
+struct ActivitiesFairShareWeights {
+  std::string diskInstance;
+  std::map<std::string, double> activitiesWeights;
+  /** set the weight for the activity, checking the value is in ]0, 1] */
+  void setWeightFromDouble(const std::string & activity, double weight);
+  /** set the weight for an activity, first checking the string can be fully converted to a double, and then */
+  void setWeightFromString(const std::string & activity, const std::string &sweight);
+};
+  
+}}}
\ No newline at end of file
diff --git a/common/dataStructures/DesiredDriveState.hpp b/common/dataStructures/DesiredDriveState.hpp
index 07ba573959e94127df1dd68ade71d3978d011657..4f6302fece57ad07fba43c6e996f6e2b7fe030da 100644
--- a/common/dataStructures/DesiredDriveState.hpp
+++ b/common/dataStructures/DesiredDriveState.hpp
@@ -35,6 +35,7 @@ struct DesiredDriveState {
   bool operator==(const DesiredDriveState &rhs) const {
     return up == rhs.up && forceDown == rhs.forceDown;
   }
+  DesiredDriveState(): up(false), forceDown(false) {}
 };
 
 std::ostream &operator<<(std::ostream& os, const DesiredDriveState& obj);
diff --git a/common/dataStructures/DriveState.cpp b/common/dataStructures/DriveState.cpp
index 73d83e009935df39b9628ee114a0c026f00ee92f..7678a739d57e971bfe949c489c4874d19d12bad5 100644
--- a/common/dataStructures/DriveState.cpp
+++ b/common/dataStructures/DriveState.cpp
@@ -24,31 +24,6 @@ namespace cta {
 namespace common {
 namespace dataStructures {
 
-//------------------------------------------------------------------------------
-// constructor
-//------------------------------------------------------------------------------
-DriveState::DriveState():
-  sessionId(0),
-  bytesTransferredInSession(0),
-  filesTransferredInSession(0),
-  latestBandwidth(0),
-  sessionStartTime(0),
-  mountStartTime(0),
-  transferStartTime(0),
-  unloadStartTime(0),
-  unmountStartTime(0),
-  drainingStartTime(0),
-  downOrUpStartTime(0),
-  probeStartTime(0),
-  cleanupStartTime(0),
-  lastUpdateTime(0),
-  startStartTime(0),
-  shutdownTime(0),
-  mountType(dataStructures::MountType::NoMount),
-  driveStatus(dataStructures::DriveStatus::Down),
-  desiredDriveState({false, false}),
-  nextMountType(dataStructures::MountType::NoMount) {}
-
 //------------------------------------------------------------------------------
 // operator==
 //------------------------------------------------------------------------------
@@ -77,9 +52,24 @@ bool DriveState::operator==(const DriveState &rhs) const {
       && desiredDriveState==rhs.desiredDriveState
       && currentVid==rhs.currentVid
       && currentTapePool==rhs.currentTapePool
+      && currentPriority == rhs.currentPriority
+      && bool(currentActivityAndWeight) == bool(rhs.currentActivityAndWeight)
+      && (currentActivityAndWeight? (
+        currentActivityAndWeight.value().activity 
+          == rhs.currentActivityAndWeight.value().activity
+        && currentActivityAndWeight.value().weight
+          == rhs.currentActivityAndWeight.value().weight
+         ): true)
       && nextMountType == rhs.nextMountType
       && nextTapepool == rhs.nextTapepool
-      && nextVid == rhs.nextVid;
+      && nextVid == rhs.nextVid
+      && bool(nextActivityAndWeight) == bool(rhs.nextActivityAndWeight)
+      && (nextActivityAndWeight? (
+        nextActivityAndWeight.value().activity 
+          == rhs.nextActivityAndWeight.value().activity
+        && nextActivityAndWeight.value().weight
+          == rhs.nextActivityAndWeight.value().weight
+         ): true);
 }
 
 //------------------------------------------------------------------------------
@@ -118,9 +108,25 @@ std::ostream &operator<<(std::ostream &os, const DriveState &obj) {
      << " desiredState=" << obj.desiredDriveState
      << " currentVid=" << obj.currentVid
      << " currentTapePool=" << obj.currentTapePool
-     << " nextMountType=" << obj.nextMountType
+     << " currentPriority=" << obj.currentPriority
+     << " currentActivity=";
+  if (obj.currentActivityAndWeight) {
+    os << "(" << obj.currentActivityAndWeight.value().activity
+       << "," << obj.currentActivityAndWeight.value().weight << ")";
+  } else {
+    os << "(none)";
+  }
+  os << " nextMountType=" << obj.nextMountType
      << " nextVid=" << obj.nextVid
-     << " nextTapePool=" << obj.nextTapepool << ")";
+     << " nextTapePool=" << obj.nextTapepool
+     << " currentNext=";
+  if (obj.nextActivityAndWeight) {
+    os << "(" << obj.nextActivityAndWeight.value().activity
+       << "," << obj.nextActivityAndWeight.value().weight << ")";
+  } else {
+    os << "(none)";
+  }
+  return os << ")";
 }
 
 } // namespace dataStructures
diff --git a/common/dataStructures/DriveState.hpp b/common/dataStructures/DriveState.hpp
index b4568c450f119abf333ecb31988ab328e99c4ed7..64afdd153d429decfa095e48e724065130709a45 100644
--- a/common/dataStructures/DriveState.hpp
+++ b/common/dataStructures/DriveState.hpp
@@ -24,6 +24,7 @@
 #include "DriveStatus.hpp"
 #include "MountType.hpp"
 #include "DesiredDriveState.hpp"
+#include "common/optional.hpp"
 
 namespace cta {
 namespace common {
@@ -35,8 +36,6 @@ namespace dataStructures {
  */
 struct DriveState {
 
-  DriveState();
-
   bool operator==(const DriveState &rhs) const;
 
   bool operator!=(const DriveState &rhs) const;
@@ -44,31 +43,38 @@ struct DriveState {
   std::string driveName;
   std::string host;
   std::string logicalLibrary;
-  uint64_t sessionId;
-  uint64_t bytesTransferredInSession;
-  uint64_t filesTransferredInSession;
-  double latestBandwidth; /** < Byte per seconds */
-  time_t sessionStartTime;
-  time_t mountStartTime;
-  time_t transferStartTime;
-  time_t unloadStartTime;
-  time_t unmountStartTime;
-  time_t drainingStartTime;
-  time_t downOrUpStartTime;
-  time_t probeStartTime;
-  time_t cleanupStartTime;
-  time_t lastUpdateTime;
-  time_t startStartTime;
-  time_t shutdownTime;
-  MountType mountType;
-  DriveStatus driveStatus;
+  uint64_t sessionId = 0;
+  uint64_t bytesTransferredInSession = 0;
+  uint64_t filesTransferredInSession = 0;
+  double latestBandwidth = 0.0; /** < Byte per seconds */
+  time_t sessionStartTime = 0;
+  time_t mountStartTime = 0;
+  time_t transferStartTime = 0;
+  time_t unloadStartTime = 0;
+  time_t unmountStartTime = 0;
+  time_t drainingStartTime = 0;
+  time_t downOrUpStartTime = 0;
+  time_t probeStartTime = 0;
+  time_t cleanupStartTime = 0;
+  time_t lastUpdateTime = 0;
+  time_t startStartTime = 0;
+  time_t shutdownTime = 0;
+  MountType mountType = MountType::NoMount;
+  DriveStatus driveStatus = DriveStatus::Down;
   DesiredDriveState desiredDriveState;
   std::string currentVid;
   std::string currentTapePool;
-  MountType nextMountType;
+  uint64_t currentPriority = 0;
+  struct ActivityAndWeight {
+    std::string activity;
+    double weight;
+  };
+  optional<ActivityAndWeight> currentActivityAndWeight;
+  MountType nextMountType = MountType::NoMount;
   std::string nextVid;
   std::string nextTapepool;
-
+  uint64_t nextPriority = 0;
+  optional<ActivityAndWeight> nextActivityAndWeight;
 }; // struct DriveState
 
 std::ostream &operator<<(std::ostream &os, const DriveState &obj);
diff --git a/common/dataStructures/RetrieveFileQueueCriteria.cpp b/common/dataStructures/RetrieveFileQueueCriteria.cpp
index df8980f6f6b9800a85e41e1bf5519b9ffb0d5276..237b7b639e6cf0de97d8c3fb71260d9b0f4446ff 100644
--- a/common/dataStructures/RetrieveFileQueueCriteria.cpp
+++ b/common/dataStructures/RetrieveFileQueueCriteria.cpp
@@ -25,6 +25,7 @@ RetrieveFileQueueCriteria& RetrieveFileQueueCriteria::operator=(const RetrieveFi
     if(this != &other){
         this->archiveFile = other.archiveFile;
         this->mountPolicy = other.mountPolicy;
+        this->activitiesFairShareWeight = other.activitiesFairShareWeight;
     }
     return *this;
 }
diff --git a/common/dataStructures/RetrieveFileQueueCriteria.hpp b/common/dataStructures/RetrieveFileQueueCriteria.hpp
index 9e1dc809bcf031f711886dd2d1cc212a3a10d05b..ba5965acd011425b45e57927e4eca2ad494558c8 100644
--- a/common/dataStructures/RetrieveFileQueueCriteria.hpp
+++ b/common/dataStructures/RetrieveFileQueueCriteria.hpp
@@ -20,6 +20,7 @@
 
 #include "common/dataStructures/ArchiveFile.hpp"
 #include "common/dataStructures/MountPolicy.hpp"
+#include "common/dataStructures/ActivitiesFairShareWeights.hpp"
 
 #include <map>
 #include <stdint.h>
@@ -43,6 +44,11 @@ struct RetrieveFileQueueCriteria {
    */
   MountPolicy mountPolicy;
   
+  /**
+   * The fair shares for the disk instance of the file (if any).
+   */
+  ActivitiesFairShareWeights activitiesFairShareWeight;
+  
   RetrieveFileQueueCriteria &operator=(const RetrieveFileQueueCriteria& other);
 
 }; // struct RetrieveFileQueueCriteria
diff --git a/common/dataStructures/RetrieveRequest.hpp b/common/dataStructures/RetrieveRequest.hpp
index ecab33381a774dcbf3013c4cba5aa5230e8009af..5a82075162cd328bd241c6807dc4be8dd23073ce 100644
--- a/common/dataStructures/RetrieveRequest.hpp
+++ b/common/dataStructures/RetrieveRequest.hpp
@@ -28,6 +28,7 @@
 #include "common/dataStructures/UserIdentity.hpp"
 #include "common/dataStructures/ArchiveRoute.hpp"
 #include "LifecycleTimings.hpp"
+#include "common/optional.hpp"
 
 namespace cta {
 namespace common {
@@ -51,6 +52,8 @@ struct RetrieveRequest {
   DiskFileInfo diskFileInfo;
   EntryLog creationLog;
   LifecycleTimings lifecycleTimings;
+  optional<std::string> activity;
+
 }; // struct RetrieveRequest
 
 std::ostream &operator<<(std::ostream &os, const RetrieveRequest &obj);
diff --git a/common/exception/Exception.hpp b/common/exception/Exception.hpp
index e62940f360ba05e99cbf7e0c51065d35887b8dfe..69f6d413daef0c3320ca20a193d7e00f5e2d6c01 100644
--- a/common/exception/Exception.hpp
+++ b/common/exception/Exception.hpp
@@ -121,8 +121,4 @@ protected:
 
 }} // namespace cta::exception
 
-#define CTA_GENERATE_EXCEPTION_CLASS(A)                          \
-class A: public cta::exception::Exception {                      \
-public:                                                          \
-  A(const std::string & w = ""): cta::exception::Exception(w) {} \
-}
+#define CTA_GENERATE_EXCEPTION_CLASS(A) class A: public cta::exception::Exception { using Exception::Exception; }
diff --git a/common/exception/UserError.hpp b/common/exception/UserError.hpp
index 4cbfb57dc6fe784b1c059d1bb49c105c520cdeec..731d14195197ec7aa34b92120629858dc0bb77f3 100644
--- a/common/exception/UserError.hpp
+++ b/common/exception/UserError.hpp
@@ -43,3 +43,5 @@ public:
 
 } // namespace exception
 } // namespace cta
+
+#define CTA_GENERATE_USER_EXCEPTION_CLASS(A) class A: public cta::exception::UserError { using UserError::UserError; }
diff --git a/continuousintegration/orchestration/tests/client_ar.sh b/continuousintegration/orchestration/tests/client_ar.sh
index 83f1c8618ee9df0cf834e2894b98349f69a60502..f0c463ada5106ee9f4f46ab201dc81814e48baaf 100644
--- a/continuousintegration/orchestration/tests/client_ar.sh
+++ b/continuousintegration/orchestration/tests/client_ar.sh
@@ -252,9 +252,9 @@ fi
 echo "###"
 echo "${TAPEONLY}/${ARCHIVED} on tape only"
 echo "###"
-echo "Sleeping 400 seconds to allow MGM-FST communication to settle after disk copy deletion."
-sleep 400
-echo "###"
+#echo "Sleeping 400 seconds to allow MGM-FST communication to settle after disk copy deletion."
+#sleep 400
+#echo "###"
 
 
 if [[ $TAPEAWAREGC == 1 ]]; then
@@ -279,7 +279,7 @@ done
 # CAREFULL HERE: ${STATUS_FILE} contains lines like: 99/test9900001
 for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do
   echo -n "Recalling files to ${EOS_DIR}/${subdir} using ${NB_PROCS} processes..."
-  cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOSINSTANCE} prepare -s ${EOS_DIR}/${subdir}/TEST_FILE_NAME 2>${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME && rm ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME || echo ERROR with xrootd transfer for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME"
+  cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOSINSTANCE} prepare -s ${EOS_DIR}/${subdir}/TEST_FILE_NAME?activity=T0Reprocess 2>${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME && rm ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME || echo ERROR with xrootd transfer for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/RETRIEVE_TEST_FILE_NAME"
   echo Done.
 done
 
diff --git a/objectstore/AlgorithmsTest.cpp b/objectstore/AlgorithmsTest.cpp
index 94999d4ed2645e11a3c76dcadaccd6439a10d3c0..8eced3a140e60fcdde65026ac1da358daa43b1c7 100644
--- a/objectstore/AlgorithmsTest.cpp
+++ b/objectstore/AlgorithmsTest.cpp
@@ -78,7 +78,7 @@ void fillRetrieveRequests(
     rqc.mountPolicy.retrievePriority = 1;
     requestPtrs.emplace_back(new cta::objectstore::RetrieveRequest(rrAddr, be));
     requests.emplace_back(ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransferForUser>::InsertedElement{
-      requestPtrs.back().get(), 1, i, 667, mp, serializers::RetrieveJobStatus::RJS_ToTransferForUser
+      requestPtrs.back().get(), 1, i, 667, mp, serializers::RetrieveJobStatus::RJS_ToTransferForUser, cta::nullopt
     });
     auto &rr = *requests.back().retrieveRequest;
     rr.initialize();
diff --git a/objectstore/ArchiveQueue.hpp b/objectstore/ArchiveQueue.hpp
index f2b68d1a74d92be09921a46d3c58bfdae26776fb..efbdc99d60f12475c997f31d993a859478cfe996 100644
--- a/objectstore/ArchiveQueue.hpp
+++ b/objectstore/ArchiveQueue.hpp
@@ -141,28 +141,11 @@ public:
   static const uint64_t c_maxShardSize = 25000;
 };
 
-class ArchiveQueueToTransferForUser: public ArchiveQueue {
-  using ArchiveQueue::ArchiveQueue;
-};
-
-class ArchiveQueueToReportForUser: public ArchiveQueue {
-  using ArchiveQueue::ArchiveQueue;
-};
-
-class ArchiveQueueFailed: public ArchiveQueue {
-  using ArchiveQueue::ArchiveQueue;
-};
-
-class ArchiveQueueToTransferForRepack: public ArchiveQueue{
-  using ArchiveQueue::ArchiveQueue;
-};
-
-class ArchiveQueueToReportToRepackForSuccess : public ArchiveQueue{
-  using ArchiveQueue::ArchiveQueue;
-};
-
-class ArchiveQueueToReportToRepackForFailure: public ArchiveQueue{
-  using ArchiveQueue::ArchiveQueue;
-};
+class ArchiveQueueToTransferForUser: public ArchiveQueue { using ArchiveQueue::ArchiveQueue; };
+class ArchiveQueueToReportForUser: public ArchiveQueue { using ArchiveQueue::ArchiveQueue; };
+class ArchiveQueueFailed: public ArchiveQueue { using ArchiveQueue::ArchiveQueue; };
+class ArchiveQueueToTransferForRepack: public ArchiveQueue{ using ArchiveQueue::ArchiveQueue; };
+class ArchiveQueueToReportToRepackForSuccess : public ArchiveQueue{ using ArchiveQueue::ArchiveQueue; };
+class ArchiveQueueToReportToRepackForFailure: public ArchiveQueue{ using ArchiveQueue::ArchiveQueue; };
   
 }}
diff --git a/objectstore/CMakeLists.txt b/objectstore/CMakeLists.txt
index 8e2dbcda78d4d46206b274e2231cd4173663c3f8..d30eb6aa7f687da85aebd42283ea7f34644a7115 100644
--- a/objectstore/CMakeLists.txt
+++ b/objectstore/CMakeLists.txt
@@ -29,7 +29,8 @@ set (CTAProtoFiles
 
 PROTOBUF3_GENERATE_CPP(CTAProtoSources CTAProtoHeaders ${CTAProtoFiles})
 
-set (CTAProtoDependants objectstore/Agent.hpp
+set (CTAProtoDependants 
+  objectstore/Agent.hpp
   objectstore/ArchiveRequest.hpp
   objectstore/CreationLog.hpp
   objectstore/DriveRegister.hpp
@@ -39,6 +40,7 @@ set (CTAProtoDependants objectstore/Agent.hpp
   objectstore/RepackIndex.hpp
   objectstore/RepackRequest.hpp
   objectstore/RepackQueue.hpp
+  objectstore/RetrieveActivityCountMap.hpp
   objectstore/RetrieveRequest.hpp
   objectstore/RootEntry.hpp
   objectstore/SchedulerGlobalLock.hpp
@@ -100,7 +102,8 @@ add_library (ctaobjectstore SHARED
   GarbageCollector.cpp
   SchedulerGlobalLock.cpp
   ValueCountMap.cpp
-  Helpers.cpp)
+  Helpers.cpp
+  RetrieveActivityCountMap.cpp)
 set_property(TARGET ctaobjectstore PROPERTY SOVERSION "${CTA_SOVERSION}")
 set_property(TARGET ctaobjectstore PROPERTY   VERSION "${CTA_LIBVERSION}")
 
diff --git a/objectstore/DriveRegister.hpp b/objectstore/DriveRegister.hpp
index 1754d81e1fb9789e6c7b3751a5a7f5892c47c747..58cbe002e8c198cb5e65c155d8d6ba5a3a3ea633 100644
--- a/objectstore/DriveRegister.hpp
+++ b/objectstore/DriveRegister.hpp
@@ -77,7 +77,7 @@ public:
   void removeDrive(const std::string & driveName);
 
   /**
-   * JSON dump of the drive 
+   * JSON dump of the drive register
    * @return 
    */
   std::string dump();
diff --git a/objectstore/DriveState.cpp b/objectstore/DriveState.cpp
index 4ee44117dc3a15e7cde082b7345f711405a7fde7..634d9c71ae3d908023aeb6a86a7fec4619a649b9 100644
--- a/objectstore/DriveState.cpp
+++ b/objectstore/DriveState.cpp
@@ -18,9 +18,13 @@
 
 #include "DriveState.hpp"
 #include "GenericObject.hpp"
+#include <google/protobuf/util/json_util.h>
 
 namespace cta { namespace objectstore {
 
+//------------------------------------------------------------------------------
+// DriveState::DriveState()
+//------------------------------------------------------------------------------
 DriveState::DriveState(GenericObject& go):
 ObjectOps<serializers::DriveState, serializers::DriveState_t>(go.objectStore()) {
   // Here we transplant the generic object into the new object
@@ -29,6 +33,9 @@ ObjectOps<serializers::DriveState, serializers::DriveState_t>(go.objectStore())
   getPayloadFromHeader();
 }
 
+//------------------------------------------------------------------------------
+// DriveState::garbageCollect()
+//------------------------------------------------------------------------------
 void DriveState::garbageCollect(const std::string& presumedOwner, AgentReference& agentReference, log::LogContext& lc, cta::catalogue::Catalogue& catalogue) {
   // The drive state is easily replaceable. We just delete it on garbage collection.
   checkPayloadWritable();
@@ -40,47 +47,36 @@ void DriveState::garbageCollect(const std::string& presumedOwner, AgentReference
   lc.log(log::INFO, "In DriveState::garbageCollect(): Garbage collected and removed drive state object.");
 }
 
+//------------------------------------------------------------------------------
+// DriveState::initialize()
+//------------------------------------------------------------------------------
 void DriveState::initialize(const std::string & driveName) {
-  // Setup underlying object
+  // Setup underlying object with defaults from dataStructures::DriveState
   ObjectOps<serializers::DriveState, serializers::DriveState_t>::initialize();
   m_payload.set_drivename(driveName);
-  m_payload.set_host("");
-  m_payload.set_logicallibrary("");
-  m_payload.set_sessionid(0);
-  m_payload.set_bytestransferedinsession(0);
-  m_payload.set_filestransferedinsession(0);
-  m_payload.set_latestbandwidth(0);
-  m_payload.set_sessionstarttime(0);
-  m_payload.set_mountstarttime(0);
-  m_payload.set_transferstarttime(0);
-  m_payload.set_unloadstarttime(0);
-  m_payload.set_unmountstarttime(0);
-  m_payload.set_drainingstarttime(0);
-  // In the absence of info, we sent down now.
-  m_payload.set_downorupstarttime(::time(nullptr));
-  m_payload.set_probestarttime(0);
-  m_payload.set_cleanupstarttime(0);
-  m_payload.set_lastupdatetime(0);
-  m_payload.set_startstarttime(0);
-  m_payload.set_shutdowntime(0);
-  m_payload.set_mounttype((uint32_t)common::dataStructures::MountType::NoMount);
-  m_payload.set_drivestatus((uint32_t)common::dataStructures::DriveStatus::Down);
-  m_payload.set_desiredup(false);
-  m_payload.set_desiredforcedown(false);
-  m_payload.set_currentvid("");
-  m_payload.set_currenttapepool("");
+  cta::common::dataStructures::DriveState driveState;
+  driveState.driveName = driveName;
+  driveState.downOrUpStartTime = ::time(nullptr);
+  setState(driveState);
   // This object is good to go (to storage)
   m_payloadInterpreted = true;
 }
 
-
+//------------------------------------------------------------------------------
+// DriveState::DriveState()
+//------------------------------------------------------------------------------
 DriveState::DriveState(const std::string& address, Backend& os):
   ObjectOps<serializers::DriveState, serializers::DriveState_t>(os, address) { }
 
+//------------------------------------------------------------------------------
+// DriveState::DriveState()
+//------------------------------------------------------------------------------
 DriveState::DriveState(Backend& os):
   ObjectOps<serializers::DriveState, serializers::DriveState_t>(os) { }
 
-
+//------------------------------------------------------------------------------
+// DriveState::getState()
+//------------------------------------------------------------------------------
 cta::common::dataStructures::DriveState DriveState::getState() {
   cta::common::dataStructures::DriveState ret;
   ret.driveName                   = m_payload.drivename();
@@ -108,9 +104,29 @@ cta::common::dataStructures::DriveState DriveState::getState() {
   ret.desiredDriveState.forceDown = m_payload.desiredforcedown();
   ret.currentVid                  = m_payload.currentvid();
   ret.currentTapePool             = m_payload.currenttapepool();
+  ret.currentPriority             = m_payload.current_priority();
+  if (m_payload.has_current_activity())
+    ret.currentActivityAndWeight = 
+      cta::common::dataStructures::DriveState::ActivityAndWeight{
+        m_payload.current_activity(), m_payload.current_activity_weight()};
+  if (m_payload.has_nextmounttype())
+    ret.nextMountType = (common::dataStructures::MountType) m_payload.nextmounttype();
+  if (m_payload.has_nexttapepool())
+    ret.nextTapepool = m_payload.nexttapepool();
+  if (m_payload.has_nextvid())
+    ret.nextVid = m_payload.nextvid();
+  if (m_payload.has_next_priority())
+    ret.nextPriority = m_payload.next_priority();
+  if (m_payload.has_next_activity())
+    ret.nextActivityAndWeight =
+        cta::common::dataStructures::DriveState::ActivityAndWeight{
+          m_payload.next_activity(), m_payload.next_activity_weight()};
   return ret;
 }
 
+//------------------------------------------------------------------------------
+// DriveState::setState()
+//------------------------------------------------------------------------------
 void DriveState::setState(cta::common::dataStructures::DriveState& state) {
   // There should be no need to set the drive name.
   m_payload.set_host(state.host);
@@ -137,6 +153,38 @@ void DriveState::setState(cta::common::dataStructures::DriveState& state) {
   m_payload.set_desiredforcedown(state.desiredDriveState.forceDown);
   m_payload.set_currentvid(state.currentVid);
   m_payload.set_currenttapepool(state.currentTapePool);
+  m_payload.set_current_priority(state.currentPriority);
+  if (state.currentActivityAndWeight) {
+    m_payload.set_current_activity(state.currentActivityAndWeight.value().activity);
+    m_payload.set_current_activity_weight(state.currentActivityAndWeight.value().weight);
+  } else {
+    m_payload.clear_current_activity();
+    m_payload.clear_current_activity_weight();
+  }
+  m_payload.set_nextvid(state.nextVid);
+  m_payload.set_nexttapepool(state.nextTapepool);
+  m_payload.set_next_priority(state.nextPriority);
+  m_payload.set_nextmounttype((uint32_t)state.nextMountType);
+  if (state.nextActivityAndWeight) {
+    m_payload.set_next_activity(state.nextActivityAndWeight.value().activity);
+    m_payload.set_next_activity_weight(state.nextActivityAndWeight.value().weight);
+  } else {
+    m_payload.clear_next_activity();
+    m_payload.clear_next_activity_weight();
+  }
+}
+
+//------------------------------------------------------------------------------
+// DriveState::dump()
+//------------------------------------------------------------------------------
+std::string DriveState::dump() {
+  checkPayloadReadable();
+  google::protobuf::util::JsonPrintOptions options;
+  options.add_whitespace = true;
+  options.always_print_primitive_fields = true;
+  std::string headerDump;
+  google::protobuf::util::MessageToJsonString(m_payload, &headerDump, options);
+  return headerDump;
 }
 
 }} // namespace cta::objectstore
diff --git a/objectstore/DriveState.hpp b/objectstore/DriveState.hpp
index 83fb27263b521dc8f3127843d31e272d412b73ff..4c7d8da08936aebeb5b3324d72a2ff6e5afe2eed 100644
--- a/objectstore/DriveState.hpp
+++ b/objectstore/DriveState.hpp
@@ -45,6 +45,12 @@ public:
   // Data access
   cta::common::dataStructures::DriveState getState();
   void setState(cta::common::dataStructures::DriveState & state);
+  
+  /**
+   * JSON dump of the drive state
+   * @return 
+   */
+  std::string dump();
 };
 
 }} // namespace cta::objectstore
\ No newline at end of file
diff --git a/objectstore/GarbageCollector.cpp b/objectstore/GarbageCollector.cpp
index 279a2234509c3022e383b9fd6965d713ead816e6..6ff68c09a4772e5e8803ce99735dd61d315efc73 100644
--- a/objectstore/GarbageCollector.cpp
+++ b/objectstore/GarbageCollector.cpp
@@ -559,7 +559,7 @@ void GarbageCollector::OwnedObjectSorter::lockFetchAndUpdateRetrieveJobs(Agent&
         for (auto &tf: rr->getArchiveFile().tapeFiles) {
           if (tf.vid == vid) {
             jta.push_back({tf.copyNb, tf.fSeq, rr->getAddressIfSet(), rr->getArchiveFile().fileSize, 
-                rr->getRetrieveFileQueueCriteria().mountPolicy, rr->getEntryLog().time});
+                rr->getRetrieveFileQueueCriteria().mountPolicy, rr->getEntryLog().time, rr->getActivity()});
           }
         }
       }
diff --git a/objectstore/GarbageCollectorTest.cpp b/objectstore/GarbageCollectorTest.cpp
index 0f786e45579ee25c495ac7fcee596fa5480158d6..249b1f99ce40875ddf88c426298ded9f2c4995f7 100644
--- a/objectstore/GarbageCollectorTest.cpp
+++ b/objectstore/GarbageCollectorTest.cpp
@@ -608,7 +608,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
       cta::objectstore::ScopedExclusiveLock rql(rq);
       rq.fetch();
       std::list <cta::objectstore::RetrieveQueue::JobToAdd> jta;
-      jta.push_back({1,rqc.archiveFile.tapeFiles.front().fSeq, rr.getAddressIfSet(), rqc.archiveFile.fileSize, rqc.mountPolicy, sReq.creationLog.time});
+      jta.push_back({1,rqc.archiveFile.tapeFiles.front().fSeq, rr.getAddressIfSet(), rqc.archiveFile.fileSize, rqc.mountPolicy, sReq.creationLog.time, cta::nullopt});
       rq.addJobsAndCommit(jta, agentRef, lc);
     }
     if (pass < 5) { pass++; continue; }
diff --git a/objectstore/GenericObject.cpp b/objectstore/GenericObject.cpp
index b0cd9a98d0f5ec00e20a931746cd785785417e5e..c0f6806a2f0d464d61b39f9c67eefd08028b6431 100644
--- a/objectstore/GenericObject.cpp
+++ b/objectstore/GenericObject.cpp
@@ -187,6 +187,9 @@ std::string GenericObject::dump() {
     case serializers::DriveRegister_t:
       bodyDump = dumpWithType<DriveRegister>(this);
       break;
+    case serializers::DriveState_t:
+      bodyDump = dumpWithType<DriveState>(this);
+      break;
     case serializers::ArchiveQueue_t:
       bodyDump = dumpWithType<cta::objectstore::ArchiveQueue>(this);
       break;
diff --git a/objectstore/Helpers.cpp b/objectstore/Helpers.cpp
index 6eff98a143208124434afcc62528a40515e80633..a8d1489f6783ec8ff8f2956e6e90f8ad56c88921 100644
--- a/objectstore/Helpers.cpp
+++ b/objectstore/Helpers.cpp
@@ -613,7 +613,7 @@ void Helpers::getLockedAndFetchedDriveState(DriveState& driveState, ScopedExclus
       }
     } catch (DriveRegister::NoSuchDrive &) {
       // OK, we do need to create the drive status.
-      driveState.setAddress(agentReference.nextId(std::string ("DriveStatus-")+driveName));
+      driveState.setAddress(agentReference.nextId(std::string ("DriveState-")+driveName));
       driveState.initialize(driveName);
       agentReference.addToOwnership(driveState.getAddressIfSet(), be);
       driveState.setOwner(agentReference.getAgentAddress());
diff --git a/objectstore/RetrieveActivityCountMap.cpp b/objectstore/RetrieveActivityCountMap.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fc393567f6bd81e2cafd33ebe65bfa58017c0557
--- /dev/null
+++ b/objectstore/RetrieveActivityCountMap.cpp
@@ -0,0 +1,165 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "RetrieveActivityCountMap.hpp"
+#include "common/exception/Exception.hpp"
+
+#include <algorithm>
+#include <sstream>
+#include <google/protobuf/util/json_util.h>
+
+namespace cta { namespace objectstore {
+
+//------------------------------------------------------------------------------
+// Constructor
+//------------------------------------------------------------------------------
+RetrieveActivityCountMap::RetrieveActivityCountMap(
+  google::protobuf::RepeatedPtrField<serializers::RetrieveActivityCountPair>* retrieveActivityCountMap):
+  m_activityCountMap(*retrieveActivityCountMap) { }
+
+//------------------------------------------------------------------------------
+// RetrieveActivityCountMap::incCount()
+//------------------------------------------------------------------------------
+void RetrieveActivityCountMap::incCount(const RetrieveActivityDescription& activityDescription) {
+  // Find the entry for this value (might fail)
+  auto counter = std::find(m_activityCountMap.begin(), m_activityCountMap.end(), activityDescription);
+  if (counter != m_activityCountMap.end()) {
+    if (counter->count() < 1) {
+      std::stringstream err;
+      err << "In RetrieveActivityCountMap::incCount: unexpected count value=" << toString(counter->retrieve_activity_weight()) 
+          << " count=" << counter->count();
+      throw  cta::exception::Exception(err.str());
+    } else {
+      counter->set_count(counter->count()+1);
+      // Update the weight to the latest version (in case weights got updated since last time).
+      if (counter->retrieve_activity_weight().creation_time() < activityDescription.creationTime) {
+        counter->mutable_retrieve_activity_weight()->set_weight(activityDescription.weight);
+        counter->mutable_retrieve_activity_weight()->set_creation_time(activityDescription.creationTime);
+      }
+    }
+  } else {
+    // Create the new entry if necessary.
+    auto newCounter = m_activityCountMap.Add();
+    newCounter->mutable_retrieve_activity_weight()->set_priority(activityDescription.priority);
+    newCounter->mutable_retrieve_activity_weight()->set_disk_instance_name(activityDescription.diskInstanceName);
+    newCounter->mutable_retrieve_activity_weight()->set_activity(activityDescription.activity);
+    newCounter->mutable_retrieve_activity_weight()->set_weight(activityDescription.weight);
+    newCounter->mutable_retrieve_activity_weight()->set_creation_time(activityDescription.creationTime);
+    newCounter->set_count(1);
+  }
+}
+
+//------------------------------------------------------------------------------
+// RetrieveActivityCountMap::decCount()
+//------------------------------------------------------------------------------
+void RetrieveActivityCountMap::decCount(const RetrieveActivityDescription& activityDescription) {
+  // Find the entry for this value. Failing is an error.
+  auto counter = std::find(m_activityCountMap.begin(), m_activityCountMap.end(), activityDescription);
+  if (counter == m_activityCountMap.end()) {
+    std::stringstream err;
+    err << "In RetrieveActivityCountMap::decCount: no entry found for value=" << toString(activityDescription);
+    throw  cta::exception::Exception(err.str());
+  }
+  // Decrement the value and remove the entry if needed.
+  if (counter->count() < 1) {
+    std::stringstream err;
+    err << "In RetrieveActivityCountMap::decCount: entry with wrong count value=" << toString(activityDescription) << " count=" << counter->count();
+    throw  cta::exception::Exception(err.str());
+  }
+  counter->set_count(counter->count()-1);
+  if (!counter->count()) {
+    auto size=m_activityCountMap.size();
+    counter->Swap(&(*(m_activityCountMap.end()-1)));
+    m_activityCountMap.RemoveLast();
+    // Cross check that the size has decreased.
+    if (size -1 != m_activityCountMap.size()) {
+      std::stringstream err;
+      err << "In RetrieveActivityCountMap::decCount: unexpected size after trimming empty entry. expectedSize=" << size -1 << " newSize=" << m_activityCountMap.size();
+      throw  cta::exception::Exception(err.str());
+    }
+    // Cross check we cannot find the value.
+    auto counter2 = std::find(m_activityCountMap.begin(), m_activityCountMap.end(), activityDescription);
+    if (m_activityCountMap.end() != counter2) {
+      std::stringstream err;
+      err << "In RetrieveActivityCountMap::decCount: still found the value after trimming empty entry. value=" << toString(counter2->retrieve_activity_weight()) << " count=" << counter2->count();
+      throw  cta::exception::Exception(err.str());
+    }
+  }
+}
+
+//------------------------------------------------------------------------------
+// RetrieveActivityCountMap::getActivities()
+//------------------------------------------------------------------------------
+std::list<RetrieveActivityDescription> RetrieveActivityCountMap::getActivities(uint64_t priority) {
+  std::list<RetrieveActivityDescription> ret;
+  for (auto & ad: m_activityCountMap) {
+    if (ad.retrieve_activity_weight().priority() == priority)
+      ret.push_back({ad.retrieve_activity_weight().priority(), ad.retrieve_activity_weight().disk_instance_name(), 
+          ad.retrieve_activity_weight().activity(), ad.retrieve_activity_weight().creation_time(),
+          ad.retrieve_activity_weight().weight(), ad.count()});
+  }
+  return ret;
+}
+
+
+//------------------------------------------------------------------------------
+// RetrieveActivityCountMap::clear()
+//------------------------------------------------------------------------------
+void RetrieveActivityCountMap::clear() {
+  m_activityCountMap.Clear();
+}
+
+//------------------------------------------------------------------------------
+// operator==()
+//------------------------------------------------------------------------------
+bool operator==(const serializers::RetrieveActivityCountPair & serialized, const RetrieveActivityDescription & memory) {
+  return (serialized.retrieve_activity_weight().priority() == memory.priority)
+      && (serialized.retrieve_activity_weight().disk_instance_name() == memory.diskInstanceName)
+      && (serialized.retrieve_activity_weight().activity() == memory.activity);
+}
+
+//------------------------------------------------------------------------------
+// toString()
+//------------------------------------------------------------------------------
+std::string toString(const RetrieveActivityDescription & ad) {
+  serializers::RetrieveActivityWeight raw;
+  raw.set_priority(ad.priority);
+  raw.set_disk_instance_name(ad.diskInstanceName);
+  raw.set_activity(ad.activity);
+  raw.set_creation_time(ad.creationTime);
+  raw.set_weight(ad.weight);
+  return toString(raw);
+}
+
+//------------------------------------------------------------------------------
+// toString()
+//------------------------------------------------------------------------------
+std::string toString(const serializers::RetrieveActivityWeight & raw){
+  using namespace google::protobuf::util;
+
+  std::string json;
+  JsonPrintOptions options;
+
+  options.always_print_primitive_fields = true;
+  MessageToJsonString(raw, &json, options);
+
+  return json;
+}
+    
+
+}} // namespace cta::objectstore.
\ No newline at end of file
diff --git a/objectstore/RetrieveActivityCountMap.hpp b/objectstore/RetrieveActivityCountMap.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..63912143364503b073ea1b5542c006b35c1f7c7a
--- /dev/null
+++ b/objectstore/RetrieveActivityCountMap.hpp
@@ -0,0 +1,56 @@
+/*
+ * The CERN Tape Archive (CTA) project
+ * Copyright (C) 2015  CERN
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "objectstore/cta.pb.h"
+
+#include <list>
+
+namespace cta { namespace objectstore {
+
+struct RetrieveActivityDescription {
+  uint64_t priority;
+  std::string diskInstanceName;
+  std::string activity;
+  time_t creationTime;
+  double weight;
+  uint64_t count;
+};
+
+/** A helper class allowing manipulation of arrays of RetrieveActivityCountPairs, used as containers for running
+ * counters for properties with multiple possible values. When considering the retrieve mounts, all activities
+ * will be considered for the same mount (and highest priority one will be accounted). So this class does not
+ * select any and gives the full list in getActivities(). Having multiple activities sharing the drive is not
+ * expected to be a frequent occurrence. */
+class RetrieveActivityCountMap {
+public:
+  RetrieveActivityCountMap (google::protobuf::RepeatedPtrField<serializers::RetrieveActivityCountPair>* retrieveActivityCountMap);
+  void incCount(const RetrieveActivityDescription & activityDescription);
+  void decCount(const RetrieveActivityDescription & activityDescription);
+  void clear();
+  std::list<RetrieveActivityDescription> getActivities(uint64_t priority);
+private:
+  google::protobuf::RepeatedPtrField<serializers::RetrieveActivityCountPair>& m_activityCountMap;
+};
+
+std::string toString(const RetrieveActivityDescription &);
+std::string toString(const serializers::RetrieveActivityWeight &);
+bool operator==(const serializers::RetrieveActivityCountPair &, const RetrieveActivityDescription &);
+
+}} // namespace cta::objectstore
\ No newline at end of file
diff --git a/objectstore/RetrieveQueue.cpp b/objectstore/RetrieveQueue.cpp
index 7eada734fcb264979109762bda188fedfc2d63e8..755e48550338480eb412a9098909741e884246d1 100644
--- a/objectstore/RetrieveQueue.cpp
+++ b/objectstore/RetrieveQueue.cpp
@@ -22,6 +22,7 @@
 #include "EntryLogSerDeser.hpp"
 #include "ValueCountMap.hpp"
 #include "AgentReference.hpp"
+#include "RetrieveActivityCountMap.hpp"
 #include <google/protobuf/util/json_util.h>
 
 namespace cta { namespace objectstore {
@@ -287,6 +288,7 @@ void RetrieveQueue::addJobsAndCommit(std::list<JobToAdd> & jobsToAdd, AgentRefer
   ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap());
   ValueCountMap priorityMap(m_payload.mutable_prioritymap());
   ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap());
+  RetrieveActivityCountMap retrieveActivityCountMap(m_payload.mutable_activity_map());
   // We need to figure out which job will be added to which shard.
   // We might have to split shards if they would become too big.
   // For a given jobs, there a 4 possible cases:
@@ -462,6 +464,9 @@ void RetrieveQueue::addJobsAndCommit(std::list<JobToAdd> & jobsToAdd, AgentRefer
       maxDriveAllowedMap.incCount(j.policy.maxDrivesAllowed);
       priorityMap.incCount(j.policy.retrievePriority);
       minRetrieveRequestAgeMap.incCount(j.policy.retrieveMinRequestAge);
+      if (j.activityDescription) {
+        retrieveActivityCountMap.incCount(j.activityDescription.value());
+      }
       // oldestjobcreationtime is initialized to 0 when 
       if (m_payload.oldestjobcreationtime()) {
         if ((uint64_t)j.startTime < m_payload.oldestjobcreationtime())
@@ -568,6 +573,10 @@ RetrieveQueue::JobsSummary RetrieveQueue::getJobsSummary() {
     ret.priority = priorityMap.maxValue();
     ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap());
     ret.minRetrieveRequestAge = minRetrieveRequestAgeMap.minValue();
+    RetrieveActivityCountMap retrieveActivityCountMap(m_payload.mutable_activity_map());
+    for (auto ra: retrieveActivityCountMap.getActivities(ret.priority)) {
+      ret.activityCounts.push_back({ra.diskInstanceName, ra.activity, ra.weight, ra.count});
+    }
   } else {
     ret.maxDrivesAllowed = 0;
     ret.priority = 0;
@@ -646,6 +655,7 @@ void RetrieveQueue::removeJobsAndCommit(const std::list<std::string>& jobsToRemo
   ValueCountMap maxDriveAllowedMap(m_payload.mutable_maxdrivesallowedmap());
   ValueCountMap priorityMap(m_payload.mutable_prioritymap());
   ValueCountMap minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap());
+  RetrieveActivityCountMap retrieveActivityCountMap(m_payload.mutable_activity_map());
   // Make a working copy of the jobs to remove. We will progressively trim this local list.
   auto localJobsToRemove = jobsToRemove;
   // The jobs are expected to be removed from the front shards first (poped in order)
@@ -672,6 +682,14 @@ void RetrieveQueue::removeJobsAndCommit(const std::list<std::string>& jobsToRemo
       maxDriveAllowedMap.decCount(j.maxDrivesAllowed);
       priorityMap.decCount(j.priority);
       minRetrieveRequestAgeMap.decCount(j.minRetrieveRequestAge);
+      if (j.activityDescription) {
+        // We build up only a partial activity description, but this is enough to decCount.
+        RetrieveActivityDescription activityDescription;
+        activityDescription.priority = j.priority;
+        activityDescription.diskInstanceName = j.activityDescription.value().diskInstanceName;
+        activityDescription.activity = j.activityDescription.value().activity;
+        retrieveActivityCountMap.decCount(activityDescription);
+      }
     }
     // In all cases, we should update the global statistics.
     m_payload.set_retrievejobscount(m_payload.retrievejobscount() - removalResult.jobsRemoved);
diff --git a/objectstore/RetrieveQueue.hpp b/objectstore/RetrieveQueue.hpp
index 5db628853406dcbf8f1b7edaa2f8195240c1f4eb..d9566754e42c0a23d05491452a6470d247d37674 100644
--- a/objectstore/RetrieveQueue.hpp
+++ b/objectstore/RetrieveQueue.hpp
@@ -22,6 +22,7 @@
 #include "objectstore/cta.pb.h"
 #include "RetrieveRequest.hpp"
 #include "scheduler/RetrieveRequestDump.hpp"
+#include "RetrieveActivityCountMap.hpp"
 
 namespace cta { namespace objectstore {
   
@@ -65,6 +66,7 @@ public:
     uint64_t fileSize;
     cta::common::dataStructures::MountPolicy policy;
     time_t startTime;
+    optional<RetrieveActivityDescription> activityDescription; 
   };
   void addJobsAndCommit(std::list<JobToAdd> & jobsToAdd, AgentReference & agentReference, log::LogContext & lc);
   // This version will check for existence of the job in the queue before
@@ -82,6 +84,13 @@ public:
     uint64_t priority;
     uint64_t minRetrieveRequestAge;
     uint64_t maxDrivesAllowed;
+    struct ActivityCount {
+      std::string diskInstanceName;
+      std::string activity;
+      double weight;
+      uint64_t count;
+    };
+    std::list<ActivityCount> activityCounts;
   };
   JobsSummary getJobsSummary();
   struct JobDump {
@@ -148,36 +157,12 @@ private:
   uint64_t m_maxShardSize = c_defaultMaxShardSize;
 };
 
-class RetrieveQueueToTransferForUser : public RetrieveQueue {
-public:
-  template<typename...Ts> RetrieveQueueToTransferForUser(Ts&...args): RetrieveQueue(args...) {}
-};
-
-class RetrieveQueueToReportForUser : public RetrieveQueue {
-public:
-  template<typename...Ts> RetrieveQueueToReportForUser(Ts&...args): RetrieveQueue(args...) {}
-};
-
-class RetrieveQueueFailed : public RetrieveQueue {
-public:
-  template<typename...Ts> RetrieveQueueFailed(Ts&...args): RetrieveQueue(args...) {}
-};
-  
-class RetrieveQueueToReportToRepackForSuccess : public RetrieveQueue {
-public:
-  template<typename...Ts> RetrieveQueueToReportToRepackForSuccess(Ts&...args): RetrieveQueue(args...) {}
-};
-
-class RetrieveQueueToReportToRepackForFailure: public RetrieveQueue{
-public:
-  template<typename...Ts> RetrieveQueueToReportToRepackForFailure(Ts&...args): RetrieveQueue(args...) {}
-};
-
-class RetrieveQueueToTransferForRepack : public RetrieveQueue {
-public:
-  template<typename...Ts> RetrieveQueueToTransferForRepack(Ts&...args): RetrieveQueue(args...) {}
-};
-
+class RetrieveQueueToTransferForUser : public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
+class RetrieveQueueToReportForUser : public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
+class RetrieveQueueFailed : public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
+class RetrieveQueueToReportToRepackForSuccess : public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
+class RetrieveQueueToReportToRepackForFailure: public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
+class RetrieveQueueToTransferForRepack : public RetrieveQueue { using RetrieveQueue::RetrieveQueue; };
 
 }}
 
diff --git a/objectstore/RetrieveQueueAlgorithms.hpp b/objectstore/RetrieveQueueAlgorithms.hpp
index 1cb0627f45442cd1a7a1887668d95a170d900ecf..45dd12db5e3914b59ba2ae0c082e95f00d9530f0 100644
--- a/objectstore/RetrieveQueueAlgorithms.hpp
+++ b/objectstore/RetrieveQueueAlgorithms.hpp
@@ -29,7 +29,9 @@ struct ContainerTraits<RetrieveQueue,C>
 {
   struct ContainerSummary : public RetrieveQueue::JobsSummary {
     ContainerSummary() : RetrieveQueue::JobsSummary() {}
-    ContainerSummary(const RetrieveQueue::JobsSummary &c) : RetrieveQueue::JobsSummary({c.jobs,c.bytes,c.oldestJobStartTime,c.priority,c.minRetrieveRequestAge,c.maxDrivesAllowed}) {}
+    ContainerSummary(const RetrieveQueue::JobsSummary &c) : 
+      RetrieveQueue::JobsSummary({c.jobs,c.bytes,c.oldestJobStartTime,c.priority,
+          c.minRetrieveRequestAge,c.maxDrivesAllowed,c.activityCounts}) {}
     void addDeltaToLog(const ContainerSummary&, log::ScopedParamContainer&) const;
   };
   
@@ -42,6 +44,7 @@ struct ContainerTraits<RetrieveQueue,C>
     uint64_t filesize;
     cta::common::dataStructures::MountPolicy policy;
     serializers::RetrieveJobStatus status;
+    optional<RetrieveActivityDescription> activityDescription;
     typedef std::list<InsertedElement> list;
   };
 
@@ -276,7 +279,7 @@ addReferencesAndCommit(Container &cont, typename InsertedElement::list &elemMemC
   std::list<RetrieveQueue::JobToAdd> jobsToAdd;
   for (auto &e : elemMemCont) {
     RetrieveRequest &rr = *e.retrieveRequest;
-    jobsToAdd.push_back({e.copyNb, e.fSeq, rr.getAddressIfSet(), e.filesize, e.policy, ::time(nullptr)});
+    jobsToAdd.push_back({e.copyNb, e.fSeq, rr.getAddressIfSet(), e.filesize, e.policy, ::time(nullptr), e.activityDescription});
   }
   cont.addJobsAndCommit(jobsToAdd, agentRef, lc);
 }
@@ -289,7 +292,7 @@ addReferencesIfNecessaryAndCommit(Container& cont, typename InsertedElement::lis
   std::list<RetrieveQueue::JobToAdd> jobsToAdd;
   for (auto &e : elemMemCont) {
     RetrieveRequest &rr = *e.retrieveRequest;
-    jobsToAdd.push_back({e.copyNb, e.fSeq, rr.getAddressIfSet(), e.filesize, e.policy, ::time(nullptr)});
+    jobsToAdd.push_back({e.copyNb, e.fSeq, rr.getAddressIfSet(), e.filesize, e.policy, ::time(nullptr), e.activityDescription});
   }
   cont.addJobsIfNecessaryAndCommit(jobsToAdd, agentRef, lc);
 }
diff --git a/objectstore/RetrieveQueueShard.cpp b/objectstore/RetrieveQueueShard.cpp
index 96d6aee89054645804a790b3e71ea2e84c67bb3d..76ebe1e5c36e03698b3c8f7894f8dbade013fe38 100644
--- a/objectstore/RetrieveQueueShard.cpp
+++ b/objectstore/RetrieveQueueShard.cpp
@@ -96,12 +96,15 @@ auto RetrieveQueueShard::removeJobs(const std::list<std::string>& jobsToRemove)
           const auto & j = jl->Get(i);
           ret.removedJobs.emplace_back(JobInfo());
           ret.removedJobs.back().address = j.address();
+          ret.removedJobs.back().fSeq = j.fseq();
           ret.removedJobs.back().copyNb = j.copynb();
           ret.removedJobs.back().maxDrivesAllowed = j.maxdrivesallowed();
           ret.removedJobs.back().minRetrieveRequestAge = j.minretrieverequestage();
           ret.removedJobs.back().priority = j.priority();
           ret.removedJobs.back().size = j.size();
           ret.removedJobs.back().startTime = j.starttime();
+          if (j.has_activity())
+            ret.removedJobs.back().activityDescription = JobInfo::ActivityDescription{ j.disk_instance_name(), j.activity() };
           ret.bytesRemoved += j.size();
           totalSize -= j.size();
           ret.jobsRemoved++;
@@ -136,7 +139,10 @@ auto RetrieveQueueShard::dumpJobs() -> std::list<JobInfo> {
   std::list<JobInfo> ret;
   for (auto &j: m_payload.retrievejobs()) {
     ret.emplace_back(JobInfo{j.size(), j.address(), (uint16_t)j.copynb(), j.priority(), 
-        j.minretrieverequestage(), j.maxdrivesallowed(), (time_t)j.starttime(), j.fseq()});
+        j.minretrieverequestage(), j.maxdrivesallowed(), (time_t)j.starttime(), j.fseq(), nullopt});
+    if (j.has_activity()) {
+      ret.back().activityDescription = JobInfo::ActivityDescription{ j.disk_instance_name(), j.activity() };
+    }
   }
   return ret;
 }
@@ -154,6 +160,12 @@ std::list<RetrieveQueue::JobToAdd> RetrieveQueueShard::dumpJobsToAdd() {
     ret.back().policy.retrievePriority = j.priority();
     ret.back().startTime = j.starttime();
     ret.back().retrieveRequestAddress = j.address();
+    if (j.has_activity()) {
+      RetrieveActivityDescription rad;
+      rad.diskInstanceName = j.disk_instance_name();
+      rad.activity = j.activity();
+      ret.back().activityDescription = rad;
+    }
   }
   return ret;
 }
@@ -252,6 +264,10 @@ void RetrieveQueueShard::addJob(const RetrieveQueue::JobToAdd& jobToAdd) {
   j->set_maxdrivesallowed(jobToAdd.policy.maxDrivesAllowed);
   j->set_priority(jobToAdd.policy.retrievePriority);
   j->set_minretrieverequestage(jobToAdd.policy.retrieveMinRequestAge);
+  if (jobToAdd.activityDescription) {
+    j->set_disk_instance_name(jobToAdd.activityDescription.value().diskInstanceName);
+    j->set_activity(jobToAdd.activityDescription.value().activity);
+  }
   m_payload.set_retrievejobstotalsize(m_payload.retrievejobstotalsize()+jobToAdd.fileSize);
   // Sort the shard
   size_t jobIndex = m_payload.retrievejobs_size() - 1;
@@ -284,6 +300,10 @@ void RetrieveQueueShard::addJobsThroughCopy(JobsToAddSet& jobsToAdd) {
     rjp.set_maxdrivesallowed(jobToAdd.policy.maxDrivesAllowed);
     rjp.set_priority(jobToAdd.policy.retrievePriority);
     rjp.set_minretrieverequestage(jobToAdd.policy.retrieveMinRequestAge);
+    if (jobToAdd.activityDescription) {
+      rjp.set_disk_instance_name(jobToAdd.activityDescription.value().diskInstanceName);
+      rjp.set_activity(jobToAdd.activityDescription.value().activity);
+    }
     i = serializedJobsToAdd.insert(i, rjp);
     totalSize+=jobToAdd.fileSize;
   }
@@ -296,8 +316,4 @@ void RetrieveQueueShard::addJobsThroughCopy(JobsToAddSet& jobsToAdd) {
   m_payload.set_retrievejobstotalsize(totalSize);
 }
 
-
-
-
-
 }}
\ No newline at end of file
diff --git a/objectstore/RetrieveQueueShard.hpp b/objectstore/RetrieveQueueShard.hpp
index 01c231740890084da9cfd7a2c0f6256c83b24f0f..7da9d10d72144578e2975e25332770e85d0b489b 100644
--- a/objectstore/RetrieveQueueShard.hpp
+++ b/objectstore/RetrieveQueueShard.hpp
@@ -53,6 +53,11 @@ public:
     uint64_t maxDrivesAllowed;
     time_t startTime;
     uint64_t fSeq;
+    struct ActivityDescription {
+      std::string diskInstanceName;
+      std::string activity;
+    };
+    optional<ActivityDescription> activityDescription;
   };
   std::list<JobInfo> dumpJobs();
   
diff --git a/objectstore/RetrieveQueueTest.cpp b/objectstore/RetrieveQueueTest.cpp
index c9f114016e8a241be582bb2782119b4f2ee2274e..098114e6289ee943527374f1714ef091042cd2de 100644
--- a/objectstore/RetrieveQueueTest.cpp
+++ b/objectstore/RetrieveQueueTest.cpp
@@ -89,8 +89,8 @@ TEST(ObjectStore, RetrieveQueueShardingAndOrderingTest) {
     rq.insert();
   }
   {
-    // Read the queue and insert jobs 3 by 3 (the insertion size is 
-    // expected to be << shard size (5 here).
+    // Read the queue and insert jobs 10 by 10 (the insertion size is
+    // expected to be << shard size, 25 here).
     auto jobsToAddNow = jobsToAdd;
     while (jobsToAddNow.size()) {
       std::list<cta::objectstore::RetrieveQueue::JobToAdd> jobsBatch;
@@ -154,4 +154,140 @@ TEST(ObjectStore, RetrieveQueueShardingAndOrderingTest) {
   ASSERT_FALSE(rq.exists());
 }
 
+TEST(ObjectStore, RetrieveQueueActivityCounts) {
+  cta::objectstore::BackendVFS be;
+  cta::log::DummyLogger dl("dummy", "dummyLogger");
+  cta::log::LogContext lc(dl);
+  cta::objectstore::AgentReference agentRef("unitTest", dl);
+  std::mt19937 gen((std::random_device())());
+  // Create 100 job references.
+  std::list<cta::objectstore::RetrieveQueue::JobToAdd> jobsToAdd;
+  const size_t totalJobs = 100, shardSize=25, batchSize=10;
+  for (size_t i=0; i<totalJobs; i++) {
+    cta::objectstore::RetrieveQueue::JobToAdd jta;
+    jta.copyNb = 1;
+    jta.fSeq = i;
+    jta.fileSize = 1000;
+    jta.policy.maxDrivesAllowed = 10;
+    jta.policy.retrieveMinRequestAge = 10;
+    jta.policy.retrievePriority = 1;
+    jta.startTime = ::time(nullptr);
+    std::stringstream address;
+    address << "someRequest-" << i;
+    jta.retrieveRequestAddress = address.str();
+    // Some (but not all) jobs will be assigned an activity (and weight).
+    if (!(i % 3)) { 
+      cta::objectstore::RetrieveActivityDescription ad;
+      ad.diskInstanceName = "diskInstance";
+      ad.creationTime = jta.startTime;
+      ad.priority = 1;
+      if (!(i % 2)) {
+        ad.activity = "A";
+        ad.weight = 0.1;
+      } else {
+        ad.activity = "B";
+        ad.weight = 0.2;
+      }
+      jta.activityDescription = ad;
+    }
+    jobsToAdd.push_back(jta);
+  }
+  // By construction, first job has lowest start time.
+  auto minStartTime=jobsToAdd.front().startTime;
+  std::string retrieveQueueAddress = agentRef.nextId("RetrieveQueue");
+  { 
+    // Try to create the retrieve queue
+    cta::objectstore::RetrieveQueue rq(retrieveQueueAddress, be);
+    rq.initialize("V12345");
+    // Set a small shard size to validate multi shard behaviors
+    rq.setShardSize(shardSize);
+    rq.insert();
+  }
+  {
+    // Read the queue and insert jobs 10 by 10 (the insertion size is
+    // expected to be << shard size, 25 here).
+    auto jobsToAddNow = jobsToAdd;
+    while (jobsToAddNow.size()) {
+      std::list<cta::objectstore::RetrieveQueue::JobToAdd> jobsBatch;
+      for (size_t i=0; i<batchSize; i++) {
+        if (jobsToAddNow.size()) {
+          auto j=std::next(jobsToAddNow.begin(), (std::uniform_int_distribution<size_t>(0, jobsToAddNow.size() -1))(gen));
+          jobsBatch.emplace_back(*j);
+          jobsToAddNow.erase(j);
+        }
+      }
+      cta::objectstore::RetrieveQueue rq(retrieveQueueAddress, be);
+      cta::objectstore::ScopedExclusiveLock rql(rq);
+      rq.fetch();
+      rq.addJobsAndCommit(jobsBatch, agentRef, lc);
+    }
+  }
+  {
+    // Try to read back
+    cta::objectstore::RetrieveQueue rq(retrieveQueueAddress, be);
+    ASSERT_THROW(rq.fetch(), cta::exception::Exception);
+    cta::objectstore::ScopedExclusiveLock lock(rq);
+    ASSERT_NO_THROW(rq.fetch());
+    // Pop jobs while we can. They should come out in fseq order as there is
+    // no interleaved push and pop.
+    auto jobsSummary = rq.getJobsSummary();
+    ASSERT_EQ(minStartTime, jobsSummary.oldestJobStartTime);
+    // File fSeqs are in [0, 99]: 34 multiples of 3 (0 included); evens are activity A, odds are B, 17 each.
+    ASSERT_EQ(2, jobsSummary.activityCounts.size());
+    typedef decltype(jobsSummary.activityCounts.front()) acCount;
+    auto jsA = std::find_if(jobsSummary.activityCounts.begin(), jobsSummary.activityCounts.end(), [](const acCount &ac){return ac.activity == "A"; });
+    ASSERT_NE(jobsSummary.activityCounts.end(), jsA);
+    ASSERT_EQ(17, jsA->count);
+    ASSERT_EQ(0.1, jsA->weight);
+    auto jsB = std::find_if(jobsSummary.activityCounts.begin(), jobsSummary.activityCounts.end(), [](const acCount &ac){return ac.activity == "B"; });
+    ASSERT_NE(jobsSummary.activityCounts.end(), jsB);
+    ASSERT_EQ(17, jsB->count);
+    ASSERT_EQ(0.2, jsB->weight);
+    uint64_t nextExpectedFseq=0;
+    while (rq.getJobsSummary().jobs) {
+      auto candidateJobs = rq.getCandidateList(std::numeric_limits<uint64_t>::max(), 50, std::set<std::string>());
+      std::set<std::string> jobsToSkip;
+      std::list<std::string> jobsToDelete;
+      for (auto &j: candidateJobs.candidates) {
+        std::stringstream address;
+        address << "someRequest-" << nextExpectedFseq;
+        ASSERT_EQ(address.str(), j.address);
+        jobsToSkip.insert(j.address);
+        jobsToDelete.emplace_back(j.address);
+        nextExpectedFseq++;
+      }
+      auto candidateJobs2 = rq.getCandidateList(std::numeric_limits<uint64_t>::max(), 1, jobsToSkip);
+      if (candidateJobs2.candidateFiles) {
+        std::stringstream address;
+        address << "someRequest-" << nextExpectedFseq;
+        ASSERT_EQ(address.str(), candidateJobs2.candidates.front().address);
+      }
+      rq.removeJobsAndCommit(jobsToDelete);
+      // We should empty the queue in 2 rounds. After the first one, we get the jobs 0-49 out.
+      auto jobsSummary2 = rq.getJobsSummary();
+      if (jobsSummary2.jobs) {
+        auto jsA2 = std::find_if(jobsSummary2.activityCounts.begin(), jobsSummary2.activityCounts.end(), [](const acCount &ac){return ac.activity == "A"; });
+        ASSERT_NE(jobsSummary2.activityCounts.end(), jsA2);
+        ASSERT_EQ(8, jsA2->count);
+        ASSERT_EQ(0.1, jsA2->weight);
+        auto jsB2 = std::find_if(jobsSummary2.activityCounts.begin(), jobsSummary2.activityCounts.end(), [](const acCount &ac){return ac.activity == "B"; });
+        ASSERT_NE(jobsSummary2.activityCounts.end(), jsB2);
+        ASSERT_EQ(9, jsB2->count);
+        ASSERT_EQ(0.2, jsB2->weight);
+      } else {
+        // Of course, we should have no activity.
+        ASSERT_EQ(0, jobsSummary2.activityCounts.size());
+      }
+    }
+    ASSERT_EQ(nextExpectedFseq, totalJobs);
+  }
+
+  // Delete the root entry
+  cta::objectstore::RetrieveQueue rq(retrieveQueueAddress, be);
+  cta::objectstore::ScopedExclusiveLock lock(rq);
+  rq.fetch();
+  rq.removeIfEmpty(lc);
+  ASSERT_FALSE(rq.exists()); 
+}
+
 }
diff --git a/objectstore/RetrieveRequest.cpp b/objectstore/RetrieveRequest.cpp
index 7325e3fc71bf22eddf8f7d4b2e1e87fa871998d6..f1a5be8b1fdf95f739e0d95a1aee476e5052877f 100644
--- a/objectstore/RetrieveRequest.cpp
+++ b/objectstore/RetrieveRequest.cpp
@@ -158,7 +158,16 @@ queueForFailure:;
     objectstore::MountPolicySerDeser mp;
     std::list<RetrieveQueue::JobToAdd> jta;
     jta.push_back({activeCopyNb, activeFseq, getAddressIfSet(), m_payload.archivefile().filesize(), 
-      mp, (signed)m_payload.schedulerrequest().entrylog().time()});
+      mp, (signed)m_payload.schedulerrequest().entrylog().time(), nullopt});
+    if (m_payload.has_activity_weight()) {
+      RetrieveActivityDescription activityDescription;
+      activityDescription.priority = m_payload.activity_weight().priority();
+      activityDescription.diskInstanceName = m_payload.activity_weight().disk_instance_name();
+      activityDescription.activity = m_payload.activity_weight().activity();
+      activityDescription.weight = m_payload.activity_weight().weight();
+      activityDescription.creationTime = m_payload.activity_weight().creation_time();
+      jta.back().activityDescription = activityDescription;
+    }
     rq.addJobsIfNecessaryAndCommit(jta, agentReference, lc);
     auto queueUpdateTime = t.secs(utils::Timer::resetCounter);
     // We can now make the transition official.
@@ -217,7 +226,16 @@ queueForTransfer:;
     mp.deserialize(m_payload.mountpolicy());
     std::list<RetrieveQueue::JobToAdd> jta;
     jta.push_back({bestTapeFile->copynb(), bestTapeFile->fseq(), getAddressIfSet(), m_payload.archivefile().filesize(), 
-      mp, (signed)m_payload.schedulerrequest().entrylog().time()});
+      mp, (signed)m_payload.schedulerrequest().entrylog().time(), nullopt});
+    if (m_payload.has_activity_weight()) {
+      RetrieveActivityDescription activityDescription;
+      activityDescription.priority = m_payload.activity_weight().priority();
+      activityDescription.diskInstanceName = m_payload.activity_weight().disk_instance_name();
+      activityDescription.activity = m_payload.activity_weight().activity();
+      activityDescription.weight = m_payload.activity_weight().weight();
+      activityDescription.creationTime = m_payload.activity_weight().creation_time();
+      jta.back().activityDescription = activityDescription;
+    }
     rq.addJobsIfNecessaryAndCommit(jta, agentReference, lc);
     auto jobsSummary=rq.getJobsSummary();
     auto queueUpdateTime = t.secs(utils::Timer::resetCounter);
@@ -432,6 +450,40 @@ void RetrieveRequest::setRetrieveFileQueueCriteria(const cta::common::dataStruct
   }
 }
 
+//------------------------------------------------------------------------------
+// RetrieveRequest::setActivityIfNeeded()
+//------------------------------------------------------------------------------
+void RetrieveRequest::setActivityIfNeeded(const cta::common::dataStructures::RetrieveRequest& retrieveRequest,
+    const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria) {
+  checkPayloadWritable();
+  if (retrieveRequest.activity) {
+    auto * activity = m_payload.mutable_activity_weight();
+    activity->set_priority(criteria.mountPolicy.retrievePriority);
+    activity->set_activity(retrieveRequest.activity.value());
+    activity->set_disk_instance_name(criteria.activitiesFairShareWeight.diskInstance);
+    activity->set_weight(criteria.activitiesFairShareWeight.activitiesWeights.at(retrieveRequest.activity.value()));
+    activity->set_creation_time(retrieveRequest.creationLog.time);
+  }
+}
+
+//------------------------------------------------------------------------------
+// RetrieveRequest::getActivity()
+//------------------------------------------------------------------------------
+optional<RetrieveActivityDescription> RetrieveRequest::getActivity() {
+  checkPayloadReadable();
+  optional<RetrieveActivityDescription> ret;
+  if (m_payload.has_activity_weight()) {
+    RetrieveActivityDescription activity;
+    activity.priority = m_payload.activity_weight().priority();
+    activity.diskInstanceName = m_payload.activity_weight().disk_instance_name();
+    activity.activity = m_payload.activity_weight().activity();
+    activity.weight = m_payload.activity_weight().weight();
+    activity.creationTime = m_payload.activity_weight().creation_time();
+    ret = activity;
+  }
+  return ret;
+}
+
 //------------------------------------------------------------------------------
 // RetrieveRequest::dumpJobs()
 //------------------------------------------------------------------------------
diff --git a/objectstore/RetrieveRequest.hpp b/objectstore/RetrieveRequest.hpp
index 67e287f7c9213c684a03e938ae76bf9bf91ffe14..ec82378e1b40156a0f251d10918729eb72976faa 100644
--- a/objectstore/RetrieveRequest.hpp
+++ b/objectstore/RetrieveRequest.hpp
@@ -22,6 +22,7 @@
 #include "objectstore/cta.pb.h"
 #include "TapeFileSerDeser.hpp"
 #include "JobQueueType.hpp"
+#include "RetrieveActivityCountMap.hpp"
 #include <list>
 #include "common/dataStructures/DiskFileInfo.hpp"
 #include "common/dataStructures/EntryLog.hpp"
@@ -234,6 +235,9 @@ public:
   cta::common::dataStructures::RetrieveRequest getSchedulerRequest();
   
   void setRetrieveFileQueueCriteria(const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria);
+  void setActivityIfNeeded(const cta::common::dataStructures::RetrieveRequest & retrieveRequest,
+    const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria);
+  optional<RetrieveActivityDescription> getActivity();
   cta::common::dataStructures::RetrieveFileQueueCriteria getRetrieveFileQueueCriteria();
   cta::common::dataStructures::ArchiveFile getArchiveFile();
   cta::common::dataStructures::EntryLog getEntryLog();
diff --git a/objectstore/Sorter.cpp b/objectstore/Sorter.cpp
index 606e3fa04164191107b65a2f351dba22c2317a7d..c1159267d2cd8eff0946008edb01d5e491289520 100644
--- a/objectstore/Sorter.cpp
+++ b/objectstore/Sorter.cpp
@@ -159,7 +159,7 @@ void Sorter::executeRetrieveAlgorithm(const std::string vid, std::string& queueA
     Sorter::RetrieveJob job = std::get<0>(jobToAdd->jobToQueue);
     succeededJobs[job.jobDump.copyNb] = jobToAdd;
     previousOwner = job.previousOwner->getAgentAddress();
-    jobsToAdd.push_back({job.retrieveRequest.get(),job.jobDump.copyNb,job.fSeq,job.fileSize,job.mountPolicy,job.jobDump.status});
+    jobsToAdd.push_back({job.retrieveRequest.get(),job.jobDump.copyNb,job.fSeq,job.fileSize,job.mountPolicy,job.jobDump.status,job.activityDescription});
   }
   try{
     algo.referenceAndSwitchOwnershipIfNecessary(vid,previousOwner,queueAddress,jobsToAdd,lc);
diff --git a/objectstore/Sorter.hpp b/objectstore/Sorter.hpp
index ddf8d57c58b71f25683fcd6b87bd2d353fe163c1..b68b1a8e4634587b584fff06335db6a18d63b847 100644
--- a/objectstore/Sorter.hpp
+++ b/objectstore/Sorter.hpp
@@ -125,6 +125,7 @@ public:
     uint64_t fSeq;
     common::dataStructures::MountPolicy mountPolicy;
     cta::objectstore::JobQueueType jobQueueType;
+    optional<RetrieveActivityDescription> activityDescription;
   };
   
   /**
diff --git a/objectstore/cta.proto b/objectstore/cta.proto
index 9b8fefea134df0aab64d37438079363802422000..daab50293501736f6dcc6ff96158ac8bf4cf9579 100644
--- a/objectstore/cta.proto
+++ b/objectstore/cta.proto
@@ -231,10 +231,16 @@ message DriveState {
   required bool desiredUp = 5019;
   required bool desiredForceDown = 5020;
   optional string currentvid = 5021;
+  optional uint64 current_priority = 5028;
+  optional string current_activity = 5029;
+  optional double current_activity_weight = 5030;
   optional string currenttapepool = 5022;
   optional uint32 nextmounttype = 5023;
   optional string nextvid = 5024;
   optional string nexttapepool = 5025;
+  optional uint64 next_priority = 5031;
+  optional string next_activity = 5032;
+  optional double next_activity_weight = 5033;
 // TODO: implement or remove  required EntryLog creationlog = 5023;
 }
 
@@ -390,15 +396,24 @@ message RetrieveRequestRepackInfo {
 }
 
 // The different timings of the lifecycle of a RetrieveRequest (creation time, first select time, request complete)
-message LifecycleTimings{
+message LifecycleTimings {
   optional uint64 creation_time = 9160 [default = 0];
   optional uint64 first_selected_time = 9161 [default = 0];
   optional uint64 completed_time = 9162 [default = 0];
 }
 
+message RetrieveActivityWeight {
+  required uint64 priority = 9170;
+  required string disk_instance_name = 9171;
+  required string activity = 9172;
+  required double weight = 9173;
+  required int64 creation_time = 9174;
+}
+
 message RetrieveRequest {
   required SchedulerRetrieveRequest schedulerrequest = 9150;
   required MountPolicy mountpolicy = 9151;
+  optional RetrieveActivityWeight activity_weight = 9160;
   required ArchiveFile archivefile = 9152;
   required uint32 activecopynb = 9153;
   repeated RetrieveJob jobs = 9154;
@@ -449,6 +464,7 @@ message ArchiveQueue {
 }
 
 message RetrieveJobPointer {
+  // The retrieve job pointer needs to hold sufficient information for all the running counters of the queue (priority, activity...)
   required uint64 size = 3101;
   required string address = 3102;
   required uint32 copynb = 3103;
@@ -457,6 +473,9 @@ message RetrieveJobPointer {
   required uint64 minretrieverequestage = 3105;
   required uint64 maxdrivesallowed = 3106;
   required uint64 starttime = 3108;
+  // For activity (if present), we need disk instance and activity name (priority is always provided)
+  optional string disk_instance_name = 3109;
+  optional string activity = 3110;
 }
 
 message RetrieveQueueShardPointer {
@@ -472,12 +491,18 @@ message RetrieveQueueShard {
   required uint64 retrievejobstotalsize = 10501;
 }
 
+message RetrieveActivityCountPair {
+  required RetrieveActivityWeight retrieve_activity_weight = 10600;
+  required uint64 count = 10601;
+}
+
 message RetrieveQueue {
   required string vid = 10100;
   repeated RetrieveQueueShardPointer retrievequeueshards = 10111;
   repeated ValueCountPair prioritymap = 10131;
   repeated ValueCountPair minretrieverequestagemap = 10132;
   repeated ValueCountPair maxdrivesallowedmap = 10133;
+  repeated RetrieveActivityCountPair activity_map = 10136;
   required uint64 retrievejobstotalsize = 10140;
   required uint64 retrievejobscount = 10145;
   required uint64 oldestjobcreationtime = 10150;
diff --git a/scheduler/ArchiveMount.hpp b/scheduler/ArchiveMount.hpp
index 2073545e4d03613f27ddaf10fb90cf96f5407a18..5ff30a24ba8982e6a20db21d3f8114ac85bd6573 100644
--- a/scheduler/ArchiveMount.hpp
+++ b/scheduler/ArchiveMount.hpp
@@ -83,6 +83,14 @@ namespace cta {
      * @return The mount transaction id.
      */
     std::string getMountTransactionId() const override;
+    
+    /**
+     * Return nullopt as activities are for retrieve mounts.
+     * 
+     * @return nullopt.
+     */
+    optional<std::string> getActivity() const override { return nullopt; }
+
 
     /**
      * Indicates that the mount was completed.
diff --git a/scheduler/LabelMount.hpp b/scheduler/LabelMount.hpp
index 25778cb741fa63a50121b4f89ae321dcdafdfe6f..613ceb3411b0582241bcd7512e132e1585d4677e 100644
--- a/scheduler/LabelMount.hpp
+++ b/scheduler/LabelMount.hpp
@@ -73,6 +73,14 @@ namespace cta {
      * @return The mount transaction id.
      */
     std::string getMountTransactionId() const override;
+    
+    /**
+     * Return nullopt as activities are for retrieve mounts.
+     * 
+     * @return nullopt.
+     */
+    optional<std::string> getActivity() const override { return nullopt; }
+
 
     /**
      * Indicates that the mount was cancelled.
diff --git a/scheduler/OStoreDB/MemQueues.cpp b/scheduler/OStoreDB/MemQueues.cpp
index 8bef86b8a4aa9cea635a88e2a29d3836a12e1096..7a1cafd3a8660db7dbc335a8894c1280838eb7b6 100644
--- a/scheduler/OStoreDB/MemQueues.cpp
+++ b/scheduler/OStoreDB/MemQueues.cpp
@@ -55,7 +55,7 @@ void MemQueue<objectstore::RetrieveRequest, objectstore::RetrieveQueue>::special
       if (j.copyNb == job.copyNb) {
         auto criteria = request.getRetrieveFileQueueCriteria();
         jtal.push_back({j.copyNb, j.fSeq, request.getAddressIfSet(), criteria.archiveFile.fileSize, 
-            criteria.mountPolicy, request.getEntryLog().time});
+            criteria.mountPolicy, request.getEntryLog().time, request.getActivity()});
         request.setActiveCopyNumber(j.copyNb);
         request.setOwner(queueAddress);
         goto jobAdded;
diff --git a/scheduler/OStoreDB/OStoreDB.cpp b/scheduler/OStoreDB/OStoreDB.cpp
index b378b9e31816409b9d17331da43f5b220bbbef7b..15f98fa6641e1c6e680120de14fd6e76bfbe0fb6 100644
--- a/scheduler/OStoreDB/OStoreDB.cpp
+++ b/scheduler/OStoreDB/OStoreDB.cpp
@@ -31,6 +31,7 @@
 #include "objectstore/Sorter.hpp"
 #include "objectstore/Helpers.hpp"
 #include "common/exception/Exception.hpp"
+#include "common/exception/UserError.hpp"
 #include "common/utils/utils.hpp"
 #include "scheduler/LogicalLibrary.hpp"
 #include "common/dataStructures/MountPolicy.hpp"
@@ -186,7 +187,7 @@ void OStoreDB::ping() {
 void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, RootEntry& re, 
     log::LogContext & logContext) {
   utils::Timer t, t2;
-  // Walk the archive queues for user for statistics
+  // Walk the archive queues for USER for statistics
   for (auto & aqp: re.dumpArchiveQueues(JobQueueType::JobsToTransferForUser)) {
     objectstore::ArchiveQueue aqueue(aqp.address, m_objectStore);
     // debug utility variable
@@ -232,7 +233,7 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
           .add("processingTime", processingTime);
     logContext.log(log::INFO, "In OStoreDB::fetchMountInfo(): fetched an archive for user queue.");
   }
-  // Walk the archive queues for user for statistics
+  // Walk the archive queues for REPACK for statistics
   for (auto & aqp: re.dumpArchiveQueues(JobQueueType::JobsToTransferForRepack)) {
     objectstore::ArchiveQueue aqueue(aqp.address, m_objectStore);
     // debug utility variable
@@ -300,18 +301,56 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
     }
     // If there are files queued, we create an entry for this retrieve queue in the
     // mount candidates list.
-    if (rqueue.getJobsSummary().jobs) {
-      tmdi.potentialMounts.push_back(SchedulerDatabase::PotentialMount());
-      auto & m = tmdi.potentialMounts.back();
-      m.vid = rqp.vid;
-      m.type = cta::common::dataStructures::MountType::Retrieve;
-      m.bytesQueued = rqueue.getJobsSummary().bytes;
-      m.filesQueued = rqueue.getJobsSummary().jobs;      
-      m.oldestJobStartTime = rqueue.getJobsSummary().oldestJobStartTime;
-      m.priority = rqueue.getJobsSummary().priority;
-      m.maxDrivesAllowed = rqueue.getJobsSummary().maxDrivesAllowed;
-      m.minRequestAge = rqueue.getJobsSummary().minRetrieveRequestAge;
-      m.logicalLibrary = ""; // The logical library is not known here, and will be determined by the caller.
+    auto rqSummary = rqueue.getJobsSummary();
+    if (rqSummary.jobs) {
+      // Check if we have activities and if all the jobs are covered by one or not (possible mixed case).
+      bool jobsWithoutActivity = true;
+      if (rqSummary.activityCounts.size()) {
+        if (rqSummary.activityCounts.size() >= rqSummary.jobs)
+          jobsWithoutActivity = false;
+        // In all cases, we create one potential mount per activity
+        for (auto ac: rqSummary.activityCounts) {
+          tmdi.potentialMounts.push_back(SchedulerDatabase::PotentialMount());
+          auto & m = tmdi.potentialMounts.back();
+          m.vid = rqp.vid;
+          m.type = cta::common::dataStructures::MountType::Retrieve;
+          m.bytesQueued = rqSummary.bytes;
+          m.filesQueued = rqSummary.jobs;
+          m.oldestJobStartTime = rqSummary.oldestJobStartTime;
+          m.priority = rqSummary.priority;
+          m.maxDrivesAllowed = rqSummary.maxDrivesAllowed;
+          m.minRequestAge = rqSummary.minRetrieveRequestAge;
+          m.logicalLibrary = ""; // The logical library is not known here, and will be determined by the caller.
+          m.tapePool = "";       // The tape pool is not known and will be determined by the caller.
+          m.vendor = "";         // The vendor is not known here, and will be determined by the caller.
+          m.mediaType = "";      // The media type is not known here, and will be determined by the caller.
+          m.vo = "";             // The vo is not known here, and will be determined by the caller.
+          m.capacityInBytes = 0; // The capacity is not known here, and will be determined by the caller.
+          m.activityNameAndWeightedMountCount = PotentialMount::ActivityNameAndWeightedMountCount();
+          m.activityNameAndWeightedMountCount.value().activity = ac.activity;
+          m.activityNameAndWeightedMountCount.value().weight = ac.weight;
+          m.activityNameAndWeightedMountCount.value().weightedMountCount = 0.0; // This value will be computed later by the caller.
+          m.activityNameAndWeightedMountCount.value().mountCount = 0; // This value will be computed later by the caller.
+        }
+      }
+      if (jobsWithoutActivity) {
+        tmdi.potentialMounts.push_back(SchedulerDatabase::PotentialMount());
+        auto & m = tmdi.potentialMounts.back();
+        m.vid = rqp.vid;
+        m.type = cta::common::dataStructures::MountType::Retrieve;
+        m.bytesQueued = rqSummary.bytes;
+        m.filesQueued = rqSummary.jobs;
+        m.oldestJobStartTime = rqSummary.oldestJobStartTime;
+        m.priority = rqSummary.priority;
+        m.maxDrivesAllowed = rqSummary.maxDrivesAllowed;
+        m.minRequestAge = rqSummary.minRetrieveRequestAge;
+        m.logicalLibrary = ""; // The logical library is not known here, and will be determined by the caller.
+        m.tapePool = "";       // The tape pool is not known and will be determined by the caller.
+        m.vendor = "";         // The vendor is not known here, and will be determined by the caller.
+        m.mediaType = "";      // The media type is not known here, and will be determined by the caller.
+        m.vo = "";             // The vo is not known here, and will be determined by the caller.
+        m.capacityInBytes = 0; // The capacity is not known here, and will be determined by the caller.
+      }
     } else {
       tmdi.queueTrimRequired = true;
     }
@@ -341,6 +380,7 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
     (int)cta::common::dataStructures::DriveStatus::CleaningUp };
   std::set<int> activeMountTypes = {
     (int)cta::common::dataStructures::MountType::ArchiveForUser,
+    (int)cta::common::dataStructures::MountType::ArchiveForRepack,
     (int)cta::common::dataStructures::MountType::Retrieve,
     (int)cta::common::dataStructures::MountType::Label };
   for (const auto &d : driveStates) {
@@ -354,6 +394,8 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
       tmdi.existingOrNextMounts.back().bytesTransferred = d.bytesTransferredInSession;
       tmdi.existingOrNextMounts.back().filesTransferred = d.filesTransferredInSession;
       tmdi.existingOrNextMounts.back().latestBandwidth = d.latestBandwidth;
+      if (d.currentActivityAndWeight)
+        tmdi.existingOrNextMounts.back().activity = d.currentActivityAndWeight.value().activity;
     }
     if (activeMountTypes.count((int)d.nextMountType)) {
       tmdi.existingOrNextMounts.push_back(ExistingMount());
@@ -365,6 +407,8 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
       tmdi.existingOrNextMounts.back().bytesTransferred = 0;
       tmdi.existingOrNextMounts.back().filesTransferred = 0;
       tmdi.existingOrNextMounts.back().latestBandwidth = 0;
+      if (d.nextActivityAndWeight)
+        tmdi.existingOrNextMounts.back().activity = d.nextActivityAndWeight.value().activity;
     }
   }
   auto registerProcessingTime = t.secs(utils::Timer::resetCounter);
@@ -517,7 +561,7 @@ std::unique_ptr<SchedulerDatabase::RetrieveMount> OStoreDB::TapeMountDecisionInf
         const std::string& mediaType,
         const std::string& vendor,
         const uint64_t capacityInBytes,
-        time_t startTime) {
+        time_t startTime, const optional<common::dataStructures::DriveState::ActivityAndWeight> &) {
   throw cta::exception::Exception("In OStoreDB::TapeMountDecisionInfoNoLock::createRetrieveMount(): This function should not be called");
 }
 
@@ -1058,7 +1102,8 @@ void OStoreDB::setRetrieveJobBatchReportedToUser(std::list<cta::SchedulerDatabas
       );
       insertedElements.emplace_back(CaRQF::InsertedElement{
         &j.job->m_retrieveRequest, tf_it->copyNb, tf_it->fSeq, tf_it->compressedSize,
-        common::dataStructures::MountPolicy(), serializers::RetrieveJobStatus::RJS_Failed
+        common::dataStructures::MountPolicy(), serializers::RetrieveJobStatus::RJS_Failed,
+        j.job->m_activityDescription
       });
     }
     try {
@@ -1084,17 +1129,37 @@ std::list<SchedulerDatabase::RetrieveQueueStatistics> OStoreDB::getRetrieveQueue
 //------------------------------------------------------------------------------
 // OStoreDB::queueRetrieve()
 //------------------------------------------------------------------------------
-std::string OStoreDB::queueRetrieve(const cta::common::dataStructures::RetrieveRequest& rqst,
+std::string OStoreDB::queueRetrieve(cta::common::dataStructures::RetrieveRequest& rqst,
   const cta::common::dataStructures::RetrieveFileQueueCriteria& criteria, log::LogContext &logContext) {
   assertAgentAddressSet();
   auto mutexForHelgrind = cta::make_unique<cta::threading::Mutex>();
   cta::threading::MutexLocker mlForHelgrind(*mutexForHelgrind);
-  auto *mutexForHelgrindAddr = mutexForHelgrind.release();
   cta::utils::Timer timer;
   // Get the best vid from the cache
   std::set<std::string> candidateVids;
   for (auto & tf:criteria.archiveFile.tapeFiles) candidateVids.insert(tf.vid);
   std::string bestVid=Helpers::selectBestRetrieveQueue(candidateVids, m_catalogue, m_objectStore);
+  // Check that the activity is fine (if applying: disk instance uses them or it is sent).
+  if (rqst.activity || criteria.activitiesFairShareWeight.activitiesWeights.size()) {
+    // Activity is set. It should exist in the catalogue
+    if (rqst.activity) {
+      try {
+        criteria.activitiesFairShareWeight.activitiesWeights.at(rqst.activity.value());
+      } catch (std::out_of_range &) {
+        throw cta::exception::UserError(std::string("Unknown fair share activity \"") + rqst.activity.value() + "\" for disk instance \"" 
+            + criteria.activitiesFairShareWeight.diskInstance + "\"");
+      }
+    } else {
+      try {
+        criteria.activitiesFairShareWeight.activitiesWeights.at("default"); 
+        rqst.activity = "default";
+      } catch (std::out_of_range &) {
+        throw cta::exception::UserError(
+            std::string("Missing fair share activity \"default\"  while queuing with undefined activity for disk instance \"")
+            + criteria.activitiesFairShareWeight.diskInstance + "\"");
+      }
+    }
+  }
   // Check that the requested retrieve job (for the provided vid) exists, and record the copynb.
   uint64_t bestCopyNb;
   for (auto & tf: criteria.archiveFile.tapeFiles) {
@@ -1115,6 +1180,7 @@ std::string OStoreDB::queueRetrieve(const cta::common::dataStructures::RetrieveR
   rReq->initialize();
   rReq->setSchedulerRequest(rqst);
   rReq->setRetrieveFileQueueCriteria(criteria);
+  rReq->setActivityIfNeeded(rqst, criteria);
   rReq->setCreationTime(rqst.creationLog.time);
   // Find the job corresponding to the vid (and check we indeed have one).
   auto jobs = rReq->getJobs();
@@ -1158,6 +1224,7 @@ std::string OStoreDB::queueRetrieve(const cta::common::dataStructures::RetrieveR
           .add("insertionTime", insertionTime);
     delayIfNecessary(logContext);
     auto rReqPtr = rReq.release();
+    auto *mutexForHelgrindAddr = mutexForHelgrind.release();
     auto * et = new EnqueueingTask([rReqPtr, job, bestVid, mutexForHelgrindAddr, this]{
       std::unique_ptr<cta::threading::Mutex> mutexForHelgrind(mutexForHelgrindAddr);
       std::unique_ptr<objectstore::RetrieveRequest> rReq(rReqPtr);
@@ -2745,6 +2812,7 @@ void OStoreDB::setDriveDown(common::dataStructures::DriveState & driveState,
   driveState.desiredDriveState.forceDown=false;
   driveState.currentVid="";
   driveState.currentTapePool="";
+  driveState.currentActivityAndWeight = nullopt;
 }
 
 //------------------------------------------------------------------------------
@@ -2783,6 +2851,7 @@ void OStoreDB::setDriveUpOrMaybeDown(common::dataStructures::DriveState & driveS
   driveState.driveStatus=targetStatus;
   driveState.currentVid="";
   driveState.currentTapePool="";
+  driveState.currentActivityAndWeight = nullopt;
 }
 
 //------------------------------------------------------------------------------
@@ -2816,6 +2885,7 @@ void OStoreDB::setDriveProbing(common::dataStructures::DriveState & driveState,
   driveState.driveStatus=inputs.status;
   driveState.currentVid="";
   driveState.currentTapePool="";
+  driveState.currentActivityAndWeight = nullopt;
 }
 
 //------------------------------------------------------------------------------
@@ -2828,8 +2898,7 @@ void OStoreDB::setDriveStarting(common::dataStructures::DriveState & driveState,
     driveState.lastUpdateTime = inputs.reportTime;
     return;
   }
-  // If we are changing state, then all should be reset. We are not supposed to
-  // know the direction yet.
+  // If we are changing state, then all should be reset.
   driveState.sessionId=inputs.mountSessionId;
   driveState.bytesTransferredInSession=0;
   driveState.filesTransferredInSession=0;
@@ -2850,6 +2919,12 @@ void OStoreDB::setDriveStarting(common::dataStructures::DriveState & driveState,
   driveState.driveStatus=common::dataStructures::DriveStatus::Starting;
   driveState.currentVid=inputs.vid;
   driveState.currentTapePool=inputs.tapepool;
+  if (inputs.activityAndWeigh) {
+    common::dataStructures::DriveState::ActivityAndWeight aaw;
+    aaw.activity = inputs.activityAndWeigh.value().activity;
+    aaw.weight = inputs.activityAndWeigh.value().weight;
+    driveState.currentActivityAndWeight = aaw;
+  }
 }
 
 //------------------------------------------------------------------------------
@@ -3045,6 +3120,7 @@ void OStoreDB::setDriveCleaningUp(common::dataStructures::DriveState & driveStat
   driveState.driveStatus=common::dataStructures::DriveStatus::CleaningUp;
   driveState.currentVid=inputs.vid;
   driveState.currentTapePool=inputs.tapepool;
+  driveState.currentActivityAndWeight = nullopt;
 }
 
 //------------------------------------------------------------------------------
@@ -3077,6 +3153,7 @@ void OStoreDB::setDriveShutdown(common::dataStructures::DriveState & driveState,
   driveState.driveStatus=common::dataStructures::DriveStatus::CleaningUp;
   driveState.currentVid=inputs.vid;
   driveState.currentTapePool=inputs.tapepool;
+  driveState.currentActivityAndWeight = nullopt;
 }
 //------------------------------------------------------------------------------
 // OStoreDB::TapeMountDecisionInfo::createArchiveMount()
@@ -3140,7 +3217,7 @@ std::unique_ptr<SchedulerDatabase::ArchiveMount>
     inputs.latestBandwidth = 0;
     inputs.mountSessionId = am.mountInfo.mountId;
     inputs.reportTime = startTime;
-    inputs.status = common::dataStructures::DriveStatus::Mounting;
+    inputs.status = common::dataStructures::DriveStatus::Starting;
     inputs.vid = tape.vid;
     inputs.tapepool = tape.tapePool;
     log::LogContext lc(m_oStoreDB.m_logger);
@@ -3166,7 +3243,8 @@ std::unique_ptr<SchedulerDatabase::RetrieveMount>
   OStoreDB::TapeMountDecisionInfo::createRetrieveMount(
     const std::string& vid, const std::string & tapePool, const std::string driveName, 
     const std::string& logicalLibrary, const std::string& hostName,const std::string& vo, const std::string& mediaType,
-      const std::string& vendor,const uint64_t capacityInBytes, time_t startTime) {
+    const std::string& vendor,const uint64_t capacityInBytes, time_t startTime, 
+    const optional<common::dataStructures::DriveState::ActivityAndWeight>& activityAndWeight) {
   // In order to create the mount, we have to:
   // Check we actually hold the scheduling lock
   // Check the tape exists, add it to ownership and set its activity status to 
@@ -3198,6 +3276,7 @@ std::unique_ptr<SchedulerDatabase::RetrieveMount>
   rm.mountInfo.mediaType = mediaType;
   rm.mountInfo.vendor = vendor;
   rm.mountInfo.capacityInBytes = capacityInBytes;
+  if(activityAndWeight) rm.mountInfo.activity = activityAndWeight.value().activity;
   // Update the status of the drive in the registry
   {
     // Get hold of the drive registry
@@ -3212,9 +3291,10 @@ std::unique_ptr<SchedulerDatabase::RetrieveMount>
     inputs.mountType = common::dataStructures::MountType::Retrieve;
     inputs.mountSessionId = rm.mountInfo.mountId;
     inputs.reportTime = startTime;
-    inputs.status = common::dataStructures::DriveStatus::Mounting;
+    inputs.status = common::dataStructures::DriveStatus::Starting;
     inputs.vid = rm.mountInfo.vid;
     inputs.tapepool = rm.mountInfo.tapePool;
+    inputs.activityAndWeigh = activityAndWeight;
     log::LogContext lc(m_oStoreDB.m_logger);
     m_oStoreDB.updateDriveStatus(driveInfo, inputs, lc);
   }
@@ -3527,7 +3607,7 @@ void OStoreDB::RetrieveMount::flushAsyncSuccessReports(std::list<cta::SchedulerD
       insertedRequests.push_back(RQTRTRFSAlgo::InsertedElement{&req->m_retrieveRequest, req->selectedCopyNb, 
           req->archiveFile.tapeFiles.at(req->selectedCopyNb).fSeq, req->archiveFile.fileSize,
           cta::common::dataStructures::MountPolicy::s_defaultMountPolicyForRepack,
-          serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess});
+          serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess, req->m_activityDescription});
       requestToJobMap[&req->m_retrieveRequest] = req;
     }
     RQTRTRFSAlgo rQTRTRFSAlgo(m_oStoreDB.m_objectStore, *m_oStoreDB.m_agentReference);
@@ -4406,7 +4486,8 @@ void OStoreDB::RetrieveJob::failTransfer(const std::string &failureReason, log::
 
       CaRqtr::InsertedElement::list insertedElements;
       insertedElements.push_back(CaRqtr::InsertedElement{
-        &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy, serializers::RetrieveJobStatus::RJS_Failed
+        &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy,
+        serializers::RetrieveJobStatus::RJS_Failed, m_activityDescription
       });
       m_retrieveRequest.commit();
       rel.release();
@@ -4472,7 +4553,8 @@ void OStoreDB::RetrieveJob::failTransfer(const std::string &failureReason, log::
 
       CaRqtr::InsertedElement::list insertedElements;
       insertedElements.push_back(CaRqtr::InsertedElement{
-        &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy, serializers::RetrieveJobStatus::RJS_ToTransferForUser
+        &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy, serializers::RetrieveJobStatus::RJS_ToTransferForUser, 
+        m_activityDescription
       });
 
       CaRqtr caRqtr(m_oStoreDB.m_objectStore, *m_oStoreDB.m_agentReference);
@@ -4535,7 +4617,8 @@ void OStoreDB::RetrieveJob::failReport(const std::string &failureReason, log::Lo
         CaRqtr caRqtr(m_oStoreDB.m_objectStore, *m_oStoreDB.m_agentReference);
         CaRqtr::InsertedElement::list insertedElements;
         insertedElements.push_back(CaRqtr::InsertedElement{
-          &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy, serializers::RetrieveJobStatus::RJS_ToReportToUserForFailure
+          &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy,
+          serializers::RetrieveJobStatus::RJS_ToReportToUserForFailure, m_activityDescription
         });
         caRqtr.referenceAndSwitchOwnership(tf.vid, insertedElements, lc);
         log::ScopedParamContainer params(lc);
@@ -4553,7 +4636,8 @@ void OStoreDB::RetrieveJob::failReport(const std::string &failureReason, log::Lo
         CaRqtr caRqtr(m_oStoreDB.m_objectStore, *m_oStoreDB.m_agentReference);
         CaRqtr::InsertedElement::list insertedElements;
         insertedElements.push_back(CaRqtr::InsertedElement{
-          &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy, serializers::RetrieveJobStatus::RJS_Failed
+          &m_retrieveRequest, tf.copyNb, tf.fSeq, af.fileSize, rfqc.mountPolicy,
+          serializers::RetrieveJobStatus::RJS_Failed, m_activityDescription
         });
         caRqtr.referenceAndSwitchOwnership(tf.vid, insertedElements, lc);
         log::ScopedParamContainer params(lc);
diff --git a/scheduler/OStoreDB/OStoreDB.hpp b/scheduler/OStoreDB/OStoreDB.hpp
index 97c4ad316a05e11bbb030445af97bd28ad731fd9..de93c966c41a97b7ac19cf97f262e5b439630e0d 100644
--- a/scheduler/OStoreDB/OStoreDB.hpp
+++ b/scheduler/OStoreDB/OStoreDB.hpp
@@ -29,6 +29,7 @@
 #include "objectstore/ArchiveRequest.hpp"
 #include "objectstore/DriveRegister.hpp"
 #include "objectstore/RetrieveRequest.hpp"
+#include "objectstore/RetrieveActivityCountMap.hpp"
 #include "objectstore/RepackQueue.hpp"
 #include "objectstore/RepackRequest.hpp"
 #include "objectstore/SchedulerGlobalLock.hpp"
@@ -108,7 +109,7 @@ public:
       const std::string& vo, const std::string& mediaType,
       const std::string& vendor,
       const uint64_t capacityInBytes,
-      time_t startTime) override;
+      time_t startTime, const optional<common::dataStructures::DriveState::ActivityAndWeight> &activityAndWeight) override;
     virtual ~TapeMountDecisionInfo();
   private:
     TapeMountDecisionInfo (OStoreDB & oStoreDB);
@@ -135,7 +136,7 @@ public:
       const std::string& vo, const std::string& mediaType,
       const std::string& vendor,
       const uint64_t capacityInBytes,
-      time_t startTime) override;
+      time_t startTime, const optional<common::dataStructures::DriveState::ActivityAndWeight> &activityAndWeight) override;
     virtual ~TapeMountDecisionInfoNoLock();
   };
 
@@ -256,6 +257,7 @@ public:
     std::unique_ptr<objectstore::RetrieveRequest::AsyncJobDeleter> m_jobDelete;
     std::unique_ptr<objectstore::RetrieveRequest::AsyncJobSucceedForRepackReporter> m_jobSucceedForRepackReporter;
     objectstore::RetrieveRequest::RepackInfo m_repackInfo;
+    optional<objectstore::RetrieveActivityDescription> m_activityDescription;
   };
   static RetrieveJob * castFromSchedDBJob(SchedulerDatabase::RetrieveJob * job);
 
@@ -294,7 +296,7 @@ public:
   
   CTA_GENERATE_EXCEPTION_CLASS(RetrieveRequestHasNoCopies);
   CTA_GENERATE_EXCEPTION_CLASS(TapeCopyNumberOutOfRange);
-  std::string queueRetrieve(const cta::common::dataStructures::RetrieveRequest& rqst,
+  std::string queueRetrieve(cta::common::dataStructures::RetrieveRequest& rqst,
     const cta::common::dataStructures::RetrieveFileQueueCriteria &criteria, log::LogContext &logContext) override;
 
   std::list<RetrieveRequestDump> getRetrieveRequestsByVid(const std::string& vid) const override;
@@ -547,6 +549,7 @@ private:
     double latestBandwidth;
     std::string vid;
     std::string tapepool;
+    optional<common::dataStructures::DriveState::ActivityAndWeight> activityAndWeigh;
   };
   /** Collection of smaller scale parts of reportDriveStats */
   struct ReportDriveStatsInputs {
diff --git a/scheduler/OStoreDB/OStoreDBFactory.hpp b/scheduler/OStoreDB/OStoreDBFactory.hpp
index b578aedce0b2731c402c5541a803ffffbb9b8912..bb9cb26357f5e2c09419bcc3c783c59daec5a918 100644
--- a/scheduler/OStoreDB/OStoreDBFactory.hpp
+++ b/scheduler/OStoreDB/OStoreDBFactory.hpp
@@ -202,7 +202,7 @@ public:
     return m_OStoreDB.getRetrieveQueueStatistics(criteria, vidsToConsider);
   }
 
-  std::string queueRetrieve(const common::dataStructures::RetrieveRequest& rqst,
+  std::string queueRetrieve(common::dataStructures::RetrieveRequest& rqst,
     const common::dataStructures::RetrieveFileQueueCriteria &criteria, log::LogContext &logContext) override {
     return m_OStoreDB.queueRetrieve(rqst, criteria, logContext);
   }
diff --git a/scheduler/RetrieveMount.cpp b/scheduler/RetrieveMount.cpp
index 43136f9f3c299f242d121519189e85bbd69368af..a629077989c32cf0e3087a949469fdfc801426c5 100644
--- a/scheduler/RetrieveMount.cpp
+++ b/scheduler/RetrieveMount.cpp
@@ -56,6 +56,13 @@ std::string cta::RetrieveMount::getVid() const{
   return m_dbMount->mountInfo.vid;
 }
 
+//------------------------------------------------------------------------------
+// getActivity()
+//------------------------------------------------------------------------------
+cta::optional<std::string> cta::RetrieveMount::getActivity() const {
+  return m_dbMount->mountInfo.activity;
+}
+
 //------------------------------------------------------------------------------
 // getMountTransactionId()
 //------------------------------------------------------------------------------
diff --git a/scheduler/RetrieveMount.hpp b/scheduler/RetrieveMount.hpp
index 82d3f6e3f22b33de090ca2a2396fd9f71aabedfb..fff81a1bea77c8350a4dfa8686049477bd3ee4dd 100644
--- a/scheduler/RetrieveMount.hpp
+++ b/scheduler/RetrieveMount.hpp
@@ -69,6 +69,14 @@ namespace cta {
      */
     virtual std::string getVid() const;
     
+    /**
+     * Returns the (optional) activity for this mount.
+     * 
+     * @return 
+     */
+    optional<std::string> getActivity() const override;
+
+    
     /**
      * Returns the mount transaction id.
      *
diff --git a/scheduler/Scheduler.cpp b/scheduler/Scheduler.cpp
index 16f0d76829485c41d2dbda2b76bcd94f366fb4f5..4a356161ae67de90c45c2f629a8bceabd11cd576 100644
--- a/scheduler/Scheduler.cpp
+++ b/scheduler/Scheduler.cpp
@@ -203,14 +203,14 @@ void Scheduler::queueArchiveRequestForRepackBatch(std::list<cta::objectstore::Ar
 //------------------------------------------------------------------------------
 void Scheduler::queueRetrieve(
   const std::string &instanceName,
-  const common::dataStructures::RetrieveRequest &request,
+  common::dataStructures::RetrieveRequest &request,
   log::LogContext & lc) {
   using utils::postEllipsis;
   using utils::midEllipsis;
   utils::Timer t;
   // Get the queue criteria
   common::dataStructures::RetrieveFileQueueCriteria queueCriteria;
-  queueCriteria = m_catalogue.prepareToRetrieveFile(instanceName, request.archiveFileID, request.requester, lc);
+  queueCriteria = m_catalogue.prepareToRetrieveFile(instanceName, request.archiveFileID, request.requester, request.activity, lc);
   auto catalogueTime = t.secs(cta::utils::Timer::resetCounter);
   std::string selectedVid = m_db.queueRetrieve(request, queueCriteria, lc);
   auto schedulerDbTime = t.secs();
@@ -250,10 +250,11 @@ void Scheduler::queueRetrieve(
      .add("policyMaxDrives", queueCriteria.mountPolicy.maxDrivesAllowed)
      .add("policyMinAge", queueCriteria.mountPolicy.retrieveMinRequestAge)
      .add("policyPriority", queueCriteria.mountPolicy.retrievePriority);
+  if (request.activity)
+    spc.add("activity", request.activity.value());
   lc.log(log::INFO, "Queued retrieve request");
 }
 
-
 //------------------------------------------------------------------------------
 // deleteArchive
 //------------------------------------------------------------------------------
@@ -751,7 +752,7 @@ std::list<common::dataStructures::DriveState> Scheduler::getDriveStates(const co
 //------------------------------------------------------------------------------
 void Scheduler::sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo>& mountInfo,
     const std::string & logicalLibraryName, const std::string & driveName, utils::Timer & timer,
-    std::map<tpType, uint32_t> & existingMountsSummary, std::set<std::string> & tapesInUse, std::list<catalogue::TapeForWriting> & tapeList,
+    ExistingMountSummary & existingMountsSummary, std::set<std::string> & tapesInUse, std::list<catalogue::TapeForWriting> & tapeList,
     double & getTapeInfoTime, double & candidateSortingTime, double & getTapeForWriteTime, log::LogContext & lc) {
   // The library information is not know for the tapes involved in retrieves. We 
   // need to query the catalogue now about all those tapes.
@@ -792,11 +793,10 @@ void Scheduler::sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::T
   for (auto & em: mountInfo->existingOrNextMounts) {
     // If a mount is still listed for our own drive, it is a leftover that we disregard.
     if (em.driveName!=driveName) {
-      try {
-        existingMountsSummary.at(tpType(em.tapePool, common::dataStructures::getMountBasicType(em.type)))++;
-      } catch (std::out_of_range &) {
-        existingMountsSummary[tpType(em.tapePool, common::dataStructures::getMountBasicType(em.type))] = 1;
-      }
+      existingMountsSummary[TapePoolMountPair(em.tapePool, common::dataStructures::getMountBasicType(em.type))].totalMounts++;
+      if (em.activity)
+        existingMountsSummary[TapePoolMountPair(em.tapePool, common::dataStructures::getMountBasicType(em.type))]
+          .activityMounts[em.activity.value()].value++;
       if (em.vid.size()) {
         tapesInUse.insert(em.vid);
         log::ScopedParamContainer params(lc);
@@ -809,17 +809,25 @@ void Scheduler::sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::T
   }
   
   // We can now filter out the potential mounts for which their mount criteria
-  // is already met, filter out the potential mounts for which the maximum mount
+  // is not yet met, filter out the potential mounts for which the maximum mount
   // quota is already reached, and weight the remaining by how much of their quota 
   // is reached
   for (auto m = mountInfo->potentialMounts.begin(); m!= mountInfo->potentialMounts.end();) {
     // Get summary data
-    uint32_t existingMounts;
+    uint32_t existingMounts = 0;
+    uint32_t activityMounts = 0;
     try {
-      existingMounts = existingMountsSummary.at(tpType(m->tapePool, common::dataStructures::getMountBasicType(m->type)));
-    } catch (std::out_of_range &) {
-      existingMounts = 0;
-    } 
+      existingMounts = existingMountsSummary
+          .at(TapePoolMountPair(m->tapePool, common::dataStructures::getMountBasicType(m->type)))
+             .totalMounts;
+    } catch (std::out_of_range &) {}
+    if (m->activityNameAndWeightedMountCount) {
+      try {
+        activityMounts = existingMountsSummary
+          .at(TapePoolMountPair(m->tapePool, common::dataStructures::getMountBasicType(m->type)))
+             .activityMounts.at(m->activityNameAndWeightedMountCount.value().activity).value;
+      } catch (std::out_of_range &) {}
+    }
     uint32_t effectiveExistingMounts = 0;
     if (m->type == common::dataStructures::MountType::ArchiveForUser) effectiveExistingMounts = existingMounts;
     bool mountPassesACriteria = false;
@@ -851,6 +859,16 @@ void Scheduler::sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::T
     } else {
       // populate the mount with a weight 
       m->ratioOfMountQuotaUsed = 1.0L * existingMounts / m->maxDrivesAllowed;
+      if (m->activityNameAndWeightedMountCount) {
+        m->activityNameAndWeightedMountCount.value().mountCount = activityMounts;
+        // Protect against division by zero
+        if (m->activityNameAndWeightedMountCount.value().weight) {
+          m->activityNameAndWeightedMountCount.value().weightedMountCount = 
+              1.0 * activityMounts / m->activityNameAndWeightedMountCount.value().weight;
+        } else {
+          m->activityNameAndWeightedMountCount.value().weightedMountCount = std::numeric_limits<double>::max();
+        }
+      }
       log::ScopedParamContainer params(lc);
       params.add("tapePool", m->tapePool);
       if ( m->type == common::dataStructures::MountType::Retrieve) {
@@ -917,7 +935,7 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, const
   std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> mountInfo;
   mountInfo = m_db.getMountInfoNoLock(lc);
   getMountInfoTime = timer.secs(utils::Timer::resetCounter);
-  std::map<tpType, uint32_t> existingMountsSummary;
+  ExistingMountSummary existingMountsSummary;
   std::set<std::string> tapesInUse;
   std::list<catalogue::TapeForWriting> tapeList;
   
@@ -941,7 +959,7 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, const
           catalogueTime = getTapeInfoTime + getTapeForWriteTime;
           uint32_t existingMounts = 0;
           try {
-            existingMounts=existingMountsSummary.at(tpType(m->tapePool, common::dataStructures::getMountBasicType(m->type)));
+            existingMounts=existingMountsSummary.at(TapePoolMountPair(m->tapePool, common::dataStructures::getMountBasicType(m->type))).totalMounts;
           } catch (...) {}
           log::ScopedParamContainer params(lc);
           params.add("tapePool", m->tapePool)
@@ -974,15 +992,21 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, const
       log::ScopedParamContainer params(lc);
       uint32_t existingMounts = 0;
       try {
-        existingMounts=existingMountsSummary.at(tpType(m->tapePool, m->type));
+        existingMounts=existingMountsSummary.at(TapePoolMountPair(m->tapePool, m->type)).totalMounts;
       } catch (...) {}
       schedulerDbTime = getMountInfoTime;
       catalogueTime = getTapeInfoTime + getTapeForWriteTime;
       params.add("tapePool", m->tapePool)
             .add("tapeVid", m->vid)
             .add("mountType", common::dataStructures::toString(m->type))
-            .add("existingMounts", existingMounts)
-            .add("bytesQueued", m->bytesQueued)
+            .add("existingMounts", existingMounts);
+      if (m->activityNameAndWeightedMountCount) {
+        params.add("activity", m->activityNameAndWeightedMountCount.value().activity)
+              .add("activityWeightedMountCount", m->activityNameAndWeightedMountCount.value().weightedMountCount)
+              .add("activityMountCount", m->activityNameAndWeightedMountCount.value().mountCount)
+              .add("activityWeight", m->activityNameAndWeightedMountCount.value().weight);
+      }
+      params.add("bytesQueued", m->bytesQueued)
             .add("minBytesToWarrantMount", m_minBytesToWarrantAMount)
             .add("filesQueued", m->filesQueued)
             .add("minFilesToWarrantMount", m_minFilesToWarrantAMount)
@@ -1052,7 +1076,7 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
   }
   __attribute__((unused)) SchedulerDatabase::TapeMountDecisionInfo & debugMountInfo = *mountInfo;
   
-  std::map<tpType, uint32_t> existingMountsSummary;
+  ExistingMountSummary existingMountsSummary;
   std::set<std::string> tapesInUse;
   std::list<catalogue::TapeForWriting> tapeList;
   
@@ -1088,12 +1112,11 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
                 time(NULL)).release());
             mountCreationTime += timer.secs(utils::Timer::resetCounter);
             internalRet->m_sessionRunning = true;
-            internalRet->setDriveStatus(common::dataStructures::DriveStatus::Starting);
             driveStatusSetTime += timer.secs(utils::Timer::resetCounter);
             log::ScopedParamContainer params(lc);
             uint32_t existingMounts = 0;
             try {
-              existingMounts=existingMountsSummary.at(tpType(m->tapePool, m->type));
+              existingMounts=existingMountsSummary.at(TapePoolMountPair(m->tapePool, common::dataStructures::getMountBasicType(m->type))).totalMounts;
             } catch (...) {}
             schedulerDbTime = getMountInfoTime + queueTrimingTime + mountCreationTime + driveStatusSetTime;
             catalogueTime = getTapeInfoTime + getTapeForWriteTime;
@@ -1139,6 +1162,12 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
       try {
         // create the mount, and populate its DB side.
         decisionTime += timer.secs(utils::Timer::resetCounter);
+        optional<common::dataStructures::DriveState::ActivityAndWeight> activityAndWeight;
+        if (m->activityNameAndWeightedMountCount) {
+          activityAndWeight = common::dataStructures::DriveState::ActivityAndWeight{ 
+            m->activityNameAndWeightedMountCount.value().activity,
+            m->activityNameAndWeightedMountCount.value().weight };
+        }
         std::unique_ptr<RetrieveMount> internalRet (
           new RetrieveMount(mountInfo->createRetrieveMount(m->vid, 
             m->tapePool,
@@ -1149,17 +1178,16 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
             m->mediaType,
             m->vendor,
             m->capacityInBytes,
-            time(NULL))));
+            time(NULL), activityAndWeight)));
         mountCreationTime += timer.secs(utils::Timer::resetCounter);
         internalRet->m_sessionRunning = true;
         internalRet->m_diskRunning = true;
         internalRet->m_tapeRunning = true;
-        internalRet->setDriveStatus(common::dataStructures::DriveStatus::Starting);
         driveStatusSetTime += timer.secs(utils::Timer::resetCounter);
         log::ScopedParamContainer params(lc);
         uint32_t existingMounts = 0;
         try {
-          existingMounts=existingMountsSummary.at(tpType(m->tapePool, m->type));
+          existingMounts=existingMountsSummary.at(TapePoolMountPair(m->tapePool, m->type)).totalMounts;
         } catch (...) {}
         schedulerDbTime = getMountInfoTime + queueTrimingTime + mountCreationTime + driveStatusSetTime;
         catalogueTime = getTapeInfoTime + getTapeForWriteTime;
@@ -1169,7 +1197,14 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
               .add("mediaType",m->mediaType)
               .add("vendor",m->vendor)
               .add("mountType", common::dataStructures::toString(m->type))
-              .add("existingMounts", existingMounts)
+              .add("existingMounts", existingMounts);
+        if (m->activityNameAndWeightedMountCount) {
+          params.add("activity", m->activityNameAndWeightedMountCount.value().activity)
+                .add("activityWeightedMountCount", m->activityNameAndWeightedMountCount.value().weightedMountCount)
+                .add("activityMountCount", m->activityNameAndWeightedMountCount.value().mountCount)
+                .add("activityWeight", m->activityNameAndWeightedMountCount.value().weight);
+        }
+        params
               .add("bytesQueued", m->bytesQueued)
               .add("minBytesToWarrantMount", m_minBytesToWarrantAMount)
               .add("filesQueued", m->filesQueued)
diff --git a/scheduler/Scheduler.hpp b/scheduler/Scheduler.hpp
index d19820ed86e91aa9ab21d2ac6473805ab869f386..5cddb3c05e23cd21744967f7b3db27f150eddd21 100644
--- a/scheduler/Scheduler.hpp
+++ b/scheduler/Scheduler.hpp
@@ -147,7 +147,7 @@ public:
    * Throws a UserError exception in case of wrong request parameters (ex. unknown file id)
    * Throws a (Non)RetryableError exception in case something else goes wrong with the request
    */
-  void queueRetrieve(const std::string &instanceName, const cta::common::dataStructures::RetrieveRequest &request,
+  void queueRetrieve(const std::string &instanceName, cta::common::dataStructures::RetrieveRequest &request,
     log::LogContext &lc);
   
   /** 
@@ -272,14 +272,23 @@ private:
    */
   double m_repackRequestExpansionTimeLimit = 30;
   
-  typedef std::pair<std::string, common::dataStructures::MountType> tpType;
+  typedef std::pair<std::string, common::dataStructures::MountType> TapePoolMountPair;
+  struct MountCounts {
+    uint32_t totalMounts = 0;
+    struct AutoZeroUint32_t {
+      uint32_t value = 0;
+    };
+    std::map<std::string, AutoZeroUint32_t> activityMounts;
+  };
+  typedef std::map<TapePoolMountPair, MountCounts> ExistingMountSummary;
+  
   /**
    * Common part to getNextMountDryRun() and getNextMount() to populate mount decision info.
    * The structure should be pre-loaded by the calling function.
    */
   void sortAndGetTapesForMountInfo(std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> &mountInfo, 
     const std::string & logicalLibraryName, const std::string & driveName, utils::Timer & timer, 
-    std::map<tpType, uint32_t> & existingMountsSummary, std::set<std::string> & tapesInUse, std::list<catalogue::TapeForWriting> & tapeList,
+    ExistingMountSummary & existingMountsSummary, std::set<std::string> & tapesInUse, std::list<catalogue::TapeForWriting> & tapeList,
     double & getTapeInfoTime, double & candidateSortingTime, double & getTapeForWriteTime, log::LogContext & lc);
   
   /**
diff --git a/scheduler/SchedulerDatabase.hpp b/scheduler/SchedulerDatabase.hpp
index c58874553c05aaf371df6380ba44b45f0a96bb4c..414b3ee460c356479f818b284c999e04d47ca690 100644
--- a/scheduler/SchedulerDatabase.hpp
+++ b/scheduler/SchedulerDatabase.hpp
@@ -270,7 +270,7 @@ public:
    * @param logContext context allowing logging db operation
    * @return the selected vid (mostly for logging)
    */
-  virtual std::string queueRetrieve(const cta::common::dataStructures::RetrieveRequest &rqst,
+  virtual std::string queueRetrieve(cta::common::dataStructures::RetrieveRequest &rqst,
     const cta::common::dataStructures::RetrieveFileQueueCriteria &criteria, log::LogContext &logContext) = 0;
 
   /**
@@ -351,6 +351,7 @@ public:
       std::string host;
       uint64_t capacityInBytes;
       uint64_t mountId;
+      optional<std::string> activity;
     } mountInfo;
     virtual const MountInfo & getMountInfo() = 0;
     virtual std::list<std::unique_ptr<cta::SchedulerDatabase::RetrieveJob>> getNextJobBatch(uint64_t filesRequested,
@@ -543,6 +544,17 @@ public:
     std::string logicalLibrary;   /**< The logical library (for a retrieve) */
     double ratioOfMountQuotaUsed; /**< The [ 0.0, 1.0 ] ratio of existing 
                                    * mounts/quota (for faire share of mounts)*/
+    uint32_t mountCount;          /**< The number of mounts for this tape pool (which is the current "chargeable" entity for quotas). */
+    struct ActivityNameAndWeightedMountCount {
+      std::string activity;
+      double weight = 0.0;
+      uint32_t mountCount = 0;
+      double weightedMountCount = 0.0;
+    };                            /**< Struct describing the activity if we have one for this mount. */
+    
+    optional<ActivityNameAndWeightedMountCount> activityNameAndWeightedMountCount;
+                                  /**< Description of the activity for this potential mount. */
+    
     
     bool operator < (const PotentialMount &other) const {
       if (priority < other.priority)
@@ -553,8 +565,21 @@ public:
         return false;
       if (other.type == cta::common::dataStructures::MountType::ArchiveForUser && type != cta::common::dataStructures::MountType::ArchiveForUser)
         return true;
-      if (ratioOfMountQuotaUsed < other.ratioOfMountQuotaUsed)
+      // If we have achieved a HIGHER ratio of our mount allowance, then the other mount will be privileged
+      if (ratioOfMountQuotaUsed > other.ratioOfMountQuotaUsed)
         return true;
+      if (ratioOfMountQuotaUsed < other.ratioOfMountQuotaUsed)
+        return false;
+      // If we have activities (and the mounts are for the same tape pool) we can compare them.
+      // If not, it does not matter too much: one mount will go, increasing its ratio, and next time it will be
+      // the other tape pool's turn. So for different tape pools, we do not order. Likewise, both mounts should have
+      // an activity to be comparable.
+      if (activityNameAndWeightedMountCount && other.activityNameAndWeightedMountCount && tapePool == other.tapePool) {
+        if (activityNameAndWeightedMountCount.value().weightedMountCount > other.activityNameAndWeightedMountCount.value().weightedMountCount)
+          return true;
+        if (activityNameAndWeightedMountCount.value().weightedMountCount < other.activityNameAndWeightedMountCount.value().weightedMountCount)
+          return false;
+      }
       if(minRequestAge < other.minRequestAge)
 	return true;
       if(minRequestAge > other.minRequestAge)
@@ -585,6 +610,8 @@ public:
     uint64_t bytesTransferred;
     uint64_t filesTransferred;
     double latestBandwidth;
+    uint64_t priority;
+    optional<std::string> activity;
   };
   
   /**
@@ -629,7 +656,7 @@ public:
       const std::string& vo, const std::string& mediaType,
       const std::string& vendor,
       const uint64_t capacityInBytes,
-      time_t startTime) = 0;
+      time_t startTime, const optional<common::dataStructures::DriveState::ActivityAndWeight> &) = 0;
     /** Destructor: releases the global lock if not already done */
     virtual ~TapeMountDecisionInfo() {};
   };
diff --git a/scheduler/SchedulerTest.cpp b/scheduler/SchedulerTest.cpp
index 3067e541ae811a8175c9a2cd399127a319de6f68..d54ccba329051a6bd24fff8e19aeb92fc61880af 100644
--- a/scheduler/SchedulerTest.cpp
+++ b/scheduler/SchedulerTest.cpp
@@ -162,7 +162,7 @@ public:
     const uint64_t minArchiveRequestAge = 2;
     const uint64_t retrievePriority = 3;
     const uint64_t minRetrieveRequestAge = 4;
-    const uint64_t maxDrivesAllowed = 5;
+    const uint64_t maxDrivesAllowed = 50;
     const std::string mountPolicyComment = "create mount group";
 
     ASSERT_TRUE(catalogue.getMountPolicies().empty());
@@ -2622,6 +2622,252 @@ TEST_P(SchedulerTest, expandRepackRequestExpansionTimeLimitReached) {
   }
 }
 
+TEST_P(SchedulerTest, archiveReportMultipleAndQueueRetrievesWithActivities) {
+  using namespace cta;
+
+  Scheduler &scheduler = getScheduler();
+  auto &catalogue = getCatalogue();
+  
+  setupDefaultCatalogue();
+#ifdef STDOUT_LOGGING
+  log::StdoutLogger dl("dummy", "unitTest");
+#else
+  log::DummyLogger dl("", "");
+#endif
+  log::LogContext lc(dl);
+  
+  // We want to virtually archive files on 10 different tapes that will be asked for by different activities.
+  // Activity A will have a weight of .4, B 0.3, and this allows partially predicting the mount order for them:
+  // (A or B) (the other) A B A B A (A or B) (the other) A.
+  // We hence need to create files on 10 different tapes and recall them with the respective activities.
+  std::map<size_t, uint64_t> archiveFileIds;
+  cta::range<size_t> fileRange(10);
+  for (auto i: fileRange) {
+    // Queue several archive requests.
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.group="group2";
+    diskFileInfo.owner="cms_user";
+    diskFileInfo.path="path/to/file";
+    diskFileInfo.path += std::to_string(i);
+    cta::common::dataStructures::ArchiveRequest request;
+    request.checksumType="ADLER32";
+    request.checksumValue="1234abcd";
+    request.creationLog=creationLog;
+    request.diskFileInfo=diskFileInfo;
+    request.diskFileID="diskFileID";
+    request.diskFileID += std::to_string(i);
+    request.fileSize=100*1000*1000;
+    cta::common::dataStructures::UserIdentity requester;
+    requester.name = s_userName;
+    requester.group = "userGroup";
+    request.requester = requester;
+    request.srcURL="srcURL";
+    request.storageClass=s_storageClassName;
+    archiveFileIds[i] = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
+    scheduler.queueArchiveWithGivenId(archiveFileIds[i], s_diskInstance, request, lc);
+  }
+  scheduler.waitSchedulerDbSubthreadsComplete();
+  
+  // Check that we have the files in the queues
+  // TODO: for this to work all the time, we need an index of all requests
+  // (otherwise we miss the selected ones).
+  // Could also be limited to querying by ID (global index needed)
+  std::map<size_t, bool> found;
+  for (auto & tp: scheduler.getPendingArchiveJobs(lc)) {
+    for (auto & req: tp.second) {
+      for (auto i:fileRange)
+        if (req.archiveFileID == archiveFileIds.at(i))
+          found[i] = true;
+    }
+  }
+  for (auto i:fileRange) {
+    ASSERT_NO_THROW(found.at(i));
+    ASSERT_TRUE(found.at(i));
+  }
+
+  // Create the environment for the migrations to happen (library + tapes) 
+  const std::string libraryComment = "Library comment";
+  const bool libraryIsDisabled = false;
+  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
+    libraryIsDisabled, libraryComment);
+  {
+    auto libraries = catalogue.getLogicalLibraries();
+    ASSERT_EQ(1, libraries.size());
+    ASSERT_EQ(s_libraryName, libraries.front().name);
+    ASSERT_EQ(libraryComment, libraries.front().comment);
+  }
+  const uint64_t capacityInBytes = 12345678;
+  const std::string tapeComment = "Tape comment";
+  bool notDisabled = false;
+  bool notFull = false;
+  const std::string driveName = "tape_drive";
+  for (auto i:fileRange) {
+    catalogue.createTape(s_adminOnAdminHost, s_vid + std::to_string(i), s_mediaType, s_vendor, s_libraryName, s_tapePoolName, capacityInBytes,
+      notDisabled, notFull, tapeComment);
+    catalogue.tapeLabelled(s_vid + std::to_string(i), "tape_drive");    
+  }
+
+
+  {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    // This first initialization is normally done by the dataSession function.
+    cta::common::dataStructures::DriveInfo driveInfo = { driveName, "myHost", s_libraryName };
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Down, lc);
+    scheduler.reportDriveStatus(driveInfo, cta::common::dataStructures::MountType::NoMount, cta::common::dataStructures::DriveStatus::Up, lc);
+    for (auto i:fileRange) {
+      i=i;
+      mount.reset(scheduler.getNextMount(s_libraryName, "drive0", lc).release());
+      ASSERT_NE(nullptr, mount.get());
+      ASSERT_EQ(cta::common::dataStructures::MountType::ArchiveForUser, mount.get()->getMountType());
+      auto & osdb=getSchedulerDB();
+      auto mi=osdb.getMountInfo(lc);
+      ASSERT_EQ(1, mi->existingOrNextMounts.size());
+      ASSERT_EQ("TestTapePool", mi->existingOrNextMounts.front().tapePool);
+      std::unique_ptr<cta::ArchiveMount> archiveMount;
+      archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
+      ASSERT_NE(nullptr, archiveMount.get());
+      std::list<std::unique_ptr<cta::ArchiveJob>> archiveJobBatch = archiveMount->getNextJobBatch(1,1,lc);
+      ASSERT_NE(nullptr, archiveJobBatch.front().get());
+      ASSERT_EQ(1, archiveJobBatch.size());
+      std::unique_ptr<ArchiveJob> archiveJob = std::move(archiveJobBatch.front());
+      archiveJob->tapeFile.blockId = 1;
+      archiveJob->tapeFile.fSeq = 1;
+      archiveJob->tapeFile.checksumType = "ADLER32";
+      archiveJob->tapeFile.checksumValue = "1234abcd";
+      archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
+      archiveJob->tapeFile.copyNb = 1;
+      archiveJob->validate();
+      std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
+      std::queue<cta::catalogue::TapeItemWritten> sTapeItems;
+      sDBarchiveJobBatch.emplace(std::move(archiveJob));
+      archiveMount->reportJobsBatchTransferred(sDBarchiveJobBatch, sTapeItems, lc);
+      // Mark the tape full so we get one file per tape.
+      archiveMount->setTapeFull();
+      archiveMount->complete();
+    }
+  }
+  
+  {
+    // Emulate the reporter process reporting successful transfer to tape to the disk system
+    // The jobs get reported by tape, so we need to report 10*1 file (one per tape).
+    for (auto i:fileRange) {
+      i=i;
+      auto jobsToReport = scheduler.getNextArchiveJobsToReportBatch(10, lc);
+      ASSERT_EQ(1, jobsToReport.size());
+      disk::DiskReporterFactory factory;
+      log::TimingList timings;
+      utils::Timer t;
+      scheduler.reportArchiveJobsBatch(jobsToReport, factory, timings, t, lc);
+    }
+    ASSERT_EQ(0, scheduler.getNextArchiveJobsToReportBatch(10, lc).size());
+  }
+  
+  {
+    // Declare activities in the catalogue.
+    catalogue.createActivitiesFairShareWeight(s_adminOnAdminHost, s_diskInstance, "A", 0.4, "No comment");
+    catalogue.createActivitiesFairShareWeight(s_adminOnAdminHost, s_diskInstance, "B", 0.3, "No comment");
+    auto activities = catalogue.getActivitiesFairShareWeights();
+    ASSERT_EQ(1, activities.size());
+    auto ac=activities.front();
+    ASSERT_EQ(s_diskInstance, ac.diskInstance);
+    ASSERT_EQ(2, ac.activitiesWeights.size());
+    ASSERT_NO_THROW(ac.activitiesWeights.at("A"));
+    ASSERT_EQ(0.4, ac.activitiesWeights.at("A"));
+    ASSERT_NO_THROW(ac.activitiesWeights.at("B"));
+    ASSERT_EQ(0.3, ac.activitiesWeights.at("B"));
+  }
+
+  {
+    cta::common::dataStructures::EntryLog creationLog;
+    creationLog.host="host2";
+    creationLog.time=0;
+    creationLog.username="admin1";
+    cta::common::dataStructures::DiskFileInfo diskFileInfo;
+    diskFileInfo.group="group2";
+    diskFileInfo.owner="cms_user";
+    diskFileInfo.path="path/to/file";
+    for (auto i:fileRange) {
+      cta::common::dataStructures::RetrieveRequest request;
+      request.archiveFileID = archiveFileIds.at(i);
+      request.creationLog = creationLog;
+      request.diskFileInfo = diskFileInfo;
+      request.dstURL = "dstURL";
+      request.requester.name = s_userName;
+      request.requester.group = "userGroup";
+      if (i < 6)
+        request.activity = "A";
+      else 
+        request.activity = "B";
+      scheduler.queueRetrieve(s_diskInstance, request, lc);
+    }
+    scheduler.waitSchedulerDbSubthreadsComplete();
+  }
+
+  // Check that the retrieve requests are queued
+  {
+    auto rqsts = scheduler.getPendingRetrieveJobs(lc);
+    // We expect 10 tape with queued jobs
+    ASSERT_EQ(10, rqsts.size());
+    // We expect each queue to contain 1 job
+    for (auto & q: rqsts) {
+      ASSERT_EQ(1, q.second.size());
+      // We expect the job to be single copy
+      auto & job = q.second.back();
+      ASSERT_EQ(1, job.tapeCopies.size());
+      // Check the remote target
+      ASSERT_EQ("dstURL", job.request.dstURL);
+    }
+    // We expect each tape to be seen
+    for (auto i:fileRange) {
+      ASSERT_NO_THROW(rqsts.at(s_vid + std::to_string(i)));
+    }
+  }
+
+  
+  enum ExpectedActivity {
+    Unknown,
+    A,
+    B
+  };
+  
+  std::vector<ExpectedActivity> expectedActivities = { Unknown, Unknown, A, B, A, B, A, Unknown, Unknown, A};
+  size_t i=0;
+  for (auto ea: expectedActivities) {
+    // Emulate a tape server by asking for a mount and then a file (and succeed the transfer)
+    std::unique_ptr<cta::TapeMount> mount;
+    std::string drive="drive";
+    drive += std::to_string(++i);
+    mount.reset(scheduler.getNextMount(s_libraryName, drive, lc).release());
+    ASSERT_NE(nullptr, mount.get());
+    ASSERT_EQ(cta::common::dataStructures::MountType::Retrieve, mount.get()->getMountType());
+    ASSERT_TRUE((bool)mount.get()->getActivity());
+    if (ea != Unknown) {
+      std::string expectedActivity(ea==A?"A":"B"), activity(mount.get()->getActivity().value());
+      ASSERT_EQ(expectedActivity, activity);
+    }
+    std::unique_ptr<cta::RetrieveMount> retrieveMount;
+    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
+    ASSERT_NE(nullptr, retrieveMount.get());
+    std::unique_ptr<cta::RetrieveJob> retrieveJob;
+    auto jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(1, jobBatch.size());
+    retrieveJob.reset(jobBatch.front().release());
+    ASSERT_NE(nullptr, retrieveJob.get());
+    retrieveJob->asyncSetSuccessful();
+    std::queue<std::unique_ptr<cta::RetrieveJob> > jobQueue;
+    jobQueue.push(std::move(retrieveJob));
+    retrieveMount->flushAsyncSuccessReports(jobQueue, lc);
+    jobBatch = retrieveMount->getNextJobBatch(1,1,lc);
+    ASSERT_EQ(0, jobBatch.size());
+  }
+}
+
+
 #undef TEST_MOCK_DB
 #ifdef TEST_MOCK_DB
 static cta::MockSchedulerDatabaseFactory mockDbFactory;
diff --git a/scheduler/TapeMount.hpp b/scheduler/TapeMount.hpp
index f30c67058576ddb92a62020f3429862d84718223..01dcae8da022e73022237d5ec385d08dfe422c6d 100644
--- a/scheduler/TapeMount.hpp
+++ b/scheduler/TapeMount.hpp
@@ -20,6 +20,7 @@
 
 #include "common/dataStructures/MountType.hpp"
 #include "common/dataStructures/DriveStatus.hpp"
+#include "common/optional.hpp"
 #include "tapeserver/castor/tape/tapeserver/daemon/TapeSessionStats.hpp"
 
 #include <string>
@@ -52,6 +53,14 @@ namespace cta {
      * @return The mount transaction id.
      */
     virtual std::string getMountTransactionId() const = 0;    
+    
+    /**
+     * Return the activity this mount is running for.
+     * 
+     * @return optional, populated with the activity name if appropriate.
+     */
+    
+    virtual optional<std::string> getActivity() const = 0;
 
     /**
      * Returns the mount transaction id.
diff --git a/scheduler/TapeMountDummy.hpp b/scheduler/TapeMountDummy.hpp
index 82cc05df81871a9df412e411f15971c4d32f6525..ca98d766ef6cf51c6d73ca9dbb7b3e7d7d377e9d 100644
--- a/scheduler/TapeMountDummy.hpp
+++ b/scheduler/TapeMountDummy.hpp
@@ -35,6 +35,9 @@ class TapeMountDummy: public TapeMount {
   cta::common::dataStructures::MountType getMountType() const override {
     throw exception::Exception("In DummyTapeMount::getMountType() : not implemented");
   }
+  optional<std::string> getActivity() const override {
+    throw exception::Exception("In DummyTapeMount::getActivity() : not implemented");
+  }
   uint32_t getNbFiles() const override {
     throw exception::Exception("In DummyTapeMount::getNbFiles() : not implemented");
   }
diff --git a/xroot_plugins/XrdSsiCtaRequestMessage.cpp b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
index 5b647afef01af02522875652f8379f3c70fbb961..5b73e2aa9db9f4705773079eec6a79f7908d23b9 100644
--- a/xroot_plugins/XrdSsiCtaRequestMessage.cpp
+++ b/xroot_plugins/XrdSsiCtaRequestMessage.cpp
@@ -494,6 +494,11 @@ void RequestMessage::processPREPARE(const cta::eos::Notification &notification,
    {
       throw PbException("Invalid archiveFileID " + archiveFileIdStr);
    }
+   
+   // Activity value is a string. The parameter might be present or not.
+   try {
+     request.activity = notification.file().xattr().at("activity");
+   } catch (...) {}
 
    cta::utils::Timer t;
 
@@ -503,6 +508,10 @@ void RequestMessage::processPREPARE(const cta::eos::Notification &notification,
    // Create a log entry
    cta::log::ScopedParamContainer params(m_lc);
    params.add("fileId", request.archiveFileID).add("schedulerTime", t.secs());
+   try {
+     // Print out the received activity in the logs for the moment.
+     params.add("activity", notification.file().xattr().at("activity"));
+   } catch (...) {}
    m_lc.log(cta::log::INFO, "In RequestMessage::processPREPARE(): queued file for retrieve.");
 
    // Set response type
@@ -852,7 +861,7 @@ void RequestMessage::processDrive_Ls(cta::xrd::Response &response)
       std::vector<std::vector<std::string>> responseTable;
       std::vector<std::string> headers = {
          "library","drive","host","desired","request","status","since","vid","tapepool","files",
-         "MBytes","MB/s","session","age"
+         "MBytes","MB/s","session","priority","activity","age"
       };
       responseTable.push_back(headers);
 
@@ -919,6 +928,8 @@ void RequestMessage::processDrive_Ls(cta::xrd::Response &response)
             default:
                currentRow.push_back(std::to_string(static_cast<unsigned long long>(ds.sessionId)));
          }
+         currentRow.push_back(std::to_string(ds.currentPriority));
+         currentRow.push_back(ds.currentActivityAndWeight?ds.currentActivityAndWeight.value().activity: "-");
          currentRow.push_back(std::to_string(timeSinceLastUpdate_s) +
             (timeSinceLastUpdate_s > DRIVE_TIMEOUT ? " [STALE]" : ""));
          responseTable.push_back(currentRow);