Commit abe6dbc4 authored by Eric Cano

Added automatic trimming of empty queues at schedule time.

parent 6a055b3b
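
This commit makes the scheduler database flag empty queues: when OStoreDB::fetchMountInfo() walks the archive and retrieve queues and finds one with no jobs, it sets queueTrimRequired in the returned TapeMountDecisionInfo, and Scheduler::getNextMount() reacts by asking the database to trim those queues before scheduling. A minimal sketch of the caller-side flow, mirroring the Scheduler change below (m_db and the log context lc are assumed from the existing scheduler code):

  // Sketch only: how the new flag and the trim call are meant to interact.
  std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> mountInfo = m_db.getMountInfo();
  if (mountInfo->queueTrimRequired) {
    // An empty archive or retrieve queue was seen while collecting statistics:
    // remove those queues from the root entry before picking the next mount.
    m_db.trimEmptyQueues(lc);
  }
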
@@ -106,6 +106,8 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
m.maxDrivesAllowed = aqueue.getJobsSummary().maxDrivesAllowed;
m.minArchiveRequestAge = aqueue.getJobsSummary().minArchiveRequestAge;
m.logicalLibrary = "";
} else {
tmdi.queueTrimRequired = true;
}
}
// Walk the retrieve queues for statistics
@@ -129,6 +131,8 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, Ro
m.maxDrivesAllowed = rqueue.getJobsSummary().maxDrivesAllowed;
m.minArchiveRequestAge = rqueue.getJobsSummary().minArchiveRequestAge;
m.logicalLibrary = ""; // The logical library is not known here, and will be determined by the caller.
} else {
tmdi.queueTrimRequired = true;
}
}
// Collect information about the existing and next mounts
@@ -202,6 +206,53 @@ std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo>
return ret;
}
//------------------------------------------------------------------------------
// OStoreDB::trimEmptyQueues()
//------------------------------------------------------------------------------
void OStoreDB::trimEmptyQueues(log::LogContext& lc) {
// We will trim empty queues from the root entry.
lc.log(log::INFO, "In OStoreDB::trimEmptyQueues(): will start trimming empty queues");
  // Get an exclusive lock on the root entry, as we will most likely need it.
RootEntry re(m_objectStore);
ScopedExclusiveLock rel(re);
try {
auto archiveQueueList = re.dumpArchiveQueues();
for (auto & a: archiveQueueList) {
ArchiveQueue aq(a.address, m_objectStore);
ScopedSharedLock aql(aq);
aq.fetch();
if (!aq.dumpJobs().size()) {
aql.release();
re.removeArchiveQueueAndCommit(a.tapePool);
log::ScopedParamContainer params(lc);
params.add("tapePool", a.tapePool)
.add("queueObject", a.address);
lc.log(log::INFO, "In OStoreDB::trimEmptyQueues(): deleted empty archive queue.");
}
}
    auto retrieveQueueList = re.dumpRetrieveQueues();
    for (auto & r: retrieveQueueList) {
RetrieveQueue rq(r.address, m_objectStore);
ScopedSharedLock rql(rq);
rq.fetch();
if (!rq.dumpJobs().size()) {
rql.release();
re.removeRetrieveQueueAndCommit(r.vid);
log::ScopedParamContainer params(lc);
params.add("vid", r.vid)
.add("queueObject", r.address);
lc.log(log::INFO, "In OStoreDB::trimEmptyQueues(): deleted empty retrieve queue.");
}
}
} catch (cta::exception::Exception & ex) {
log::ScopedParamContainer params(lc);
params.add("exceptionMessage", ex.getMessageValue());
lc.log(log::ERR, "In OStoreDB::trimEmptyQueues(): got an exception. Stack trace follows.");
lc.logBacktrace(log::ERR, ex.backtrace());
}
}
//------------------------------------------------------------------------------
// OStoreDB::getMountInfoNoLock()
//------------------------------------------------------------------------------
......
@@ -113,6 +113,7 @@ private:
public:
std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> getMountInfo() override;
std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> getMountInfoNoLock() override;
void trimEmptyQueues(log::LogContext& lc) override;
/* === Archive Mount handling ============================================= */
class ArchiveMount: public SchedulerDatabase::ArchiveMount {
......
@@ -129,7 +129,11 @@ public:
std::unique_ptr<TapeMountDecisionInfo> getMountInfo() override {
return m_OStoreDB.getMountInfo();
}
void trimEmptyQueues(log::LogContext& lc) override {
m_OStoreDB.trimEmptyQueues(lc);
}
std::unique_ptr<TapeMountDecisionInfo> getMountInfoNoLock() override {
return m_OStoreDB.getMountInfoNoLock();
}
......
@@ -405,6 +405,7 @@ std::unique_ptr<TapeMount> Scheduler::getNextMount(const std::string &logicalLib
// First, get the mount-related info from the DB
std::unique_ptr<SchedulerDatabase::TapeMountDecisionInfo> mountInfo;
mountInfo = m_db.getMountInfo();
if (mountInfo->queueTrimRequired) m_db.trimEmptyQueues(lc);
__attribute__((unused)) SchedulerDatabase::TapeMountDecisionInfo & debugMountInfo = *mountInfo;
  // The library information is not known for the tapes involved in retrieves. We
......
@@ -430,6 +430,7 @@ public:
std::vector<PotentialMount> potentialMounts; /**< All the potential mounts */
std::vector<ExistingMount> existingOrNextMounts; /**< Existing mounts */
std::map<std::string, DedicationEntry> dedicationInfo; /**< Drives dedication info */
bool queueTrimRequired = false; /**< Indicates an empty queue was encountered */
/**
* Create a new archive mount. This implicitly releases the global scheduling
* lock.
@@ -457,6 +458,12 @@ public:
*/
virtual std::unique_ptr<TapeMountDecisionInfo> getMountInfo() = 0;
/**
* A function running a queue trim. It should be called when the queueTrimRequired flag
* was set in the TapeMountDecisionInfo returned by getMountInfo().
*/
virtual void trimEmptyQueues(log::LogContext & lc) = 0;
/**
* A function dumping the relevant mount information for reporting the system
* status. It is identical to getMountInfo, yet does not take the global lock.
......
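
The contract added to SchedulerDatabase above boils down to iterating over the known queues and deleting the ones that hold no jobs, which is what OStoreDB::trimEmptyQueues() does against the root entry. A standalone sketch of that erase-while-iterating pattern on a plain std::map (the container and type names here are illustrative only, not part of this commit):

  #include <map>
  #include <string>
  #include <vector>

  // Hypothetical in-memory model: queue identifier (tape pool or vid) -> job addresses.
  using Queue = std::vector<std::string>;

  void trimEmptyQueues(std::map<std::string, Queue> & queues) {
    for (auto it = queues.begin(); it != queues.end(); ) {
      if (it->second.empty())
        it = queues.erase(it);  // erase() returns the iterator to the next element
      else
        ++it;
    }
  }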