Commit a41e9d1f authored by Steven Murray's avatar Steven Murray
Browse files

Removed Scheduler::queueArchive() and updated unit-tests accordingly

parent dc2baa2c
......@@ -105,52 +105,6 @@ uint64_t Scheduler::checkAndGetNextArchiveFileId(const std::string &instanceName
return archiveFileId;
}
//------------------------------------------------------------------------------
// queueArchive
//------------------------------------------------------------------------------
uint64_t Scheduler::queueArchive(const std::string &instanceName, const common::dataStructures::ArchiveRequest &request,
  log::LogContext & lc) {
  using utils::postEllipsis;
  using utils::midEllipsis;
  // Time the two phases (catalogue preparation, scheduler-DB queueing)
  // separately so both durations appear in the log record below.
  cta::utils::Timer timer;
  auto catalogueInfo = m_catalogue.prepareForNewFile(instanceName, request.storageClass, request.requester);
  const auto catalogueTime = timer.secs(cta::utils::Timer::resetCounter);
  m_db.queueArchive(instanceName, request, catalogueInfo, lc);
  const auto schedulerDbTime = timer.secs();
  // Emit one INFO record describing the queued request and its timings.
  log::ScopedParamContainer spc(lc);
  spc.add("instanceName", instanceName);
  spc.add("storageClass", request.storageClass);
  spc.add("diskFileID", request.diskFileID);
  spc.add("fileSize", request.fileSize);
  spc.add("fileId", catalogueInfo.fileId);
  // One parameter per destination tape pool, keyed by copy number.
  for (const auto & copyToPool : catalogueInfo.copyToPoolMap) {
    std::stringstream paramName;
    paramName << "tapePool" << copyToPool.first;
    spc.add(paramName.str(), copyToPool.second);
  }
  spc.add("policyName", catalogueInfo.mountPolicy.name);
  spc.add("policyArchiveMinAge", catalogueInfo.mountPolicy.archiveMinRequestAge);
  spc.add("policyArchivePriority", catalogueInfo.mountPolicy.archivePriority);
  spc.add("policyMaxDrives", catalogueInfo.mountPolicy.maxDrivesAllowed);
  spc.add("diskFilePath", request.diskFileInfo.path);
  spc.add("diskFileOwner", request.diskFileInfo.owner);
  spc.add("diskFileGroup", request.diskFileInfo.group);
  // Long free-form fields are truncated with ellipses to keep the log line bounded.
  spc.add("diskFileRecoveryBlob", postEllipsis(request.diskFileInfo.recoveryBlob, 20));
  spc.add("checksumValue", request.checksumValue);
  spc.add("checksumType", request.checksumType);
  spc.add("archiveReportURL", midEllipsis(request.archiveReportURL, 50, 15));
  spc.add("creationHost", request.creationLog.host);
  spc.add("creationTime", request.creationLog.time);
  spc.add("creationUser", request.creationLog.username);
  spc.add("requesterName", request.requester.name);
  spc.add("requesterGroup", request.requester.group);
  spc.add("srcURL", midEllipsis(request.srcURL, 50, 15));
  spc.add("catalogueTime", catalogueTime);
  spc.add("schedulerDbTime", schedulerDbTime);
  lc.log(log::INFO, "Queued archive request");
  // The catalogue-assigned CTA archive file ID identifies the queued request.
  return catalogueInfo.fileId;
}
//------------------------------------------------------------------------------
// queueArchiveWithGivenId
//------------------------------------------------------------------------------
......
......@@ -109,18 +109,6 @@ public:
const common::dataStructures::UserIdentity &user,
log::LogContext &lc);
/**
* Queue an archive request and return the CTA file ID.
* Throws a UserError exception in case of wrong request parameters (ex. no route to tape)
* Throws a (Non)RetryableError exception in case something else goes wrong with the request
* @param instanceName name of the EOS instance
* @param request the archive request
* @param lc a log context allowing logging from within the scheduler routine.
* @return
*/
uint64_t queueArchive(const std::string &instanceName, const cta::common::dataStructures::ArchiveRequest &request,
log::LogContext &lc);
/**
* Queue the specified archive request.
* Throws a UserError exception in case of wrong request parameters (ex. no route to tape)
......
......@@ -264,7 +264,9 @@ TEST_P(SchedulerTest, archive_to_new_file) {
log::DummyLogger dl("");
log::LogContext lc(dl);
scheduler.queueArchive(s_diskInstance, request, lc);
const uint64_t archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass,
request.requester, lc);
scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
{
auto rqsts = scheduler.getPendingArchiveJobs(lc);
......@@ -395,7 +397,8 @@ TEST_P(SchedulerTest, archive_and_retrieve_new_file) {
request.requester = requester;
request.srcURL="srcURL";
request.storageClass=s_storageClassName;
archiveFileId = scheduler.queueArchive(s_diskInstance, request, lc);
archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
}
// Check that we have the file in the queues
......@@ -570,7 +573,8 @@ TEST_P(SchedulerTest, retry_archive_until_max_reached) {
request.requester = requester;
request.srcURL="srcURL";
request.storageClass=s_storageClassName;
archiveFileId = scheduler.queueArchive(s_diskInstance, request, lc);
archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
}
// Create the environment for the migration to happen (library + tape)
......@@ -684,7 +688,8 @@ TEST_P(SchedulerTest, showqueues) {
request.requester = requester;
request.srcURL="srcURL";
request.storageClass=s_storageClassName;
archiveFileId = scheduler.queueArchive(s_diskInstance, request, lc);
archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, request.storageClass, request.requester, lc);
scheduler.queueArchiveWithGivenId(archiveFileId, s_diskInstance, request, lc);
}
// get the queues from scheduler
......
......@@ -1267,7 +1267,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionGooddayMigration) {
ar.diskFileInfo.owner = "z";
ar.diskFileInfo.group = "g";
ar.diskFileInfo.recoveryBlob = "b";
archiveFileIds.push_back(scheduler.queueArchive(s_diskInstance,ar,logContext));
const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
archiveFileIds.push_back(archiveFileId);
scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
}
}
// Report the drive's existence and put it up in the drive register.
......@@ -1407,7 +1409,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionMissingFilesMigration) {
ar.diskFileInfo.owner = "z";
ar.diskFileInfo.group = "g";
ar.diskFileInfo.recoveryBlob = "b";
archiveFileIds.push_back(scheduler.queueArchive(s_diskInstance,ar,logContext));
const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
archiveFileIds.push_back(archiveFileId);
scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
// Delete the file: the migration will fail.
sourceFiles.clear();
}
......@@ -1545,7 +1549,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullMigration) {
ar.diskFileInfo.owner = "z";
ar.diskFileInfo.group = "g";
ar.diskFileInfo.recoveryBlob = "b";
archiveFileIds.push_back(scheduler.queueArchive(s_diskInstance,ar,logContext));
const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
archiveFileIds.push_back(archiveFileId);
scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
}
}
// Report the drive's existence and put it up in the drive register.
......@@ -1695,7 +1701,9 @@ TEST_P(DataTransferSessionTest, DataTransferSessionTapeFullOnFlushMigration) {
ar.diskFileInfo.owner = "z";
ar.diskFileInfo.group = "g";
ar.diskFileInfo.recoveryBlob = "b";
archiveFileIds.push_back(scheduler.queueArchive(s_diskInstance,ar,logContext));
const auto archiveFileId = scheduler.checkAndGetNextArchiveFileId(s_diskInstance, ar.storageClass, ar.requester, logContext);
archiveFileIds.push_back(archiveFileId);
scheduler.queueArchiveWithGivenId(archiveFileId,s_diskInstance,ar,logContext);
}
}
// Report the drive's existence and put it up in the drive register.
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment