Commit be18a603 authored by Cedric Caffy

[repack] Corrected the problem of Agent ownership removal for repack request and repack sub-requests
parent 79e17ff1
......@@ -76,7 +76,7 @@ struct RepackInfo {
bool forceDisabledTape;
bool noRecall;
common::dataStructures::EntryLog creationLog;
cta::optional<time_t> repackFinishedTime;
time_t repackFinishedTime = 0;
RepackDestinationInfo::List destinationInfos;
// std::string tag;
// uint64_t totalFiles;
......
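The type change above swaps cta::optional for a zero sentinel: 0 now means "repack not finished yet", so readers use the value directly instead of testing an optional first. A minimal sketch of the resulting contract (the helper is illustrative, not part of this commit):

    #include <ctime>
    #include <cstdint>

    // Illustrative helper: elapsed repack time keeps growing while the finish
    // time is still the 0 sentinel, and freezes once it has been stamped.
    uint64_t repackElapsedSeconds(time_t creationTime, time_t repackFinishedTime) {
      const time_t end = (repackFinishedTime != 0) ? repackFinishedTime // finished
                                                   : ::time(nullptr);  // ongoing
      return static_cast<uint64_t>(end - creationTime);
    }

This is the same rule the frontend hunk at the end of this commit applies when computing repack_time.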
......@@ -91,7 +91,8 @@ void cta::objectstore::Agent::removeAndUnregisterSelf(log::LogContext & lc) {
endIndex++;
if (++currentCount >= 25 || ownedObj == ownershipList.end()) {
log::ScopedParamContainer params(lc);
params.add("objects", currentObjs)
params.add("agentObject",getAddressIfSet())
.add("objects", currentObjs)
.add("startIndex", startIndex)
.add("endIndex", endIndex - 1)
.add("totalObjects", ownershipList.size());
......@@ -104,7 +105,7 @@ void cta::objectstore::Agent::removeAndUnregisterSelf(log::LogContext & lc) {
}
// Prepare exception to be thrown.
std::stringstream exSs;
exSs << "In Agent::removeAndUnregisterSelf: agent still owns objects. Here's the first few:";
exSs << "In Agent::removeAndUnregisterSelf: agent (agentObject=" << getAddressIfSet() << ") still owns objects. Here's the first few:";
size_t count=0;
for(auto i=ownershipList.begin(); i!=ownershipList.end(); i++) {
exSs << " " << *i;
......
......@@ -125,6 +125,7 @@ TEST(ObjectStore, ArchiveQueueAlgorithms) {
BackendVFS be;
AgentReference agentRef("unitTestGarbageCollector", dl);
Agent agent(agentRef.getAgentAddress(), be);
// Create the root entry
RootEntry re(be);
re.initialize();
......@@ -172,12 +173,23 @@ TEST(ObjectStore, ArchiveQueueAlgorithms) {
}
ContainerAlgorithms<ArchiveQueue,ArchiveQueueToTransferForUser> archiveAlgos(be, agentRef);
archiveAlgos.referenceAndSwitchOwnership("Tapepool", requests, lc);
for(auto & ar: archiveRequests){
cta::objectstore::ScopedExclusiveLock sel(*ar);
ar->fetch();
ASSERT_TRUE(ar->getJobOwner(1).find("ArchiveQueueToTransferForUser-Tapepool-unitTestGarbageCollector") != std::string::npos);
}
// Now get the requests back
ContainerTraits<ArchiveQueue,ArchiveQueueToTransferForUser>::PopCriteria popCriteria;
popCriteria.bytes = std::numeric_limits<decltype(popCriteria.bytes)>::max();
popCriteria.files = 100;
auto poppedJobs = archiveAlgos.popNextBatch("Tapepool", popCriteria, lc);
ASSERT_EQ(poppedJobs.summary.files, 10);
for(auto & ar: archiveRequests){
cta::objectstore::ScopedExclusiveLock sel(*ar);
ar->fetch();
ASSERT_EQ(agentRef.getAgentAddress(),ar->getJobOwner(1));
}
}
TEST(ObjectStore, RetrieveQueueAlgorithms) {
......@@ -193,8 +205,13 @@ TEST(ObjectStore, RetrieveQueueAlgorithms) {
// Here we check for the ability to detect dead (but empty) agents and clean them up
BackendVFS be;
//Agent1: creates the requests and later pops them
AgentReference agentRef("unitTestGarbageCollector", dl);
Agent agent(agentRef.getAgentAddress(), be);
//Agent2: queues the requests
AgentReference agentRef2("Agent2", dl);
Agent agent2(agentRef2.getAgentAddress(), be);
// Create the root entry
RootEntry re(be);
re.initialize();
......@@ -203,52 +220,42 @@ TEST(ObjectStore, RetrieveQueueAlgorithms) {
EntryLogSerDeser el("user0", "unittesthost", time(NULL));
ScopedExclusiveLock rel(re);
re.addOrGetAgentRegisterPointerAndCommit(agentRef, el, lc);
re.addOrGetAgentRegisterPointerAndCommit(agentRef2, el, lc);
rel.release();
agent.initialize();
agent.insertAndRegisterSelf(lc);
agent2.initialize();
agent2.insertAndRegisterSelf(lc);
std::list<std::unique_ptr<RetrieveRequest> > requestsPtrs;
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer>::InsertedElement::list requests;
fillRetrieveRequests(requests, requestsPtrs, be, agentRef); //memory leak here
auto a1 = agentRef.getAgentAddress();
auto a2 = agentRef2.getAgentAddress();
{
// Second agent to test referenceAndSwitchOwnershipIfNecessary
BackendVFS be2;
AgentReference agentRef2("Agent 2", dl);
Agent agent2(agentRef2.getAgentAddress(), be2);
// Create the root entry
RootEntry re2(be2);
re2.initialize();
re2.insert();
// Create the agent register
EntryLogSerDeser el2("user0", "unittesthost", time(NULL));
ScopedExclusiveLock rel2(re2);
re2.addOrGetAgentRegisterPointerAndCommit(agentRef2, el2, lc);
rel2.release();
agent2.initialize();
agent2.insertAndRegisterSelf(lc);
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer>::InsertedElement::list requests2;
std::list<std::unique_ptr<RetrieveRequest> > requestsPtrs2;
fillRetrieveRequests(requests2, requestsPtrs2,be2, agentRef2);
auto a1 = agentRef2.getAgentAddress();
auto a2 = agentRef2.getAgentAddress();
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer> retrieveAlgos2(be2, agentRef2);
retrieveAlgos2.referenceAndSwitchOwnershipIfNecessary("VID",
a2, a1, requests2, lc);
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer> queueRetrieveAlgo(be, agentRef2);
queueRetrieveAlgo.referenceAndSwitchOwnership("VID",
a1, requests, lc);
//Test that the owner of these requests is now the queue created for this VID by Agent2
for(auto &request: requestsPtrs){
cta::objectstore::RetrieveRequest rr(request->getAddressIfSet(),be);
cta::objectstore::ScopedExclusiveLock sel(rr);
rr.fetch();
ASSERT_TRUE(rr.getOwner().find("RetrieveQueueToTransferForUser-VID-Agent2") != std::string::npos);
}
}
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer> retrieveAlgos(be, agentRef);
ContainerAlgorithms<RetrieveQueue,RetrieveQueueToTransfer> popRetrieveAlgos(be, agentRef);
try {
ASSERT_EQ(requests.size(), 10);
retrieveAlgos.referenceAndSwitchOwnership("VID",
agentRef.getAgentAddress(), requests, lc);
// Now get the requests back
ContainerTraits<RetrieveQueue,RetrieveQueueToTransfer>::PopCriteria popCriteria;
popCriteria.bytes = std::numeric_limits<decltype(popCriteria.bytes)>::max();
popCriteria.files = 100;
auto poppedJobs = retrieveAlgos.popNextBatch("VID", popCriteria, lc);
auto poppedJobs = popRetrieveAlgos.popNextBatch("VID", popCriteria, lc);
ASSERT_EQ(poppedJobs.summary.files, 10);
// Validate that the summary has the same information as the popped elements
......@@ -256,6 +263,13 @@ TEST(ObjectStore, RetrieveQueueAlgorithms) {
for(auto &e: poppedJobs.elements) {
s += ContainerTraits<RetrieveQueue,RetrieveQueueToTransfer>::getElementSummary(e);
}
//Check that the popped jobs' owner is now agent1 and not the queue
for(auto & elt: poppedJobs.elements){
cta::objectstore::RetrieveRequest rr(elt.retrieveRequest->getAddressIfSet(),be);
cta::objectstore::ScopedExclusiveLock sel(rr);
rr.fetch();
ASSERT_EQ(a1, rr.getOwner());
}
ASSERT_EQ(s, poppedJobs.summary);
} catch (ContainerTraits<RetrieveQueue,RetrieveQueueToTransfer>::OwnershipSwitchFailure & ex) {
for (auto & e: ex.failedElements) {
......
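The structural fix in this test: the old version registered Agent2 in its own BackendVFS, so the ownership switch it exercised never crossed agents within a single object store. With both agents now sharing one backend, the test follows a request through a full queue/pop ownership round-trip. Condensed from the hunk above:

    // Agent2 queues: ownership moves from a1 (the creating agent) to the VID
    // queue that agentRef2 created, whose address embeds "...-VID-Agent2...".
    queueRetrieveAlgo.referenceAndSwitchOwnership("VID", a1, requests, lc);
    // Agent1 pops: ownership moves from that queue back to a1, as the
    // assertions on the popped elements verify.
    auto poppedJobs = popRetrieveAlgos.popNextBatch("VID", popCriteria, lc);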
......@@ -1106,6 +1106,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveAllStatusesAndQueues) {
cta::catalogue::DummyCatalogue catalogue;
// Here we check that we can successfully call RetrieveRequest's garbage collector
cta::objectstore::BackendVFS be;
std::string backendPath = be.getParams()->getPath();
// Create the root entry
cta::objectstore::RootEntry re(be);
re.initialize();
......@@ -1122,7 +1123,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveAllStatusesAndQueues) {
// continue agent creation.
cta::objectstore::Agent agent(agentRef.getAgentAddress(), be);
agent.initialize();
agent.setTimeout_us(10000);
agent.setTimeout_us(100000000);
agent.insertAndRegisterSelf(lc);
// Create all agents to be garbage collected
cta::objectstore::AgentReference agentRefToTransferForUser("ToTransferForUser", dl);
......
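The timeout bump keeps the main agent out of the collector's reach: setTimeout_us() presumably sets the heartbeat timeout after which an agent may be declared dead (inferred from the test, not stated in the commit), so only the purpose-built agents created below become garbage-collection targets:

    agent.setTimeout_us(100000000);  // 100'000'000 us = 100 s; the previous
                                     // 10'000 us (10 ms) made this agent itself
                                     // a candidate for collection mid-test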
......@@ -22,6 +22,7 @@
#include "RepackQueueAlgorithms.hpp"
#include "Algorithms.hpp"
#include "MountPolicySerDeser.hpp"
#include "AgentWrapper.hpp"
#include <google/protobuf/util/json_util.h>
#include <iostream>
......@@ -81,6 +82,7 @@ void RepackRequest::initialize() {
m_payload.set_force_disabled_tape(false);
m_payload.set_no_recall(false);
m_payload.set_is_complete(false);
m_payload.set_repack_finished_time(0);
// This object is good to go (to storage)
m_payloadInterpreted = true;
}
......@@ -122,6 +124,9 @@ void RepackRequest::setStatus(common::dataStructures::RepackInfo::Status repackS
checkPayloadWritable();
// common::dataStructures::RepackInfo::Status and serializers::RepackRequestStatus are defined using the same values,
// hence the cast.
if(repackStatus == common::dataStructures::RepackInfo::Status::Complete || repackStatus == common::dataStructures::RepackInfo::Status::Failed){
m_payload.set_repack_finished_time(time(nullptr));
}
m_payload.set_status((serializers::RepackRequestStatus)repackStatus);
}
......@@ -155,9 +160,7 @@ common::dataStructures::RepackInfo RepackRequest::getInfo() {
EntryLogSerDeser creationLog;
creationLog.deserialize(m_payload.creation_log());
ret.creationLog = creationLog;
if(m_payload.has_repack_finished_time()){
ret.repackFinishedTime = m_payload.repack_finished_time();
}
ret.repackFinishedTime = m_payload.repack_finished_time();
for(auto & rdi: m_payload.destination_infos()){
RepackInfo::RepackDestinationInfo rdiToInsert;
rdiToInsert.vid = rdi.vid();
......@@ -323,6 +326,7 @@ void RepackRequest::setStatus(){
m_payload.set_repack_finished_time(time(nullptr));
setStatus(common::dataStructures::RepackInfo::Status::Complete);
}
removeFromOwnerAgentOwnership();
return;
}
}
......@@ -417,6 +421,14 @@ auto RepackRequest::getOrPrepareSubrequestInfo(std::set<uint64_t> fSeqs, AgentRe
return ret;
}
void RepackRequest::removeFromOwnerAgentOwnership(){
checkPayloadReadable();
checkPayloadWritable();
cta::objectstore::Agent ag(getOwner(),m_objectStore);
cta::objectstore::AgentWrapper agWrapper(ag);
agWrapper.removeFromOwnership(getAddressIfSet(),m_objectStore);
}
//------------------------------------------------------------------------------
// RepackRequest::setLastExpandedFSeq()
//------------------------------------------------------------------------------
......
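This new helper is the core of the fix named in the commit title: it resolves the request's current owner as an Agent and removes the request's own address from that agent's ownership list. A hedged sketch of the call-site contract; the lock/fetch requirements are inferred from the checkPayloadReadable/checkPayloadWritable guards, and 'address'/'objectStore' are hypothetical:

    cta::objectstore::RepackRequest rr(address, objectStore);
    cta::objectstore::ScopedExclusiveLock lock(rr);
    rr.fetch();                          // the helper reads getOwner() from the payload
    rr.removeFromOwnerAgentOwnership();  // throws if the owner is a queue, not an agent
    rr.commit();

The throw-on-queue behaviour is what the cancelRepack() hunk further down catches and deliberately ignores.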
......@@ -97,6 +97,12 @@ public:
* This function implicitly records the information it generates (commit up to the caller);
*/
SubrequestInfo::set getOrPrepareSubrequestInfo (std::set<uint64_t> fSeqs, AgentReference & agentRef);
/**
* Remove this request from its owner agent's ownership
*/
void removeFromOwnerAgentOwnership();
private:
struct RepackSubRequestPointer {
std::string address;
......
......@@ -1218,7 +1218,7 @@ RetrieveRequest::AsyncRetrieveToArchiveTransformer * RetrieveRequest::asyncTrans
const cta::objectstore::serializers::ArchiveFile& archiveFile = retrieveRequestPayload.archivefile();
archiveRequestPayload.set_archivefileid(archiveFile.archivefileid());
archiveRequestPayload.set_checksumblob(archiveFile.checksumblob());
archiveRequestPayload.set_creationtime(::time(nullptr));
archiveRequestPayload.set_creationtime(archiveFile.creationtime());//This is the ArchiveFile creation time
archiveRequestPayload.set_diskfileid(archiveFile.diskfileid());
archiveRequestPayload.set_diskinstance(archiveFile.diskinstance());
archiveRequestPayload.set_filesize(archiveFile.filesize());
......@@ -1255,9 +1255,12 @@ RetrieveRequest::AsyncRetrieveToArchiveTransformer * RetrieveRequest::asyncTrans
const cta::objectstore::serializers::MountPolicy& retrieveRequestMP = retrieveRequestPayload.mountpolicy();
archiveRequestMP->CopyFrom(retrieveRequestMP);
//TODO : Should creation log just be initialized or should it be copied from the retrieveRequest ?
//Creation log is used by the queueing: job start time = archiveRequest creationLog.time
cta::objectstore::serializers::EntryLog *archiveRequestCL = archiveRequestPayload.mutable_creationlog();
archiveRequestCL->CopyFrom(retrieveRequestMP.creationlog());
archiveRequestCL->set_host(cta::utils::getShortHostname());
//Set the request creation time to now
archiveRequestCL->set_time(time(nullptr));
//Create archive jobs for each copyNb to rearchive
RetrieveRequest::RepackInfoSerDeser repackInfoSerDeser;
repackInfoSerDeser.deserialize(retrieveRequestPayload.repack_info());
......
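Two timestamps now deliberately diverge when a repack retrieve request is transformed back into an archive request: the file-level creationtime is preserved from the ArchiveFile metadata, while the request-level creation log is re-stamped, because queueing treats creationLog.time as the job start time (per the comment in the hunk). Condensed:

    archiveRequestPayload.set_creationtime(archiveFile.creationtime()); // file metadata, preserved
    archiveRequestCL->set_time(time(nullptr));                          // job start time, reset to now

Before this change the transform did the opposite: it stamped the file creation time with "now" and left the copied creation-log time untouched.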
......@@ -45,7 +45,7 @@ void Sorter::executeArchiveAlgorithm(const std::string tapePool, std::string& qu
jobsToAdd.push_back({ job.archiveRequest.get() ,job.jobDump.copyNb,job.archiveFile, job.mountPolicy,cta::nullopt });
}
try{
algo.referenceAndSwitchOwnershipIfNecessary(tapePool,previousOwner,queueAddress,jobsToAdd,lc);
algo.referenceAndSwitchOwnership(tapePool,previousOwner,jobsToAdd,lc);
} catch (typename Algo::OwnershipSwitchFailure &failure){
for(auto &failedAR: failure.failedElements){
try{
......@@ -167,7 +167,7 @@ void Sorter::executeRetrieveAlgorithm(const std::string vid, std::string& queueA
jobsToAdd.push_back({job.retrieveRequest.get(),job.jobDump.copyNb,job.fSeq,job.fileSize,job.mountPolicy,job.activityDescription,job.diskSystemName});
}
try{
algo.referenceAndSwitchOwnershipIfNecessary(vid,previousOwner,queueAddress,jobsToAdd,lc);
algo.referenceAndSwitchOwnership(vid,previousOwner,jobsToAdd,lc);
} catch(typename Algo::OwnershipSwitchFailure &failure){
for(auto& failedRR: failure.failedElements){
try {
......
......@@ -632,7 +632,7 @@ message RepackRequest {
repeated RepackSubRequestPointer subrequests = 11570;
repeated RepackDestinationInfo destination_infos = 11571;
required EntryLog creation_log = 11572;
optional uint64 repack_finished_time = 11573;
required uint64 repack_finished_time = 11573;
}
message RepackRequestIndexPointer {
......
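Turning repack_finished_time into a required field is why RepackRequest::initialize() now sets it to 0: under proto2 semantics a message with an unset required field reports IsInitialized() == false and refuses to serialize. A minimal illustration (not part of the commit):

    cta::objectstore::serializers::RepackRequest payload;
    // ... all other required fields set ...
    payload.set_repack_finished_time(0);  // required => must always carry a value;
                                          // 0 doubles as the "not finished" sentinel
    // Without that call, SerializeToString() would fail even with every
    // other field populated.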
......@@ -48,6 +48,7 @@
#include <iostream>
#include <bits/unique_ptr.h>
#include "common/utils/utils.hpp"
#include "objectstore/AgentWrapper.hpp"
namespace cta {
using namespace objectstore;
......@@ -1700,7 +1701,7 @@ OStoreDB::RetrieveQueueItor_t* OStoreDB::getRetrieveJobItorPtr(const std::string
//------------------------------------------------------------------------------
// OStoreDB::queueRepack()
//------------------------------------------------------------------------------
void OStoreDB::queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest,log::LogContext & lc) {
std::string OStoreDB::queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest,log::LogContext & lc) {
std::string vid = repackRequest.m_vid;
common::dataStructures::RepackInfo::Type repackType = repackRequest.m_repackType;
std::string bufferURL = repackRequest.m_repackBufferURL;
......@@ -1729,6 +1730,9 @@ void OStoreDB::queueRepack(const SchedulerDatabase::QueueRepackRequest & repackR
// We're good to go to create the object. We need to own it.
m_agentReference->addToOwnership(rr->getAddressIfSet(), m_objectStore);
rr->insert();
std::string repackRequestAddress = rr->getAddressIfSet();
// If latency needs to be improved, the next steps could be deferred like they are for archive and retrieve requests.
typedef objectstore::ContainerAlgorithms<RepackQueue, RepackQueuePending> RQPAlgo;
{
......@@ -1738,6 +1742,7 @@ void OStoreDB::queueRepack(const SchedulerDatabase::QueueRepackRequest & repackR
RQPAlgo rqpAlgo(m_objectStore, *m_agentReference);
rqpAlgo.referenceAndSwitchOwnership(nullopt, m_agentReference->getAgentAddress(), elements, lc);
}
return repackRequestAddress;
}
//------------------------------------------------------------------------------
......@@ -2021,7 +2026,7 @@ std::unique_ptr<SchedulerDatabase::RepackReportBatch> OStoreDB::getNextSuccessfu
// As we are popping from a single report queue, all requests should concern only one repack request.
if (repackRequestAddresses.size() != 1) {
std::stringstream err;
err << "In OStoreDB::getNextSuccessfulArchiveRepackReportBatch(): reports for several repack requests in the same queue. ";
err << "In OStoreDB::getNextSuccessfulRetrieveRepackReportBatch(): reports for several repack requests in the same queue. ";
for (auto & rr: repackRequestAddresses) { err << rr << " "; }
throw exception::Exception(err.str());
}
......@@ -2827,6 +2832,7 @@ void OStoreDB::RepackRequest::fail() {
ScopedExclusiveLock rrl(m_repackRequest);
m_repackRequest.fetch();
m_repackRequest.setStatus(common::dataStructures::RepackInfo::Status::Failed);
m_repackRequest.removeFromOwnerAgentOwnership();
m_repackRequest.commit();
}
......@@ -2899,7 +2905,13 @@ void OStoreDB::cancelRepack(const std::string& vid, log::LogContext & lc) {
rr.deleteAllSubrequests();
// And then delete the request
std::string repackRequestOwner = rr.getOwner();
rr.remove();
try {
//If the owner is not a Repack queue, it is an agent:
//remove this request from the agent's ownership
rr.removeFromOwnerAgentOwnership();
} catch(const cta::exception::Exception &ex){
//The owner is a queue, so continue
}
// We now need to dereference, from a queue if needed and from the index for sure.
Helpers::removeRepackRequestToIndex(vid, m_objectStore, lc);
if (repackRequestOwner.size()) {
......@@ -2910,14 +2922,20 @@ void OStoreDB::cancelRepack(const std::string& vid, log::LogContext & lc) {
try {
rql.lock(rq);
rq.fetch();
std::list<std::string> reqs{rr.getAddressIfSet()};
rq.removeRequestsAndCommit(reqs);
}
catch (objectstore::Backend::NoSuchObject &) { return; }
catch (objectstore::ObjectOpsBase::WrongType &) { return; }
std::list<std::string> reqs{rr.getAddressIfSet()};
rq.removeRequestsAndCommit(reqs);
catch (objectstore::ObjectOpsBase::WrongType &) {
}
}
//Delete the repack request now
rr.remove();
return;
} catch (cta::exception::Exception &) {}
} catch (cta::exception::Exception &ex) {
lc.log(cta::log::ERR,ex.getMessageValue());
return;
}
}
}
throw exception::UserError("In OStoreDB::cancelRepack(): No repack request for this VID.");
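The cancelRepack() rework also fixes an ordering problem: rr.remove() used to delete the request object while the index and a possible repack queue still referenced it. The new flow removes every reference first and deletes last; condensed outline (calls as in the hunk):

    rr.deleteAllSubrequests();                      // 1) drop all sub-requests
    try {
      rr.removeFromOwnerAgentOwnership();           // 2) owner is an agent: dereference it
    } catch (const cta::exception::Exception &) {}  //    owner is a queue: nothing to do here
    Helpers::removeRepackRequestToIndex(vid, m_objectStore, lc);  // 3) drop the index entry
    // 4) if the owner is a repack queue, removeRequestsAndCommit() dereferences it
    rr.remove();                                    // 5) only now delete the object itself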
......@@ -5234,11 +5252,14 @@ void OStoreDB::RepackArchiveReportBatch::report(log::LogContext& lc){
}
timingList.insertAndReset("asyncUpdateOrDeleteCompletionTime", t);
// 3) Just remove all jobs from ownership
std::list<std::string> jobsToUnown;
for (auto sri: m_subrequestList) jobsToUnown.push_back(sri.subrequest->getAddressIfSet());
for (auto &sri: m_subrequestList) {
jobsToUnown.emplace_back(sri.subrequest->getAddressIfSet());
}
m_oStoreDb.m_agentReference->removeBatchFromOwnership(jobsToUnown, m_oStoreDb.m_objectStore);
timingList.insertAndReset("ownershipRemoval", t);
log::ScopedParamContainer params(lc);
timingList.insertAndReset("ownershipRemovalTime", t);
timingList.addToLog(params);
params.add("archiveReportType",( newStatus == cta::objectstore::serializers::ArchiveJobStatus::AJS_Complete) ? "ArchiveSuccesses" : "ArchiveFailures");
lc.log(log::INFO, "In OStoreDB::RepackArchiveReportBatch::report(): reported a batch of jobs.");
......
......@@ -386,7 +386,7 @@ public:
JobsFailedSummary getRetrieveJobsFailedSummary(log::LogContext &logContext) override;
/* === Repack requests handling =========================================== */
void queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext &logContext) override;
std::string queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext &logContext) override;
std::list<common::dataStructures::RepackInfo> getRepackInfo() override;
common::dataStructures::RepackInfo getRepackInfo(const std::string& vid) override;
......
......@@ -233,8 +233,8 @@ public:
m_OStoreDB.deleteFailed(objectId, lc);
}
void queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext& lc) override {
m_OStoreDB.queueRepack(repackRequest, lc);
std::string queueRepack(const SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext& lc) override {
return m_OStoreDB.queueRepack(repackRequest, lc);
}
std::list<common::dataStructures::RepackInfo> getRepackInfo() override {
......
......@@ -335,7 +335,7 @@ void Scheduler::queueRepack(const common::dataStructures::SecurityIdentity &cliI
if (repackBufferURL.empty()) throw exception::UserError("Empty buffer URL.");
utils::Timer t;
checkTapeFullBeforeRepack(vid);
m_db.queueRepack(repackRequestToQueue, lc);
std::string repackRequestAddress = m_db.queueRepack(repackRequestToQueue, lc);
log::TimingList tl;
tl.insertAndReset("schedulerDbTime", t);
log::ScopedParamContainer params(lc);
......@@ -347,7 +347,8 @@ void Scheduler::queueRepack(const common::dataStructures::SecurityIdentity &cliI
.add("creationHostName",repackRequestToQueue.m_creationLog.host)
.add("creationUserName",repackRequestToQueue.m_creationLog.username)
.add("creationTime",repackRequestToQueue.m_creationLog.time)
.add("bufferURL", repackRequest.m_repackBufferURL);
.add("bufferURL", repackRequest.m_repackBufferURL)
.add("repackRequestAddress", repackRequestAddress);
tl.addToLog(params);
lc.log(log::INFO, "In Scheduler::queueRepack(): success.");
}
......
......@@ -446,7 +446,7 @@ public:
};
/*============ Repack management: user side ================================*/
virtual void queueRepack(const cta::SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext & lc) = 0;
virtual std::string queueRepack(const cta::SchedulerDatabase::QueueRepackRequest & repackRequest, log::LogContext & lc) = 0;
virtual std::list<common::dataStructures::RepackInfo> getRepackInfo() = 0;
virtual common::dataStructures::RepackInfo getRepackInfo(const std::string & vid) = 0;
virtual void cancelRepack(const std::string & vid, log::LogContext & lc) = 0;
......
......@@ -93,9 +93,10 @@ namespace cta { namespace xrd {
repackRequestItem->set_total_failed_files(repackRequest.failedFilesToRetrieve + repackRequest.failedFilesToArchive);
repackRequestItem->set_status(toString(repackRequest.status));
uint64_t repackTime = time(nullptr) - repackRequest.creationLog.time;
if(repackRequest.status == common::dataStructures::RepackInfo::Status::Complete || repackRequest.status == common::dataStructures::RepackInfo::Status::Failed){
repackRequestItem->set_repack_finished_time(repackRequest.repackFinishedTime.value());
repackTime = repackRequest.repackFinishedTime.value() - repackRequest.creationLog.time;
repackRequestItem->set_repack_finished_time(repackRequest.repackFinishedTime);
if(repackRequest.repackFinishedTime != 0){
//repackFinishedTime != 0: repack is finished
repackTime = repackRequest.repackFinishedTime - repackRequest.creationLog.time;
}
repackRequestItem->set_repack_time(repackTime);
repackRequestItem->mutable_creation_log()->set_username(repackRequest.creationLog.username);
......