Commit 250f6709 authored by Michael Davis

[catalogue] Removes compressedSize

* TapeFile::compressedSize is renamed to fileSize
* TapeFileWritten::compressedSize is removed, as it contains the same
  information as TapeFileWritten::size
parent bfbd36d1
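
For orientation before the diff: a minimal sketch of the two structures this commit touches, as they look after the change. Field lists are abbreviated, default member initialisers stand in for the real constructors, and the integer widths are illustrative, so treat it as a reading aid rather than the actual headers.

// Sketch only: abbreviated members, illustrative widths, not the real headers.
#include <cstdint>
#include <string>

namespace cta { namespace common { namespace dataStructures {
struct TapeFile {
  std::string vid;
  uint64_t fSeq = 0;
  uint64_t blockId = 0;
  uint64_t fileSize = 0;     // renamed from compressedSize; logical size in bytes
  uint64_t copyNb = 0;
  uint64_t creationTime = 0;
  // checksumType, checksumValue, ... omitted
};
}}} // namespace cta::common::dataStructures

namespace cta { namespace catalogue {
struct TapeFileWritten {     // derives from TapeItemWritten in the real code
  uint64_t archiveFileId = 0;
  uint64_t size = 0;         // compressedSize is gone: it carried the same information
  uint64_t blockId = 0;
  uint64_t copyNb = 0;
  // vid, fSeq, checksum fields, storageClassName, tapeDrive, ... omitted
};
}} // namespace cta::catalogue

int main() {
  cta::common::dataStructures::TapeFile tf;
  tf.fileSize = 1234;        // callers that previously set compressedSize now set fileSize
  cta::catalogue::TapeFileWritten event;
  event.size = tf.fileSize;  // the single size value the catalogue persists
  return event.size == tf.fileSize ? 0 : 1;
}

In the hunks shown here the database column stays LOGICAL_SIZE_IN_BYTES throughout; only the C++ member names change.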
......@@ -276,7 +276,7 @@ void MysqlCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer> &
try {
// If this is a file (as opposed to a placeholder), do the full processing.
const auto &fileEvent=dynamic_cast<const TapeFileWritten &>(event);
totalCompressedBytesWritten += fileEvent.compressedSize;
totalCompressedBytesWritten += fileEvent.size;
} catch (std::bad_cast&) {}
}
......@@ -369,7 +369,7 @@ void MysqlCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
tapeFile.vid = event.vid;
tapeFile.fSeq = event.fSeq;
tapeFile.blockId = event.blockId;
tapeFile.compressedSize = event.compressedSize;
tapeFile.fileSize = event.size;
tapeFile.copyNb = event.copyNb;
tapeFile.creationTime = now;
insertTapeFile(conn, tapeFile, event.archiveFileId);
......@@ -454,7 +454,7 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
tapeFile.vid = selectRset.columnString("VID");
tapeFile.fSeq = selectRset.columnUint64("FSEQ");
tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
if (!selectRset.columnIsNull("SSBY_VID")) {
......@@ -500,7 +500,7 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -568,7 +568,7 @@ void MysqlCatalogue::deleteArchiveFile(const std::string &diskInstanceName, cons
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -48,7 +48,7 @@ namespace {
rdbms::wrapper::OcciColumn vid;
rdbms::wrapper::OcciColumn fSeq;
rdbms::wrapper::OcciColumn blockId;
rdbms::wrapper::OcciColumn compressedSize;
rdbms::wrapper::OcciColumn fileSize;
rdbms::wrapper::OcciColumn copyNb;
rdbms::wrapper::OcciColumn creationTime;
rdbms::wrapper::OcciColumn archiveFileId;
......@@ -63,7 +63,7 @@ namespace {
vid("VID", nbRows),
fSeq("FSEQ", nbRows),
blockId("BLOCK_ID", nbRows),
compressedSize("LOGICAL_SIZE_IN_BYTES", nbRows),
fileSize("LOGICAL_SIZE_IN_BYTES", nbRows),
copyNb("COPY_NB", nbRows),
creationTime("CREATION_TIME", nbRows),
archiveFileId("ARCHIVE_FILE_ID", nbRows) {
......@@ -354,14 +354,14 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
checkTapeFileWrittenFieldsAreSet(__FUNCTION__, fileEvent);
totalCompressedBytesWritten += fileEvent.compressedSize;
totalCompressedBytesWritten += fileEvent.size;
// Store the length of each field and implicitly calculate the maximum field
// length of each column
tapeFileBatch.vid.setFieldLenToValueLen(i, fileEvent.vid);
tapeFileBatch.fSeq.setFieldLenToValueLen(i, fileEvent.fSeq);
tapeFileBatch.blockId.setFieldLenToValueLen(i, fileEvent.blockId);
tapeFileBatch.compressedSize.setFieldLenToValueLen(i, fileEvent.compressedSize);
tapeFileBatch.fileSize.setFieldLenToValueLen(i, fileEvent.size);
tapeFileBatch.copyNb.setFieldLenToValueLen(i, fileEvent.copyNb);
tapeFileBatch.creationTime.setFieldLenToValueLen(i, now);
tapeFileBatch.archiveFileId.setFieldLenToValueLen(i, fileEvent.archiveFileId);
......@@ -435,7 +435,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
tapeFileBatch.vid.setFieldValue(i, event.vid);
tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
tapeFileBatch.blockId.setFieldValue(i, event.blockId);
tapeFileBatch.compressedSize.setFieldValue(i, event.compressedSize);
tapeFileBatch.fileSize.setFieldValue(i, event.size);
tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
tapeFileBatch.creationTime.setFieldValue(i, now);
tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
......@@ -481,7 +481,7 @@ void OracleCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
occiStmt.setColumn(tapeFileBatch.vid);
occiStmt.setColumn(tapeFileBatch.fSeq);
occiStmt.setColumn(tapeFileBatch.blockId);
occiStmt.setColumn(tapeFileBatch.compressedSize);
occiStmt.setColumn(tapeFileBatch.fileSize);
occiStmt.setColumn(tapeFileBatch.copyNb);
occiStmt.setColumn(tapeFileBatch.creationTime);
occiStmt.setColumn(tapeFileBatch.archiveFileId);
......@@ -842,7 +842,7 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
tapeFile.vid = selectRset.columnString("VID");
tapeFile.fSeq = selectRset.columnUint64("FSEQ");
tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -888,7 +888,7 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -953,7 +953,7 @@ void OracleCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->compressedSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -47,7 +47,7 @@ namespace {
rdbms::wrapper::PostgresColumn vid;
rdbms::wrapper::PostgresColumn fSeq;
rdbms::wrapper::PostgresColumn blockId;
rdbms::wrapper::PostgresColumn compressedSize;
rdbms::wrapper::PostgresColumn fileSize;
rdbms::wrapper::PostgresColumn copyNb;
rdbms::wrapper::PostgresColumn creationTime;
rdbms::wrapper::PostgresColumn archiveFileId;
......@@ -62,7 +62,7 @@ namespace {
vid("VID", nbRows),
fSeq("FSEQ", nbRows),
blockId("BLOCK_ID", nbRows),
compressedSize("LOGICAL_SIZE_IN_BYTES", nbRows),
fileSize("LOGICAL_SIZE_IN_BYTES", nbRows),
copyNb("COPY_NB", nbRows),
creationTime("CREATION_TIME", nbRows),
archiveFileId("ARCHIVE_FILE_ID", nbRows) {
......@@ -347,7 +347,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
checkTapeFileWrittenFieldsAreSet(__FUNCTION__, fileEvent);
totalCompressedBytesWritten += fileEvent.compressedSize;
totalCompressedBytesWritten += fileEvent.size;
fileEvents.insert(fileEvent);
} catch (std::bad_cast&) {}
......@@ -422,7 +422,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
tapeFileBatch.vid.setFieldValue(i, event.vid);
tapeFileBatch.fSeq.setFieldValue(i, event.fSeq);
tapeFileBatch.blockId.setFieldValue(i, event.blockId);
tapeFileBatch.compressedSize.setFieldValue(i, event.compressedSize);
tapeFileBatch.fileSize.setFieldValue(i, event.size);
tapeFileBatch.copyNb.setFieldValue(i, event.copyNb);
tapeFileBatch.creationTime.setFieldValue(i, now);
tapeFileBatch.archiveFileId.setFieldValue(i, event.archiveFileId);
......@@ -475,7 +475,7 @@ void PostgresCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer
postgresStmt.setColumn(tapeFileBatch.vid);
postgresStmt.setColumn(tapeFileBatch.fSeq);
postgresStmt.setColumn(tapeFileBatch.blockId);
postgresStmt.setColumn(tapeFileBatch.compressedSize);
postgresStmt.setColumn(tapeFileBatch.fileSize);
postgresStmt.setColumn(tapeFileBatch.copyNb);
postgresStmt.setColumn(tapeFileBatch.creationTime);
postgresStmt.setColumn(tapeFileBatch.archiveFileId);
......@@ -773,7 +773,7 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
tapeFile.vid = selectRset.columnString("VID");
tapeFile.fSeq = selectRset.columnUint64("FSEQ");
tapeFile.blockId = selectRset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = selectRset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = selectRset.columnUint64("COPY_NB");
tapeFile.creationTime = selectRset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -819,7 +819,7 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -886,7 +886,7 @@ void PostgresCatalogue::deleteArchiveFile(const std::string &diskInstanceName, c
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -4442,7 +4442,7 @@ std::list<common::dataStructures::ArchiveFile> RdbmsCatalogue::getFilesForRepack
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
......@@ -5390,7 +5390,7 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -5483,7 +5483,7 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -5629,7 +5629,7 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -5726,7 +5726,7 @@ std::unique_ptr<common::dataStructures::ArchiveFile> RdbmsCatalogue::getArchiveF
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile->checksumType; // Duplicated for convenience
......@@ -5824,7 +5824,7 @@ void RdbmsCatalogue::checkTapeFileWrittenFieldsAreSet(const std::string &calling
if(event.vid.empty()) throw exception::Exception("vid is an empty string");
if(0 == event.fSeq) throw exception::Exception("fSeq is 0");
if(0 == event.blockId && event.fSeq != 1) throw exception::Exception("blockId is 0 and fSeq is not 1");
if(0 == event.compressedSize) throw exception::Exception("compressedSize is 0");
if(0 == event.size) throw exception::Exception("size is 0");
if(0 == event.copyNb) throw exception::Exception("copyNb is 0");
if(event.tapeDrive.empty()) throw exception::Exception("tapeDrive is an empty string");
} catch (exception::Exception &ex) {
......@@ -59,7 +59,7 @@ namespace {
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
......@@ -59,7 +59,7 @@ namespace {
tapeFile.vid = rset.columnString("VID");
tapeFile.fSeq = rset.columnUint64("FSEQ");
tapeFile.blockId = rset.columnUint64("BLOCK_ID");
tapeFile.compressedSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.fileSize = rset.columnUint64("LOGICAL_SIZE_IN_BYTES");
tapeFile.copyNb = rset.columnUint64("COPY_NB");
tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
......@@ -101,7 +101,7 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -170,7 +170,7 @@ void SqliteCatalogue::deleteArchiveFile(const std::string &diskInstanceName, con
<< " fSeq: " << it->fSeq
<< " blockId: " << it->blockId
<< " creationTime: " << it->creationTime
<< " compressedSize: " << it->compressedSize
<< " fileSize: " << it->fileSize
<< " checksumType: " << it->checksumType //this shouldn't be here: repeated field
<< " checksumValue: " << it->checksumValue //this shouldn't be here: repeated field
<< " copyNb: " << it->copyNb //this shouldn't be here: repeated field
......@@ -387,7 +387,7 @@ void SqliteCatalogue::filesWrittenToTape(const std::set<TapeItemWrittenPointer>
try {
// If this is a file (as opposed to a placeholder), do the full processing.
const auto &fileEvent=dynamic_cast<const TapeFileWritten &>(event);
totalCompressedBytesWritten += fileEvent.compressedSize;
totalCompressedBytesWritten += fileEvent.size;
} catch (std::bad_cast&) {}
}
......@@ -479,7 +479,7 @@ void SqliteCatalogue::fileWrittenToTape(rdbms::Conn &conn, const TapeFileWritten
tapeFile.vid = event.vid;
tapeFile.fSeq = event.fSeq;
tapeFile.blockId = event.blockId;
tapeFile.compressedSize = event.compressedSize;
tapeFile.fileSize = event.size;
tapeFile.copyNb = event.copyNb;
tapeFile.creationTime = now;
insertTapeFile(conn, tapeFile, event.archiveFileId);
......@@ -28,7 +28,6 @@ TapeFileWritten::TapeFileWritten() :
archiveFileId(0),
size(0),
blockId(0),
compressedSize(0),
copyNb(0) {
}
......@@ -49,7 +48,6 @@ bool TapeFileWritten::operator==(const TapeFileWritten &rhs) const {
checksumValue == rhs.checksumValue &&
storageClassName == rhs.storageClassName &&
blockId == rhs.blockId &&
compressedSize == rhs.compressedSize &&
copyNb == rhs.copyNb &&
tapeDrive == rhs.tapeDrive;
}
......@@ -72,7 +70,6 @@ std::ostream &operator<<(std::ostream &os, const TapeFileWritten &obj) {
"vid=" << obj.vid << ","
"fSeq=" << obj.fSeq << ","
"blockId=" << obj.blockId << ","
"compressedSize=" << obj.compressedSize << ","
"copyNb=" << obj.copyNb << ","
"tapeDrive=" << obj.tapeDrive <<
"}";
......@@ -104,12 +104,6 @@ struct TapeFileWritten: public TapeItemWritten {
*/
uint64_t blockId;
/**
* The compressed size of the tape file in bytes. In other words the actual
* number of bytes it occupies on tape.
*/
uint64_t compressedSize;
/**
* The copy number of the tape file.
*/
......@@ -54,7 +54,7 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
tapeFile1.vid = "VID1";
tapeFile1.fSeq = 5678;
tapeFile1.blockId = 9012;
tapeFile1.compressedSize = 5;
tapeFile1.fileSize = 5;
tapeFile1.copyNb = 1;
archiveFile1.tapeFiles.push_back(tapeFile1);
......@@ -64,7 +64,7 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
tapeFile2.vid = "VID2";
tapeFile2.fSeq = 3456;
tapeFile2.blockId = 7890;
tapeFile2.compressedSize = 6;
tapeFile2.fileSize = 6;
tapeFile2.copyNb = 2;
archiveFile1.tapeFiles.push_back(tapeFile2);
......@@ -95,7 +95,6 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
ASSERT_EQ(tapeFile1.vid, copyNbToTapeFileItor->vid);
ASSERT_EQ(tapeFile1.fSeq, copyNbToTapeFileItor->fSeq);
ASSERT_EQ(tapeFile1.blockId, copyNbToTapeFileItor->blockId);
ASSERT_EQ(tapeFile1.compressedSize, copyNbToTapeFileItor->compressedSize);
ASSERT_EQ(tapeFile1.copyNb, copyNbToTapeFileItor->copyNb);
}
......@@ -106,7 +105,6 @@ TEST_F(cta_common_dataStructures_ArchiveFileTest, copy_constructor) {
ASSERT_EQ(tapeFile2.vid, copyNbToTapeFileItor->vid);
ASSERT_EQ(tapeFile2.fSeq, copyNbToTapeFileItor->fSeq);
ASSERT_EQ(tapeFile2.blockId, copyNbToTapeFileItor->blockId);
ASSERT_EQ(tapeFile2.compressedSize, copyNbToTapeFileItor->compressedSize);
ASSERT_EQ(tapeFile2.copyNb, copyNbToTapeFileItor->copyNb);
}
}
......@@ -30,7 +30,7 @@ namespace dataStructures {
TapeFile::TapeFile():
fSeq(0),
blockId(0),
compressedSize(0),
fileSize(0),
copyNb(0),
creationTime(0) {}
......@@ -41,7 +41,7 @@ bool TapeFile::operator==(const TapeFile &rhs) const {
return vid==rhs.vid
&& fSeq==rhs.fSeq
&& blockId==rhs.blockId
&& compressedSize==rhs.compressedSize
&& fileSize==rhs.fileSize
&& copyNb==rhs.copyNb
&& creationTime==rhs.creationTime;
}
......@@ -67,7 +67,7 @@ std::ostream &operator<<(std::ostream &os, const TapeFile &obj) {
os << "(vid=" << obj.vid
<< " fSeq=" << obj.fSeq
<< " blockId=" << obj.blockId
<< " compressedSize=" << obj.compressedSize
<< " fileSize=" << obj.fileSize
<< " copyNb=" << obj.copyNb
<< " creationTime=" << obj.creationTime << ")";
return os;
......@@ -56,10 +56,10 @@ struct TapeFile {
// TODO: change denomination to match SCSI nomenclature (logical object identifier).
uint64_t blockId;
/**
* The compressed size of the tape file in bytes. In other words the
* actual number of bytes it occupies on tape.
* The uncompressed (logical) size of the tape file in bytes. This field is redundant as it already exists in the
* ArchiveFile class, so it may be removed in future.
*/
uint64_t compressedSize;
uint64_t fileSize;
/**
* The copy number of the file. Copy numbers start from 1. Copy number 0
* is an invalid copy number.
......@@ -61,8 +61,7 @@ void fillRetrieveRequests(
{
cta::common::dataStructures::TapeFile tf;
tf.blockId = 0;
tf.compressedSize = 1;
tf.compressedSize = 1;
tf.fileSize = 1;
tf.copyNb = 1;
tf.creationTime = time(nullptr);
tf.fSeq = i;
......@@ -564,8 +564,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
{
cta::common::dataStructures::TapeFile tf;
tf.blockId=0;
tf.compressedSize=1;
tf.compressedSize=1;
tf.fileSize=1;
tf.copyNb=1;
tf.creationTime=time(nullptr);
tf.fSeq=pass;
......@@ -575,8 +574,7 @@ TEST(ObjectStore, GarbageCollectorRetrieveRequest) {
{
cta::common::dataStructures::TapeFile tf;
tf.blockId=0;
tf.compressedSize=1;
tf.compressedSize=1;
tf.fileSize=1;
tf.copyNb=2;
tf.creationTime=time(nullptr);
tf.fSeq=pass;
......@@ -42,7 +42,7 @@ public:
ostf.set_vid(vid);
ostf.set_fseq(fSeq);
ostf.set_blockid(blockId);
ostf.set_compressedsize(compressedSize);
ostf.set_filesize(fileSize);
ostf.set_copynb(copyNb);
ostf.set_creationtime(creationTime);
ostf.set_checksumtype(checksumType);
......@@ -53,7 +53,7 @@ public:
vid=ostf.vid();
fSeq=ostf.fseq();
blockId=ostf.blockid();
compressedSize=ostf.compressedsize();
fileSize=ostf.filesize();
copyNb=ostf.copynb();
creationTime=ostf.creationtime();
checksumType=ostf.checksumtype();
......@@ -176,7 +176,7 @@ message TapeFile {
required string vid = 9120;
required uint64 fseq = 9121;
required uint64 blockid = 9122;
required uint64 compressedsize = 9123;
required uint64 filesize = 9123;
required uint32 copynb = 9124;
required uint64 creationtime = 9125;
required string checksumtype = 9126;
......@@ -60,7 +60,6 @@ cta::catalogue::TapeItemWrittenPointer cta::ArchiveJob::validateAndGetTapeFileWr
fileReport.blockId = tapeFile.blockId;
fileReport.checksumType = tapeFile.checksumType;
fileReport.checksumValue = tapeFile.checksumValue;
fileReport.compressedSize = tapeFile.compressedSize;
fileReport.copyNb = tapeFile.copyNb;
fileReport.diskFileId = archiveFile.diskFileId;
fileReport.diskFileUser = archiveFile.diskFileInfo.owner;
......@@ -1101,7 +1101,7 @@ void OStoreDB::setRetrieveJobBatchReportedToUser(std::list<cta::SchedulerDatabas
"In OStoreDB::setRetrieveJobBatchReported(): tape copy not found"
);
insertedElements.emplace_back(CaRQF::InsertedElement{
&j.job->m_retrieveRequest, tf_it->copyNb, tf_it->fSeq, tf_it->compressedSize,
&j.job->m_retrieveRequest, tf_it->copyNb, tf_it->fSeq, j.job->archiveFile.fileSize,
common::dataStructures::MountPolicy(), serializers::RetrieveJobStatus::RJS_Failed,
j.job->m_activityDescription
});
......@@ -480,7 +480,7 @@ TEST_P(SchedulerTest, archive_report_and_retrieve_new_file) {
archiveJob->tapeFile.fSeq = 1;
archiveJob->tapeFile.checksumType = "ADLER32";
archiveJob->tapeFile.checksumValue = "1234abcd";
archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.copyNb = 1;
archiveJob->validate();
std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
......@@ -680,7 +680,7 @@ TEST_P(SchedulerTest, archive_and_retrieve_failure) {
archiveJob->tapeFile.fSeq = 1;
archiveJob->tapeFile.checksumType = "ADLER32";
archiveJob->tapeFile.checksumValue = "1234abcd";
archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.copyNb = 1;
archiveJob->validate();
std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
......@@ -931,7 +931,7 @@ TEST_P(SchedulerTest, archive_and_retrieve_report_failure) {
archiveJob->tapeFile.fSeq = 1;
archiveJob->tapeFile.checksumType = "ADLER32";
archiveJob->tapeFile.checksumValue = "1234abcd";
archiveJob->tapeFile.compressedSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.fileSize = archiveJob->archiveFile.fileSize;
archiveJob->tapeFile.copyNb = 1;
archiveJob->validate();
std::queue<std::unique_ptr <cta::ArchiveJob >> sDBarchiveJobBatch;
......@@ -1435,7 +1435,6 @@ TEST_P(SchedulerTest, expandRepackRequest) {
const std::string tapeDrive = "tape_drive";
const uint64_t nbArchiveFilesPerTape = 10;
const uint64_t archiveFileSize = 2 * 1000 * 1000 * 1000;
const uint64_t compressedFileSize = archiveFileSize;
//Simulate the writing of 10 files per tape in the catalogue
std::set<catalogue::TapeItemWrittenPointer> tapeFilesWrittenCopy1;
......@@ -1463,7 +1462,6 @@ TEST_P(SchedulerTest, expandRepackRequest) {
fileWritten.vid = currentVid;
fileWritten.fSeq = j;
fileWritten.blockId = j * 100;
fileWritten.compressedSize = compressedFileSize;
fileWritten.copyNb = 1;
fileWritten.tapeDrive = tapeDrive;
tapeFilesWrittenCopy1.emplace(fileWrittenUP.release());
......@@ -1512,7 +1510,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
//Test that the informations are correct for each file
//ASSERT_EQ(retrieveJob.request.tapePool,s_tapePoolName);
ASSERT_EQ(retrieveJob.request.archiveFileID,archiveFileId++);
ASSERT_EQ(retrieveJob.fileSize,compressedFileSize);
ASSERT_EQ(retrieveJob.fileSize,archiveFileSize);
std::stringstream ss;
ss<<"file://"<<tempDirectory.path()<<"/"<<allVid.at(i-1)<<"/"<<std::setw(9)<<std::setfill('0')<<j;
ASSERT_EQ(retrieveJob.request.dstURL, ss.str());
......@@ -1520,7 +1518,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.checksumType,checksumType);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.checksumValue,checksumValue);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.blockId,j*100);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.compressedSize,compressedFileSize);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.fileSize,archiveFileSize);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.fSeq,j);
ASSERT_EQ(retrieveJob.tapeCopies[vid].second.vid,vid);
++j;
......@@ -1610,7 +1608,7 @@ TEST_P(SchedulerTest, expandRepackRequest) {
ASSERT_EQ(tapeFile.fSeq,j);
ASSERT_EQ(tapeFile.checksumType, checksumType);
ASSERT_EQ(tapeFile.checksumValue,checksumValue);
ASSERT_EQ(tapeFile.compressedSize, compressedFileSize);
ASSERT_EQ(tapeFile.fileSize, archiveFileSize);
//Testing scheduler retrieve request
ASSERT_EQ(schedulerRetrieveRequest.archiveFileID,archiveFileId++);