Commit ba7cb07b authored by Steven Murray

Replaced application-level cursor over archive file listings with a database cursor

parent ae06de9d
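The commit message above is the whole story for callers: listing archive files still goes through a forward-only iterator, but the rows now stream straight from a database cursor instead of being prefetched in batches by the application. A minimal sketch of the caller-visible difference, assuming an existing cta::catalogue::Catalogue reference and the header path "catalogue/Catalogue.hpp" (both assumptions, not shown in this commit):

#include "catalogue/Catalogue.hpp"

// Sketch only: "catalogue" is assumed to be a fully constructed Catalogue.
void listAllArchiveFiles(cta::catalogue::Catalogue &catalogue) {
  // Before this commit (application-level cursor with a prefetch count):
  //   auto itor = catalogue.getArchiveFileItor(cta::catalogue::TapeFileSearchCriteria(), 1000);
  // After this commit (database cursor, no prefetch count to tune):
  auto itor = catalogue.getArchiveFiles();

  while(itor.hasMore()) {
    const auto archiveFile = itor.next(); // rows are merged into complete ArchiveFile objects
    // ... use archiveFile ...
  }
}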
/*
* The CERN Tape Archive (CTA) project
* Copyright (C) 2015 CERN
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "catalogue/ArchiveFileBuilder.hpp"
#include "common/exception/Exception.hpp"
namespace cta {
namespace catalogue {
//------------------------------------------------------------------------------
// append
//------------------------------------------------------------------------------
std::unique_ptr<common::dataStructures::ArchiveFile> ArchiveFileBuilder::append(
const common::dataStructures::ArchiveFile &tapeFile) {
// If there is currently no ArchiveFile object under construction
if(nullptr == m_archiveFile.get()) {
// If the tape file represents an ArchiveFile object with no tape files
if(tapeFile.tapeFiles.empty()) {
// Archive file is already complete
return std::unique_ptr<common::dataStructures::ArchiveFile>(new common::dataStructures::ArchiveFile(tapeFile));
}
// If the tape file exists then it must be alone
if(tapeFile.tapeFiles.size() != 1) {
exception::Exception ex;
ex.getMessage() << __FUNCTION__ << " failed: Expected exactly one tape file to be appended at a time: actual=" <<
tapeFile.tapeFiles.size();
throw ex;
}
// Start constructing one
m_archiveFile.reset(new common::dataStructures::ArchiveFile(tapeFile));
// There could be more tape files so return incomplete
return std::unique_ptr<common::dataStructures::ArchiveFile>();
}
// If the tape file represents an ArchiveFile object with no tape files
if(tapeFile.tapeFiles.empty()) {
// The ArchiveFile object under construction is complete,
// therefore return it and start the construction of the next
std::unique_ptr<common::dataStructures::ArchiveFile> tmp;
tmp = std::move(m_archiveFile);
m_archiveFile.reset(new common::dataStructures::ArchiveFile(tapeFile));
return tmp;
}
// If the tape file to be appended belongs to the ArchiveFile object
// currently under construction
if(tapeFile.archiveFileID == m_archiveFile->archiveFileID) {
// The tape file must exist and must be alone
if(tapeFile.tapeFiles.size() != 1) {
exception::Exception ex;
ex.getMessage() << __FUNCTION__ << " failed: Expected exactly one tape file to be appended at a time: actual=" <<
tapeFile.tapeFiles.size() << " archiveFileID=" << tapeFile.archiveFileID;
throw ex;
}
// Append the tape file
const auto tapeFileMapItor = tapeFile.tapeFiles.begin();
const auto copyNbOfTapeFileToAppend = tapeFileMapItor->first;
if(m_archiveFile->tapeFiles.find(copyNbOfTapeFileToAppend) != m_archiveFile->tapeFiles.end()) {
exception::Exception ex;
ex.getMessage() << __FUNCTION__ << " failed: Found two tape files for the same archive file with the same copy"
" numbers: archiveFileID=" << tapeFile.archiveFileID << " copyNb=" << copyNbOfTapeFileToAppend;
throw ex;
}
m_archiveFile->tapeFiles[copyNbOfTapeFileToAppend] = tapeFileMapItor->second;
// There could be more tape files so return incomplete
return std::unique_ptr<common::dataStructures::ArchiveFile>();
}
// Reaching this point means the tape file to be appended belongs to the next
// ArchiveFile to be constructed.
// ArchiveFile object under construction is complete,
// therefore return it and start the construction of the next
std::unique_ptr<common::dataStructures::ArchiveFile> tmp;
tmp = std::move(m_archiveFile);
m_archiveFile.reset(new common::dataStructures::ArchiveFile(tapeFile));
return tmp;
}
//------------------------------------------------------------------------------
// getArchiveFile
//------------------------------------------------------------------------------
common::dataStructures::ArchiveFile *ArchiveFileBuilder::getArchiveFile() {
return m_archiveFile.get();
}
//------------------------------------------------------------------------------
// clear
//------------------------------------------------------------------------------
void ArchiveFileBuilder::clear() {
m_archiveFile.reset();
}
} // namespace catalogue
} // namespace cta
/*
* The CERN Tape Archive (CTA) project
* Copyright (C) 2015 CERN
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#pragma once

#include "common/dataStructures/ArchiveFile.hpp"

#include <memory>

namespace cta {
namespace catalogue {

/**
 * Builds ArchiveFile objects from a stream of tape files ordered by archive ID
 * and then copy number.
 */
class ArchiveFileBuilder {
public:

  /**
   * Appends the specified tape file to the ArchiveFile object currently under
   * construction.
   *
   * If this append method is called with the tape file of the next ArchiveFile
   * to be constructed then this means the current ArchiveFile under
   * construction is complete and this method will therefore return the current
   * and complete ArchiveFile object. The appended tape file will be remembered
   * by this builder object and used to start the construction of the next
   * ArchiveFile object.
   *
   * If this append method is called with an ArchiveFile with no tape files at
   * all then this also means the current ArchiveFile under construction is
   * complete and this method will therefore return the current and complete
   * ArchiveFile object. The appended ArchiveFile object will be remembered by
   * this builder object and used to start the construction of the next
   * ArchiveFile object.
   *
   * If the call to this append method does not complete the ArchiveFile object
   * currently under construction then this method will return an empty unique
   * pointer.
   *
   * @param tapeFile The tape file to be appended or an archive file with no
   * tape files at all.
   */
  std::unique_ptr<common::dataStructures::ArchiveFile> append(const common::dataStructures::ArchiveFile &tapeFile);

  /**
   * Returns a pointer to the ArchiveFile object currently under construction.
   * A return value of nullptr means that there is no ArchiveFile object
   * currently under construction.
   *
   * @return The ArchiveFile object currently under construction or nullptr
   * if there isn't one.
   */
  common::dataStructures::ArchiveFile *getArchiveFile();

  /**
   * If there is an ArchiveFile under construction then it is forgotten.
   */
  void clear();

private:

  /**
   * The ArchiveFile object currently under construction.
   */
  std::unique_ptr<common::dataStructures::ArchiveFile> m_archiveFile;

}; // class ArchiveFileBuilder

} // namespace catalogue
} // namespace cta
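For illustration, a minimal sketch of how the builder is meant to be driven, assuming the rows arrive ordered by archive file ID and then copy number as the class comment requires; the helper name mergeRows and the choice of std::list are illustrative only:

#include "catalogue/ArchiveFileBuilder.hpp"

#include <list>

// Turns an ordered stream of rows (each an ArchiveFile carrying at most one
// tape file) into complete ArchiveFile objects.
std::list<cta::common::dataStructures::ArchiveFile> mergeRows(
  const std::list<cta::common::dataStructures::ArchiveFile> &rows) {
  cta::catalogue::ArchiveFileBuilder builder;
  std::list<cta::common::dataStructures::ArchiveFile> archiveFiles;

  for(const auto &row: rows) {
    // append() only returns a non-null pointer once the previously started
    // ArchiveFile can no longer grow, i.e. the stream has moved on to the
    // next archive file ID or to a row with no tape files at all.
    auto completedFile = builder.append(row);
    if(nullptr != completedFile.get()) {
      archiveFiles.push_back(*completedFile);
    }
  }

  // The last ArchiveFile under construction is never closed by a following
  // row, so it has to be collected explicitly.
  if(nullptr != builder.getArchiveFile()) {
    archiveFiles.push_back(*builder.getArchiveFile());
  }

  return archiveFiles;
}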
@@ -67,7 +67,7 @@ ArchiveFileItor &ArchiveFileItor::operator=(ArchiveFileItor &&rhs) {
 //------------------------------------------------------------------------------
 // hasMore
 //------------------------------------------------------------------------------
-bool ArchiveFileItor::hasMore() const {
+bool ArchiveFileItor::hasMore() {
   if(nullptr == m_impl) {
     throw exception::Exception(std::string(__FUNCTION__) + " failed: "
       "This iterator is invalid");
......
@@ -84,7 +84,7 @@ public:
   /**
    * Returns true if a call to next would return another archive file.
    */
-  bool hasMore() const;
+  bool hasMore();
   /**
    * Returns the next archive or throws an exception if there isn't one.
......
@@ -38,7 +38,7 @@ public:
   /**
    * Returns true if a call to next would return another archive file.
    */
-  virtual bool hasMore() const = 0;
+  virtual bool hasMore() = 0;
   /**
    * Returns the next archive or throws an exception if there isn't one.
......
@@ -34,6 +34,7 @@ endif(OCCI_SUPPORT)
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow")
 set (CATALOGUE_LIB_SRC_FILES
+  ArchiveFileBuilder.cpp
   ArchiveFileRow.cpp
   ArchiveFileItor.cpp
   ArchiveFileItorImpl.cpp
......
@@ -448,23 +448,14 @@ public:
   virtual void modifyMountPolicyComment(const common::dataStructures::SecurityIdentity &admin, const std::string &name, const std::string &comment) = 0;

   /**
-   * Returns an iterator over the list of archive files that meet the specified
-   * search criteria.
-   *
-   * Please note that the list is ordered by archive file ID.
-   *
-   * Please note that this method will throw an exception if the
-   * nbArchiveFilesToPrefetch parameter is set to 0. The parameter must be set
-   * to a value greater than or equal to 1.
+   * Returns the specified archive files. Please note that the list of files
+   * is ordered by archive file ID.
    *
    * @param searchCriteria The search criteria.
-   * @param nbArchiveFilesToPrefetch The number of archive files to prefetch.
-   * This parameter must be set to a value equal to or greater than 1.
-   * @return An iterator over the list of archive files.
+   * @return The archive files.
    */
-  virtual ArchiveFileItor getArchiveFileItor(
-    const TapeFileSearchCriteria &searchCriteria = TapeFileSearchCriteria(),
-    const uint64_t nbArchiveFilesToPrefetch = 1000) const = 0;
+  virtual ArchiveFileItor getArchiveFiles(
+    const TapeFileSearchCriteria &searchCriteria = TapeFileSearchCriteria()) const = 0;

   /**
    * Returns a summary of the tape files that meet the specified search
......
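The new getArchiveFiles() signature above drops the prefetch count entirely. A short usage sketch, assuming TapeFileSearchCriteria lives in "catalogue/TapeFileSearchCriteria.hpp" and that its members are optional-like fields that accept direct assignment (both assumptions; the field names themselves appear later in RdbmsArchiveFileItorImpl):

#include "catalogue/Catalogue.hpp"              // assumed header path
#include "catalogue/TapeFileSearchCriteria.hpp" // assumed header path

#include <cstdint>
#include <string>

// Sketch only: count the archive files that have a copy on the given tape.
uint64_t countFilesOnTape(cta::catalogue::Catalogue &catalogue, const std::string &vid) {
  cta::catalogue::TapeFileSearchCriteria criteria;
  criteria.vid = vid; // one of the supported criteria: archiveFileId, diskInstance, vid, tapePool, ...

  auto itor = catalogue.getArchiveFiles(criteria);
  uint64_t nbFiles = 0;
  while(itor.hasMore()) {
    itor.next();
    nbFiles++;
  }
  return nbFiles;
}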
@@ -29,15 +29,17 @@ namespace catalogue {
 //------------------------------------------------------------------------------
 // create
 //------------------------------------------------------------------------------
-std::unique_ptr<Catalogue> CatalogueFactory::create(const rdbms::Login &login, const uint64_t nbConns) {
+std::unique_ptr<Catalogue> CatalogueFactory::create(const rdbms::Login &login, const uint64_t nbConns,
+  const uint64_t nbArchiveFileListingConns) {
   try {
     switch(login.dbType) {
     case rdbms::Login::DBTYPE_IN_MEMORY:
-      return cta::make_unique<InMemoryCatalogue>(nbConns);
+      return cta::make_unique<InMemoryCatalogue>(nbConns, nbArchiveFileListingConns);
     case rdbms::Login::DBTYPE_ORACLE:
-      return cta::make_unique<OracleCatalogue>(login.username, login.password, login.database, nbConns);
+      return cta::make_unique<OracleCatalogue>(login.username, login.password, login.database, nbConns,
+        nbArchiveFileListingConns);
     case rdbms::Login::DBTYPE_SQLITE:
-      return cta::make_unique<SqliteCatalogue>(login.database, nbConns);
+      return cta::make_unique<SqliteCatalogue>(login.database, nbConns, nbArchiveFileListingConns);
     case rdbms::Login::DBTYPE_NONE:
       throw exception::Exception("Cannot create a catalogue without a database type");
     default:
......
@@ -43,11 +43,16 @@ public:
    *
    * @param login The database connection details.
    * @param nbConns The maximum number of concurrent connections to the
-   * underlying relational database.
+   * underlying relational database for all operations except listing archive
+   * files which can be relatively long operations.
+   * @param nbArchiveFileListingConns The maximum number of concurrent
+   * connections to the underlying relational database for the sole purpose of
+   * listing archive files.
    * @return The newly created CTA catalogue object. Please note that it is the
    * responsibility of the caller to delete the returned CTA catalogue object.
    */
-  static std::unique_ptr<Catalogue> create(const rdbms::Login &login, const uint64_t nbConns);
+  static std::unique_ptr<Catalogue> create(const rdbms::Login &login, const uint64_t nbConns,
+    const uint64_t nbArchiveFileListingConns = 5);

}; // class CatalogueFactory
......
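A sketch of creating a catalogue with the new connection split, reusing the four-argument rdbms::Login constructor that is visible in CatalogueFactory.cpp above; the header paths and the credential values are placeholders:

#include "catalogue/CatalogueFactory.hpp" // assumed header path
#include "rdbms/Login.hpp"                // assumed header path

#include <memory>

std::unique_ptr<cta::catalogue::Catalogue> makeOracleCatalogue() {
  // Placeholder credentials for illustration only.
  const cta::rdbms::Login login(cta::rdbms::Login::DBTYPE_ORACLE, "cta_user", "cta_password", "cta_db");

  // 10 general-purpose connections plus 2 reserved for (potentially long)
  // archive file listings; nbArchiveFileListingConns defaults to 5 if omitted.
  return cta::catalogue::CatalogueFactory::create(login, 10, 2);
}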
This diff is collapsed.
@@ -27,8 +27,8 @@ namespace catalogue {
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
-InMemoryCatalogue::InMemoryCatalogue(const uint64_t nbConns):
-  SchemaCreatingSqliteCatalogue("file::memory:?cache=shared", nbConns) {
+InMemoryCatalogue::InMemoryCatalogue(const uint64_t nbConns, const uint64_t nbArchiveFileListingConns):
+  SchemaCreatingSqliteCatalogue("file::memory:?cache=shared", nbConns, nbArchiveFileListingConns) {
 }

 //------------------------------------------------------------------------------
......
@@ -34,9 +34,14 @@ public:
   /**
    * Constructor.
    *
-   * @param nbConns The maximum number of concurrent connections to the underyling database.
+   * @param nbConns The maximum number of concurrent connections to the
+   * underlying relational database for all operations except listing archive
+   * files which can be relatively long operations.
+   * @param nbArchiveFileListingConns The maximum number of concurrent
+   * connections to the underlying relational database for the sole purpose of
+   * listing archive files.
    */
-  InMemoryCatalogue(const uint64_t nbConns);
+  InMemoryCatalogue(const uint64_t nbConns, const uint64_t nbArchiveFileListingConns);

   /**
    * Destructor.
......
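Code that constructs a catalogue directly rather than through the factory, for example a unit test using the in-memory catalogue, now has to pass both pool sizes; a minimal sketch with illustrative values:

#include "catalogue/InMemoryCatalogue.hpp" // assumed header path

void exerciseInMemoryCatalogue() {
  // 1 general-purpose connection and 1 connection reserved for listing archive files.
  cta::catalogue::InMemoryCatalogue catalogue(1, 1);
  // ... drive the catalogue as in a unit test ...
}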
@@ -39,10 +39,11 @@ OracleCatalogue::OracleCatalogue(
   const std::string &username,
   const std::string &password,
   const std::string &database,
-  const uint64_t nbConns):
+  const uint64_t nbConns,
+  const uint64_t nbArchiveFileListingConns):
   RdbmsCatalogue(
     rdbms::ConnFactoryFactory::create(rdbms::Login(rdbms::Login::DBTYPE_ORACLE, username, password, database)),
-    nbConns) {
+    nbConns, nbArchiveFileListingConns) {
 }
......
@@ -43,13 +43,18 @@ public:
    * @param password The database password.
    * @param database The database name.
    * @param nbConns The maximum number of concurrent connections to the
-   * underlying relational database.
+   * underlying relational database for all operations except listing archive
+   * files which can be relatively long operations.
+   * @param nbArchiveFileListingConns The maximum number of concurrent
+   * connections to the underlying relational database for the sole purpose of
+   * listing archive files.
    */
   OracleCatalogue(
     const std::string &username,
     const std::string &password,
     const std::string &database,
-    const uint64_t nbConns);
+    const uint64_t nbConns,
+    const uint64_t nbArchiveFileListingConns);

   /**
    * Destructor.
......
@@ -16,36 +16,204 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

+#include "catalogue/ArchiveFileItor.hpp"
 #include "catalogue/RdbmsArchiveFileItorImpl.hpp"
+#include "common/exception/Exception.hpp"

 namespace cta {
 namespace catalogue {

+namespace {
+
+/**
+ * Populates an ArchiveFile object with the current column values of the
+ * specified result set.
+ *
+ * @param rset The result set to be used to populate the ArchiveFile object.
+ * @return The populated ArchiveFile object.
+ */
+static common::dataStructures::ArchiveFile populateArchiveFile(const rdbms::Rset &rset) {
+  rset.columnUint64("ARCHIVE_FILE_ID");
+  if(!rset.columnIsNull("VID")) {
+    rset.columnUint64("COPY_NB");
+  }
+  common::dataStructures::ArchiveFile archiveFile;
+  archiveFile.archiveFileID = rset.columnUint64("ARCHIVE_FILE_ID");
+  archiveFile.diskInstance = rset.columnString("DISK_INSTANCE_NAME");
+  archiveFile.diskFileId = rset.columnString("DISK_FILE_ID");
+  archiveFile.diskFileInfo.path = rset.columnString("DISK_FILE_PATH");
+  archiveFile.diskFileInfo.owner = rset.columnString("DISK_FILE_USER");
+  archiveFile.diskFileInfo.group = rset.columnString("DISK_FILE_GROUP");
+  archiveFile.diskFileInfo.recoveryBlob = rset.columnString("DISK_FILE_RECOVERY_BLOB");
+  archiveFile.fileSize = rset.columnUint64("SIZE_IN_BYTES");
+  archiveFile.checksumType = rset.columnString("CHECKSUM_TYPE");
+  archiveFile.checksumValue = rset.columnString("CHECKSUM_VALUE");
+  archiveFile.storageClass = rset.columnString("STORAGE_CLASS_NAME");
+  archiveFile.creationTime = rset.columnUint64("ARCHIVE_FILE_CREATION_TIME");
+  archiveFile.reconciliationTime = rset.columnUint64("RECONCILIATION_TIME");
+
+  // If there is a tape file
+  if (!rset.columnIsNull("VID")) {
+    common::dataStructures::TapeFile tapeFile;
+    tapeFile.vid = rset.columnString("VID");
+    tapeFile.fSeq = rset.columnUint64("FSEQ");
+    tapeFile.blockId = rset.columnUint64("BLOCK_ID");
+    tapeFile.compressedSize = rset.columnUint64("COMPRESSED_SIZE_IN_BYTES");
+    tapeFile.copyNb = rset.columnUint64("COPY_NB");
+    tapeFile.creationTime = rset.columnUint64("TAPE_FILE_CREATION_TIME");
+    tapeFile.checksumType = archiveFile.checksumType; // Duplicated for convenience
+    tapeFile.checksumValue = archiveFile.checksumValue; // Duplicated for convenience
+
+    archiveFile.tapeFiles[rset.columnUint64("COPY_NB")] = tapeFile;
+  }
+
+  return archiveFile;
+}
+
+} // anonymous namespace
+
 //------------------------------------------------------------------------------
 // constructor
 //------------------------------------------------------------------------------
 RdbmsArchiveFileItorImpl::RdbmsArchiveFileItorImpl(
-  const RdbmsCatalogue &catalogue,
-  const uint64_t nbArchiveFilesToPrefetch,
+  rdbms::ConnPool &connPool,
   const TapeFileSearchCriteria &searchCriteria):
-  m_catalogue(catalogue),
-  m_nbArchiveFilesToPrefetch(nbArchiveFilesToPrefetch),
+  m_connPool(connPool),
   m_searchCriteria(searchCriteria),
-  m_nextArchiveFileId(1) {
+  m_rsetIsEmpty(true),
+  m_hasMoreHasBeenCalled(false) {
   try {
-    if(1 > m_nbArchiveFilesToPrefetch) {
-      exception::Exception ex;
-      ex.getMessage() << "nbArchiveFilesToPrefetch must equal to or greater than 1: actual=" <<
-        m_nbArchiveFilesToPrefetch;
-      throw ex;
-    }
+    std::string sql =
+      "SELECT "
+        "ARCHIVE_FILE.ARCHIVE_FILE_ID AS ARCHIVE_FILE_ID,"
+        "ARCHIVE_FILE.DISK_INSTANCE_NAME AS DISK_INSTANCE_NAME,"
+        "ARCHIVE_FILE.DISK_FILE_ID AS DISK_FILE_ID,"
+        "ARCHIVE_FILE.DISK_FILE_PATH AS DISK_FILE_PATH,"
+        "ARCHIVE_FILE.DISK_FILE_USER AS DISK_FILE_USER,"
+        "ARCHIVE_FILE.DISK_FILE_GROUP AS DISK_FILE_GROUP,"
+        "ARCHIVE_FILE.DISK_FILE_RECOVERY_BLOB AS DISK_FILE_RECOVERY_BLOB,"
+        "ARCHIVE_FILE.SIZE_IN_BYTES AS SIZE_IN_BYTES,"
+        "ARCHIVE_FILE.CHECKSUM_TYPE AS CHECKSUM_TYPE,"
+        "ARCHIVE_FILE.CHECKSUM_VALUE AS CHECKSUM_VALUE,"
+        "ARCHIVE_FILE.STORAGE_CLASS_NAME AS STORAGE_CLASS_NAME,"
+        "ARCHIVE_FILE.CREATION_TIME AS ARCHIVE_FILE_CREATION_TIME,"
+        "ARCHIVE_FILE.RECONCILIATION_TIME AS RECONCILIATION_TIME,"
+        "TAPE_FILE.VID AS VID,"
+        "TAPE_FILE.FSEQ AS FSEQ,"
+        "TAPE_FILE.BLOCK_ID AS BLOCK_ID,"
+        "TAPE_FILE.COMPRESSED_SIZE_IN_BYTES AS COMPRESSED_SIZE_IN_BYTES,"
+        "TAPE_FILE.COPY_NB AS COPY_NB,"
+        "TAPE_FILE.CREATION_TIME AS TAPE_FILE_CREATION_TIME, "
+        "TAPE.TAPE_POOL_NAME AS TAPE_POOL_NAME "
+      "FROM "
+        "ARCHIVE_FILE "
+      "LEFT OUTER JOIN TAPE_FILE ON "
+        "ARCHIVE_FILE.ARCHIVE_FILE_ID = TAPE_FILE.ARCHIVE_FILE_ID "
+      "LEFT OUTER JOIN TAPE ON "
+        "TAPE_FILE.VID = TAPE.VID";
+    if(
+      searchCriteria.archiveFileId ||
+      searchCriteria.diskInstance ||
+      searchCriteria.diskFileId ||
+      searchCriteria.diskFilePath ||
+      searchCriteria.diskFileUser ||
+      searchCriteria.diskFileGroup ||
+      searchCriteria.storageClass ||
+      searchCriteria.vid ||
+      searchCriteria.tapeFileCopyNb ||
+      searchCriteria.tapePool) {
+      sql += " WHERE ";
+    }
+    bool addedAWhereConstraint = false;
+    if(searchCriteria.archiveFileId) {
+      sql += " ARCHIVE_FILE.ARCHIVE_FILE_ID = :ARCHIVE_FILE_ID";
+      addedAWhereConstraint = true;
+    }
+    if(searchCriteria.diskInstance) {
+      if(addedAWhereConstraint) sql += " AND ";
+      sql += "ARCHIVE_FILE.DISK_INSTANCE_NAME = :DISK_INSTANCE_NAME";
+      addedAWhereConstraint = true;
+    }
+    if(searchCriteria.diskFileId) {
+      if(addedAWhereConstraint) sql += " AND ";
+      sql += "ARCHIVE_FILE.DISK_FILE_ID = :DISK_FILE_ID";
+      addedAWhereConstraint = true;
+    }
+    if(searchCriteria.diskFilePath) {
+      if(addedAWhereConstraint) sql += " AND ";
+      sql += "ARCHIVE_FILE.DISK_FILE_PATH = :DISK_FILE_PATH";
+      addedAWhereConstraint = true;
+    }
+    if(searchCriteria.diskFileUser) {
+      if(addedAWhereConstraint) sql += " AND ";
+      sql += "ARCHIVE_FILE.DISK_FILE_USER = :DISK_FILE_USER";
+      addedAWhereConstraint = true;
+    }
+    if(searchCriteria.diskFileGroup) {
+      if(addedAWhereConstraint) sql += " AND ";
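The hunk above is cut off while the WHERE clause is still being assembled, but the pattern is already clear: every criterion that is present appends one named bind constraint, and " AND " is inserted only once a first constraint exists. A self-contained illustration of that pattern, using plain bools in place of CTA's optional-style criteria fields (names and values invented for the example):

#include <iostream>
#include <string>

struct Criteria {
  bool byVid = false;
  bool byTapePool = false;
};

// Builds the optional WHERE clause the same way the constructor above does:
// emit " WHERE " only if at least one criterion is set, then join the
// individual constraints with " AND ".
std::string buildWhereClause(const Criteria &criteria) {
  std::string sql;
  if(criteria.byVid || criteria.byTapePool) {
    sql += " WHERE ";
  }
  bool addedAWhereConstraint = false;
  if(criteria.byVid) {
    sql += "TAPE_FILE.VID = :VID";
    addedAWhereConstraint = true;
  }
  if(criteria.byTapePool) {
    if(addedAWhereConstraint) sql += " AND ";
    sql += "TAPE.TAPE_POOL_NAME = :TAPE_POOL_NAME";
    addedAWhereConstraint = true;
  }
  return sql;
}

int main() {
  Criteria criteria;
  criteria.byVid = true;
  criteria.byTapePool = true;
  // Prints: SELECT ... WHERE TAPE_FILE.VID = :VID AND TAPE.TAPE_POOL_NAME = :TAPE_POOL_NAME
  std::cout << "SELECT ..." << buildWhereClause(criteria) << std::endl;
  return 0;
}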