dCache / cta · Commits · a00bd53c

Commit a00bd53c authored May 11, 2016 by Steven Murray

SqliteCatalogue unit-tests now test LEFT OUTER JOIN of getArchiveFiles()

parent cd4d1c44

Changes 7
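The commit title refers to the SQL behind getArchiveFiles(): with a LEFT OUTER JOIN, an archive file that has no tape copies yet still produces a result row, with the tape-file columns set to NULL. The fragment below is only an illustrative sketch of that query shape; the table and column names (ARCHIVE_FILE, TAPE_FILE, ARCHIVE_FILE_ID, VID, FSEQ, COPY_NB) are assumptions made for this example and are not taken from the collapsed SqliteCatalogue.cpp diff.

// Illustrative only -- not code from this commit; names are assumed.
const char getArchiveFilesSql[] =
  "SELECT ARCHIVE_FILE.ARCHIVE_FILE_ID, TAPE_FILE.VID, TAPE_FILE.FSEQ, TAPE_FILE.COPY_NB "
  "FROM ARCHIVE_FILE "
  "LEFT OUTER JOIN TAPE_FILE "
  "ON ARCHIVE_FILE.ARCHIVE_FILE_ID = TAPE_FILE.ARCHIVE_FILE_ID;";
// Archive files without tape copies keep their row; their TAPE_FILE columns
// come back as SQLITE_NULL, which is what the SqliteRset changes below handle.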
catalogue/SqliteCatalogue.cpp (View file @ a00bd53c)

This diff is collapsed.
catalogue/SqliteCatalogue.hpp (View file @ a00bd53c)

...
@@ -268,7 +268,8 @@ protected:
  uint64_t getExpectedNbArchiveRoutes(const std::string &storageClass) const;

  /**
   * Creates the specified archive file without any tape copies.
   * Creates the specified archive file without any tape copies. Any tape
   * copies in the specified archive file will simply be ignored.
   *
   * @param archiveFile The archive file to be created.
   */

...
catalogue/SqliteCatalogueTest.cpp (View file @ a00bd53c)

...
@@ -855,20 +855,20 @@ TEST_F(cta_catalogue_SqliteCatalogueTest, createArchiveFile) {
  files = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
  ASSERT_EQ(1, files.size());

  const common::dataStructures::ArchiveFile frontFile = files.front();
  ASSERT_EQ(file.archiveFileID, frontFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, frontFile.diskFileID);
  ASSERT_EQ(file.fileSize, frontFile.fileSize);
  ASSERT_EQ(file.checksumType, frontFile.checksumType);
  ASSERT_EQ(file.checksumValue, frontFile.checksumValue);
  ASSERT_EQ(file.storageClass, frontFile.storageClass);
  ASSERT_EQ(file.diskInstance, frontFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, frontFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, frontFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, frontFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, frontFile.drData.drBlob);

  const common::dataStructures::ArchiveFile archiveFile = files.front();
  ASSERT_EQ(file.archiveFileID, archiveFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, archiveFile.diskFileID);
  ASSERT_EQ(file.fileSize, archiveFile.fileSize);
  ASSERT_EQ(file.checksumType, archiveFile.checksumType);
  ASSERT_EQ(file.checksumValue, archiveFile.checksumValue);
  ASSERT_EQ(file.storageClass, archiveFile.storageClass);
  ASSERT_EQ(file.diskInstance, archiveFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, archiveFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, archiveFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, archiveFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, archiveFile.drData.drBlob);
}

TEST_F(cta_catalogue_SqliteCatalogueTest, createArchiveFile_same_twice) {

...
@@ -1011,7 +1011,7 @@ TEST_F(cta_catalogue_SqliteCatalogueTest, prepareForNewFile) {
  ASSERT_EQ(maxDrivesAllowed, queueCriteria.mountPolicy.maxDrivesAllowed);
}

TEST_F(cta_catalogue_SqliteCatalogueTest, createTapeFile) {
TEST_F(cta_catalogue_SqliteCatalogueTest, createTapeFile_2_files) {
  using namespace cta;

  catalogue::TestingSqliteCatalogue catalogue;

...
@@ -1023,62 +1023,161 @@ TEST_F(cta_catalogue_SqliteCatalogueTest, createTapeFile) {
  catalogue.createStorageClass(m_cliSI, storageClassName, nbCopies, "create storage class");

  common::dataStructures::ArchiveFile file;
  file.archiveFileID = 1234;
  file.diskFileID = "EOS_file_ID";
  file.fileSize = 1;
  file.checksumType = "checksum_type";
  file.checksumValue = "cheskum_value";
  file.storageClass = storageClassName;
  const uint64_t archiveFileId = 1234;
  file.diskInstance = "recovery_instance";
  file.drData.drPath = "recovery_path";
  file.drData.drOwner = "recovery_owner";
  file.drData.drGroup = "recovery_group";
  file.drData.drBlob = "recovery_blob";

  // Create a bare archive file, i.e. one with any tape copies
  common::dataStructures::ArchiveFile bareArchiveFile;
  bareArchiveFile.archiveFileID = archiveFileId;
  bareArchiveFile.diskFileID = "EOS_file_ID";
  bareArchiveFile.fileSize = 1;
  bareArchiveFile.checksumType = "checksum_type";
  bareArchiveFile.checksumValue = "cheskum_value";
  bareArchiveFile.storageClass = storageClassName;
  catalogue.createArchiveFile(file);
  bareArchiveFile.diskInstance = "recovery_instance";
  bareArchiveFile.drData.drPath = "recovery_path";
  bareArchiveFile.drData.drOwner = "recovery_owner";
  bareArchiveFile.drData.drGroup = "recovery_group";
  bareArchiveFile.drData.drBlob = "recovery_blob";

  std::list<common::dataStructures::ArchiveFile> files;
  files = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
  ASSERT_EQ(1, files.size());

  catalogue.createArchiveFile(bareArchiveFile);

  {
    std::list<common::dataStructures::ArchiveFile> archiveFiles;
    archiveFiles = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
    ASSERT_EQ(1, archiveFiles.size());
    ASSERT_TRUE(archiveFiles.front().tapeCopies.empty());

    const common::dataStructures::ArchiveFile archiveFile = archiveFiles.front();
    ASSERT_EQ(bareArchiveFile.archiveFileID, archiveFile.archiveFileID);
    ASSERT_EQ(bareArchiveFile.diskFileID, archiveFile.diskFileID);
    ASSERT_EQ(bareArchiveFile.fileSize, archiveFile.fileSize);
    ASSERT_EQ(bareArchiveFile.checksumType, archiveFile.checksumType);
    ASSERT_EQ(bareArchiveFile.checksumValue, archiveFile.checksumValue);
    ASSERT_EQ(bareArchiveFile.storageClass, archiveFile.storageClass);
    ASSERT_EQ(bareArchiveFile.diskInstance, archiveFile.diskInstance);
    ASSERT_EQ(bareArchiveFile.drData.drPath, archiveFile.drData.drPath);
    ASSERT_EQ(bareArchiveFile.drData.drOwner, archiveFile.drData.drOwner);
    ASSERT_EQ(bareArchiveFile.drData.drGroup, archiveFile.drData.drGroup);
    ASSERT_EQ(bareArchiveFile.drData.drBlob, archiveFile.drData.drBlob);
    ASSERT_TRUE(bareArchiveFile.tapeCopies.empty());
  }

  const common::dataStructures::ArchiveFile frontFile = files.front();
  ASSERT_TRUE(catalogue.getTapeFiles().empty());
  ASSERT_EQ(file.archiveFileID, frontFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, frontFile.diskFileID);
  ASSERT_EQ(file.fileSize, frontFile.fileSize);
  ASSERT_EQ(file.checksumType, frontFile.checksumType);
  ASSERT_EQ(file.checksumValue, frontFile.checksumValue);
  ASSERT_EQ(file.storageClass, frontFile.storageClass);

  common::dataStructures::TapeFile tapeFile1;
  tapeFile1.vid = "VID1";
  tapeFile1.fSeq = 5678;
  tapeFile1.blockId = 9012;
  tapeFile1.compressedSize = 5;
  tapeFile1.copyNb = 1;

  ASSERT_EQ(file.diskInstance, frontFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, frontFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, frontFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, frontFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, frontFile.drData.drBlob);

  catalogue.createTapeFile(tapeFile1, archiveFileId);

  ASSERT_TRUE(catalogue.getTapeFiles().empty());

  {
    std::list<common::dataStructures::ArchiveFile> archiveFiles;
    archiveFiles = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
    ASSERT_EQ(1, archiveFiles.size());

    const common::dataStructures::ArchiveFile archiveFile = archiveFiles.front();
    ASSERT_EQ(bareArchiveFile.archiveFileID, archiveFile.archiveFileID);
    ASSERT_EQ(bareArchiveFile.diskFileID, archiveFile.diskFileID);
    ASSERT_EQ(bareArchiveFile.fileSize, archiveFile.fileSize);
    ASSERT_EQ(bareArchiveFile.checksumType, archiveFile.checksumType);
    ASSERT_EQ(bareArchiveFile.checksumValue, archiveFile.checksumValue);
    ASSERT_EQ(bareArchiveFile.storageClass, archiveFile.storageClass);
    ASSERT_EQ(bareArchiveFile.diskInstance, archiveFile.diskInstance);
    ASSERT_EQ(bareArchiveFile.drData.drPath, archiveFile.drData.drPath);
    ASSERT_EQ(bareArchiveFile.drData.drOwner, archiveFile.drData.drOwner);
    ASSERT_EQ(bareArchiveFile.drData.drGroup, archiveFile.drData.drGroup);
    ASSERT_EQ(bareArchiveFile.drData.drBlob, archiveFile.drData.drBlob);
    ASSERT_EQ(1, archiveFile.tapeCopies.size());
    {
      auto copyNbToTapeFileItor = archiveFile.tapeCopies.find(1);
      ASSERT_EQ(1, copyNbToTapeFileItor->first);
      ASSERT_EQ(tapeFile1.vid, copyNbToTapeFileItor->second.vid);
      ASSERT_EQ(tapeFile1.fSeq, copyNbToTapeFileItor->second.fSeq);
      ASSERT_EQ(tapeFile1.blockId, copyNbToTapeFileItor->second.blockId);
      ASSERT_EQ(tapeFile1.compressedSize, copyNbToTapeFileItor->second.compressedSize);
      ASSERT_EQ(tapeFile1.copyNb, copyNbToTapeFileItor->second.copyNb);
    }
  }

  {
    const std::list<common::dataStructures::TapeFile> tapeFiles = catalogue.getTapeFiles();
    ASSERT_EQ(1, tapeFiles.size());
    ASSERT_EQ(tapeFile1.vid, tapeFiles.front().vid);
    ASSERT_EQ(tapeFile1.fSeq, tapeFiles.front().fSeq);
    ASSERT_EQ(tapeFile1.blockId, tapeFiles.front().blockId);
    ASSERT_EQ(tapeFile1.compressedSize, tapeFiles.front().compressedSize);
    ASSERT_EQ(tapeFile1.copyNb, tapeFiles.front().copyNb);
  }

  common::dataStructures::TapeFile tapeFile;
  tapeFile.vid = "VID";
  tapeFile.fSeq = 5678;
  tapeFile.blockId = 9012;
  tapeFile.compressedSize = 5;
  tapeFile.copyNb = 1;

  common::dataStructures::TapeFile tapeFile2;
  tapeFile2.vid = "VID2";
  tapeFile2.fSeq = 3456;
  tapeFile2.blockId = 7890;
  tapeFile2.compressedSize = 6;
  tapeFile2.copyNb = 2;

  const uint64_t archiveFileId = 1234;
  catalogue.createTapeFile(tapeFile2, archiveFileId);
  catalogue.createTapeFile(tapeFile, archiveFileId);

  {
    std::list<common::dataStructures::ArchiveFile> archiveFiles;
    archiveFiles = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
    ASSERT_EQ(1, archiveFiles.size());

    const common::dataStructures::ArchiveFile archiveFile = archiveFiles.front();
    ASSERT_EQ(bareArchiveFile.archiveFileID, archiveFile.archiveFileID);
    ASSERT_EQ(bareArchiveFile.diskFileID, archiveFile.diskFileID);
    ASSERT_EQ(bareArchiveFile.fileSize, archiveFile.fileSize);
    ASSERT_EQ(bareArchiveFile.checksumType, archiveFile.checksumType);
    ASSERT_EQ(bareArchiveFile.checksumValue, archiveFile.checksumValue);
    ASSERT_EQ(bareArchiveFile.storageClass, archiveFile.storageClass);
    ASSERT_EQ(bareArchiveFile.diskInstance, archiveFile.diskInstance);
    ASSERT_EQ(bareArchiveFile.drData.drPath, archiveFile.drData.drPath);
    ASSERT_EQ(bareArchiveFile.drData.drOwner, archiveFile.drData.drOwner);
    ASSERT_EQ(bareArchiveFile.drData.drGroup, archiveFile.drData.drGroup);
    ASSERT_EQ(bareArchiveFile.drData.drBlob, archiveFile.drData.drBlob);
    ASSERT_EQ(2, archiveFile.tapeCopies.size());
    {
      auto copyNbToTapeFileItor = archiveFile.tapeCopies.find(1);
      ASSERT_EQ(1, copyNbToTapeFileItor->first);
      ASSERT_EQ(tapeFile1.vid, copyNbToTapeFileItor->second.vid);
      ASSERT_EQ(tapeFile1.fSeq, copyNbToTapeFileItor->second.fSeq);
      ASSERT_EQ(tapeFile1.blockId, copyNbToTapeFileItor->second.blockId);
      ASSERT_EQ(tapeFile1.compressedSize, copyNbToTapeFileItor->second.compressedSize);
      ASSERT_EQ(tapeFile1.copyNb, copyNbToTapeFileItor->second.copyNb);
    }
    const std::list<common::dataStructures::TapeFile> tapeFiles = catalogue.getTapeFiles();
    {
      auto copyNbToTapeFileItor = archiveFile.tapeCopies.find(2);
      ASSERT_EQ(2, copyNbToTapeFileItor->first);
      ASSERT_EQ(tapeFile2.vid, copyNbToTapeFileItor->second.vid);
      ASSERT_EQ(tapeFile2.fSeq, copyNbToTapeFileItor->second.fSeq);
      ASSERT_EQ(tapeFile2.blockId, copyNbToTapeFileItor->second.blockId);
      ASSERT_EQ(tapeFile2.compressedSize, copyNbToTapeFileItor->second.compressedSize);
      ASSERT_EQ(tapeFile2.copyNb, copyNbToTapeFileItor->second.copyNb);
    }
  }

  {
    const std::list<common::dataStructures::TapeFile> tapeFiles = catalogue.getTapeFiles();
    ASSERT_EQ(1, tapeFiles.size());
    ASSERT_EQ(tapeFile.vid, tapeFiles.front().vid);
    ASSERT_EQ(tapeFile.fSeq, tapeFiles.front().fSeq);
    ASSERT_EQ(tapeFile.blockId, tapeFiles.front().blockId);
    ASSERT_EQ(tapeFile.compressedSize, tapeFiles.front().compressedSize);
    ASSERT_EQ(tapeFile.copyNb, tapeFiles.front().copyNb);
    ASSERT_EQ(2, tapeFiles.size());
  }
}

TEST_F(cta_catalogue_SqliteCatalogueTest, getTapeLastFseq_no_such_tape) {

...
@@ -1204,20 +1303,20 @@ TEST_F(cta_catalogue_SqliteCatalogueTest, getArchiveFile) {
  files = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
  ASSERT_EQ(1, files.size());

  const common::dataStructures::ArchiveFile frontFile = files.front();
  const common::dataStructures::ArchiveFile archiveFile = files.front();
  ASSERT_EQ(file.archiveFileID, frontFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, frontFile.diskFileID);
  ASSERT_EQ(file.fileSize, frontFile.fileSize);
  ASSERT_EQ(file.checksumType, frontFile.checksumType);
  ASSERT_EQ(file.checksumValue, frontFile.checksumValue);
  ASSERT_EQ(file.storageClass, frontFile.storageClass);
  ASSERT_EQ(file.archiveFileID, archiveFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, archiveFile.diskFileID);
  ASSERT_EQ(file.fileSize, archiveFile.fileSize);
  ASSERT_EQ(file.checksumType, archiveFile.checksumType);
  ASSERT_EQ(file.checksumValue, archiveFile.checksumValue);
  ASSERT_EQ(file.storageClass, archiveFile.storageClass);
  ASSERT_EQ(file.diskInstance, frontFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, frontFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, frontFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, frontFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, frontFile.drData.drBlob);
  ASSERT_EQ(file.diskInstance, archiveFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, archiveFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, archiveFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, archiveFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, archiveFile.drData.drBlob);

  ASSERT_TRUE(file.tapeCopies.empty());

...
@@ -1282,20 +1381,20 @@ TEST_F(cta_catalogue_SqliteCatalogueTest, fileWrittenToTape) {
  files = catalogue.getArchiveFiles("", "", "", "", "", "", "", "", "");
  ASSERT_EQ(1, files.size());

  const common::dataStructures::ArchiveFile frontFile = files.front();
  const common::dataStructures::ArchiveFile archiveFile = files.front();
  ASSERT_EQ(file.archiveFileID, frontFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, frontFile.diskFileID);
  ASSERT_EQ(file.fileSize, frontFile.fileSize);
  ASSERT_EQ(file.checksumType, frontFile.checksumType);
  ASSERT_EQ(file.checksumValue, frontFile.checksumValue);
  ASSERT_EQ(file.storageClass, frontFile.storageClass);
  ASSERT_EQ(file.archiveFileID, archiveFile.archiveFileID);
  ASSERT_EQ(file.diskFileID, archiveFile.diskFileID);
  ASSERT_EQ(file.fileSize, archiveFile.fileSize);
  ASSERT_EQ(file.checksumType, archiveFile.checksumType);
  ASSERT_EQ(file.checksumValue, archiveFile.checksumValue);
  ASSERT_EQ(file.storageClass, archiveFile.storageClass);
  ASSERT_EQ(file.diskInstance, frontFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, frontFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, frontFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, frontFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, frontFile.drData.drBlob);
  ASSERT_EQ(file.diskInstance, archiveFile.diskInstance);
  ASSERT_EQ(file.drData.drPath, archiveFile.drData.drPath);
  ASSERT_EQ(file.drData.drOwner, archiveFile.drData.drOwner);
  ASSERT_EQ(file.drData.drGroup, archiveFile.drData.drGroup);
  ASSERT_EQ(file.drData.drBlob, archiveFile.drData.drBlob);

  ASSERT_TRUE(file.tapeCopies.empty());

...
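The assertions above expect getArchiveFiles() to return one ArchiveFile per archive file, with tapeCopies keyed by copy number and left empty for a file that has no tape copies. The snippet below is only a self-contained sketch of that aggregation step under assumed shapes; JoinedRow, TapeFileLite, ArchiveFileLite and aggregate() are hypothetical names for this example, and the commit's real implementation lives in the collapsed SqliteCatalogue.cpp diff.

// Hedged sketch only -- not the implementation from this commit.  It shows how
// rows of a LEFT OUTER JOIN could be folded into one object per archive file,
// with tape copies keyed by copy number and a "bare" file ending up with an
// empty map.
#include <cstdint>
#include <list>
#include <map>
#include <string>

struct JoinedRow {          // one result row of the join (hypothetical shape)
  uint64_t archiveFileId;
  bool hasTapeFile;         // false when the TAPE_FILE columns were NULL
  int copyNb;
  std::string vid;
};

struct TapeFileLite { std::string vid; int copyNb; };

struct ArchiveFileLite {
  uint64_t archiveFileID = 0;
  std::map<int, TapeFileLite> tapeCopies;  // empty for a bare archive file
};

std::list<ArchiveFileLite> aggregate(const std::list<JoinedRow> &rows) {
  std::map<uint64_t, ArchiveFileLite> byId;
  for (const JoinedRow &row : rows) {
    ArchiveFileLite &file = byId[row.archiveFileId];
    file.archiveFileID = row.archiveFileId;
    if (row.hasTapeFile) {                 // a NULL-padded row adds no copy
      file.tapeCopies[row.copyNb] = TapeFileLite{row.vid, row.copyNb};
    }
  }
  std::list<ArchiveFileLite> result;
  for (const auto &idAndFile : byId) {
    result.push_back(idAndFile.second);
  }
  return result;
}

Keying the copies by copy number is what allows the assertions above to look copies up with tapeCopies.find(1) and tapeCopies.find(2).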
catalogue/SqliteRset.cpp (View file @ a00bd53c)

...
@@ -29,42 +29,68 @@ namespace cta {
namespace catalogue {

/**
 * A map from column name to column index.
 * A map from column name to column index and type.
 *
 * Please note that this class is intentionally hidden within this cpp file to
 * enable the SqliteRset class to be used by code compiled against the CXX11 ABI
 * and by code compiled against a pre-CXX11 ABI.
 */
class SqliteRset::ColumnNameToIdx {
class SqliteRset::ColNameToIdxAndType {
public:

  /**
   * Adds the specified column name to index mapping.
   * Structure to store a column's index and type. With SQLite 3 the type of a
   * column needs to be stored before any type conversion has taken place. This
   * is because the result of calling the sqlite3_column_type() function is no
   * longer meaningful after such a conversion.
   */
  struct IdxAndType {
    /**
     * The index of the column.
     */
    int colIdx;

    /**
     * The type of the column as return by the sqlite3_column_type() function
     * before any type conversion has taken place.
     */
    int colType;

    /**
     * Constructor. Set both member-variables to 0.
     */
    IdxAndType(): colIdx(0), colType(0) {
    }
  };

  /**
   * Adds the specified mapping from column name to column index and type.
   *
   * This method throws an exception if the specified column name is a
   * duplicate, in other words has already been added to the map.
   *
   * @param name The name of the column.
   * @param idx The index of the column.
   * @param idxAndType The column index and type.
   */
  void add(const std::string &name, const int idx) {
    if(m_nameToIdx.end() != m_nameToIdx.find(name)) {
  void add(const std::string &name, const IdxAndType &idxAndType) {
    if(m_nameToIdxAndType.end() != m_nameToIdxAndType.find(name)) {
      throw std::runtime_error(std::string(__FUNCTION__) + " failed: " + name + " is a duplicate");
    }
    m_nameToIdx[name] = idx;
    m_nameToIdxAndType[name] = idxAndType;
  }

  /**
   * Returns the index of the column with the specified name.
   * Returns the index and type of the column with the specified name.
   *
   * This method throws an exception if the specified column name is not in the
   * map.
   *
   * @return the index of the column with the specified name.
   * @param name The name of the column.
   * @return The index and type of the column.
   */
  int getIdx(const std::string &name) const {
    auto it = m_nameToIdx.find(name);
    if(m_nameToIdx.end() == it) {
  IdxAndType getIdxAndType(const std::string &name) const {
    auto it = m_nameToIdxAndType.find(name);
    if(m_nameToIdxAndType.end() == it) {
      throw std::runtime_error(std::string(__FUNCTION__) + " failed: Unknown column name " + name);
    }
    return it->second;

...
@@ -75,8 +101,8 @@ public:
   *
   * @return the index of the column with the specified name.
   */
  int operator[](const std::string &name) const {
    return getIdx(name);
  IdxAndType operator[](const std::string &name) const {
    return getIdxAndType(name);
  }

  /**

...
@@ -85,7 +111,7 @@ public:
   * @return True if this map is empty.
   */
  bool empty() const {
    return m_nameToIdx.empty();
    return m_nameToIdxAndType.empty();
  }

private:

...
@@ -93,9 +119,9 @@ private:

  /**
   * The underlying STL map from column name to column index.
   */
  std::map<std::string, int> m_nameToIdx;
  std::map<std::string, IdxAndType> m_nameToIdxAndType;

}; // class SqliteRset::ColumnNameToIdx
}; // class SqliteRset::ColNameToIdx

//------------------------------------------------------------------------------
// constructor

...
@@ -103,14 +129,14 @@ private:
SqliteRset::SqliteRset(SqliteStmt &stmt):
  m_stmt(stmt),
  m_nextHasNotBeenCalled(true) {
  m_colNameToIdx.reset(new ColumnNameToIdx());
  m_columnNameToIdxAndType.reset(new ColNameToIdxAndType());
}

//------------------------------------------------------------------------------
// destructor.
//------------------------------------------------------------------------------
SqliteRset::~SqliteRset() throw() {
  //m_colNameToIdx.release();
  //m_columnNameToIdxAndType.release();
}

//------------------------------------------------------------------------------

...
@@ -136,7 +162,7 @@ bool SqliteRset::next() {
    m_nextHasNotBeenCalled = false;

    if(SQLITE_ROW == stepRc) {
      populateColNameToIdxMap();
      populateColNameToIdxAndTypeMap();
    }
  }

...
@@ -150,37 +176,57 @@ bool SqliteRset::next() {
//------------------------------------------------------------------------------
// populateColNameToIdxMap
//------------------------------------------------------------------------------
void SqliteRset::populateColNameToIdxMap() {
void SqliteRset::populateColNameToIdxAndTypeMap() {
  try {
    const int nbCols = sqlite3_column_count(m_stmt.get());
    for(int i = 0; i < nbCols; i++) {
      const char *name = sqlite3_column_name(m_stmt.get(), i);
      if(NULL == name) {
      // Get the name of the column
      const char *colName = sqlite3_column_name(m_stmt.get(), i);
      if(NULL == colName) {
        std::ostringstream msg;
        msg << "Failed to get column name for column index " << i;
        throw std::runtime_error(msg.str());
      }
      m_colNameToIdx->add(name, i);

      // Get the type of the column
      ColNameToIdxAndType::IdxAndType idxAndType;
      idxAndType.colIdx = i;
      idxAndType.colType = sqlite3_column_type(m_stmt.get(), i);

      // Add the mapping from column name to index and type
      m_columnNameToIdxAndType->add(colName, idxAndType);
    }
  } catch(std::exception &ne) {
    throw std::runtime_error(std::string(__FUNCTION__) + " failed: " + ne.what());
  }
}

//------------------------------------------------------------------------------
// columnIsNull
//------------------------------------------------------------------------------
bool SqliteRset::columnIsNull(const char *const colName) const {
  const ColNameToIdxAndType::IdxAndType idxAndType = (*m_columnNameToIdxAndType)[colName];
  return SQLITE_NULL == idxAndType.colType;
}

//------------------------------------------------------------------------------
// columnText
//------------------------------------------------------------------------------
const char *SqliteRset::columnText(const char *const colName) const {
  const int colIdx = (*m_colNameToIdx)[colName];
  return (const char *)sqlite3_column_text(m_stmt.get(), colIdx);
  const ColNameToIdxAndType::IdxAndType idxAndType = (*m_columnNameToIdxAndType)[colName];
  if(SQLITE_NULL == idxAndType.colType) {
    return "";
  } else {
    return (const char *)sqlite3_column_text(m_stmt.get(), idxAndType.colIdx);
  }
}

//------------------------------------------------------------------------------
// columnUint64
//------------------------------------------------------------------------------
uint64_t SqliteRset::columnUint64(const char *const colName) const {
  const int colIdx = (*m_colNameToIdx)[colName];
  return (uint64_t)sqlite3_column_int64(m_stmt.get(), colIdx);
  const ColNameToIdxAndType::IdxAndType idxAndType = (*m_columnNameToIdxAndType)[colName];
  return (uint64_t)sqlite3_column_int64(m_stmt.get(), idxAndType.colIdx);
}

} // namespace catalogue

...
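The IdxAndType comment above is the crux of the SqliteRset change: sqlite3_column_type() is only meaningful before a value accessor has triggered a type conversion, so the type is captured once, while the column map is being populated. A minimal standalone sketch of that ordering, using only the public SQLite C API and no CTA code, is:

// Standalone sketch (assumed example, not part of this commit).
#include <sqlite3.h>

void readColumn(sqlite3_stmt *stmt, int colIdx) {
  // Capture the column type BEFORE any value accessor runs ...
  const int colType = sqlite3_column_type(stmt, colIdx);

  // ... because sqlite3_column_text() may perform a type conversion, after
  // which a fresh sqlite3_column_type() result is no longer meaningful
  // (this is documented SQLite behaviour).
  const unsigned char *text = sqlite3_column_text(stmt, colIdx);

  if (SQLITE_NULL == colType) {
    // The column is NULL; this is what columnIsNull() reports and why
    // columnText() now returns "" instead of a NULL pointer.
  }
  (void)text;  // the value would be used here
}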
catalogue/SqliteRset.hpp (View file @ a00bd53c)

...
@@ -63,6 +63,14 @@ public:
   */
  bool next();

  /**
   * Returns true if the specified column contains a null value.
   *
   * @param colName The name of the column.
   * @return True if the specified column contains a null value.
   */
  bool columnIsNull(const char *const colName) const;

  /**
   * Returns the value of the specified column as a string.
   *

...
@@ -71,13 +79,14 @@ public:
   * compiled against a pre-CXX11 ABI.
   *
   * Please note that if the value of the column is NULL within the database
   * then a NULL pointer is returned.
   * then an empty string shall be returned. Use the columnIsNull() method to
   * determine whether not a column contains a NULL value.
   *
   * @param colName The name of the column.
   * @return The string value of the specified column or NULL if the value of
   *   the column within the database is NULL. Please note that it is the
   *   responsibility of the caller to free the memory associated with the string
   *   using delete[] operator.
   * @return The string value of the specified column. Please note that the
   *   returned string should not be deleted. The string should be copied before
   *   the next call to the next() method. The SqliteRset class is responsible
   *   for freeing the memory.
   */
  const char *columnText(const char *const colName) const;

...
@@ -107,21 +116,21 @@ private:
   * order to enable the SqliteRset class to be used by code compiled against
   * the CXX11 ABI and used by code compiled against the pre-CXX11 ABI.
   */
  class ColumnNameToIdx;
  class ColNameToIdxAndType;

  /**
   * Map from column name to column index.
   * Map from column name to column index and type.
   *
   * Please note that the type of the map is intentionally forward declared in
   * order to avoid std::string being used. This is to aid with working with
   * pre and post CXX11 ABIs.
   */
  std::unique_ptr<ColumnNameToIdx> m_colNameToIdx;
  std::unique_ptr<ColNameToIdxAndType> m_columnNameToIdxAndType;

  /**
   * Populates the map from column name to column index.
   * Populates the map from column name to column index and type.
   */
  void populateColNameToIdxMap();
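With the new columnText() contract, an empty string can mean either an empty value or a database NULL, so a caller that needs to tell them apart is expected to consult columnIsNull() first. The snippet below is a hypothetical caller-side sketch, not part of this commit; the include path and the helper name vidOrDefault() are assumptions for illustration.

// Hypothetical usage sketch against the SqliteRset interface shown above.
#include <string>
#include "catalogue/SqliteRset.hpp"

std::string vidOrDefault(cta::catalogue::SqliteRset &rset) {
  // An empty string from columnText() can now mean either "" or NULL,
  // so check columnIsNull() first when the distinction matters.
  if (rset.columnIsNull("VID")) {
    return "<NULL>";
  }
  // Copy before the next call to next(); SqliteRset owns the returned buffer.
  return std::string(rset.columnText("VID"));
}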