/*
 * The CERN Tape Archive (CTA) project
 * Copyright (C) 2015  CERN
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "catalogue/InMemoryCatalogue.hpp"
#include "catalogue/SchemaCreatingSqliteCatalogue.hpp"
#include "common/admin/AdminUser.hpp"
#include "common/admin/AdminHost.hpp"
#include "common/archiveRoutes/ArchiveRoute.hpp"
#include "common/make_unique.hpp"
#include "scheduler/ArchiveMount.hpp"
#include "scheduler/ArchiveRequest.hpp"
#include "scheduler/LogicalLibrary.hpp"
#include "scheduler/MountRequest.hpp"
#include "scheduler/OStoreDB/OStoreDBFactory.hpp"
#include "scheduler/RetrieveMount.hpp"
#include "scheduler/Scheduler.hpp"
#include "scheduler/SchedulerDatabase.hpp"
#include "scheduler/SchedulerDatabaseFactory.hpp"
#include "scheduler/TapeMount.hpp"
#include "tests/TempFile.hpp"

#include <exception>
#include <gtest/gtest.h>
#include <memory>
#include <utility>

namespace unitTests {

namespace {

/**
 * This structure is used to parameterize scheduler tests.
 */
struct SchedulerTestParam {
  cta::SchedulerDatabaseFactory &dbFactory;

  SchedulerTestParam(
    cta::SchedulerDatabaseFactory &dbFactory):
    dbFactory(dbFactory) {
  }
}; // struct SchedulerTestParam

}

/**
 * The scheduler test is a parameterized test.  It takes a scheduler database
 * factory as a parameter.
 */
class SchedulerTest: public ::testing::TestWithParam<SchedulerTestParam> {
public:

  SchedulerTest() {
  }

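  /**
   * Exception thrown when the test catalogue has not been created by SetUp().
   */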
  class FailedToGetCatalogue: public std::exception {
  public:
    const char *what() const throw() {
      return "Failed to get catalogue";
    }
  };

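  /**
   * Exception thrown when the test scheduler has not been created by SetUp().
   */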
  class FailedToGetScheduler: public std::exception {
  public:
    const char *what() const throw() {
      return "Failed to get scheduler";
    }
  };

  virtual void SetUp() {
    using namespace cta;

    const SchedulerTestParam &param = GetParam();
    m_db = param.dbFactory.create();
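    // A single catalogue connection is enough for these tests.  The last two
    // Scheduler constructor arguments (5 and 2*1000*1000) are assumed to be
    // the minimum number of files and bytes that warrant a tape mount.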
    const uint64_t nbConns = 1;
    //m_catalogue = make_unique<catalogue::SchemaCreatingSqliteCatalogue>(m_tempSqliteFile.path(), nbConns);
    m_catalogue = make_unique<catalogue::InMemoryCatalogue>(nbConns);
    m_scheduler = make_unique<Scheduler>(*m_catalogue, *m_db, 5, 2*1000*1000);
  }

  virtual void TearDown() {
    m_scheduler.reset();
    m_catalogue.reset();
    m_db.reset();
  }

  cta::catalogue::Catalogue &getCatalogue() {
    cta::catalogue::Catalogue *const ptr = m_catalogue.get();
    if(NULL == ptr) {
      throw FailedToGetCatalogue();
    }
    return *ptr;
  }
    
  cta::Scheduler &getScheduler() {
    cta::Scheduler *const ptr = m_scheduler.get();
    if(NULL == ptr) {
      throw FailedToGetScheduler();
    }
    return *ptr;
  }
  
  void setupDefaultCatalogue() {
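    // Populate the catalogue with the minimal configuration the tests rely on:
    // a mount policy, a requester mount rule, a storage class, a tape pool and
    // an archive route.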
    using namespace cta;
    auto & catalogue=getCatalogue();

    const std::string mountPolicyName = "mount_group";
    const uint64_t archivePriority = 1;
    const uint64_t minArchiveRequestAge = 2;
    const uint64_t retrievePriority = 3;
    const uint64_t minRetrieveRequestAge = 4;
    const uint64_t maxDrivesAllowed = 5;
    const std::string mountPolicyComment = "create mount group";

    ASSERT_TRUE(catalogue.getMountPolicies().empty());

    catalogue.createMountPolicy(
      s_adminOnAdminHost,
      mountPolicyName,
      archivePriority,
      minArchiveRequestAge,
      retrievePriority,
      minRetrieveRequestAge,
      maxDrivesAllowed,
      mountPolicyComment);

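    // Check that the mount policy was stored as expected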
    const std::list<common::dataStructures::MountPolicy> groups = catalogue.getMountPolicies();
    ASSERT_EQ(1, groups.size());
    const common::dataStructures::MountPolicy group = groups.front();
    ASSERT_EQ(mountPolicyName, group.name);
    ASSERT_EQ(archivePriority, group.archivePriority);
    ASSERT_EQ(minArchiveRequestAge, group.archiveMinRequestAge);
    ASSERT_EQ(retrievePriority, group.retrievePriority);
    ASSERT_EQ(minRetrieveRequestAge, group.retrieveMinRequestAge);
    ASSERT_EQ(maxDrivesAllowed, group.maxDrivesAllowed);
    ASSERT_EQ(mountPolicyComment, group.comment);

    const std::string ruleComment = "create requester mount-rule";
    catalogue.createRequesterMountRule(s_adminOnAdminHost, mountPolicyName, s_diskInstance, s_userName, ruleComment);

    const std::list<common::dataStructures::RequesterMountRule> rules = catalogue.getRequesterMountRules();
    ASSERT_EQ(1, rules.size());

    const common::dataStructures::RequesterMountRule rule = rules.front();

    ASSERT_EQ(s_userName, rule.name);
    ASSERT_EQ(mountPolicyName, rule.mountPolicy);
    ASSERT_EQ(ruleComment, rule.comment);
    ASSERT_EQ(s_adminOnAdminHost.username, rule.creationLog.username);
    ASSERT_EQ(s_adminOnAdminHost.host, rule.creationLog.host);
    ASSERT_EQ(rule.creationLog, rule.lastModificationLog);

    common::dataStructures::StorageClass storageClass;
    storageClass.diskInstance = s_diskInstance;
    storageClass.name = s_storageClassName;
    storageClass.nbCopies = 1;
    storageClass.comment = "create storage class";
    catalogue.createStorageClass(s_adminOnAdminHost, storageClass);

    const uint16_t nbPartialTapes = 1;
    const std::string tapePoolComment = "Tape-pool comment";
    const bool tapePoolEncryption = false;
    ASSERT_NO_THROW(catalogue.createTapePool(s_adminOnAdminHost, s_tapePoolName,
      nbPartialTapes, tapePoolEncryption, tapePoolComment));
    const uint16_t copyNb = 1;
    const std::string archiveRouteComment = "Archive-route comment";
    catalogue.createArchiveRoute(s_adminOnAdminHost, s_diskInstance, s_storageClassName, copyNb, s_tapePoolName,
      archiveRouteComment);
  }

private:

  // Prevent copying
  SchedulerTest(const SchedulerTest &) = delete;

  // Prevent assignment
  SchedulerTest & operator= (const SchedulerTest &) = delete;

  std::unique_ptr<cta::SchedulerDatabase> m_db;
  std::unique_ptr<cta::catalogue::Catalogue> m_catalogue;
  std::unique_ptr<cta::Scheduler> m_scheduler;
  
protected:
  // Default parameters for storage classes, etc...
  const std::string s_userName = "user_name";
  const std::string s_diskInstance = "disk_instance";
  const std::string s_storageClassName = "TestStorageClass";
  const cta::common::dataStructures::SecurityIdentity s_adminOnAdminHost = { "admin1", "host1" };
  const std::string s_tapePoolName = "TestTapePool";
  const std::string s_libraryName = "TestLogicalLibrary";
  const std::string s_vid = "TestVid";
  //TempFile m_tempSqliteFile;

}; // class SchedulerTest

TEST_P(SchedulerTest, archive_to_new_file) {
  using namespace cta;

  setupDefaultCatalogue();
  Scheduler &scheduler = getScheduler();
  
  cta::common::dataStructures::EntryLog creationLog;
  creationLog.host="host2";
  creationLog.time=0;
  creationLog.username="admin1";
  cta::common::dataStructures::DiskFileInfo diskFileInfo;
  diskFileInfo.recoveryBlob="blob";
  diskFileInfo.group="group2";
  diskFileInfo.owner="cms_user";
  diskFileInfo.path="path/to/file";
  cta::common::dataStructures::ArchiveRequest request;
  request.checksumType="Adler32";
  request.checksumValue="1111";
  request.creationLog=creationLog;
  request.diskpoolName="diskpool1";
  request.diskpoolThroughput=200*1000*1000;
  request.diskFileInfo=diskFileInfo;
  request.diskFileID="diskFileID";
  request.fileSize=100*1000*1000;
  cta::common::dataStructures::UserIdentity requester;
  requester.name = s_userName;
  requester.group = "userGroup";
  request.requester = requester;
  request.srcURL="srcURL";
  request.storageClass=s_storageClassName;

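  // Queue the request: it should end up in the archive queue of the tape pool
  // configured by setupDefaultCatalogue().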
  scheduler.queueArchive(s_diskInstance, request);

  {
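    // The request should now be visible in the pending archive jobs, grouped
    // by tape pool.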
    auto rqsts = scheduler.getPendingArchiveJobs();
    ASSERT_EQ(1, rqsts.size());
    auto poolItor = rqsts.cbegin();
    ASSERT_FALSE(poolItor == rqsts.cend());
    const std::string pool = poolItor->first;
    ASSERT_TRUE(s_tapePoolName == pool);
    auto poolRqsts = poolItor->second;
    ASSERT_EQ(1, poolRqsts.size());
    std::set<std::string> remoteFiles;
    for(auto rqstItor = poolRqsts.cbegin();
      rqstItor != poolRqsts.cend(); rqstItor++) {
      remoteFiles.insert(rqstItor->request.diskFileInfo.path);
    }
    ASSERT_EQ(1, remoteFiles.size());
    ASSERT_FALSE(remoteFiles.find(request.diskFileInfo.path) == remoteFiles.end());
  }
}

TEST_P(SchedulerTest, delete_archive_request) {
  using namespace cta;

  Scheduler &scheduler = getScheduler();
  
  setupDefaultCatalogue();

  cta::common::dataStructures::EntryLog creationLog;
  creationLog.host="host2";
  creationLog.time=0;
  creationLog.username="admin1";
  cta::common::dataStructures::DiskFileInfo diskFileInfo;
  diskFileInfo.recoveryBlob="blob";
  diskFileInfo.group="group2";
  diskFileInfo.owner="cms_user";
  diskFileInfo.path="path/to/file";
  cta::common::dataStructures::ArchiveRequest request;
  request.checksumType="Adler32";
  request.checksumValue="1111";
  request.creationLog=creationLog;
  request.diskpoolName="diskpool1";
  request.diskpoolThroughput=200*1000*1000;
  request.diskFileInfo=diskFileInfo;
  request.diskFileID="diskFileID";
  request.fileSize=100*1000*1000;
  cta::common::dataStructures::UserIdentity requester;
  requester.name = s_userName;
  requester.group = "userGroup";
  request.requester = requester;
  request.srcURL="srcURL";
  request.storageClass=s_storageClassName;

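  // Queue an archive request and keep its archive file ID so the request can
  // be deleted afterwards.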
  auto archiveFileId = scheduler.queueArchive(s_diskInstance, request);
  
  // Check that we have the file in the queues
  // TODO: for this to work all the time, we need an index of all requests
  // (otherwise we miss the selected ones).
  // Could also be limited to querying by ID (global index needed)
  bool found=false;
  for (auto & tp: scheduler.getPendingArchiveJobs()) {
    for (auto & req: tp.second) {
      if (req.archiveFileID == archiveFileId)
        found = true;
    }
  }
  ASSERT_TRUE(found);
  
  // Remove the request
  cta::common::dataStructures::DeleteArchiveRequest dar;
  dar.archiveFileID = archiveFileId;
  dar.requester.group = "group1";
  dar.requester.name = "user1";
  scheduler.deleteArchive(s_diskInstance, dar);
  
  // Validate that the request is gone.
  found=false;
  for (auto & tp: scheduler.getPendingArchiveJobs()) {
    for (auto & req: tp.second) {
      if (req.archiveFileID == archiveFileId)
        found = true;
    }
  }
  ASSERT_FALSE(found);
}

TEST_P(SchedulerTest, archive_and_retrieve_new_file) {
  using namespace cta;

  Scheduler &scheduler = getScheduler();
  auto &catalogue = getCatalogue();
  
  setupDefaultCatalogue();
  
  uint64_t archiveFileId;
  {
    // Queue an archive request.
    cta::common::dataStructures::EntryLog creationLog;
    creationLog.host="host2";
    creationLog.time=0;
    creationLog.username="admin1";
    cta::common::dataStructures::DiskFileInfo diskFileInfo;
    diskFileInfo.recoveryBlob="blob";
    diskFileInfo.group="group2";
    diskFileInfo.owner="cms_user";
    diskFileInfo.path="path/to/file";
    cta::common::dataStructures::ArchiveRequest request;
    request.checksumType="adler32";
    request.checksumValue="1234abcd";
    request.creationLog=creationLog;
    request.diskpoolName="diskpool1";
    request.diskpoolThroughput=200*1000*1000;
    request.diskFileInfo=diskFileInfo;
    request.diskFileID="diskFileID";
    request.fileSize=100*1000*1000;
    cta::common::dataStructures::UserIdentity requester;
    requester.name = s_userName;
    requester.group = "userGroup";
    request.requester = requester;
    request.srcURL="srcURL";
    request.storageClass=s_storageClassName;
    archiveFileId = scheduler.queueArchive(s_diskInstance, request);
  }
  
  // Check that we have the file in the queues
  // TODO: for this to work all the time, we need an index of all requests
  // (otherwise we miss the selected ones).
  // Could also be limited to querying by ID (global index needed)
  bool found=false;
  for (auto & tp: scheduler.getPendingArchiveJobs()) {
    for (auto & req: tp.second) {
      if (req.archiveFileID == archiveFileId)
        found = true;
    }
  }
  ASSERT_TRUE(found);

  // Create the environment for the migration to happen (library + tape)
  const std::string libraryComment = "Library comment";
  ASSERT_NO_THROW(catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
    libraryComment));
  {
    auto libraries = catalogue.getLogicalLibraries();
    ASSERT_EQ(1, libraries.size());
    ASSERT_EQ(s_libraryName, libraries.front().name);
    ASSERT_EQ(libraryComment, libraries.front().comment);
  }
  const uint64_t capacityInBytes = 12345678;
  const std::string tapeComment = "Tape comment";
  bool notDisabled = false;
  bool notFull = false;
  ASSERT_NO_THROW(catalogue.createTape(s_adminOnAdminHost, s_vid, s_libraryName,
    s_tapePoolName, "", capacityInBytes, notDisabled, notFull, tapeComment));

  {
    // Emulate a tape server by asking for a mount and then a file (and succeed
    // the transfer)
    std::unique_ptr<cta::TapeMount> mount;
    mount.reset(scheduler.getNextMount(s_libraryName, "drive0").release());
    ASSERT_NE((cta::TapeMount*)NULL, mount.get());
    ASSERT_EQ(cta::MountType::ARCHIVE, mount.get()->getMountType());
    std::unique_ptr<cta::ArchiveMount> archiveMount;
    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
    ASSERT_NE((cta::ArchiveMount*)NULL, archiveMount.get());
    std::unique_ptr<cta::ArchiveJob> archiveJob;
    archiveJob.reset(archiveMount->getNextJob().release());
    ASSERT_NE((cta::ArchiveJob*)NULL, archiveJob.get());
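    // Report the tape file location and checksum, as a tape session would,
    // before completing the job.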
    archiveJob->tapeFile.blockId = 1;
    archiveJob->tapeFile.fSeq = 1;
    archiveJob->tapeFile.checksumType = "adler32";
    archiveJob->tapeFile.checksumValue = "1234abcd";
    archiveJob->complete();
    archiveJob.reset(archiveMount->getNextJob().release());
    ASSERT_EQ((cta::ArchiveJob*)NULL, archiveJob.get());
    archiveMount->complete();
  }

  {
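    // Queue a retrieve request for the file that was just archived.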
    cta::common::dataStructures::EntryLog creationLog;
    creationLog.host="host2";
    creationLog.time=0;
    creationLog.username="admin1";
    cta::common::dataStructures::DiskFileInfo diskFileInfo;
    diskFileInfo.recoveryBlob="blob";
    diskFileInfo.group="group2";
    diskFileInfo.owner="cms_user";
    diskFileInfo.path="path/to/file";
    cta::common::dataStructures::RetrieveRequest request;
    request.archiveFileID = archiveFileId;
    request.entryLog = creationLog;
    request.diskpoolName = "diskpool1";
    request.diskpoolThroughput = 200*1000*1000;
    request.diskFileInfo = diskFileInfo;
    request.dstURL = "dstURL";
    request.requester.name = s_userName;
    request.requester.group = "userGroup";
    scheduler.queueRetrieve(s_diskInstance, request);
  }

  // Check that the retrieve request is queued
  {
    auto rqsts = scheduler.getPendingRetrieveJobs();
    // We expect 1 tape with queued jobs
    ASSERT_EQ(1, rqsts.size());
    // We expect the queue to contain 1 job
    ASSERT_EQ(1, rqsts.cbegin()->second.size());
    // We expect the job to be single copy
    auto & job = rqsts.cbegin()->second.back();
    ASSERT_EQ(1, job.tapeCopies.size());
    // We expect the copy to be on the provided tape.
    ASSERT_TRUE(s_vid == job.tapeCopies.cbegin()->first);
    // Check the remote target
    ASSERT_EQ("dstURL", job.request.dstURL);
    // Check the archive file ID
    ASSERT_EQ(archiveFileId, job.request.archiveFileID);
  }
  
  {
    // Emulate a tape server by asking for a mount and then a file (and succeed
    // the transfer)
    std::unique_ptr<cta::TapeMount> mount;
    mount.reset(scheduler.getNextMount(s_libraryName, "drive0").release());
    ASSERT_NE((cta::TapeMount*)NULL, mount.get());
    ASSERT_EQ(cta::MountType::RETRIEVE, mount.get()->getMountType());
    std::unique_ptr<cta::RetrieveMount> retrieveMount;
    retrieveMount.reset(dynamic_cast<cta::RetrieveMount*>(mount.release()));
    ASSERT_NE((cta::RetrieveMount*)NULL, retrieveMount.get());
    std::unique_ptr<cta::RetrieveJob> retrieveJob;
    retrieveJob.reset(retrieveMount->getNextJob().release());
    ASSERT_NE((cta::RetrieveJob*)NULL, retrieveJob.get());
    retrieveJob->complete();
    retrieveJob.reset(retrieveMount->getNextJob().release());
    ASSERT_EQ((cta::RetrieveJob*)NULL, retrieveJob.get());
  }
}

TEST_P(SchedulerTest, retry_archive_until_max_reached) {
  using namespace cta;
  
  setupDefaultCatalogue();

  auto &scheduler = getScheduler();
  auto &catalogue = getCatalogue();
  
  uint64_t archiveFileId;
  {
    // Queue an archive request.
    cta::common::dataStructures::EntryLog creationLog;
    creationLog.host="host2";
    creationLog.time=0;
    creationLog.username="admin1";
    cta::common::dataStructures::DiskFileInfo diskFileInfo;
    diskFileInfo.recoveryBlob="blob";
    diskFileInfo.group="group2";
    diskFileInfo.owner="cms_user";
    diskFileInfo.path="path/to/file";
    cta::common::dataStructures::ArchiveRequest request;
    request.checksumType="Adler32";
    request.checksumValue="1111";
    request.creationLog=creationLog;
    request.diskpoolName="diskpool1";
    request.diskpoolThroughput=200*1000*1000;
    request.diskFileInfo=diskFileInfo;
    request.diskFileID="diskFileID";
    request.fileSize=100*1000*1000;
    cta::common::dataStructures::UserIdentity requester;
    requester.name = s_userName;
    requester.group = "userGroup";
    request.requester = requester;
    request.srcURL="srcURL";
    request.storageClass=s_storageClassName;
    archiveFileId = scheduler.queueArchive(s_diskInstance, request);
  }
  
  // Create the environment for the migration to happen (library + tape)
  const std::string libraryComment = "Library comment";
  catalogue.createLogicalLibrary(s_adminOnAdminHost, s_libraryName,
    libraryComment);
  {
    auto libraries = catalogue.getLogicalLibraries();
    ASSERT_EQ(1, libraries.size());
    ASSERT_EQ(s_libraryName, libraries.front().name);
    ASSERT_EQ(libraryComment, libraries.front().comment);
  }
  const uint64_t capacityInBytes = 12345678;
  const std::string tapeComment = "Tape comment";
  bool notDisabled = false;
  bool notFull = false;
  catalogue.createTape(s_adminOnAdminHost, s_vid, s_libraryName,
    s_tapePoolName, "", capacityInBytes, notDisabled, notFull, tapeComment);
  
  {
    // Emulate a tape server by asking for a mount and then a file
    std::unique_ptr<cta::TapeMount> mount;
    mount.reset(scheduler.getNextMount(s_libraryName, "drive0").release());
    ASSERT_NE((cta::TapeMount*)NULL, mount.get());
    ASSERT_EQ(cta::MountType::ARCHIVE, mount.get()->getMountType());
    std::unique_ptr<cta::ArchiveMount> archiveMount;
    archiveMount.reset(dynamic_cast<cta::ArchiveMount*>(mount.release()));
    ASSERT_NE((cta::ArchiveMount*)NULL, archiveMount.get());
    // Fail the job repeatedly; once the maximum number of retries is reached
    // the request should be dropped from the queue.
    for (int i=0; i<=5; i++) {
      std::unique_ptr<cta::ArchiveJob> archiveJob(archiveMount->getNextJob());
      if (!archiveJob.get()) {
        int __attribute__((__unused__)) debugI=i;
      }
      ASSERT_NE((cta::ArchiveJob*)NULL, archiveJob.get());
      // Validate we got the right file
      ASSERT_EQ(archiveFileId, archiveJob->archiveFile.archiveFileID);
      archiveJob->failed(cta::exception::Exception("Archive failed"));
    }
    // Then the request should be gone
    std::unique_ptr<cta::ArchiveJob> archiveJob;
    archiveJob.reset(archiveMount->getNextJob().release());
    ASSERT_EQ((cta::ArchiveJob*)NULL, archiveJob.get());
  }
}

TEST_P(SchedulerTest, retrieve_non_existing_file) {
  using namespace cta;
  
  setupDefaultCatalogue();
  
  Scheduler &scheduler = getScheduler();

  {
    cta::common::dataStructures::EntryLog creationLog;
    creationLog.host="host2";
    creationLog.time=0;
    creationLog.username="admin1";
    cta::common::dataStructures::DiskFileInfo diskFileInfo;
    diskFileInfo.recoveryBlob="blob";
    diskFileInfo.group="group2";
    diskFileInfo.owner="cms_user";
    diskFileInfo.path="path/to/file";
    cta::common::dataStructures::RetrieveRequest request;
    request.archiveFileID = 12345;
    request.entryLog = creationLog;
    request.diskpoolName = "diskpool1";
    request.diskpoolThroughput = 200*1000*1000;
    request.diskFileInfo = diskFileInfo;
    request.dstURL = "dstURL";
    request.requester.name = s_userName;
    request.requester.group = "userGroup";
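    // Queueing a retrieve for an archive file ID that is not in the catalogue
    // is expected to throw.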
    ASSERT_THROW(scheduler.queueRetrieve(s_diskInstance, request), cta::exception::Exception);
  }
}


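// Instantiate the parameterized tests for each available scheduler database
// backend.  Only the VFS-backed object store is enabled by default; the mock
// and Rados backends can be enabled via the macros below.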
#undef TEST_MOCK_DB
#ifdef TEST_MOCK_DB
static cta::MockSchedulerDatabaseFactory mockDbFactory;
INSTANTIATE_TEST_CASE_P(MockSchedulerTest, SchedulerTest,
  ::testing::Values(SchedulerTestParam(mockDbFactory)));
#endif

#define TEST_VFS
#ifdef TEST_VFS
static cta::OStoreDBFactory<cta::objectstore::BackendVFS> OStoreDBFactoryVFS;

INSTANTIATE_TEST_CASE_P(OStoreDBPlusMockSchedulerTestVFS, SchedulerTest,
  ::testing::Values(SchedulerTestParam(OStoreDBFactoryVFS)));
#endif

#undef TEST_RADOS
#ifdef TEST_RADOS
static cta::OStoreDBFactory<cta::objectstore::BackendRados> OStoreDBFactoryRados("rados://tapetest@tapetest");

INSTANTIATE_TEST_CASE_P(OStoreDBPlusMockSchedulerTestRados, SchedulerTest,
  ::testing::Values(SchedulerTestParam(OStoreDBFactoryRados)));
#endif
} // namespace unitTests