/******************************************************************************
 *
 * This file is part of the Castor project.
 * See http://castor.web.cern.ch/castor
 *
 * Copyright (C) 2003  CERN
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * 
 *
 * @author Castor Dev team, castor-dev@cern.ch
 *****************************************************************************/
#pragma once

#include "tapeserver/castor/tape/tapeserver/daemon/ReportPackerInterface.hpp"
#include "common/log/LogContext.hpp"
#include "common/threading/Thread.hpp"
#include "common/threading/BlockingQueue.hpp"
#include "scheduler/RetrieveJob.hpp"
#include "scheduler/RetrieveMount.hpp"

#include <memory>
#include <queue>
#include <string>

namespace castor {
namespace tape {
namespace tapeserver {
namespace daemon {
  
40
class RecallReportPacker : public ReportPackerInterface<detail::Recall> {
41
public:
42
43
  /**
   * Constructor
44
   * @param tg the client to whom we report the success/failures
45
46
   * @param lc log context, copied du to threads
   */
Victor Kotlyar's avatar
Victor Kotlyar committed
47
  RecallReportPacker(cta::RetrieveMount *retrieveMount, cta::log::LogContext lc);
48
  
49
  virtual ~RecallReportPacker();
50
  
51
 /**
52
53
54
   * Create into the MigrationReportPacker a report for the successful migration
   * of migratedFile
   * @param migratedFile the file successfully migrated
55
   * @param checksum the checksum the DWT has computed for the file 
56
   */
57
  virtual void reportCompletedJob(std::unique_ptr<cta::RetrieveJob> successfulRetrieveJob);
58
59
  
  /**
60
   * Create into the MigrationReportPacker a report for the failed migration
61
   * of migratedFile
62
   * @param migratedFile the file which failed 
63
   * @param ex the reason for the failure
64
   */
65
  virtual void reportFailedJob(std::unique_ptr<cta::RetrieveJob> failedRetrieveJob, const cta::exception::Exception & ex);
66
67
68
69
       
  /**
   * Create into the MigrationReportPacker a report for the nominal end of session
   */
70
  virtual void reportEndOfSession();
71
  
72
73
74
75
76
  /**
   * Function for testing purposes. It is used to tell the report packer that this is the last report
   */
  virtual void reportTestGoingToEnd();
  
77
78
79
80
81
  /**
   * Create into the MigrationReportPacker a report for an erroneous end of session
   * @param msg The error message 
   * @param error_code The error code given by the drive
   */
82
  virtual void reportEndOfSessionWithErrors(const std::string msg,int error_code); 
83
84
85
86
87
88
  
  /**
   * Report the drive state and set it in the central drive register. This
   * function is to be used by the tape thread when running.
   * @param state the new drive state.
   */
89
  virtual void reportDriveStatus(cta::common::dataStructures::DriveStatus status);
90
91
92
93
94
95
96
97
98
99
100
  
  /**
   * Flag disk thread as done.
   */
  virtual void setDiskDone();
  
  /**
   * Flag tape thread as done. Set the drive status to draining if needed.
   */
  virtual void setTapeDone();
  
101
102
103
104
105
106
  void setTapeComplete();
  
  void setDiskComplete();
  
  bool isDiskDone();
  
107
108
109
110
111
  /**
   * Query the status of disk and tape threads (are they both done?).
   * @return true if both tape and disk threads are done.
   */
  virtual bool allThreadsDone();
112

113
114
115
  /**
   * Start the inner thread
   */
116
  void startThreads() { m_workerThread.start(); }
117
118
119
120
  
  /**
   * Stop the inner thread
   */
121
  void waitThread() { m_workerThread.wait(); }
122
  
123
124
125
126
127
  /**
   * Was there an error?
   */
  bool errorHappened();
  
128
private:
129
  //inner classes use to store content while receiving a report 
130
131
132
133
  class Report {
  public:
    virtual ~Report(){}
    virtual void execute(RecallReportPacker& packer)=0;
134
    virtual bool goingToEnd() {return false;}
135
136
137
138
  };
  class ReportTestGoingToEnd :  public Report {
  public:
    ReportTestGoingToEnd() {}
139
    void execute(RecallReportPacker& reportPacker) override {
140
141
    reportPacker.m_retrieveMount->diskComplete();
    reportPacker.m_retrieveMount->tapeComplete();}
142
    bool goingToEnd() override {return true;}
143
144
  };
  class ReportSuccessful :  public Report {
145
    /**
146
     * The successful retrieve job to be reported immediately
147
148
     */
    std::unique_ptr<cta::RetrieveJob> m_successfulRetrieveJob;
149
  public:
150
    ReportSuccessful(std::unique_ptr<cta::RetrieveJob> successfulRetrieveJob): 
151
    m_successfulRetrieveJob(std::move(successfulRetrieveJob)){}
152
    void execute(RecallReportPacker& reportPacker) override;
153
154
  };
  class ReportError : public Report {
155
    const std::string m_failureLog;
156
157
158
159
    /**
     * The failed retrieve job to be reported immediately
     */
    std::unique_ptr<cta::RetrieveJob> m_failedRetrieveJob;
160
  public:
161
162
    ReportError(std::unique_ptr<cta::RetrieveJob> failedRetrieveJob, const std::string &failureLog):
    m_failureLog(failureLog), m_failedRetrieveJob(std::move(failedRetrieveJob)) {}
163

164
    void execute(RecallReportPacker& reportPacker) override;
165
  };
166
  
167
  class ReportDriveStatus : public Report {
168
    cta::common::dataStructures::DriveStatus m_status;
169
    
170
  public:
171
    ReportDriveStatus(cta::common::dataStructures::DriveStatus status): m_status(status) {}
172
173
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
174
175
  };
  
176
177
  class ReportEndofSession : public Report {
  public:
178
    ReportEndofSession(){}
179
180
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
181

182
183
184
185
186
187
  };
  class ReportEndofSessionWithErrors : public Report {
    std::string m_message;
    int m_error_code;
  public:
    ReportEndofSessionWithErrors(std::string msg,int error_code):
188
    m_message(msg),m_error_code(error_code){}
189
  
190
191
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
192
193
  };
  
194
  class WorkerThread: public cta::threading::Thread {
195
196
197
    RecallReportPacker & m_parent;
  public:
    WorkerThread(RecallReportPacker& parent);
198
    void run() override;
199
200
  } m_workerThread;
  
201
  cta::threading::Mutex m_producterProtection;
202
203
204
205
  
  /** 
   * m_fifo is holding all the report waiting to be processed
   */
206
  cta::threading::BlockingQueue<Report*> m_fifo;
207
  
208
209
210
211
212
  /**
   * Is set as true as soon as we process a reportFailedJob
   * That we can do a sanity check to make sure we always call 
   * the right end of the session  
   */
213
  bool m_errorHappened;
214
215
216
217
218
  
  /**
   * The mount object used to send reports
   */
  cta::RetrieveMount * m_retrieveMount;
219
  
220
221
222
223
224
225
  /**
   * The successful reports that were pre-reported asynchronously.
   * They are collected and completed regularly.
   */
  std::queue<std::unique_ptr<cta::RetrieveJob> > m_successfulRetrieveJobs;
  
226
227
228
229
230
231
232
233
  /**
   * Tracking of the tape thread end
   */
  bool m_tapeThreadComplete;
  
  /**
   * Tracking of the disk thread end
   */
234
  bool m_diskThreadComplete;  
235
  
236
  cta::threading::Mutex m_mutex;
237

238
239
240
241
242
243
  /*
   * Proceed finish procedure for async execute for all reports.
   *  
   * @param reportedSuccessfuly The successful reports to check
   * @return The number of reports proceeded
   */
244
  void fullCheckAndFinishAsyncExecute();
245
246
247
248
  
  /*
   * The limit for successful reports to trigger flush.
   */
249
  const unsigned int RECALL_REPORT_PACKER_FLUSH_SIZE = 2000;
250
251
252
253
254
  
  /*
   * The time limit for successful reports to trigger flush.
   */
  const double RECALL_REPORT_PACKER_FLUSH_TIME = 180;
255
256
257
258
259
};

}}}}