/******************************************************************************
 *
 * This file is part of the Castor project.
 * See http://castor.web.cern.ch/castor
 *
 * Copyright (C) 2003  CERN
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * 
 *
 * @author Castor Dev team, castor-dev@cern.ch
 *****************************************************************************/

#pragma once

#include "castor/tape/tapeserver/daemon/ReportPackerInterface.hpp"
#include "common/log/LogContext.hpp"
#include "common/threading/Thread.hpp"
#include "common/threading/BlockingQueue.hpp"
#include "scheduler/RetrieveJob.hpp"
#include "scheduler/RetrieveMount.hpp"

#include <list>
#include <memory>
#include <string>
namespace castor {
namespace tape {
namespace tapeserver {
namespace daemon {
  
40
class RecallReportPacker : public ReportPackerInterface<detail::Recall> {
41
public:
42
43
  /**
   * Constructor
44
   * @param tg the client to whom we report the success/failures
45
46
   * @param lc log context, copied du to threads
   */
Victor Kotlyar's avatar
Victor Kotlyar committed
47
  RecallReportPacker(cta::RetrieveMount *retrieveMount, cta::log::LogContext lc);
48
  
49
  virtual ~RecallReportPacker();
50
  
51
 /**
52
53
54
   * Create into the MigrationReportPacker a report for the successful migration
   * of migratedFile
   * @param migratedFile the file successfully migrated
55
   * @param checksum the checksum the DWT has computed for the file 
56
   */
57
  virtual void reportCompletedJob(std::unique_ptr<cta::RetrieveJob> successfulRetrieveJob);
58
59
  
  /**
60
   * Create into the MigrationReportPacker a report for the failed migration
61
   * of migratedFile
62
   * @param migratedFile the file which failed 
63
   * @param ex the reason for the failure
64
   */
65
  virtual void reportFailedJob(std::unique_ptr<cta::RetrieveJob> failedRetrieveJob);
66
67
68
69
       
  /**
   * Create into the MigrationReportPacker a report for the nominal end of session
   */
70
  virtual void reportEndOfSession();
71
  
72
73
74
75
76
  /**
   * Function for testing purposes. It is used to tell the report packer that this is the last report
   */
  virtual void reportTestGoingToEnd();
  
77
78
79
80
81
  /**
   * Create into the MigrationReportPacker a report for an erroneous end of session
   * @param msg The error message 
   * @param error_code The error code given by the drive
   */
82
  virtual void reportEndOfSessionWithErrors(const std::string msg,int error_code); 
83
84
85
86
87
88
  
  /**
   * Report the drive state and set it in the central drive register. This
   * function is to be used by the tape thread when running.
   * @param state the new drive state.
   */
89
  virtual void reportDriveStatus(cta::common::dataStructures::DriveStatus status);
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
  
  /**
   * Flag disk thread as done.
   */
  virtual void setDiskDone();
  
  /**
   * Flag tape thread as done. Set the drive status to draining if needed.
   */
  virtual void setTapeDone();
  
  /**
   * Query the status of disk and tape threads (are they both done?).
   * @return true if both tape and disk threads are done.
   */
  virtual bool allThreadsDone();
106

107
108
109
  /**
   * Start the inner thread
   */
110
  void startThreads() { m_workerThread.start(); }
111
112
113
114
  
  /**
   * Stop the inner thread
   */
115
  void waitThread() { m_workerThread.wait(); }
116
  
117
118
119
120
121
  /**
   * Was there an error?
   */
  bool errorHappened();
  
122
private:
123
  //inner classes use to store content while receiving a report 
124
125
126
127
  class Report {
  public:
    virtual ~Report(){}
    virtual void execute(RecallReportPacker& packer)=0;
128
    virtual void waitForAsyncExecuteFinished() {};
129
    virtual bool goingToEnd() {return false;}
130
131
132
133
  };
  class ReportTestGoingToEnd :  public Report {
  public:
    ReportTestGoingToEnd() {}
134
    void execute(RecallReportPacker& reportPacker) override {
135
136
    reportPacker.m_retrieveMount->diskComplete();
    reportPacker.m_retrieveMount->tapeComplete();}
137
    bool goingToEnd() override {return true;}
138
139
  };
  class ReportSuccessful :  public Report {
140
    /**
141
     * The successful retrieve job to be reported immediately
142
143
     */
    std::unique_ptr<cta::RetrieveJob> m_successfulRetrieveJob;
144
  public:
145
    ReportSuccessful(std::unique_ptr<cta::RetrieveJob> successfulRetrieveJob): 
146
    m_successfulRetrieveJob(std::move(successfulRetrieveJob)){}
147
    void execute(RecallReportPacker& reportPacker) override;
148
    void waitForAsyncExecuteFinished() override;
149
150
  };
  class ReportError : public Report {
151
152
153
154
    /**
     * The failed retrieve job to be reported immediately
     */
    std::unique_ptr<cta::RetrieveJob> m_failedRetrieveJob;
155
  public:
156
    ReportError(std::unique_ptr<cta::RetrieveJob> failedRetrieveJob):
157
158
    m_failedRetrieveJob(std::move(failedRetrieveJob)) {
    }
159

160
    void execute(RecallReportPacker& reportPacker) override;
161
  };
162
  
163
  class ReportDriveStatus : public Report {
164
    cta::common::dataStructures::DriveStatus m_status;
165
    
166
  public:
167
    ReportDriveStatus(cta::common::dataStructures::DriveStatus status): m_status(status) {}
168
169
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
170
171
  };
  
172
173
  class ReportEndofSession : public Report {
  public:
174
    ReportEndofSession(){}
175
176
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
177

178
179
180
181
182
183
  };
  class ReportEndofSessionWithErrors : public Report {
    std::string m_message;
    int m_error_code;
  public:
    ReportEndofSessionWithErrors(std::string msg,int error_code):
184
    m_message(msg),m_error_code(error_code){}
185
  
186
187
    void execute(RecallReportPacker& reportPacker) override;
    bool goingToEnd() override;
188
189
  };
  
190
  class WorkerThread: public cta::threading::Thread {
191
192
193
    RecallReportPacker & m_parent;
  public:
    WorkerThread(RecallReportPacker& parent);
194
    void run() override;
195
196
  } m_workerThread;
  
197
  cta::threading::Mutex m_producterProtection;
198
199
200
201
  
  /** 
   * m_fifo is holding all the report waiting to be processed
   */
202
  cta::threading::BlockingQueue<Report*> m_fifo;
203
  
204
205
206
207
208
  /**
   * Is set as true as soon as we process a reportFailedJob
   * That we can do a sanity check to make sure we always call 
   * the right end of the session  
   */
209
  bool m_errorHappened;
210
211
212
213
214
  
  /**
   * The mount object used to send reports
   */
  cta::RetrieveMount * m_retrieveMount;
215
216
217
218
219
220
221
222
223
224
  
  /**
   * Tracking of the tape thread end
   */
  bool m_tapeThreadComplete;
  
  /**
   * Tracking of the disk thread end
   */
  bool m_diskThreadComplete;
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
public:
  /*
   * Check if flush limit is reached and proceed finish procedure for async execute
   *  
   * @param reportedSuccessfuly The successful reports to check
   * @return The number of reports proceeded
   */
  unsigned int flushCheckAndFinishAsyncExecute(std::list <std::unique_ptr<Report>> &reportedSuccessfully);
  
  /*
   * Proceed finish procedure for async execute for all reports.
   *  
   * @param reportedSuccessfuly The successful reports to check
   * @return The number of reports proceeded
   */
  unsigned int fullCheckAndFinishAsyncExecute(std::list <std::unique_ptr<Report>> &reportedSuccessfully);
  
  /*
   * The limit for successful reports to trigger flush.
   */
  const unsigned int RECALL_REPORT_PACKER_FLUSH_SIZE = 32;
246
247
248
249
250
};

}}}}