Commit 8aafa0f4 authored by Sebastien Ponce's avatar Sebastien Ponce
Browse files

Merge branch 'v2_1_14Version'

Conflicts:
	xrootd/XrdxCastor2Ofs.cpp
	xrootd/XrdxCastor2Ofs.hpp
parents 041ba2aa 0b97d501
......@@ -139,6 +139,8 @@ INSERT INTO CastorConfig
VALUES ('DiskServer', 'HeartbeatTimeout', '180', 'The maximum amount of time in seconds that a diskserver can spend without sending any heartbeat before it is automatically set to offline.');
-- Draining: cap on the number of disk-to-disk copies a single draining job
-- may keep queued in the scheduler at any one time.
INSERT INTO CastorConfig
VALUES ('Draining', 'MaxNbSchedD2dPerDrain', '1000',
        'The maximum number of disk to disk copies that each draining job should send to the scheduler concurrently.');
-- Draining: cap on the total amount of data (in bytes) a single draining job
-- may hand to the scheduler in one go. Default is 10 GB.
INSERT INTO CastorConfig
VALUES ('Draining', 'MaxDataSchedD2dPerDrain', '10000000000',
        'The maximum amount of data that each draining job should send to the scheduler in one go.');
-- Rebalancing: fullness threshold (percentage points above the diskpool
-- average) at which a filesystem triggers rebalancing.
INSERT INTO CastorConfig
VALUES ('Rebalancing', 'Sensitivity', '5',
        'The rebalancing sensitivity (in percent) : if a fileSystem is at least this percentage fuller than the average of the diskpool where it lives, rebalancing will fire.');
INSERT INTO CastorConfig
......
......@@ -21,11 +21,14 @@ END;
/* handle the creation of the Disk2DiskCopyJobs for the running drainingJobs */
CREATE OR REPLACE PROCEDURE drainRunner AS
varNbRunningJobs INTEGER;
varDataRunningJobs INTEGER;
varMaxNbOfSchedD2dPerDrain INTEGER;
varMaxDataOfSchedD2dPerDrain INTEGER;
varUnused INTEGER;
BEGIN
-- get maxNbOfSchedD2dPerDrain
-- get maxNbOfSchedD2dPerDrain and MaxDataOfSchedD2dPerDrain
varMaxNbOfSchedD2dPerDrain := TO_NUMBER(getConfigOption('Draining', 'MaxNbSchedD2dPerDrain', '1000'));
varMaxDataOfSchedD2dPerDrain := TO_NUMBER(getConfigOption('Draining', 'MaxDataSchedD2dPerDrain', '10000000000')); -- 10 GB
-- loop over draining jobs
FOR dj IN (SELECT id, fileSystem, svcClass, fileMask, euid, egid
FROM DrainingJob WHERE status = dconst.DRAININGJOB_RUNNING) LOOP
......@@ -41,7 +44,10 @@ BEGIN
CONTINUE;
END;
-- check how many disk2DiskCopyJobs are already running for this draining job
SELECT count(*) INTO varNbRunningJobs FROM Disk2DiskCopyJob WHERE drainingJob = dj.id;
SELECT count(*), nvl(sum(CastorFile.fileSize), 0) INTO varNbRunningJobs, varDataRunningJobs
FROM Disk2DiskCopyJob, CastorFile
WHERE Disk2DiskCopyJob.drainingJob = dj.id
AND CastorFile.id = Disk2DiskCopyJob.castorFile;
-- Loop over the creation of Disk2DiskCopyJobs. Select max 1000 files, taking running
-- ones into account. Also take the most important jobs first
logToDLF(NULL, dlf.LVL_SYSTEM, dlf.DRAINING_REFILL, 0, '', 'stagerd',
......@@ -61,8 +67,15 @@ BEGIN
AND NOT EXISTS (SELECT 1 FROM Disk2DiskCopyJob WHERE castorFile = CastorFile.id AND drainingJob = dj.id)
ORDER BY DiskCopy.importance DESC)
WHERE ROWNUM <= varMaxNbOfSchedD2dPerDrain-varNbRunningJobs) LOOP
createDisk2DiskCopyJob(F.cfId, F.nsOpenTime, dj.svcClass, dj.euid, dj.egid,
dconst.REPLICATIONTYPE_DRAINING, F.dcId, TRUE, dj.id, FALSE);
-- Do not schedule more data than varMaxDataOfSchedD2dPerDrain
IF varDataRunningJobs <= varMaxDataOfSchedD2dPerDrain THEN
createDisk2DiskCopyJob(F.cfId, F.nsOpenTime, dj.svcClass, dj.euid, dj.egid,
dconst.REPLICATIONTYPE_DRAINING, F.dcId, TRUE, dj.id, FALSE);
varDataRunningJobs := varDataRunningJobs + F.fileSize;
ELSE
-- enough data has been scheduled for this draining job, stop here
EXIT;
END IF;
END LOOP;
UPDATE DrainingJob
SET lastModificationTime = getTime()
......
......@@ -1851,13 +1851,3 @@ END;
-- Record in the upgrade journal that the 2.1.14-14 schema upgrade finished,
-- and make it durable immediately.
UPDATE UpgradeLog
   SET endDate = systimestamp,
       state   = 'COMPLETE'
 WHERE release = '2_1_14_14';
COMMIT;
/* Post-upgrade cleanup phase: archive all requests older than 24h and stuck in status 4 */
BEGIN
  -- Walk every SubRequest that has been sitting in status 4 for more than
  -- one day and archive it as FINISHED.
  -- NOTE(review): 4 is kept as a numeric literal as in the original; confirm
  -- it matches the intended dconst SUBREQUEST status constant.
  FOR stuckReq IN (SELECT id
                     FROM SubRequest
                    WHERE status = 4
                      AND creationTime < getTime() - 86400) LOOP
    archiveSubReq(stuckReq.id, dconst.SUBREQUEST_FINISHED);
    -- Commit after each row so the cleanup makes incremental progress and
    -- never holds a long-running transaction open.
    COMMIT;
  END LOOP;
END;
/
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment