Commit 4abb1e4c authored by Michael Davis

[CHEP] Adds EOS reference

parent 29185001
@article{chep2016,
author = {S. Murray and V. Bahyl and G. Cancio and E. Cano and V. Kotlyar and D. F. Kruse and J. Leduc},
title = {An efficient, modular and simple tape archiving solution for {LHC Run--3}},
journal = {Journal of Physics: Conference Series},
volume = {898},
number = {6},
@@ -10,18 +18,21 @@
abstract = {The IT Storage group at CERN develops the software responsible for archiving to tape the custodial copy of the physics data generated by the LHC experiments. Physics run 3 will start in 2021 and will introduce two major challenges for which the tape archive software must be evolved. Firstly the software will need to make more efficient use of tape drives in order to sustain the predicted data rate of 150 petabytes per year as opposed to the current 50 petabytes per year. Secondly the software will need to be seamlessly integrated with EOS, which has become the de facto disk storage system provided by the IT Storage group for physics data. The tape storage software for LHC physics run 3 is code named CTA (the CERN Tape Archive). This paper describes how CTA will introduce a pre-emptive drive scheduler to use tape drives more efficiently, will encapsulate all tape software into a single module that will sit behind one or more EOS systems, and will be simpler by dropping support for obsolete backwards compatibility.}
}
@misc{xin_zhao_tape_usage,
author = {Xin Zhao},
title = {Tape Usage},
howpublished = {\url{https://indico.cern.ch/event/732181/contributions/3019046/}},
year = {2018},
month = {May},
note = {{ADC Technical Coordination Board Meeting}}
}
@article{eos_chep2015,
author = {A. J. Peters and E. A. Sindrilaru and G. Adde},
title = {{EOS} as the present and future solution for data storage at {CERN}},
journal = {Journal of Physics: Conference Series},
volume = {664},
number = {4},
pages = {042042},
url = {http://stacks.iop.org/1742-6596/664/i=4/a=042042},
year = {2015},
abstract = {EOS is an open source distributed disk storage system in production since 2011 at CERN. Development focus has been on low-latency analysis use cases for LHC 1 and non- LHC experiments and life-cycle management using JBOD 2 hardware for multi PB storage installations. The EOS design implies a split of hot and cold storage and introduced a change of the traditional HSM 3 functionality based workflows at CERN. The 2015 deployment brings storage at CERN to a new scale and foresees to breach 100 PB of disk storage in a distributed environment using tens of thousands of (heterogeneous) hard drives. EOS has brought to CERN major improvements compared to past storage solutions by allowing quick changes in the quality of service of the storage pools. This allows the data centre to quickly meet the changing performance and reliability requirements of the LHC experiments with minimal data movements and dynamic reconfiguration. For example, the software stack has met the specific needs of the dual computing centre set-up required by CERN and allowed the fast design of new workflows accommodating the separation of long-term tape archive and disk storage required for the LHC Run II. This paper will give a high-level state of the art overview of EOS with respect to Run II, introduce new tools and use cases and set the roadmap for the next storage solutions to come.}
}
@inproceedings{castor2007,
author = {Lo Presti, Giuseppe and Olof Barring and Alasdair Earl and Rosa Maria Garcia Rioja and Sebastien Ponce and Giulia Taurelli and Dennis Waldron and Dos Santos, Miguel Coelho},
title = {{CASTOR:} A Distributed Storage Resource Facility for High Performance Data Processing at {CERN}},
booktitle = {24th {IEEE} Conference on Mass Storage Systems and Technologies ({MSST} 2007), 24--27 September 2007, San Diego, California, {USA}},
pages = {275--280},
year = {2007},
......
@@ -2,6 +2,7 @@
\usepackage{graphicx}
\usepackage{tabularx}
\usepackage{cite}
% Set serif font to Paratype
\usepackage{paratype}
@@ -53,7 +54,7 @@ will interface with EOS and CTA.
\label{introduction}
The CERN Tape Archive (CTA) is the new storage system for the custodial copy of the CERN physics data. It has
been developed by the IT Storage Group (IT--ST) as the tape back-end to the EOS~\cite{eos_chep2015} disk
system. CTA is an evolution of CASTOR~\cite{castor2007} and is scheduled to enter production for
Run--3 of the Large Hadron Collider (LHC). Data from the LHC experiments will be migrated from CASTOR
to CTA during the second Long Shutdown, which starts in January 2019.
......
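For reference, the new entry is consumed by the paper source roughly as sketched below. This is a
minimal illustration, not the actual paper: the bibliography file name (references.bib) and the
bibliography style (unsrt) are assumptions, since neither appears in this diff.

\documentclass{article}
\usepackage{cite}   % as in the preamble above

\begin{document}
\section{Introduction}
\label{introduction}
CTA is the tape back-end to the EOS~\cite{eos_chep2015} disk system and an evolution
of CASTOR~\cite{castor2007}.

% Assumption: the BibTeX entries shown above live in references.bib;
% unsrt numbers references in order of citation.
\bibliographystyle{unsrt}
\bibliography{references}
\end{document}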