@article{Vos049338,
	author = {Rutger A. Vos},
	title = {Ten simple rules for managing high-throughput nucleotide sequencing data},
	elocation-id = {049338},
	year = {2016},
	doi = {10.1101/049338},
	publisher = {Cold Spring Harbor Laboratory},
	journal = {bioRxiv},
	URL = {https://www.biorxiv.org/content/early/2016/04/19/049338},
	eprint = {https://www.biorxiv.org/content/early/2016/04/19/049338.full.pdf},
	abstract = {The challenges posed by large data volumes produced by high-throughput nucleotide sequencing technologies are well known. This document establishes ten simple rules for coping with these challenges. At the level of master data management, (1) data triage reduces data volumes; (2) some lossless data representations are much more compact than others; (3) careful management of data replication reduces wasted storage space. At the level of data analysis, (4) automated analysis pipelines obviate the need for storing work files; (5) virtualization reduces the need for data movement and bandwidth consumption; (6) tracking of data and analysis provenance will generate a paper trail to better understand how results were produced. At the level of data access and sharing, (7) careful modeling of data movement patterns reduces bandwidth consumption and haphazard copying; (8) persistent, resolvable identifiers for data reduce ambiguity caused by data movement; (9) sufficient metadata enables more effective collaboration. Finally, because of rapid developments in HTS technologies, (10) agile practices that combine loosely coupled modules operating on standards-compliant data are the best approach for avoiding lock-in. A generalized scenario is presented for data management from initial raw data generation to publication of result data.}
}