@inproceedings{GraenKewShaitarovaetal.2019,
  author    = {Johannes Gra{\"e}n and Tannon Kew and Anastassia Shaitarova and Martin Volk},
  title     = {Modelling large parallel corpora. The Zurich Parallel Corpus Collection},
  booktitle = {Proceedings of the Workshop on Challenges in the Management of Large Corpora (CMLC-7) 2019. Cardiff, 22nd July 2019},
  editor    = {Piotr Ba{\'n}ski and Adrien Barbaresi and Hanno Biber and Evelyn Breiteneder and Simon Clematide and Marc Kupietz and Harald L{\"u}ngen and Caroline Iliadi},
  publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},
  address   = {Mannheim},
  doi       = {10.14618/ids-pub-9020},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-90207},
  pages     = {1--8},
  year      = {2019},
  abstract  = {Text corpora come in many different shapes and sizes and carry heterogeneous annotations, depending on their purpose and design. The true benefit of corpora is rooted in their annotation, and the method by which this data is encoded is an important factor in their interoperability. We have accumulated a large collection of multilingual and parallel corpora and encoded it in a unified format which is compatible with a broad range of NLP tools and corpus linguistic applications. In this paper, we present our corpus collection and describe a data model and the extensions to the popular CoNLL-U format that enable us to encode it.},
  language  = {en}
}