@inproceedings{RuppenhoferRehbeinFlinz2020,
  author    = {Ruppenhofer, Josef and Rehbein, Ines and Flinz, Carolina},
  title     = {Fine-grained Named Entity Annotations for {German} Biographic Interviews},
  booktitle = {Proceedings of the 12th International Conference on Language Resources and Evaluation ({LREC}), May 11-16, 2020, Palais du Pharo, Marseille, France},
  editor    = {Calzolari, Nicoletta and B{\'e}chet, Fr{\'e}d{\'e}ric and Blache, Philippe and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios},
  publisher = {European Language Resources Association},
  address   = {Paris},
  isbn      = {979-10-95546-34-4},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-98652},
  pages     = {4605--4614},
  year      = {2020},
  abstract  = {We present a fine-grained NER annotations scheme with 30 labels and apply it to German data. Building on the OntoNotes 5.0 NER inventory, our scheme is adapted for a corpus of transcripts of biographic interviews by adding categories for AGE and LAN(guage) and also adding label classes for various numeric and temporal expressions. Applying the scheme to the spoken data as well as a collection of teaser tweets from newspaper sites, we can confirm its generality for both domains, also achieving good inter-annotator agreement. We also show empirically how our inventory relates to the well-established 4-category NER inventory by re-annotating a subset of the GermEval 2014 NER coarse-grained dataset with our fine label inventory. Finally, we use a BERT-based system to establish some baselines for NER tagging on our two new datasets. Global results in in-domain testing are quite high on the two datasets, near what was achieved for the coarse inventory on the CoNLL 2003 data. Cross-domain testing produces much lower results due to the severe domain differences.},
  language  = {en},
}