@inproceedings{RehbeinvanGenabith2007,
  author    = {Ines Rehbein and Josef van Genabith},
  title     = {Evaluating Evaluation Measures},
  booktitle = {Proceedings of the 16th Nordic Conference of Computational Linguistics (NODALIDA-2007), University of Tartu, Tartu, May 24--26, 2007},
  editor    = {Joakim Nivre and Heiki-Jaan Kaalep and Kadri Muischnek and Mare Koit},
  publisher = {University of Tartu},
  address   = {Tartu},
  isbn      = {978-9985-4-0513-0},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-57543},
  pages     = {372--379},
  year      = {2007},
  abstract  = {This paper presents a thorough examination of the validity of three evaluation measures on parser output. We assess the performance of an unlexicalised probabilistic parser trained on two German treebanks with different annotation schemes and evaluate the parsing results using the PARSEVAL metric, the Leaf-Ancestor metric and a dependency-based evaluation. We reject the claim that the T{\"u}Ba-D/Z annotation scheme is more adequate than the TIGER scheme for PCFG parsing and show that PARSEVAL should not be used to compare parser performance for parsers trained on treebanks with different annotation schemes. An analysis of specific error types indicates that the dependency-based evaluation is most appropriate to reflect parse quality.},
  language  = {en}
}