@inproceedings{BrunnerTuWeimeretal.2023,
  author    = {Annelen Brunner and Ngoc Duyen Tanja Tu and Lukas Weimer and Fotis Jannidis},
  title     = {To BERT or not to BERT – Comparing contextual embeddings in a deep learning architecture for the automatic recognition of four types of speech, thought and writing representation},
  booktitle = {Proceedings of the 5th Swiss Text Analytics Conference (SwissText) \& 16th Conference on Natural Language Processing (KONVENS)},
  editor    = {Sarah Ebling and Don Tuggener and Manuela H{\"u}rlimann and Mark Cieliebak and Martin Volk},
  publisher = {CEUR-WS},
  address   = {Aachen},
  issn      = {1613-0073},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-115617},
  year      = {2023},
  abstract  = {We present recognizers for four very different types of speech, thought and writing representation (STWR) for German texts. The implementation is based on deep learning with two different customized contextual embeddings, namely FLAIR embeddings and BERT embeddings. This paper gives an evaluation of our recognizers with a particular focus on the differences in performance we observed between those two embeddings. FLAIR performed best for direct STWR (F1=0.85), BERT for indirect (F1=0.76) and free indirect (F1=0.59) STWR. For reported STWR, the comparison was inconclusive, but BERT gave the best average results and the best individual model (F1=0.60). Our best recognizers, our customized language embeddings and most of our test and training data are freely available and can be found via www.redewiedergabe.de or at github.com/redewiedergabe.},
  language  = {en}
}