@inproceedings{WiegandRuppenhoferEder2021,
  author    = {Michael Wiegand and Josef Ruppenhofer and Elisabeth Eder},
  title     = {Implicitly abusive language -- What does it actually look like and why are we not getting there?},
  booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  editor    = {Kristina Toutanova and Anna Rumshisky and Luke Zettlemoyer and Dilek Hakkani-Tur and Iz Beltagy and Steven Bethard and Ryan Cotterell and Tanmoy Chakraborty and Yichao Zhou},
  publisher = {Association for Computational Linguistics},
  address   = {Stroudsburg, Pennsylvania},
  isbn      = {978-1-954085-46-6},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-104498},
  pages     = {576--587},
  year      = {2021},
  abstract  = {Abusive language detection is an emerging field in natural language processing which has recently received a large amount of attention. Still, the success of automatic detection is limited. In particular, the detection of implicitly abusive language, i.e., abusive language that is not conveyed by abusive words (e.g., dumbass or scum), does not work well. In this position paper, we explain why existing datasets make learning implicit abuse difficult and what needs to be changed in the design of such datasets. Arguing for a divide-and-conquer strategy, we present a list of subtypes of implicitly abusive language and formulate research tasks and questions for future research.},
  language  = {en}
}