@inproceedings{WiegandEderRuppenhofer2022,
  author    = {Wiegand, Michael and Eder, Elisabeth and Ruppenhofer, Josef},
  title     = {Identifying Implicitly Abusive Remarks about Identity Groups Using a Linguistically Informed Approach},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  editor    = {Carpuat, Marine and de Marneffe, Marie-Catherine and Meza Ruiz, Ivan Vladimir},
  publisher = {Association for Computational Linguistics},
  address   = {Stroudsburg},
  eventdate = {2022-07-10/2022-07-15},
  pages     = {5600--5612},
  year      = {2022},
  isbn      = {978-1-955917-71-1},
  doi       = {10.18653/v1/2022.naacl-main.410},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-112614},
  language  = {en},
  abstract  = {We address the task of distinguishing implicitly abusive sentences on identity groups (“Muslims contaminate our planet”) from other group-related negative polar sentences (“Muslims despise terrorism”). Implicitly abusive language are utterances not conveyed by abusive words (e.g. “bimbo” or “scum”). So far, the detection of such utterances could not be properly addressed since existing datasets displaying a high degree of implicit abuse are fairly biased. Following the recently-proposed strategy to solve implicit abuse by separately addressing its different subtypes, we present a new focused and less biased dataset that consists of the subtype of atomic negative sentences about identity groups. For that task, we model components that each address one facet of such implicit abuse, i.e. depiction as perpetrators, aspectual classification and non-conformist views. The approach generalizes across different identity groups and languages.},
}