@inproceedings{BeckerHanWerthmannetal.2024,
  author    = {Becker, Maria and Han, Kanyao and Werthmann, Antonina and Rezapour, Rezvaneh and Lee, Haejin and Diesner, Jana and Witt, Andreas},
  title     = {Detecting Impact Relevant Sections in Scientific Research},
  booktitle = {The 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  editor    = {Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro and Sakti, Sakriani and Xue, Nianwen},
  year      = {2024},
  pages     = {4744--4749},
  url       = {https://aclanthology.org/2024.lrec-main.424/},
  abstract  = {Impact assessment is an evolving area of research that aims at measuring and predicting the potential effects of projects or programs on a variety of stakeholders. While measuring the impact of scientific research is a vibrant subdomain of impact assessment, a recurring obstacle in this specific area is the lack of an efficient framework that facilitates labeling and analysis of lengthy reports. To address this issue, we propose, implement, and evaluate a framework for automatically assessing the impact of scientific research projects by identifying pertinent sections in research reports that indicate potential impact. We leverage a mixed-method approach that combines manual annotation with supervised machine learning to extract these passages from project reports. We experiment with different machine learning algorithms, including traditional statistical models as well as pre-trained transformer language models. Our results show that our proposed method achieves accuracy scores up to 0.81, and that our method is generalizable to scientific research from different domains and different languages.},
  subject   = {Forschung},
  language  = {en},
}