@inproceedings{WiegandLeidnerKlakow2019,
  author    = {Michael Wiegand and Jochen L. Leidner and Dietrich Klakow},
  title     = {Cost-Sensitive Learning in Answer Extraction},
  booktitle = {Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08), May 28--30, 2008, Marrakech, Morocco},
  editor    = {Nicoletta Calzolari and Khalid Choukri and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis and Daniel Tapias},
  publisher = {European Language Resources Association},
  address   = {Paris},
  isbn      = {2-9517408-4-0},
  url       = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-85373},
  pages     = {711--714},
  year      = {2008},
  abstract  = {One problem of data-driven answer extraction in open-domain factoid question answering is that the class distribution of labeled training data is fairly imbalanced. In an ordinary training set, there are far more incorrect answers than correct answers. The class imbalance is, thus, inherent to the classification task. It has a deteriorating effect on the performance of classifiers trained by standard machine learning algorithms. They usually have a heavy bias towards the majority class, i.e. the class which occurs most often in the training set. In this paper, we propose a method to tackle class imbalance by applying some form of cost-sensitive learning, which is preferable to sampling. We present a simple but effective way of estimating the misclassification costs on the basis of the class distribution. This approach offers three benefits. Firstly, it maintains the distribution of the classes of the labeled training data. Secondly, this form of meta-learning can be applied to a wide range of common learning algorithms. Thirdly, this approach can be easily implemented with the help of state-of-the-art machine learning software.},
  language  = {en}
}