@article{ArnoldTomaschekSeringetal.2017,
  author   = {Denis Arnold and Fabian Tomaschek and Konstantin Sering and Florence Lopez and R. Harald Baayen},
  title    = {Words from spontaneous conversational speech can be recognized with human-like accuracy by an error-driven learning algorithm that discriminates between meanings straight from smart acoustic features, bypassing the phoneme as recognition unit},
  journal  = {PLoS ONE},
  volume   = {12},
  number   = {4},
  editor   = {Hedderik van Rijn},
  doi      = {10.1371/journal.pone.0174623},
  url      = {https://nbn-resolving.org/urn:nbn:de:bsz:mh39-60616},
  pages    = {1--16},
  year     = {2017},
  abstract = {Sound units play a pivotal role in cognitive models of auditory comprehension. The general consensus is that during perception listeners break down speech into auditory words and subsequently phones. Indeed, cognitive speech recognition is typically taken to be computationally intractable without phones. Here we present a computational model trained on 20 hours of conversational speech that recognizes word meanings within the range of human performance (model 25\%, native speakers 20--44\%), without making use of phone or word form representations. Our model also successfully generates predictions about the speed and accuracy of human auditory comprehension. At the heart of the model is a `wide' yet sparse two-layer artificial neural network with some hundred thousand input units representing summaries of changes in acoustic frequency bands, and proxies for lexical meanings as output units. We believe that our model holds promise for resolving longstanding theoretical problems surrounding the notion of the phone in linguistic theory.},
  language = {en}
}
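
As context for the abstract's "error-driven learning algorithm", the sketch below illustrates a Rescorla-Wagner-style weight update of the kind used in Baayen and colleagues' naive discriminative learning: sparse binary acoustic cues as input units, lexical-meaning proxies as output units. This is a minimal illustrative sketch, not the paper's implementation; the function name, toy dimensions, learning rate, and random data are assumptions introduced here. (BibTeX ignores text outside entries, so this note does not affect compilation.)

    import numpy as np

    def rescorla_wagner_update(weights, cue_idx, outcome_idx, n_outcomes,
                               eta=0.01, lam=1.0):
        """One learning event: strengthen cue->outcome links for the
        outcome that occurred, weaken them for all other outcomes."""
        for o in range(n_outcomes):
            target = lam if o == outcome_idx else 0.0
            # Prediction for outcome o = summed weights of the active cues.
            prediction = weights[cue_idx, o].sum()
            # All active cues share the same prediction error (error-driven).
            weights[cue_idx, o] += eta * (target - prediction)
        return weights

    # Toy usage: a wide, sparse input layer at miniature scale.
    rng = np.random.default_rng(0)
    n_cues, n_outcomes = 1000, 5
    W = np.zeros((n_cues, n_outcomes))
    for _ in range(200):                                    # learning events
        outcome = rng.integers(n_outcomes)                  # meaning that occurred
        cues = rng.choice(n_cues, size=20, replace=False)   # sparse active cues
        rescorla_wagner_update(W, cues, outcome, n_outcomes)

The design point the abstract stresses is that the network maps acoustic cues to meanings directly, with no intermediate phone or word-form layer; in the sketch this corresponds to the single weight matrix W linking cues to outcomes.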