@article{Villemoes_Biswas_Purnhagen_Lehtonen_2020,
  title        = {Learning about perception of temporal fine structure by building audio codecs},
  volume       = {7},
  url          = {https://proceedings.isaar.eu/index.php/isaarproc/article/view/2019-17},
  abstractNote = {The goal of audio coding is to efficiently describe an auditory experience while enabling a faithful reconstruction for the listener. The subjective quality compared to the original is measured by established psychoacoustic tests (BS.1116, 2015; BS.1534, 2015), and the description cost is measured in number of bits. As it is much cheaper to describe coarse-scale signal properties than temporal fine structure (TFS), tools like noise fill, spectral extension, binaural cue coding, and machine learning have increased the performance of audio codecs far beyond the first generation based on masking principles (e.g., mp3). In this evolution, codec developers have acquired implicit knowledge of hearing, but it has become increasingly difficult to construct tools that predict subjective quality. For example, it is not yet known which aspects of the TFS are essential for preserving the listening impression. To explore these issues, we study models of auditory representations with the mindset of audio coding. Given a method to solve the inverse problem of creating a signal with a specified representation, evaluation by listening can immediately reveal strengths and weaknesses of a candidate model.},
  journal      = {Proceedings of the International Symposium on Auditory and Audiological Research},
  author       = {Villemoes, Lars and Biswas, Arijit and Purnhagen, Heiko and Lehtonen, Heidi-Maria},
  year         = {2020},
  month        = {Apr.},
  pages        = {141--148}
}