The Centre for Speech Technology Research, The University of Edinburgh

Publications by Gustav Henter

ghenter.bib

@inproceedings{henter2014measuring,
  author = {Henter, Gustav Eje and Merritt, Thomas and Shannon, Matt and Mayo, Catherine and King, Simon},
  title = {Measuring the perceptual effects of modelling assumptions in speech synthesis using stimuli constructed from repeated natural speech},
  abstract = {Acoustic models used for statistical parametric speech synthesis typically incorporate many modelling assumptions. It is an open question to what extent these assumptions limit the naturalness of synthesised speech. To investigate this question, we recorded a speech corpus where each prompt was read aloud multiple times. By combining speech parameter trajectories extracted from different repetitions, we were able to quantify the perceptual effects of certain commonly used modelling assumptions. Subjective listening tests show that taking the source and filter parameters to be conditionally independent, or using diagonal covariance matrices, significantly limits the naturalness that can be achieved. Our experimental results also demonstrate the shortcomings of mean-based parameter generation.},
  month = {September},
  volume = {15},
  year = {2014},
  keywords = {speech synthesis, acoustic modelling, stream independence, diagonal covariance matrices, repeated speech},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/henter2014measuring.pdf},
  booktitle = {Proc. Interspeech},
  pages = {1504--1508}
}
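
The stimulus construction described in the abstract above can be illustrated with a short, hedged sketch: to probe the assumption that source and filter parameter streams are conditionally independent, splice the F0 trajectory from one natural repetition of a prompt together with the spectral envelope from another repetition and resynthesise. The file names, the crude frame truncation, and the use of the pyworld vocoder are illustrative assumptions, not the pipeline used in the paper.

# Hedged sketch (not the paper's exact pipeline): combine the source (F0)
# stream of one natural repetition with the filter (spectral envelope)
# stream of another repetition of the same prompt, then resynthesise.
# Assumes mono recordings; file names and the pyworld vocoder are
# illustrative choices.
import numpy as np
import soundfile as sf
import pyworld as pw

def analyse(path):
    x, fs = sf.read(path)
    x = np.ascontiguousarray(x, dtype=np.float64)
    f0, t = pw.harvest(x, fs)          # source: fundamental frequency
    sp = pw.cheaptrick(x, f0, t, fs)   # filter: spectral envelope
    ap = pw.d4c(x, f0, t, fs)          # aperiodicity
    return fs, f0, sp, ap

fs, f0_a, sp_a, ap_a = analyse("prompt01_rep1.wav")
_,  f0_b, sp_b, ap_b = analyse("prompt01_rep2.wav")

# Crude alignment: truncate to the shorter repetition. A real experiment
# would time-align the repetitions (e.g. with DTW) before recombining.
n = min(len(f0_a), len(f0_b))
hybrid = pw.synthesize(f0_a[:n], sp_b[:n], ap_b[:n], fs)
sf.write("prompt01_hybrid.wav", hybrid, fs)

If listeners could not distinguish such hybrids from fully natural speech, treating the streams as independent would cost little naturalness; the listening tests in the paper indicate that it does, in fact, limit naturalness.
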
@inproceedings{Aylett_Dall_Ghoshal_Henter_Merritt_Interspeech2014,
  author = {Aylett, Matthew and Dall, Rasmus and Ghoshal, Arnab and Henter, Gustav Eje and Merritt, Thomas},
  title = {A Flexible Front-End for {HTS}},
  booktitle = {Proc. Interspeech},
  abstract = {Parametric speech synthesis techniques depend on full context acoustic models generated by language front-ends, which analyse linguistic and phonetic structure. HTS, the leading parametric synthesis system, can use a number of different front-ends to generate full context models for synthesis and training. In this paper we explore the use of a new text processing front-end that has been added to the speech recognition toolkit Kaldi as part of an ongoing project to produce a new parametric speech synthesis system, Idlak. The use of XML specification files, a modular design, and modern coding and testing approaches, make the Idlak front-end ideal for adding, altering and experimenting with the contexts used in full context acoustic models. The Idlak front-end was evaluated against the standard Festival front-end in the HTS system. Results from the Idlak front-end compare well with the more mature Festival front-end (Idlak - 2.83 MOS vs Festival - 2.85 MOS), although a slight reduction in naturalness perceived by non-native English speakers can be attributed to Festival’s insertion of non-punctuated pauses.},
  month = {September},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/Aylett_Dall_Ghoshal_Henter_Merritt_Interspeech2014.pdf},
  pages = {1283--1287},
  categories = {speech synthesis, text processing, parametric synthesis, Kaldi, Idlak}
}
@inproceedings{wester:listeners:IS2015,
  author = {Wester, Mirjam and Valentini-Botinhao, Cassia and Henter, Gustav Eje},
  title = {Are we using enough listeners? {No! An empirically-supported critique of Interspeech 2014 TTS evaluations}},
  booktitle = {Proc. Interspeech},
  address = {Dresden},
  abstract = {Tallying the numbers of listeners that took part in subjective evaluations of synthetic speech at Interspeech 2014 showed that in more than 60% of papers conclusions are based on listening tests with less than 20 listeners. Our analysis of Blizzard 2013 data shows that for a MOS test measuring naturalness a stable level of significance is only reached when more than 30 listeners are used. In this paper, we set out a list of guidelines, i.e., a checklist for carrying out meaningful subjective evaluations. We further illustrate the importance of sentence coverage and number of listeners by presenting changes to rank order and number of significant pairs by re-analysing data from the Blizzard Challenge 2013.},
  month = {September},
  year = {2015},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:listeners:IS2015.pdf},
  pages = {3476--3480},
  categories = {Subjective evaluation, text-to-speech, MOS test}
}
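
The stability claim in the abstract (significance only settles once more than 30 listeners are used) can be probed with a simple resampling analysis. A hedged sketch follows: the simulated ratings, the per-listener aggregation, and the Mann-Whitney U test are assumptions for illustration, not the paper's re-analysis of the Blizzard Challenge 2013 data.

# Hedged sketch: how often is one system pair judged significantly
# different as the number of listeners grows? Simulated MOS data and the
# Mann-Whitney U test are illustrative assumptions.
import numpy as np
from scipy.stats import mannwhitneyu

rng = np.random.default_rng(0)

# One mean naturalness rating per listener and system (simulated).
ratings_sys_a = rng.normal(3.6, 0.5, size=200)
ratings_sys_b = rng.normal(3.4, 0.5, size=200)

for k in (10, 20, 30, 40, 60):
    trials, hits = 1000, 0
    for _ in range(trials):
        idx = rng.choice(200, size=k, replace=False)
        _, p = mannwhitneyu(ratings_sys_a[idx], ratings_sys_b[idx],
                            alternative="two-sided")
        hits += p < 0.05
    print(f"{k:3d} listeners: significant in {hits / trials:.0%} of subsamples")

With too few listeners the verdict for a system pair flips between subsamples; the guidelines in the paper are aimed at avoiding exactly this instability.
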
@inproceedings{dall2016testing,
  author = {Dall, Rasmus and Brognaux, Sandrine and Richmond, Korin and Valentini-Botinhao, Cassia and Henter, Gustav Eje and Hirschberg, Julia and Yamagishi, Junichi},
  title = {Testing the consistency assumption: pronunciation variant forced alignment in read and spontaneous speech synthesis},
  abstract = {Forced alignment for speech synthesis traditionally aligns a phoneme sequence predetermined by the front-end text processing system. This sequence is not altered during alignment, i.e., it is forced, despite possibly being faulty. The consistency assumption is the assumption that these mistakes do not degrade models, as long as the mistakes are consistent across training and synthesis. We present evidence that in the alignment of both standard read prompts and spontaneous speech this phoneme sequence is often wrong, and that this is likely to have a negative impact on acoustic models. A lattice-based forced alignment system allowing for pronunciation variation is implemented, resulting in improved phoneme identity accuracy for both types of speech. A perceptual evaluation of HMM-based voices showed that spontaneous models trained on this improved alignment also improved standard synthesis, despite breaking the consistency assumption.},
  month = {March},
  year = {2016},
  keywords = {speech synthesis, TTS, forced alignment, HMM},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2016/dall2016testing.pdf},
  booktitle = {Proc. IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  pages = {5155--5159}
}
@article{leijon2016bayesian,
  author = {Leijon, Arne and Henter, Gustav Eje and Dahlquist, Martin},
  title = {{B}ayesian Analysis of Phoneme Confusion Matrices},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7364191},
  abstract = {This paper presents a parametric Bayesian approach to the statistical analysis of phoneme confusion matrices measured for groups of individual listeners in one or more test conditions. Two different bias problems in conventional estimation of mutual information are analyzed and explained theoretically. Evaluations with synthetic datasets indicate that the proposed Bayesian method can give satisfactory estimates of mutual information and response probabilities, even for phoneme confusion tests using a very small number of test items for each phoneme category. The proposed method can reveal overall differences in performance between two test conditions with better power than conventional Wilcoxon significance tests or conventional confidence intervals. The method can also identify sets of confusion-matrix cells that are credibly different between two test conditions, with better power than a similar approximate frequentist method.},
  number = {3},
  month = {March},
  volume = {24},
  pages = {469--482},
  year = {2016},
  keywords = {Speech recognition, parameter estimation, mutual information, Bayes methods},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/leijon2016bayesian.pdf},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
}
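
A minimal sketch of the kind of analysis the abstract describes, reduced to a single condition: place a Dirichlet prior on each row of the confusion matrix, draw response probabilities from the posterior, and propagate the draws into a posterior over mutual information. The flat Dirichlet(1) prior, the uniform stimulus distribution, and the toy counts are assumptions for illustration; the paper's parametric model and its treatment of estimation bias are more elaborate.

# Hedged sketch: Monte Carlo posterior over the mutual information of a
# phoneme confusion matrix, with a flat Dirichlet(1) prior per stimulus
# row and a uniform stimulus distribution (illustrative assumptions).
import numpy as np

rng = np.random.default_rng(0)

# counts[i, j] = times stimulus phoneme i was reported as phoneme j (toy data).
counts = np.array([[8., 1., 1.],
                   [2., 7., 1.],
                   [0., 2., 8.]])

def mutual_information(p_resp_given_stim, p_stim):
    joint = p_stim[:, None] * p_resp_given_stim
    p_resp = joint.sum(axis=0)
    return np.sum(joint * np.log2(joint / (p_stim[:, None] * p_resp[None, :])))

p_stim = np.full(counts.shape[0], 1.0 / counts.shape[0])
mi_draws = np.array([
    mutual_information(np.vstack([rng.dirichlet(row + 1.0) for row in counts]),
                       p_stim)
    for _ in range(5000)
])

print(f"posterior mean MI = {mi_draws.mean():.3f} bits, "
      f"95% interval = ({np.quantile(mi_draws, 0.025):.3f}, "
      f"{np.quantile(mi_draws, 0.975):.3f})")

Because a full posterior over mutual information is available, credible intervals and comparisons between test conditions follow directly, rather than relying on point estimates whose bias the paper analyses.
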
@inproceedings{wester2016evaluating,
  author = {Wester, Mirjam and Watts, Oliver and Henter, Gustav Eje},
  title = {Evaluating comprehension of natural and synthetic conversational speech},
  url = {http://www.isca-speech.org/archive/sp2016/pdfs_stamped/41.pdf},
  abstract = {Current speech synthesis methods typically operate on isolated sentences and lack convincing prosody when generating longer segments of speech. Similarly, prevailing TTS evaluation paradigms, such as intelligibility (transcription word error rate) or MOS, only score sentences in isolation, even though overall comprehension is arguably more important for speech-based communication. In an effort to develop more ecologically-relevant evaluation techniques that go beyond isolated sentences, we investigated comprehension of natural and synthetic speech dialogues. Specifically, we tested listener comprehension on long segments of spontaneous and engaging conversational speech (three 10-minute radio interviews of comedians). Interviews were reproduced either as natural speech, synthesised from carefully prepared transcripts, or synthesised using durations from forced-alignment against the natural speech, all in a balanced design. Comprehension was measured using multiple choice questions. A significant difference was measured between the comprehension/retention of natural speech (74\% correct responses) and synthetic speech with forced-aligned durations (61\% correct responses). However, no significant difference was observed between natural and regular synthetic speech (70\% correct responses). Effective evaluation of comprehension remains elusive.},
  year = {2016},
  month = {June},
  volume = {8},
  pages = {736--740},
  address = {Boston, MA},
  keywords = {evaluation, comprehension, conversational speech, statistical parametric speech synthesis},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/41.pdf},
  booktitle = {Proc. Speech Prosody},
}
@inproceedings{henter2016robust,
  author = {Henter, Gustav Eje and Ronanki, Srikanth and Watts, Oliver and Wester, Mirjam and Wu, Zhizheng and King, Simon},
  title = {Robust {TTS} duration modelling using {DNN}s},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472655},
  abstract = {Accurate modelling and prediction of speech-sound durations is an important component in generating more natural synthetic speech. Deep neural networks (DNNs) offer a powerful modelling paradigm, and large, found corpora of natural and expressive speech are easy to acquire for training them. Unfortunately, found datasets are seldom subject to the quality-control that traditional synthesis methods expect. Common issues likely to affect duration modelling include transcription errors, reductions, filled pauses, and forced-alignment inaccuracies. To combat this, we propose to improve modelling and prediction of speech durations using methods from robust statistics, which are able to disregard ill-fitting points in the training material. We describe a robust fitting criterion based on the density power divergence (the beta-divergence) and a robust generation heuristic using mixture density networks (MDNs). Perceptual tests indicate that subjects prefer synthetic speech generated using robust models of duration over the baselines.},
  year = {2016},
  month = {March},
  volume = {41},
  pages = {5130--5134},
  address = {Shanghai, China},
  keywords = {Speech synthesis, duration modelling, robust statistics},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/henter2016robust.pdf},
  booktitle = {Proc. ICASSP},
}
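
The robust fitting criterion named in the abstract, the density power divergence (beta-divergence), can be sketched in its simplest setting: fitting a single univariate Gaussian duration model to data containing gross outliers. The closed-form integral term below is specific to the Gaussian, and the simulated data, the value beta = 0.3, and the Nelder-Mead optimiser are illustrative assumptions; the paper applies the criterion to mixture density networks rather than a lone Gaussian.

# Hedged sketch of minimum density power divergence (beta-divergence)
# estimation for a univariate Gaussian, a toy stand-in for the robust
# duration models in the paper. Objective, up to additive constants:
#   H(theta) = Int p_theta(x)^(1+beta) dx
#              - (1 + 1/beta) * mean_i p_theta(x_i)^beta
# For N(mu, sigma^2): Int p^(1+beta) dx = (2*pi*sigma^2)^(-beta/2) / sqrt(1+beta).
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm

rng = np.random.default_rng(0)
# Phone durations in frames: mostly well-behaved, plus outliers such as
# might arise from alignment errors or filled pauses (simulated data).
data = np.concatenate([rng.normal(12.0, 3.0, 950), rng.normal(60.0, 10.0, 50)])

def dpd_objective(params, x, beta):
    mu, log_sigma = params
    sigma = np.exp(log_sigma)
    integral_term = (2 * np.pi * sigma**2) ** (-beta / 2) / np.sqrt(1 + beta)
    data_term = (1 + 1 / beta) * np.mean(norm.pdf(x, mu, sigma) ** beta)
    return integral_term - data_term

beta = 0.3  # beta -> 0 recovers maximum likelihood; larger beta is more robust
res = minimize(dpd_objective, x0=[np.mean(data), np.log(np.std(data))],
               args=(data, beta), method="Nelder-Mead")
mu_hat, sigma_hat = res.x[0], np.exp(res.x[1])
print(f"ML fit:     mu = {np.mean(data):5.1f}, sigma = {np.std(data):5.1f}")
print(f"Robust fit: mu = {mu_hat:5.1f}, sigma = {sigma_hat:5.1f}")

The robust estimate essentially disregards the ill-fitting points, which is the behaviour the paper exploits when training duration models on imperfect found data.
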
@inproceedings{watts2016hmms,
  author = {Watts, Oliver and Henter, Gustav Eje and Merritt, Thomas and Wu, Zhizheng and King, Simon},
  title = {From {HMM}s to {DNN}s: where do the improvements come from?},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472730},
  abstract = {Deep neural networks (DNNs) have recently been the focus of much text-to-speech research as a replacement for decision trees and hidden Markov models (HMMs) in statistical parametric synthesis systems. Performance improvements have been reported; however, the configuration of systems evaluated makes it impossible to judge how much of the improvement is due to the new machine learning methods, and how much is due to other novel aspects of the systems. Specifically, whereas the decision trees in HMM-based systems typically operate at the state-level, and separate trees are used to handle separate acoustic streams, most DNN-based systems are trained to make predictions simultaneously for all streams at the level of the acoustic frame. This paper isolates the influence of three factors (machine learning method; state vs. frame predictions; separate vs. combined stream predictions) by building a continuum of systems along which only a single factor is varied at a time. We find that replacing decision trees with DNNs and moving from state-level to frame-level predictions both significantly improve listeners' naturalness ratings of synthetic speech produced by the systems. No improvement is found to result from switching from separate-stream to combined-stream predictions.},
  year = {2016},
  month = {March},
  volume = {41},
  pages = {5505--5509},
  address = {Shanghai, China},
  keywords = {speech synthesis, hidden Markov model, decision tree, deep neural network},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/watts2016hmms.pdf},
  booktitle = {Proc. ICASSP},
}
@inproceedings{ronanki2016template,
  author = {Ronanki, Srikanth and Henter, Gustav Eje and Wu, Zhizheng and King, Simon},
  title = {A template-based approach for speech synthesis intonation generation using {LSTM}s},
  booktitle = {Proc. Interspeech},
  address = {San Francisco, USA},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016template.pdf},
  abstract = {The absence of convincing intonation makes current parametric speech synthesis systems sound dull and lifeless, even when trained on expressive speech data. Typically, these systems use regression techniques to predict the fundamental frequency (F0) frame-by-frame. This approach leads to overly-smooth pitch contours and fails to construct an appropriate prosodic structure across the full utterance. In order to capture and reproduce larger-scale pitch patterns, this paper proposes a template-based approach for automatic F0 generation, where per-syllable pitch-contour templates (from a small, automatically learned set) are predicted by a recurrent neural network (RNN). The use of syllable templates mitigates the over-smoothing problem and is able to reproduce pitch patterns observed in the data. The use of an RNN, paired with connectionist temporal classification (CTC), enables the prediction of structure in the pitch contour spanning the entire utterance. This novel F0 prediction system is used alongside separate LSTMs for predicting phone durations and the other acoustic features, to construct a complete text-to-speech system. We report the results of objective and subjective tests on an expressive speech corpus of children's audiobooks, and include comparisons to a conventional baseline that predicts F0 directly at the frame level.},
  categories = {speech synthesis, intonation modelling, F0 templates, LSTM, CTC}
}
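
The "small, automatically learned set" of per-syllable pitch-contour templates in the abstract can be obtained, in its simplest form, by clustering length-normalised syllable F0 contours. A hedged sketch follows: the fixed-length resampling, the mean removal, the dummy contours, and the choice of eight k-means clusters are assumptions for illustration; the paper additionally predicts template identities with an LSTM-RNN trained under a CTC criterion, which is not reproduced here.

# Hedged sketch: learn per-syllable pitch-contour templates by k-means
# clustering of length-normalised log-F0 contours (illustrative only; the
# template inventory would feed the RNN + CTC predictor described above).
import numpy as np
from sklearn.cluster import KMeans

def normalise(contour, n_points=10):
    """Resample a voiced-syllable F0 contour to a fixed length and remove
    its mean, so templates capture shape rather than register."""
    log_f0 = np.log(np.asarray(contour, dtype=float))
    resampled = np.interp(np.linspace(0, 1, n_points),
                          np.linspace(0, 1, len(log_f0)), log_f0)
    return resampled - resampled.mean()

# Per-syllable F0 trajectories in Hz (dummy data standing in for contours
# extracted from an expressive audiobook corpus).
rng = np.random.default_rng(0)
syllable_f0s = [120 + 30 * np.sin(np.linspace(0, np.pi, n)) + rng.normal(0, 2, n)
                for n in rng.integers(8, 30, size=200)]

features = np.vstack([normalise(c) for c in syllable_f0s])
kmeans = KMeans(n_clusters=8, n_init=10, random_state=0).fit(features)

templates = kmeans.cluster_centers_  # the learned contour templates (8 x 10)
labels = kmeans.labels_              # template identity per syllable, i.e. the
                                     # prediction targets for the sequence model
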