The Centre for Speech Technology Research, The University of Edinburgh

Publications by Gustav Henter

ghenter.bib

@inproceedings{henter2014measuring,
  author = {Henter, Gustav Eje and Merritt, Thomas and Shannon, Matt and Mayo, Catherine and King, Simon},
  title = {Measuring the perceptual effects of modelling assumptions in speech synthesis using stimuli constructed from repeated natural speech},
  abstract = {Acoustic models used for statistical parametric speech synthesis typically incorporate many modelling assumptions. It is an open question to what extent these assumptions limit the naturalness of synthesised speech. To investigate this question, we recorded a speech corpus where each prompt was read aloud multiple times. By combining speech parameter trajectories extracted from different repetitions, we were able to quantify the perceptual effects of certain commonly used modelling assumptions. Subjective listening tests show that taking the source and filter parameters to be conditionally independent, or using diagonal covariance matrices, significantly limits the naturalness that can be achieved. Our experimental results also demonstrate the shortcomings of mean-based parameter generation.},
  month = {September},
  volume = {15},
  pages = {1504--1508},
  year = {2014},
  keywords = {speech synthesis, acoustic modelling, stream independence, diagonal covariance matrices, repeated speech},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/henter2014measuring.pdf},
  booktitle = {Proc. Interspeech}
}
@inproceedings{Aylett_Dall_Ghoshal_Henter_Merritt_Interspeech2014,
  author = {Aylett, Matthew and Dall, Rasmus and Ghoshal, Arnab and Henter, Gustav Eje and Merritt, Thomas},
  title = {A Flexible Front-End for {HTS}},
  booktitle = {Proc. Interspeech},
  month = {September},
  pages = {1283--1287},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/Aylett_Dall_Ghoshal_Henter_Merritt_Interspeech2014.pdf},
  abstract = {Parametric speech synthesis techniques depend on full context acoustic models generated by language front-ends, which analyse linguistic and phonetic structure. HTS, the leading parametric synthesis system, can use a number of different front-ends to generate full context models for synthesis and training. In this paper we explore the use of a new text processing front-end that has been added to the speech recognition toolkit Kaldi as part of an ongoing project to produce a new parametric speech synthesis system, Idlak. The use of XML specification files, a modular design, and modern coding and testing approaches, make the Idlak front-end ideal for adding, altering and experimenting with the contexts used in full context acoustic models. The Idlak front-end was evaluated against the standard Festival front-end in the HTS system. Results from the Idlak front-end compare well with the more mature Festival front-end (Idlak - 2.83 MOS vs Festival - 2.85 MOS), although a slight reduction in naturalness perceived by non-native English speakers can be attributed to Festival’s insertion of non-punctuated pauses.},
  categories = {speech synthesis, text processing, parametric synthesis, Kaldi, Idlak}
}
@inproceedings{wester:listeners:IS2015,
  author = {Wester, Mirjam and Valentini-Botinhao, Cassia and Henter, Gustav Eje},
  title = {Are we using enough listeners? {No! An empirically-supported critique of Interspeech 2014 TTS evaluations}},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  pages = {3476--3480},
  address = {Dresden},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/wester:listeners:IS2015.pdf},
  abstract = {Tallying the numbers of listeners that took part in subjective evaluations of synthetic speech at Interspeech 2014 showed that in more than 60% of papers conclusions are based on listening tests with less than 20 listeners. Our analysis of Blizzard 2013 data shows that for a MOS test measuring naturalness a stable level of significance is only reached when more than 30 listeners are used. In this paper, we set out a list of guidelines, i.e., a checklist for carrying out meaningful subjective evaluations. We further illustrate the importance of sentence coverage and number of listeners by presenting changes to rank order and number of significant pairs by re-analysing data from the Blizzard Challenge 2013.},
  categories = {Subjective evaluation, text-to-speech, MOS test}
}
@inproceedings{dall2016testing,
  author = {Dall, Rasmus and Brognaux, Sandrine and Richmond, Korin and Valentini-Botinhao, Cassia and Henter, Gustav Eje and Hirschberg, Julia and Yamagishi, Junichi},
  title = {Testing the consistency assumption: pronunciation variant forced alignment in read and spontaneous speech synthesis},
  abstract = {Forced alignment for speech synthesis traditionally aligns a phoneme sequence predetermined by the front-end text processing system. This sequence is not altered during alignment, i.e., it is forced, despite possibly being faulty. The consistency assumption is the assumption that these mistakes do not degrade models, as long as the mistakes are consistent across training and synthesis. We present evidence that in the alignment of both standard read prompts and spontaneous speech this phoneme sequence is often wrong, and that this is likely to have a negative impact on acoustic models. A lattice-based forced alignment system allowing for pronunciation variation is implemented, resulting in improved phoneme identity accuracy for both types of speech. A perceptual evaluation of HMM-based voices showed that spontaneous models trained on this improved alignment also improved standard synthesis, despite breaking the consistency assumption.},
  month = {March},
  pages = {5155--5159},
  year = {2016},
  keywords = {speech synthesis, TTS, forced alignment, HMM},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2016/dall2016testing.pdf},
  booktitle = {Proc. ICASSP}
}
@article{leijon2016bayesian,
  author = {Leijon, Arne and Henter, Gustav Eje and Dahlquist, Martin},
  title = {{B}ayesian Analysis of Phoneme Confusion Matrices},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7364191},
  abstract = {This paper presents a parametric Bayesian approach to the statistical analysis of phoneme confusion matrices measured for groups of individual listeners in one or more test conditions. Two different bias problems in conventional estimation of mutual information are analyzed and explained theoretically. Evaluations with synthetic datasets indicate that the proposed Bayesian method can give satisfactory estimates of mutual information and response probabilities, even for phoneme confusion tests using a very small number of test items for each phoneme category. The proposed method can reveal overall differences in performance between two test conditions with better power than conventional Wilcoxon significance tests or conventional confidence intervals. The method can also identify sets of confusion-matrix cells that are credibly different between two test conditions, with better power than a similar approximate frequentist method.},
  number = {3},
  month = {March},
  volume = {24},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year = {2016},
  keywords = {Speech recognition, parameter estimation, mutual information, Bayes methods},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/leijon2016bayesian.pdf},
  pages = {469--482},
  categories = {Speech recognition, parameter estimation, mutual information, Bayes methods}
}
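
The entry above describes a parametric Bayesian analysis of phoneme confusion matrices. As a rough illustration of the underlying idea only (the paper's hierarchical model over listeners and test conditions is considerably richer), the Python sketch below assumes an independent symmetric Dirichlet prior on each stimulus row and equally probable stimuli, and draws posterior samples of the stimulus-response mutual information; the function name and defaults are illustrative assumptions.

import numpy as np

def mi_posterior_samples(confusion_counts, alpha=1.0, n_samples=2000, seed=0):
    """Posterior samples of mutual information (bits) for a confusion matrix,
    under a simplified model: each stimulus row gets an independent symmetric
    Dirichlet(alpha) prior over response probabilities, and all stimuli are
    assumed equally probable. Illustrative only; not the paper's model."""
    rng = np.random.default_rng(seed)
    counts = np.asarray(confusion_counts, dtype=float)
    n_stim = counts.shape[0]
    p_stim = np.full(n_stim, 1.0 / n_stim)
    samples = np.empty(n_samples)
    for s in range(n_samples):
        # Draw one response-probability matrix from the row-wise Dirichlet posteriors
        p_resp = np.vstack([rng.dirichlet(row + alpha) for row in counts])
        joint = p_stim[:, None] * p_resp              # P(stimulus, response)
        p_resp_marg = joint.sum(axis=0)               # P(response)
        ratio = joint / (p_stim[:, None] * p_resp_marg[None, :])
        samples[s] = np.sum(joint * np.log2(ratio))   # I(S; R) in bits
    return samples

Summaries of these samples (for example, a credible interval) then quantify uncertainty about the mutual information, in the spirit of the analysis the abstract describes.
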
@inproceedings{wester2016evaluating,
  author = {Wester, Mirjam and Watts, Oliver and Henter, Gustav Eje},
  title = {Evaluating comprehension of natural and synthetic conversational speech},
  url = {http://www.isca-speech.org/archive/sp2016/pdfs_stamped/41.pdf},
  abstract = {Current speech synthesis methods typically operate on isolated sentences and lack convincing prosody when generating longer segments of speech. Similarly, prevailing TTS evaluation paradigms, such as intelligibility (transcription word error rate) or MOS, only score sentences in isolation, even though overall comprehension is arguably more important for speech-based communication. In an effort to develop more ecologically-relevant evaluation techniques that go beyond isolated sentences, we investigated comprehension of natural and synthetic speech dialogues. Specifically, we tested listener comprehension on long segments of spontaneous and engaging conversational speech (three 10-minute radio interviews of comedians). Interviews were reproduced either as natural speech, synthesised from carefully prepared transcripts, or synthesised using durations from forced-alignment against the natural speech, all in a balanced design. Comprehension was measured using multiple choice questions. A significant difference was measured between the comprehension/retention of natural speech (74\% correct responses) and synthetic speech with forced-aligned durations (61\% correct responses). However, no significant difference was observed between natural and regular synthetic speech (70\% correct responses). Effective evaluation of comprehension remains elusive.},
  address = {Boston, MA},
  month = {June},
  volume = {8},
  year = {2016},
  keywords = {evaluation, comprehension, conversational speech, statistical parametric speech synthesis},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/41.pdf},
  booktitle = {Proc. Speech Prosody},
  pages = {736--740},
  categories = {evaluation, comprehension, conversational speech, statistical parametric speech synthesis}
}
@inproceedings{henter2016robust,
  author = {Henter, Gustav Eje and Ronanki, Srikanth and Watts, Oliver and Wester, Mirjam and Wu, Zhizheng and King, Simon},
  title = {Robust {TTS} duration modelling using {DNN}s},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472655},
  abstract = {Accurate modelling and prediction of speech-sound durations is an important component in generating more natural synthetic speech. Deep neural networks (DNNs) offer a powerful modelling paradigm, and large, found corpora of natural and expressive speech are easy to acquire for training them. Unfortunately, found datasets are seldom subject to the quality-control that traditional synthesis methods expect. Common issues likely to affect duration modelling include transcription errors, reductions, filled pauses, and forced-alignment inaccuracies. To combat this, we propose to improve modelling and prediction of speech durations using methods from robust statistics, which are able to disregard ill-fitting points in the training material. We describe a robust fitting criterion based on the density power divergence (the beta-divergence) and a robust generation heuristic using mixture density networks (MDNs). Perceptual tests indicate that subjects prefer synthetic speech generated using robust models of duration over the baselines.},
  address = {Shanghai, China},
  month = {March},
  volume = {41},
  year = {2016},
  keywords = {Speech synthesis, duration modelling, robust statistics},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/henter2016robust.pdf},
  booktitle = {Proc. ICASSP},
  pages = {5130--5134},
  categories = {Speech synthesis, duration modelling, robust statistics}
}
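
The abstract above mentions a robust fitting criterion based on the density power divergence (the beta-divergence). As a self-contained illustration under simplifying assumptions (a plain Gaussian duration model rather than the paper's mixture density networks; the function name and default beta are made up for this sketch), the empirical loss can be written as follows.

import numpy as np

def beta_divergence_loss(durations, mu, sigma, beta=0.3):
    """Empirical density power divergence (beta-divergence) loss for fitting a
    Gaussian N(mu, sigma^2) to observed durations, following Basu et al. (1998).
    Compared with maximum likelihood (the beta -> 0 limit, up to constants),
    outlying observations are down-weighted, which is the source of robustness."""
    x = np.asarray(durations, dtype=float)
    var = sigma ** 2
    # Closed-form integral of the Gaussian density raised to the power (1 + beta)
    integral_term = (2.0 * np.pi * var) ** (-beta / 2.0) / np.sqrt(1.0 + beta)
    # Gaussian density at the observations, raised to the power beta
    dens = np.exp(-(x - mu) ** 2 / (2.0 * var)) / np.sqrt(2.0 * np.pi * var)
    data_term = (1.0 + 1.0 / beta) * np.mean(dens ** beta)
    return integral_term - data_term

Minimising such a loss over (mu, sigma) gives estimates that are less sensitive to ill-fitting training points than the usual Gaussian maximum-likelihood fit.
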
@inproceedings{watts2016hmms,
  author = {Watts, Oliver and Henter, Gustav Eje and Merritt, Thomas and Wu, Zhizheng and King, Simon},
  title = {From {HMM}s to {DNN}s: where do the improvements come from?},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472730},
  abstract = {Deep neural networks (DNNs) have recently been the focus of much text-to-speech research as a replacement for decision trees and hidden Markov models (HMMs) in statistical parametric synthesis systems. Performance improvements have been reported; however, the configuration of systems evaluated makes it impossible to judge how much of the improvement is due to the new machine learning methods, and how much is due to other novel aspects of the systems. Specifically, whereas the decision trees in HMM-based systems typically operate at the state-level, and separate trees are used to handle separate acoustic streams, most DNN-based systems are trained to make predictions simultaneously for all streams at the level of the acoustic frame. This paper isolates the influence of three factors (machine learning method; state vs. frame predictions; separate vs. combined stream predictions) by building a continuum of systems along which only a single factor is varied at a time. We find that replacing decision trees with DNNs and moving from state-level to frame-level predictions both significantly improve listeners' naturalness ratings of synthetic speech produced by the systems. No improvement is found to result from switching from separate-stream to combined-stream predictions.},
  address = {Shanghai, China},
  month = {March},
  volume = {41},
  year = {2016},
  keywords = {speech synthesis, hidden Markov model, decision tree, deep neural network},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/watts2016hmms.pdf},
  booktitle = {Proc. ICASSP},
  pages = {5505--5509},
  categories = {speech synthesis, hidden Markov model, decision tree, deep neural network}
}
@inproceedings{ronanki2016template,
  author = {Ronanki, Srikanth and Henter, Gustav Eje and Wu, Zhizheng and King, Simon},
  title = {A template-based approach for speech synthesis intonation generation using {LSTM}s},
  booktitle = {Proc. Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016template.pdf},
  abstract = {The absence of convincing intonation makes current parametric speech synthesis systems sound dull and lifeless, even when trained on expressive speech data. Typically, these systems use regression techniques to predict the fundamental frequency (F0) frame-by-frame. This approach leads to overly-smooth pitch contours and fails to construct an appropriate prosodic structure across the full utterance. In order to capture and reproduce larger-scale pitch patterns, this paper proposes a template-based approach for automatic F0 generation, where per-syllable pitch-contour templates (from a small, automatically learned set) are predicted by a recurrent neural network (RNN). The use of syllable templates mitigates the over-smoothing problem and is able to reproduce pitch patterns observed in the data. The use of an RNN, paired with connectionist temporal classification (CTC), enables the prediction of structure in the pitch contour spanning the entire utterance. This novel F0 prediction system is used alongside separate LSTMs for predicting phone durations and the other acoustic features, to construct a complete text-to-speech system. We report the results of objective and subjective tests on an expressive speech corpus of children's audiobooks, and include comparisons to a conventional baseline that predicts F0 directly at the frame level.},
  categories = {speech synthesis, intonation modelling, F0 templates, LSTM, CTC}
}
@inproceedings{Lorenzo17,
  author = {Lorenzo-Trueba, Jaime and {Valentini-Botinhao}, Cassia and Henter, Gustav Eje and Yamagishi, Junichi},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2017/532_Paper_1.pdf},
  booktitle = {Proc. Interspeech},
  year = {2017},
  abstract = {This paper analyzes a) how often listeners interpret the emotional content of an utterance incorrectly when listening to vocoded or natural speech in adverse conditions; b) which noise conditions cause the most misperceptions; and c) which group of listeners misinterpret emotions the most. The long-term goal is to construct new emotional speech synthesizers that adapt to the environment and to the listener. We performed a large-scale listening test where over 400 listeners between the ages of 21 and 72 assessed natural and vocoded acted emotional speech stimuli. The stimuli had been artificially degraded using a room impulse response recorded in a car and various in-car noise types recorded in a real car. Experimental results show that the recognition rates for emotions and perceived emotional strength degrade as signal-to-noise ratio decreases. Interestingly, misperceptions seem to be more pronounced for negative and low-arousal emotions such as calmness or anger, while positive emotions such as happiness appear to be more robust to noise. An ANOVA analysis of listener meta-data further revealed that gender and age also influenced results, with elderly male listeners most likely to incorrectly identify emotions.},
  title = {Misperceptions of the emotional content of natural and vocoded speech in a car}
}
@inproceedings{ronanki_slt2016,
  author = {Ronanki, Srikanth and Watts, Oliver and King, Simon and Henter, Gustav Eje},
  title = {{Median-Based Generation of Synthetic Speech Durations using a Non-Parametric Approach}},
  booktitle = {Proc. IEEE Workshop on Spoken Language Technology (SLT)},
  month = {December},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016mediandur.pdf},
  abstract = {This paper proposes a new approach to duration modelling for statistical parametric speech synthesis in which a recurrent statistical model is trained to output a phone transition probability at each timestep (acoustic frame). Unlike conventional approaches to duration modelling – which assume that duration distributions have a particular form (e.g., a Gaussian) and use the mean of that distribution for synthesis – our approach can in principle model any distribution supported on the non-negative integers. Generation from this model can be performed in many ways; here we consider output generation based on the median predicted duration. The median is more typical (more probable) than the conventional mean duration, is robust to training-data irregularities, and enables incremental generation. Furthermore, a frame-level approach to duration prediction is consistent with a longer-term goal of modelling durations and acoustic features together. Results indicate that the proposed method is competitive with baseline approaches in approximating the median duration of held-out natural speech.},
  categories = {text-to-speech, speech synthesis, duration modelling, non-parametric models, LSTMs}
}
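
The median-based generation step described in the abstract admits a very short illustration: if the recurrent model outputs, at every frame, the probability that the current phone ends at that frame (given that it has lasted this long), then the median duration is the first frame at which the cumulative probability of the phone having ended reaches 0.5. The helper below is a hypothetical sketch of that read-out, not the paper's implementation.

def median_duration(transition_probs):
    """Median phone duration (in frames) from per-frame transition probabilities.
    transition_probs[t] is the assumed probability that the phone ends at frame
    t + 1, given that it has survived all earlier frames."""
    survival = 1.0                        # P(duration > t), starting at t = 0
    for t, p in enumerate(transition_probs, start=1):
        survival *= (1.0 - p)             # update P(duration > t)
        if 1.0 - survival >= 0.5:         # P(duration <= t) has reached one half
            return t
    return len(transition_probs)          # fall back to the horizon if never reached

Unlike the mean, this read-out needs no assumption about the parametric form of the duration distribution, which matches the non-parametric motivation in the abstract.
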
@inproceedings{yoshimura2016hierarchical,
  author = {Yoshimura, Takenori and Henter, Gustav Eje and Watts, Oliver and Wester, Mirjam and Yamagishi, Junichi and Tokuda, Keiichi},
  publisher = {International Speech Communication Association},
  doi = {10.21437/Interspeech.2016-847},
  title = {A Hierarchical Predictor of Synthetic Speech Naturalness Using Neural Networks},
  abstract = {A problem when developing and tuning speech synthesis systems is that there is no well-established method of automatically rating the quality of the synthetic speech. This research attempts to obtain a new automated measure which is trained on the result of large-scale subjective evaluations employing many human listeners, i.e., the Blizzard Challenge. To exploit the data, we experiment with linear regression, feed-forward and convolutional neural network models, and combinations of them to regress from synthetic speech to the perceptual scores obtained from listeners. The biggest improvements were seen when combining stimulus- and system-level predictions.},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/0847.PDF},
  booktitle = {Proc. Interspeech},
  pages = {342--346}
}