2016.bib

@comment{{This file has been generated by bib2bib 1.92}}
@comment{{Command line: /home/korin/bibtex2html-1.92-LINUX/bib2bib -oc /home/korin/projects/publications/new_output/transitdata/2016-citations -ob /home/korin/projects/publications/new_output/transitdata/2016.bib -c 'year : "2016"' /home/korin/projects/publications/filtlists/full_publications_list.bib}}
@article{Tang2015,
  author = {Tang, Yan and Cooke, Martin and Valentini-Botinhao, Cassia},
  volume = {35},
  doi = {10.1016/j.csl.2015.06.002},
  title = {Evaluating the predictions of objective intelligibility metrics for modified and synthetic speech},
  journal = {Computer Speech \& Language},
  issn = {0885-2308},
  pages = {73--92},
  year = {2016},
  abstract = {Several modification algorithms that alter natural or synthetic speech with the goal of improving intelligibility in noise have been proposed recently. A key requirement of many modification techniques is the ability to predict intelligibility, both offline during algorithm development, and online, in order to determine the optimal modification for the current noise context. While existing objective intelligibility metrics (OIMs) have good predictive power for unmodified natural speech in stationary and fluctuating noise, little is known about their effectiveness for other forms of speech. The current study evaluated how well seven OIMs predict listener responses in three large datasets of modified and synthetic speech which together represent 396 combinations of speech modification, masker type and signal-to-noise ratio. The chief finding is a clear reduction in predictive power for most OIMs when faced with modified and synthetic speech. Modifications introducing durational changes are particularly harmful to intelligibility predictors. OIMs that measure masked audibility tend to over-estimate intelligibility in the presence of fluctuating maskers relative to stationary maskers, while OIMs that estimate the distortion caused by the masker to a clean speech prototype exhibit the reverse pattern.}
}
@article{stan-2016,
  author = {Stan, Adriana and Mamiya, Yoshitaka and Yamagishi, Junichi and Bell, Peter and Watts, Oliver and Clark, Rob and King, Simon},
  doi = {10.1016/j.csl.2015.06.006},
  title = {{ALISA}: An automatic lightly supervised speech segmentation and alignment tool},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230815000650},
  journal = {Computer Speech \& Language},
  issn = {0885-2308},
  abstract = {This paper describes the ALISA tool, which implements a lightly supervised method for sentence-level alignment of speech with imperfect transcripts. Its intended use is to enable the creation of new speech corpora from a multitude of resources in a language-independent fashion, thus avoiding the need to record or transcribe speech data. The method is designed so that it requires minimum user intervention and expert knowledge, and it is able to align data in languages which employ alphabetic scripts. It comprises a GMM-based voice activity detector and a highly constrained grapheme-based speech aligner. The method is evaluated objectively against a gold standard segmentation and transcription, as well as subjectively through building and testing speech synthesis systems from the retrieved data. Results show that on average, 70\% of the original data is correctly aligned, with a word error rate of less than 0.5\%. In one case, subjective listening tests show a statistically significant preference for voices built on the gold transcript, but this is small and in other tests, no statistically significant differences between the systems built from the fully supervised training data and the one which uses the proposed method are found.},
  volume = {35},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/stan-2016.pdf},
  pages = {116--133},
  categories = {Speech segmentation, speech and text alignment, grapheme acoustic models, lightly supervised system, imperfect transcripts}
}
@inproceedings{dall2016testing,
  author = {Dall, Rasmus and Brognaux, Sandrine and Richmond, Korin and Valentini-Botinhao, Cassia and Henter, Gustav Eje and Hirschberg, Julia and Yamagishi, Junichi},
  title = {Testing the consistency assumption: pronunciation variant forced alignment in read and spontaneous speech synthesis},
  abstract = {Forced alignment for speech synthesis traditionally aligns a phoneme sequence predetermined by the front-end text processing system. This sequence is not altered during alignment, i.e., it is forced, despite possibly being faulty. The consistency assumption is the assumption that these mistakes do not degrade models, as long as the mistakes are consistent across training and synthesis. We present evidence that in the alignment of both standard read prompts and spontaneous speech this phoneme sequence is often wrong, and that this is likely to have a negative impact on acoustic models. A lattice-based forced alignment system allowing for pronunciation variation is implemented, resulting in improved phoneme identity accuracy for both types of speech. A perceptual evaluation of HMM-based voices showed that spontaneous models trained on this improved alignment also improved standard synthesis, despite breaking the consistency assumption.},
  month = {March},
  pages = {5155--5159},
  year = {2016},
  keywords = {speech synthesis, TTS, forced alignment, HMM},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2016/dall2016testing.pdf},
  booktitle = {Proc. IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}
}
@inproceedings{hu2016initial,
  author = {Hu, Qiong and Yamagishi, Junichi and Richmond, Korin and Subramanian, Kartick and Stylianou, Yannis},
  title = {Initial investigation of speech synthesis based on complex-valued neural networks},
  abstract = {Although frequency analysis often leads us to a speech signal in the complex domain, the acoustic models we frequently use are designed for real-valued data. Phase is usually ignored or modelled separately from spectral amplitude. Here, we propose a complex-valued neural network (CVNN) for directly modelling the results of the frequency analysis in the complex domain (such as the complex amplitude). We also introduce a phase encoding technique to map real-valued data (e.g. cepstra or log amplitudes) into the complex domain so we can use the same CVNN processing seamlessly. In this paper, a fully complex-valued neural network, namely a neural network where all of the weight matrices, activation functions and learning algorithms are in the complex domain, is applied for speech synthesis. Results show its ability to model both complex-valued and real-valued data.},
  month = {March},
  pages = {5630--5634},
  year = {2016},
  keywords = {complex-valued neural network, speech synthesis, complex amplitude, phase modelling},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2016/hu2016initial.pdf},
  booktitle = {Proc. IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}
}
@inproceedings{richmond2016smooth,
  author = {Richmond, Korin and King, Simon},
  title = {Smooth Talking: Articulatory Join Costs for Unit Selection},
  abstract = {Join cost calculation has so far dealt exclusively with acoustic speech parameters, and a large number of distance metrics have previously been tested in conjunction with a wide variety of acoustic parameterisations. In contrast, we propose here to calculate distance in articulatory space. The motivation for this is simple: physical constraints mean a human talker's mouth cannot ``jump'' from one configuration to a different one, so smooth evolution of articulator positions would also seem desirable for a good candidate unit sequence. To test this, we built Festival Multisyn voices using a large articulatory-acoustic dataset. We first synthesised 460 TIMIT sentences and confirmed our articulatory join cost gives appreciably different unit sequences compared to the standard Multisyn acoustic join cost. A listening test (3 sets of 25 sentence pairs, 30 listeners) then showed our articulatory cost is preferred at a rate of 58\% compared to the standard Multisyn acoustic join cost.},
  month = {March},
  pages = {5150--5154},
  year = {2016},
  keywords = {speech synthesis, unit selection, electromagnetic articulography, join cost},
  pdf = {http://www.cstr.ed.ac.uk/downloads/publications/2016/richmond2016smooth.pdf},
  booktitle = {Proc. IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}
}
@inproceedings{Swietojanski_ICASSP2016,
  author = {Swietojanski, P. and Renals, S.},
  title = {SAT-LHUC: Speaker Adaptive Training for Learning Hidden Unit Contributions},
  booktitle = {Proc. IEEE ICASSP},
  address = {Shanghai, China},
  month = {March},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Swietojanski_ICASSP2016.pdf},
  abstract = {This paper extends learning hidden unit contributions (LHUC) unsupervised speaker adaptation with speaker adaptive training (SAT). Contrary to other SAT approaches, the proposed technique does not require speaker-dependent features, the generation of auxiliary generative models to estimate or extract speaker-dependent information, or any changes to the speaker-independent model structure. SAT-LHUC is directly integrated into the objective and jointly learns speaker-independent and speaker-dependent representations. We demonstrate that the SAT-LHUC technique can match feature-space regression transforms for matched narrow-band data and outperform them on wide-band data when the runtime distribution differs significantly from the training one. We have obtained 6.5\%, 10\% and 18.5\% relative word error rate reductions compared to speaker-independent models on Switchboard, AMI meetings and TED lectures, respectively. This corresponds to relative gains of 2\%, 4\% and 6\% compared with non-SAT LHUC adaptation. SAT-LHUC was also found to be complementary to SAT with feature-space maximum likelihood linear regression transforms.},
  categories = {SAT, Deep Neural Networks, LHUC}
}
@inproceedings{joachim_fainberg_improving_2016,
  author = {Fainberg, Joachim and Bell, Peter and Lincoln, Mike and Renals, Steve},
  title = {Improving Children's Speech Recognition through Out-of-Domain Data Augmentation},
  abstract = {Children’s speech poses challenges to speech recognition due to strong age-dependent anatomical variations and a lack of large, publicly-available corpora. In this paper we explore data augmentation for children’s speech recognition using stochastic feature mapping (SFM) to transform out-of-domain adult data for both GMM-based and DNN-based acoustic models. We performed experiments on the English PF-STAR corpus, augmenting using WSJCAM0 and ABI. Our experimental results indicate that a DNN acoustic model for children’s speech can make use of adult data, and that out-of-domain SFM is more accurate than in-domain SFM.},
  address = {San Francisco, USA},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/master.pdf},
  booktitle = {Proc. Interspeech},
  categories = {speech recognition, data augmentation, children’s speech}
}
@article{leijon2016bayesian,
  author = {Leijon, Arne and Henter, Gustav Eje and Dahlquist, Martin},
  title = {{B}ayesian Analysis of Phoneme Confusion Matrices},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7364191},
  abstract = {This paper presents a parametric Bayesian approach to the statistical analysis of phoneme confusion matrices measured for groups of individual listeners in one or more test conditions. Two different bias problems in conventional estimation of mutual information are analyzed and explained theoretically. Evaluations with synthetic datasets indicate that the proposed Bayesian method can give satisfactory estimates of mutual information and response probabilities, even for phoneme confusion tests using a very small number of test items for each phoneme category. The proposed method can reveal overall differences in performance between two test conditions with better power than conventional Wilcoxon significance tests or conventional confidence intervals. The method can also identify sets of confusion-matrix cells that are credibly different between two test conditions, with better power than a similar approximate frequentist method.},
  number = {3},
  month = {March},
  volume = {24},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year = {2016},
  keywords = {Speech recognition, parameter estimation, mutual information, Bayes methods},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/leijon2016bayesian.pdf},
  pages = {469--482},
  categories = {Speech recognition, parameter estimation, mutual information, Bayes methods}
}
@inproceedings{wester2016evaluating,
  author = {Wester, Mirjam and Watts, Oliver and Henter, Gustav Eje},
  title = {Evaluating comprehension of natural and synthetic conversational speech},
  url = {http://www.isca-speech.org/archive/sp2016/pdfs_stamped/41.pdf},
  abstract = {Current speech synthesis methods typically operate on isolated sentences and lack convincing prosody when generating longer segments of speech. Similarly, prevailing TTS evaluation paradigms, such as intelligibility (transcription word error rate) or MOS, only score sentences in isolation, even though overall comprehension is arguably more important for speech-based communication. In an effort to develop more ecologically-relevant evaluation techniques that go beyond isolated sentences, we investigated comprehension of natural and synthetic speech dialogues. Specifically, we tested listener comprehension on long segments of spontaneous and engaging conversational speech (three 10-minute radio interviews of comedians). Interviews were reproduced either as natural speech, synthesised from carefully prepared transcripts, or synthesised using durations from forced-alignment against the natural speech, all in a balanced design. Comprehension was measured using multiple choice questions. A significant difference was measured between the comprehension/retention of natural speech (74\% correct responses) and synthetic speech with forced-aligned durations (61\% correct responses). However, no significant difference was observed between natural and regular synthetic speech (70\% correct responses). Effective evaluation of comprehension remains elusive.},
  address = {Boston, MA},
  month = {June},
  volume = {8},
  year = {2016},
  keywords = {evaluation, comprehension, conversational speech, statistical parametric speech synthesis},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/41.pdf},
  booktitle = {Speech Prosody},
  pages = {736--740},
  categories = {evaluation, comprehension, conversational speech, statistical parametric speech synthesis}
}
@inproceedings{henter2016robust,
  author = {Henter, Gustav Eje and Ronanki, Srikanth and Watts, Oliver and Wester, Mirjam and Wu, Zhizheng and King, Simon},
  title = {Robust {TTS} duration modelling using {DNN}s},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472655},
  abstract = {Accurate modelling and prediction of speech-sound durations is an important component in generating more natural synthetic speech. Deep neural networks (DNNs) offer a powerful modelling paradigm, and large, found corpora of natural and expressive speech are easy to acquire for training them. Unfortunately, found datasets are seldom subject to the quality-control that traditional synthesis methods expect. Common issues likely to affect duration modelling include transcription errors, reductions, filled pauses, and forced-alignment inaccuracies. To combat this, we propose to improve modelling and prediction of speech durations using methods from robust statistics, which are able to disregard ill-fitting points in the training material. We describe a robust fitting criterion based on the density power divergence (the beta-divergence) and a robust generation heuristic using mixture density networks (MDNs). Perceptual tests indicate that subjects prefer synthetic speech generated using robust models of duration over the baselines.},
  address = {Shanghai, China},
  month = {March},
  volume = {41},
  year = {2016},
  keywords = {Speech synthesis, duration modelling, robust statistics},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/henter2016robust.pdf},
  booktitle = {Proc. ICASSP},
  pages = {5130--5134},
  categories = {Speech synthesis, duration modelling, robust statistics}
}
@inproceedings{watts2016hmms,
  author = {Watts, Oliver and Henter, Gustav Eje and Merritt, Thomas and Wu, Zhizheng and King, Simon},
  title = {From {HMM}s to {DNN}s: where do the improvements come from?},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472730},
  abstract = {Deep neural networks (DNNs) have recently been the focus of much text-to-speech research as a replacement for decision trees and hidden Markov models (HMMs) in statistical parametric synthesis systems. Performance improvements have been reported; however, the configuration of systems evaluated makes it impossible to judge how much of the improvement is due to the new machine learning methods, and how much is due to other novel aspects of the systems. Specifically, whereas the decision trees in HMM-based systems typically operate at the state-level, and separate trees are used to handle separate acoustic streams, most DNN-based systems are trained to make predictions simultaneously for all streams at the level of the acoustic frame. This paper isolates the influence of three factors (machine learning method; state vs. frame predictions; separate vs. combined stream predictions) by building a continuum of systems along which only a single factor is varied at a time. We find that replacing decision trees with DNNs and moving from state-level to frame-level predictions both significantly improve listeners' naturalness ratings of synthetic speech produced by the systems. No improvement is found to result from switching from separate-stream to combined-stream predictions.},
  address = {Shanghai, China},
  month = {March},
  volume = {41},
  year = {2016},
  keywords = {speech synthesis, hidden Markov model, decision tree, deep neural network},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/watts2016hmms.pdf},
  booktitle = {Proc. ICASSP},
  pages = {5505--5509},
  categories = {speech synthesis, hidden Markov model, decision tree, deep neural network}
}
@inproceedings{toda2016voice,
  author = {Toda, Tomoki and Chen, Ling-Hui and Saito, Daisuke and Villavicencio, Fernando and Wester, Mirjam and Wu, Zhizheng and Yamagishi, Junichi},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/toda2016voice.pdf},
  booktitle = {Proc. Interspeech},
  title = {The Voice Conversion Challenge 2016},
  abstract = {This paper describes the Voice Conversion Challenge 2016 devised by the authors to better understand different voice conversion (VC) techniques by comparing their performance on a common dataset. The task of the challenge was speaker conversion, i.e., to transform the voice identity of a source speaker into that of a target speaker while preserving the linguistic content. Using a common dataset consisting of 162 utterances for training and 54 utterances for evaluation from each of 5 source and 5 target speakers, 17 groups working in VC around the world developed their own VC systems for every combination of the source and target speakers, i.e., 25 systems in total, and generated voice samples converted by the developed systems. These samples were evaluated in terms of target speaker similarity and naturalness by 200 listeners in a controlled environment. This paper summarizes the design of the challenge, its result, and a future plan to share views about unsolved problems and challenges faced by the current VC techniques.},
  year = {2016}
}
@inproceedings{wester2016analysis,
  author = {Wester, Mirjam and Wu, Zhizheng and Yamagishi, Junichi},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/wester2016analysis.pdf},
  booktitle = {Proc. Interspeech},
  title = {Analysis of the Voice Conversion Challenge 2016 Evaluation Results},
  abstract = {The Voice Conversion Challenge 2016 is the first Voice Conversion Challenge in which different voice conversion systems and approaches using the same voice data were compared. This paper describes the design of the evaluation, presents the results, and provides statistical analyses of those results.},
  year = {2016}
}
@inproceedings{wester2016multidimensional,
  author = {Wester, Mirjam and Wu, Zhizheng and Yamagishi, Junichi},
  title = {Multidimensional scaling of systems in the Voice Conversion Challenge 2016},
  booktitle = {Proc. Speech Synthesis Workshop 9},
  year = {2016},
  address = {Sunnyvale, CA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/wester2016multidimensional.pdf},
  abstract = {This study investigates how listeners judge the similarity of voice converted voices using a talker discrimination task. The data used is from the Voice Conversion Challenge 2016. 17 participants from around the world took part in building voice converted voices from a shared data set of source and target speakers. This paper describes the evaluation of similarity for four of the source-target pairs (two intra-gender and two cross-gender) in more detail. Multidimensional scaling was performed to illustrate where each system was perceived to be in an acoustic space compared to the source and target speakers and to each other.}
}
@inproceedings{ali16_dialect_detection,
  author = {Ali, Ahmed and Dehak, Najim and Cardinal, Patrick and Khurana, Sameer and Yella, Sree Harsha and Glass, James and Bell, Peter and Renals, Steve},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/is2016-automatic-dialect-detection.pdf},
  booktitle = {Proc. Interspeech},
  title = {Automatic dialect detection in {A}rabic broadcast speech},
  abstract = {In this paper, we investigate different approaches for dialect identification in Arabic broadcast speech. These methods are based on phonetic and lexical features obtained from a speech recognition system, and bottleneck features using the i-vector framework. We studied both generative and discriminative classifiers, and we combined these features using a multi-class Support Vector Machine (SVM). We validated our results on an Arabic/English language identification task, with an accuracy of 100\%. We also evaluated these features in a binary classifier to discriminate between Modern Standard Arabic (MSA) and Dialectal Arabic, with an accuracy of 100\%. We further reported results using the proposed methods to discriminate between the five most widely used dialects of Arabic: namely Egyptian, Gulf, Levantine, North African, and MSA, with an accuracy of 59.2\%. We discuss dialect identification errors in the context of dialect code-switching between Dialectal Arabic and MSA, and compare the error pattern between manually labeled data, and the output from our classifier. All the data used in our experiments have been released to the public as a language identification corpus.},
  year = {2016}
}
@inproceedings{ronanki2016dnn,
  author = {Ronanki, Srikanth and Reddy, Siva and Bollepalli, Bajibabu and King, Simon},
  title = {{DNN-based Speech Synthesis for Indian Languages from ASCII text}},
  booktitle = {Proc. 9th ISCA Speech Synthesis Workshop (SSW9)},
  year = {2016},
  month = {September},
  address = {Sunnyvale, CA, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016ilsynth.pdf},
  abstract = {Text-to-Speech synthesis in Indian languages has seen a lot of progress over the decade, partly due to the annual Blizzard challenges. These systems assume the text to be written in Devanagari or Dravidian scripts, which are nearly phonemic orthographies. However, the most common form of computer interaction among Indians is transliterated text written in ASCII. Such text is generally noisy, with many variations in spelling for the same word. In this paper we evaluate three approaches to synthesize speech from such noisy ASCII text: a naive Uni-Grapheme approach, a Multi-Grapheme approach, and a supervised Grapheme-to-Phoneme (G2P) approach. These methods first convert the ASCII text to a phonetic script, and then train a Deep Neural Network to synthesize speech from it. We train and test our models on Blizzard Challenge datasets that were transliterated to ASCII using crowdsourcing. Our experiments on Hindi, Tamil and Telugu demonstrate that our models generate speech of competitive quality from ASCII text compared to speech synthesized from the native scripts. All the accompanying transliterated datasets are released for public access.},
  categories = {Indian Languages, Speech Synthesis, Deep Neural Networks, ASCII transliteration}
}
@inproceedings{ronanki2016template,
  author = {Ronanki, Srikanth and Henter, Gustav Eje and Wu, Zhizheng and King, Simon},
  title = {A template-based approach for speech synthesis intonation generation using {LSTM}s},
  booktitle = {Proc. Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016template.pdf},
  abstract = {The absence of convincing intonation makes current parametric speech synthesis systems sound dull and lifeless, even when trained on expressive speech data. Typically, these systems use regression techniques to predict the fundamental frequency (F0) frame-by-frame. This approach leads to overly-smooth pitch contours and fails to construct an appropriate prosodic structure across the full utterance. In order to capture and reproduce larger-scale pitch patterns, this paper proposes a template-based approach for automatic F0 generation, where per-syllable pitch-contour templates (from a small, automatically learned set) are predicted by a recurrent neural network (RNN). The use of syllable templates mitigates the over-smoothing problem and is able to reproduce pitch patterns observed in the data. The use of an RNN, paired with connectionist temporal classification (CTC), enables the prediction of structure in the pitch contour spanning the entire utterance. This novel F0 prediction system is used alongside separate LSTMs for predicting phone durations and the other acoustic features, to construct a complete text-to-speech system. We report the results of objective and subjective tests on an expressive speech corpus of children's audiobooks, and include comparisons to a conventional baseline that predicts F0 directly at the frame level.},
  categories = {speech synthesis, intonation modelling, F0 templates, LSTM, CTC}
}
@inproceedings{Dall2016f,
  author = {Dall, Rasmus and Hashimoto, Kei and Oura, Keiichiro and Nankaku, Yoshihiko and Tokuda, Keiichi},
  title = {{Redefining the Linguistic Context Feature Set for HMM and DNN TTS Through Position and Parsing}},
  booktitle = {Proc. Interspeech},
  year = {2016},
  address = {San Francisco, CA, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Dall_Hashimoto_Oura_Nankaku_Tokuda_Interspeech2016.pdf},
  abstract = {In this paper we present an investigation of a number of alternative linguistic feature context sets for HMM and DNN text-to-speech synthesis. The representation of positional values is explored through two alternatives to the standard set of absolute values, namely relational and categorical values. In a preference test the categorical representation was found to be preferred for both HMM and DNN synthesis. Subsequently, features based on probabilistic context-free grammar and dependency parsing are presented. These features represent the phrase-level relations between words in the sentences, and in a preference evaluation it was found that these features all improved upon the base set, with a combination of both parsing methods best overall. As the features primarily affected the F0 prediction, this illustrates the potential of syntactic structure to improve prosody in TTS.},
  categories = {Speech Synthesis, TTS, PCFG, dependency parse, parsing, HMM, DNN, linguistic features}
}
@inproceedings{Dall2016e,
  author = {Dall, Rasmus and Tomalin, Marcus and Wester, Mirjam},
  title = {{Synthesising Filled Pauses: Representation and Datamixing}},
  booktitle = {Proc. SSW9},
  year = {2016},
  address = {Cupertino, CA, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Dall_Tomalin_Wester_SSW2016.pdf},
  abstract = {Filled pauses occur frequently in spontaneous human speech, yet modern text-to-speech synthesis systems rarely model these disfluencies overtly, and consequently they do not output convincing synthetic filled pauses. This paper presents a text-to-speech system that is specifically designed to model these particular disfluencies more effectively. A preparatory investigation shows that a synthetic voice trained exclusively on spontaneous speech is perceived to be inferior in quality to a voice trained entirely on read speech, even though the latter does not handle filled pauses well. This motivates an investigation into the phonetic representation of filled pauses, which shows that, in a preference test, the use of a distinct phone for filled pauses is preferred over the standard /V/ phone and the alternative /@/ phone. In addition, we present a variety of data-mixing techniques to combine the strengths of standard synthesis systems trained on read speech corpora with the supplementary advantages offered by systems trained on spontaneous speech. In a MUSHRA-style test, it is found that the best overall quality is obtained by combining the two types of corpora using a source marking technique. Specifically, general speech is synthesised with a standard mark, while filled pauses are synthesised with a spontaneous mark, which has the added benefit of also producing filled pauses that are comparatively well synthesised.},
  categories = {TTS, Filled Pauses, HMM, Phonetic Representation, Speech Synthesis}
}
@inproceedings{Dall2016d,
  author = {Dall, Rasmus and Gonzalvo, Xavi},
  title = {{JNDSLAM: A SLAM extension for Speech Synthesis}},
  booktitle = {Proc. Speech Prosody},
  year = {2016},
  address = {Boston, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Dall_Gonzalvo_SpeechProsody2016.pdf},
  abstract = {Pitch movement is a large component of speech prosody, and despite being directly modelled in statistical parametric speech synthesis systems, very flat intonation contours are still produced. We present an open-source, fully data-driven approach to pitch contour stylisation suitable for speech synthesis based on the SLAM approach. Modifications are proposed based on the Just Noticeable Difference in pitch and tailored to the needs of speech synthesis for describing the movement of the pitch. In an anchored Mean Opinion Score (MOS) test using oracle labels the proposed method shows an improvement over standard synthesis. Long Short-Term Memory Neural Networks were then used to predict the contour labels, but initial experiments achieved low prediction rates. We conclude that using current linguistic features for pitch stylisation label mapping is not feasible unless additional features are added. Furthermore, an open-source implementation is released.},
  categories = {HMM, TTS, LSTM, prosody, pitch contour, speech synthesis}
}
@inproceedings{swietojanskiICASSP16,
  author = {Swietojanski, P. and Renals, S.},
  title = {{SAT-LHUC}: Speaker Adaptive Training for Learning Hidden Unit Contributions},
  booktitle = {Proc. IEEE Int. Conf. Acoustic, Speech Signal Processing (ICASSP)},
  abstract = {This paper extends learning hidden unit contributions (LHUC) unsupervised speaker adaptation with speaker adaptive training (SAT). Contrary to other SAT approaches, the proposed technique does not require speaker-dependent features, the generation of auxiliary generative models to estimate or extract speaker-dependent information, or any changes to the speaker-independent model structure. SAT-LHUC is directly integrated into the objective and jointly learns speaker-independent and speaker-dependent representations. We demonstrate that the SAT-LHUC technique can match feature-space regression transforms for matched narrow-band data and outperform them on wide-band data when the runtime distribution differs significantly from the training one. We have obtained 6.5\%, 10\% and 18.5\% relative word error rate reductions compared to speaker-independent models on Switchboard, AMI meetings and TED lectures, respectively. This corresponds to relative gains of 2\%, 4\% and 6\% compared with non-SAT LHUC adaptation. SAT-LHUC was also found to be complementary to SAT with feature-space maximum likelihood linear regression transforms.},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Swietojanski_ICASSP2016.pdf},
  pages = {5010--5014}
}
@article{swietojanski2016lhuc,
  author = {Swietojanski, P. and Li, J. and Renals, S.},
  doi = {10.1109/TASLP.2016.2560534},
  title = {Learning Hidden Unit Contributions for Unsupervised Acoustic Model Adaptation},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  issn = {2329-9290},
  number = {8},
  month = {August},
  volume = {24},
  pages = {1450--1463},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/swietojanski2016lhuc.pdf},
  abstract = {This work presents a broad study on the adaptation of neural network acoustic models by means of learning hidden unit contributions (LHUC) -- a method that linearly re-combines hidden units in a speaker- or environment-dependent manner using small amounts of unsupervised adaptation data. We also extend LHUC to a speaker adaptive training (SAT) framework that leads to a more adaptable DNN acoustic model, working both in a speaker-dependent and a speaker-independent manner, without the requirements to maintain auxiliary speaker-dependent feature extractors or to introduce significant speaker-dependent changes to the DNN structure. Through a series of experiments on four different speech recognition benchmarks (TED talks, Switchboard, AMI meetings, and Aurora4) comprising 270 test speakers, we show that LHUC in both its test-only and SAT variants results in consistent word error rate reductions ranging from 5\% to 23\% relative depending on the task and the degree of mismatch between training and test data. In addition, we have investigated the effect of the amount of adaptation data per speaker, the quality of unsupervised adaptation targets, the complementarity to other adaptation techniques, one-shot adaptation, and an extension to adapting DNNs trained in a sequence discriminative manner.}
}
@article{swietojanski2016diffp,
  author = {{Swietojanski}, P. and {Renals}, S.},
  doi = {10.1109/TASLP.2016.2584700},
  title = {{Differentiable Pooling for Unsupervised Acoustic Model Adaptation}},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  issn = {2329-9290},
  number = {10},
  month = {October},
  volume = {24},
  pages = {1773--1784},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/swietojanski2016diffp.pdf},
  abstract = {We present a deep neural network (DNN) acoustic model that includes parametrised and differentiable pooling operators. Unsupervised acoustic model adaptation is cast as the problem of updating the decision boundaries implemented by each pooling operator. In particular, we experiment with two types of pooling parametrisations: learned $L_p$-norm pooling and weighted Gaussian pooling, in which the weights of both operators are treated as speaker-dependent. We perform investigations using three different large vocabulary speech recognition corpora: AMI meetings, TED talks and Switchboard conversational telephone speech. We demonstrate that differentiable pooling operators provide a robust and relatively low-dimensional way to adapt acoustic models, with relative word error rates reductions ranging from 5--20\% with respect to unadapted systems, which themselves are better than the baseline fully-connected DNN-based acoustic models. We also investigate how the proposed techniques work under various adaptation conditions including the quality of adaptation data and complementarity to other feature- and model-space adaptation methods, as well as providing an analysis of the characteristics of each of the proposed approaches.}
}
@phdthesis{swietojanski2016phdthesis,
  author = {Swietojanski, P.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/swietojanski_phdthesis.pdf},
  school = {University of Edinburgh},
  title = {Learning Representations for Speech Recognition using Artificial Neural Networks},
  abstract = {Learning representations is a central challenge in machine learning. For speech recognition, we are interested in learning robust representations that are stable across different acoustic environments, recording equipment and irrelevant inter- and intra-speaker variabilities. This thesis is concerned with representation learning for acoustic model adaptation to speakers and environments, construction of acoustic models in low-resource settings, and learning representations from multiple acoustic channels. The investigations are primarily focused on the hybrid approach to acoustic modelling based on hidden Markov models and artificial neural networks (ANN). The first contribution concerns acoustic model adaptation. This comprises two new adaptation transforms operating in ANN parameter space. Both operate at the level of activation functions and treat a trained ANN acoustic model as a canonical set of fixed-basis functions, from which one can later derive variants tailored to the specific distribution present in adaptation data. The first technique, termed Learning Hidden Unit Contributions (LHUC), depends on learning distribution-dependent linear combination coefficients for hidden units. This technique is then extended to altering groups of hidden units with parametric and differentiable pooling operators. We found the proposed adaptation techniques possess many desirable properties: they are relatively low-dimensional, do not over-fit and can work in both a supervised and an unsupervised manner. For LHUC we also present extensions to speaker adaptive training and environment factorisation. On average, depending on the characteristics of the test set, 5-25\% relative word error rate (WERR) reductions are obtained in an unsupervised two-pass adaptation setting. The second contribution concerns building acoustic models in low-resource data scenarios. In particular, we are concerned with insufficient amounts of transcribed acoustic material for estimating acoustic models in the target language -- thus assuming resources like lexicons or texts to estimate language models are available. First, we propose an ANN with a structured output layer which models both context-dependent and context-independent speech units, with the context-independent predictions used at runtime to aid the prediction of context-dependent states. We also propose to perform multi-task adaptation with a structured output layer. We obtain consistent WERR reductions up to 6.4\% in low-resource speaker-independent acoustic modelling. Adapting those models in a multi-task manner with LHUC decreases WERRs by an additional 13.6\%, compared to 12.7\% for non-multi-task LHUC. We then demonstrate that one can build better acoustic models with unsupervised multi- and cross-lingual initialisation and find that pre-training is largely language-independent. Up to 14.4\% WERR reductions are observed, depending on the amount of the available transcribed acoustic data in the target language. The third contribution concerns building acoustic models from multi-channel acoustic data. For this purpose we investigate various ways of integrating and learning multi-channel representations. In particular, we investigate channel concatenation and the applicability of convolutional layers for this purpose. We propose a multi-channel convolutional layer with cross-channel pooling, which can be seen as a data-driven non-parametric auditory attention mechanism. We find that for unconstrained microphone arrays, our approach is able to match the performance of the comparable models trained on beamform-enhanced signals.},
  year = {2016}
}
@inproceedings{sgangireddy_interspeech16,
  author = {Gangireddy, Siva Reddy and Swietojanski, Pawel and Bell, Peter and Renals, Steve},
  title = {{Unsupervised adaptation of Recurrent Neural Network Language Models}},
  booktitle = {Proc. Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ispeech16.pdf},
  abstract = {Recurrent neural network language models (RNNLMs) have been shown to consistently improve Word Error Rates (WERs) of large vocabulary speech recognition systems employing n-gram LMs. In this paper we investigate supervised and unsupervised discriminative adaptation of RNNLMs in a broadcast transcription task to target domains defined by either genre or show. We have explored two approaches based on (1) scaling forward-propagated hidden activations (Learning Hidden Unit Contributions (LHUC) technique) and (2) direct fine-tuning of the parameters of the whole RNNLM. To investigate the effectiveness of the proposed methods we carry out experiments on multi-genre broadcast (MGB) data following the MGB-2015 challenge protocol. We observe small but significant improvements in WER compared to a strong unadapted RNNLM model.},
  categories = {RNNLM, LHUC, unsupervised adaptation, fine-tuning, MGB-Challenge}
}
@inproceedings{merritt2016hybrid,
  author = {Merritt, Thomas and Clark, Robert A J and Wu, Zhizheng and Yamagishi, Junichi and King, Simon},
  title = {Deep neural network-guided unit selection synthesis},
  booktitle = {Proc. ICASSP},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Merritt_ICASSP2016.pdf},
  abstract = {Vocoding of speech is a standard part of statistical parametric speech synthesis systems. It imposes an upper bound on the naturalness that can possibly be achieved. Hybrid systems using parametric models to guide the selection of natural speech units can combine the benefits of robust statistical models with the high level of naturalness of waveform concatenation. Existing hybrid systems use Hidden Markov Models (HMMs) as the statistical model. This paper demonstrates that the superiority of Deep Neural Network (DNN) acoustic models over HMMs in conventional statistical parametric speech synthesis also carries over to hybrid synthesis. We compare various DNN and HMM hybrid configurations, guiding the selection of waveform units in either the vocoder parameter domain, or in the domain of embeddings (bottleneck features).},
  categories = {speech synthesis, hybrid synthesis, deep neural networks, embedding, unit selection}
}
@inproceedings{cstr2016blizzard,
  author = {Merritt, Thomas and Ronanki, Srikanth and Wu, Zhizheng and Watts, Oliver},
  title = {The {CSTR} entry to the {Blizzard Challenge} 2016},
  booktitle = {Proc. Blizzard Challenge},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Cstr2016BlizzardEntry.pdf},
  abstract = {This paper describes the text-to-speech system entered by The Centre for Speech Technology Research into the 2016 Blizzard Challenge. This system is a hybrid synthesis system which uses output from a recurrent neural network to drive a unit selection synthesiser. The annual Blizzard Challenge conducts side-by-side testing of a number of speech synthesis systems trained on a common set of speech data. The task of the 2016 Blizzard Challenge is to train on expressively-read children’s storybooks, and to synthesise speech in the same domain. The Challenge therefore presents an opportunity to test the effectiveness of several techniques we have developed when applied to expressive speech data.},
  categories = {hybrid synthesis, statistical parametric speech synthesis, deep neural network, recurrent neural network, unit selection}
}
@inproceedings{farrus_paragraph-based_2016,
  author = {Farrus, Mireia and Lai, Catherine and Moore, Johanna D.},
  doi = {10.21437/SpeechProsody.2016-235},
  title = {Paragraph-based prosodic cues for speech synthesis applications},
  booktitle = {Proceedings of Speech Prosody 2016},
  address = {Boston, MA, USA},
  abstract = {Speech synthesis has improved in both expressiveness and voice quality in recent years. However, obtaining full expressiveness when dealing with large multi-sentential synthesized discourse is still a challenge, since speech synthesizers do not take into account the prosodic differences that have been observed in discourse units such as paragraphs. The current study validates and extends previous work by analyzing the prosody of paragraph units in a large and diverse corpus of TED Talks using automatically extracted F0, intensity and timing features. In addition, a series of classification experiments was performed in order to identify which features are consistently used to distinguish paragraph breaks. The results show significant differences in prosody related to paragraph position. Moreover, the classification experiments show that boundary features such as pause duration and differences in F0 and intensity levels are the most consistent cues in marking paragraph boundaries. This suggests that these features should be taken into account when generating spoken discourse in order to improve naturalness and expressiveness.},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/farrus2016para.pdf},
  pages = {1143--1147},
  categories = {discourse unit, prosodic cue, paragraph boundary, speech synthesis}
}
@inproceedings{lai_automatic_2016,
  author = {Lai, Catherine and Farrus, Mireia and Moore, Johanna},
  title = {Automatic {Paragraph} {Segmentation} with {Lexical} and {Prosodic} {Features}},
  booktitle = {Proceedings of {Interspeech} 2016},
  address = {San Francisco, CA, USA},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/laic2016para.pdf},
  abstract = {As long-form spoken documents become more ubiquitous in everyday life, so does the need for automatic discourse segmentation in spoken language processing tasks. Although previous work has focused on broad topic segmentation, detection of finer-grained discourse units, such as paragraphs, is highly desirable for presenting and analyzing spoken content. To better understand how different aspects of speech cue these subtle discourse transitions, we investigate automatic paragraph segmentation of TED talks. We build lexical and prosodic paragraph segmenters using Support Vector Machines, AdaBoost, and Long Short Term Memory (LSTM) recurrent neural networks. In general, we find that induced cue words and supra-sentential prosodic features outperform features based on topical coherence, syntactic form and complexity. However, our best performance is achieved by combining a wide range of individually weak lexical and prosodic features, with the sequence modelling LSTM generally outperforming the other classifiers by a large margin. Moreover, we find that models that allow lower level interactions between different feature types produce better results than treating lexical and prosodic contributions as separate, independent information sources.},
  categories = {prosody, discourse, segmentation, paragraph, coherence, spoken language processing}
}
@phdthesis{qiong-2016,
  author = {Hu, Qiong},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Thesis_Qiong_Hu.pdf},
  school = {University of Edinburgh},
  year = {2016},
  abstract = {This study focuses on improving the voice quality of statistical speech synthesis based on sinusoidal models. Although our study shows that, for copy synthesis, a sinusoidal model with complex amplitude can generate higher quality speech than a source-filter one, the component sinusoids are correlated with each other, and the number of parameters is high and varies from frame to frame. Therefore, a perceptually based dynamic sinusoidal model (PDM) is proposed for application to statistical speech synthesis. We then extensively discuss methods for using dynamic sinusoidal models for HMM-based statistical speech synthesis. Two parametrisation approaches are presented: INT and DIR. To further improve the voice quality of SPSS, we apply a deep neural network (DNN) model to the proposed vocoder and investigate ways to combine INT and DIR at the level of both DNN modelling and waveform generation. Finally, an alternative statistical model referred to as a complex-valued neural network (CVNN), which treats complex coefficients as a whole, is also proposed to model the complex amplitude explicitly.},
  title = {Statistical parametric speech synthesis based on sinusoidal models}
}
@inproceedings{ribeiro2016wavelet,
  author = {Ribeiro, Manuel Sam and Watts, Oliver and Yamagishi, Junichi and Clark, Robert A. J.},
  title = {Wavelet-based decomposition of f0 as a secondary task for {DNN-based} speech synthesis with multi-task learning},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year = {2016},
  month = {March},
  address = {Shanghai, China},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ribeiro-et-al-icassp16.pdf},
  abstract = {We investigate two wavelet-based decomposition strategies of the f0 signal and their usefulness as a secondary task for speech synthesis using multi-task deep neural networks (MTL-DNN). The first decomposition strategy uses a static set of scales for all utterances in the training data. We propose a second strategy, where the scale of the mother wavelet is dynamically adjusted to the rate of each utterance. This approach is able to capture f0 variations related to the syllable, word, clitic-group, and phrase units. This method also constrains the wavelet components to be within the frequency range that previous experiments have shown to be more natural. These two strategies are evaluated as a secondary task in multi-task deep neural networks (MTL-DNNs). Results indicate that on an expressive dataset there is a strong preference for the systems using multi-task learning when compared to the baseline system.},
  categories = {speech synthesis, f0 modelling, deep neural network, multi-task learning, continuous wavelet transform}
}
@inproceedings{goldman2016siwis,
  author = {Goldman, Jean-Philippe and Honnet, Pierre-Edouard and Clark, Rob and Garner, Philip N and Ivanova, Maria and Lazaridis, Alexandros and Liang, Hui and Macedo, Tiago and Pfister, Beat and Ribeiro, Manuel Sam and others},
  title = {{The SIWIS database: a multilingual speech database with acted emphasis}},
  booktitle = {Proceedings of Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, United States},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/1003.PDF},
  abstract = {We describe here a collection of speech data of bilingual and trilingual speakers of English, French, German and Italian. In the context of speech to speech translation (S2ST), this database is designed for several purposes and studies: training CLSA systems (cross-language speaker adaptation), conveying emphasis through S2ST systems, and evaluating TTS systems. More precisely, 36 speakers judged as accentless (22 bilingual and 14 trilingual speakers) were recorded for a set of 171 prompts in two or three languages, amounting to a total of 24 hours of speech. These sets of prompts include 100 sentences from news, 25 sentences from Europarl, the same 25 sentences with one acted emphasised word, 20 semantically unpredictable sentences, and finally a 240-word long text. All in all, it yielded 64 bilingual session pairs of the six possible combinations of the four languages. The database is freely available for non-commercial use and scientific research purposes.},
  categories = {speech-to-speech translation, speech corpus, bilingual speakers, emphasis}
}
@inproceedings{ribeiro2016syllable,
  author = {Ribeiro, Manuel Sam and Watts, Oliver and Yamagishi, Junichi},
  title = {Syllable-level representations of suprasegmental features for {DNN-based} text-to-speech synthesis},
  booktitle = {Proceedings of Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, United States},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/1034.PDF},
  abstract = {A top-down hierarchical system based on deep neural networks is investigated for the modeling of prosody in speech synthesis. Suprasegmental features are processed separately from segmental features, and a compact distributed representation of high-level units is learned at the syllable level. The suprasegmental representation is then integrated into a frame-level network. Objective measures show that balancing segmental and suprasegmental features can be useful for the frame-level network. Additional features incorporated into the hierarchical system are then tested. At the syllable level, a bag-of-phones representation is proposed and, at the word level, embeddings learned from text sources are used. It is shown that the hierarchical system is able to leverage new features at higher levels more efficiently than a system which exploits them directly at the frame level. A perceptual evaluation of the proposed systems is conducted and followed by a discussion of the results.},
  categories = {speech synthesis, prosody, deep neural networks, suprasegmental representations}
}
@inproceedings{ribeiro2016parallel,
  author = {Ribeiro, Manuel Sam and Watts, Oliver and Yamagishi, Junichi},
  title = {Parallel and cascaded deep neural networks for text-to-speech synthesis},
  booktitle = {9th ISCA Workshop on Speech Synthesis (SSW9)},
  year = {2016},
  month = {September},
  address = {Sunnyvale, United States},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ribeiro-et-al-ssw9.pdf},
  abstract = {An investigation of cascaded and parallel deep neural networks for speech synthesis is conducted. In these systems, suprasegmental linguistic features (syllable-level and above) are processed separately from segmental features (phone-level and below). The suprasegmental component of the networks learns compact distributed representations of high-level linguistic units without any segmental influence. These representations are then integrated into a frame-level system using a cascaded or a parallel approach. In the cascaded network, suprasegmental representations are used as input to the frame-level network. In the parallel network, segmental and suprasegmental features are processed separately and concatenated at a later stage. These experiments are conducted with a standard set of high-dimensional linguistic features as well as a hand-pruned one. It is observed that hierarchical systems are consistently preferred over the baseline feedforward systems. Similarly, parallel networks are preferred over cascaded networks.},
  categories = {speech synthesis, prosody, deep neural networks, embeddings, suprasegmental representations}
}
@inproceedings{Stan16,
  author = {Stan, Adriana and {Valentini-Botinhao}, Cassia and Orza, Bogdan and Giurgiu, Mircea},
  publisher = {IEEE},
  doi = {10.1109/SLT.2016.7846324},
  isbn = {978-1-5090-4903-5},
  title = {Blind Speech Segmentation using Spectrogram Image-based Features and Mel Cepstral Coefficients},
  booktitle = {SLT},
  abstract = {This paper introduces a novel method for blind speech segmentation at a phone level based on image processing. We consider the spectrogram of the waveform of an utterance as an image and hypothesize that its striping defects, i.e. discontinuities, appear due to phone boundaries. Using a simple image destriping algorithm, these discontinuities are found. To discover phone transitions which are not as salient in the image, we compute spectral changes derived from the time evolution of the Mel cepstral parametrisation of speech. These so-called image-based and acoustic features are then combined to form a mixed probability function, whose values indicate the likelihood of a phone boundary being located at the corresponding time frame. The method is completely unsupervised and achieves an accuracy of 75.59\% at a -3.26\% over-segmentation rate, yielding an F-measure of 0.76 and a 0.80 R-value on the TIMIT dataset.},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/master_3.pdf},
  pages = {597--602}
}
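
Editor's sketch of the mixed boundary function described above (not the paper's implementation): a spectrogram-image discontinuity cue and a cepstral-change cue are normalised and mixed into a single per-frame boundary score, and peaks above a threshold become candidate phone boundaries. Window sizes, the mixing weight and the threshold are hypothetical, and a plain real cepstrum stands in for the Mel cepstral parametrisation.

# Illustrative only: mixed image-based/acoustic boundary score.
import numpy as np

def boundary_scores(x, frame_len=400, hop=160, n_cep=13, w_img=0.5):
    # Short-time magnitude spectrogram: the "image" of the utterance.
    n_frames = 1 + (len(x) - frame_len) // hop
    win = np.hanning(frame_len)
    frames = np.stack([x[i*hop:i*hop+frame_len] * win for i in range(n_frames)])
    log_spec = np.log(np.abs(np.fft.rfft(frames, axis=1)) + 1e-8)

    # Image-based cue: column-to-column discontinuity of the log-spectrogram.
    img_cue = np.r_[0.0, np.linalg.norm(np.diff(log_spec, axis=0), axis=1)]

    # Acoustic cue: frame-to-frame change of cepstral coefficients (a plain
    # real cepstrum here, standing in for the MCEPs used in the paper).
    cep = np.fft.irfft(log_spec, axis=1)[:, :n_cep]
    cep_cue = np.r_[0.0, np.linalg.norm(np.diff(cep, axis=0), axis=1)]

    norm = lambda c: (c - c.min()) / (np.ptp(c) + 1e-8)
    return w_img * norm(img_cue) + (1.0 - w_img) * norm(cep_cue)

if __name__ == "__main__":
    sr, hop = 16000, 160
    x = np.random.default_rng(2).normal(size=sr)        # stand-in for speech
    score = boundary_scores(x, hop=hop)
    candidates = np.where(score > 0.6)[0] * hop / sr    # candidate times (s)
    print(len(score), candidates[:5])
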
@inproceedings{Valentini16b,
  author = {{Valentini-Botinhao}, Cassia and Wang, Xin and Takaki, Shinji and Yamagishi, Junichi},
  publisher = {ISCA},
  doi = {10.21437/Interspeech.2016-159},
  title = {Speech Enhancement for a Noise-Robust Text-to-Speech Synthesis System using Deep Recurrent Neural Networks},
  booktitle = {Interspeech},
  abstract = {The quality of text-to-speech voices built from noisy recordings is diminished. In order to improve it we propose the use of a recurrent neural network to enhance acoustic parameters prior to training. We trained a deep recurrent neural network using a parallel database of noisy and clean acoustic parameters as input and output of the network. The database consisted of multiple speakers and diverse noise conditions. We investigated using text-derived features as an additional input of the network. We processed a noisy database of two other speakers using this network and used its output to train an HMM-based text-to-speech acoustic model for each voice. Listening experiment results showed that the voice built with enhanced parameters was ranked significantly higher than the ones trained with noisy speech and speech that had been enhanced using a conventional enhancement system. The text-derived features improved results only for the female voice, where it was ranked as highly as a voice trained with clean speech.},
  month = sep,
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Interspeech2016_Cassia_1.pdf},
  pages = {352--356}
}
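
Editor's sketch of the mapping described above (illustrative only): a single Elman-style recurrent layer with random weights stands in for the deep recurrent network that would be trained on parallel noisy/clean acoustic parameters; dimensions are hypothetical.

# Illustrative only: enhancing acoustic parameters frame by frame with a
# recurrent network. Weights here are random; in the real system they are
# learned from a parallel noisy/clean database (optionally with text-derived
# features as extra input).
import numpy as np

rng = np.random.default_rng(3)
T, feat_dim, hid = 200, 63, 128            # hypothetical feature/layer sizes
noisy = rng.normal(size=(T, feat_dim))     # acoustic parameters of noisy speech

W_in = rng.normal(scale=0.1, size=(feat_dim, hid))
W_rec = rng.normal(scale=0.1, size=(hid, hid))
W_out = rng.normal(scale=0.1, size=(hid, feat_dim))

h = np.zeros(hid)
enhanced = np.zeros_like(noisy)
for t in range(T):
    h = np.tanh(noisy[t].dot(W_in) + h.dot(W_rec))   # recurrent state carries context
    enhanced[t] = h.dot(W_out)                       # predicted "clean" parameters

# `enhanced` would replace the noisy parameters when training the TTS
# acoustic model.
print(enhanced.shape)
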
@inproceedings{Valentini16a,
  author = {{Valentini-Botinhao}, Cassia and Wang, Xin and Takaki, Shinji and Yamagishi, Junichi},
  title = {Investigating {RNN}-based speech enhancement methods for noise-robust Text-to-Speech},
  booktitle = {Proceedings of 9th ISCA Speech Synthesis Workshop},
  abstract = {The quality of text-to-speech (TTS) voices built from noisy speech is compromised. Enhancing the speech data before training has been shown to improve quality but voices built with clean speech are still preferred. In this paper we investigate two different approaches for speech enhancement to train TTS systems. In both approaches we train a recurrent neural network (RNN) to map acoustic features extracted from noisy speech to features describing clean speech. The enhanced data is then used to train the TTS acoustic model. In one approach we use the features conventionally employed to train TTS acoustic models, i.e. Mel cepstral (MCEP) coefficients, aperiodicity values and fundamental frequency (F0). In the other approach, following conventional speech enhancement methods, we train an RNN using only the MCEP coefficients extracted from the magnitude spectrum. The enhanced MCEP features and the phase extracted from noisy speech are combined to reconstruct the waveform, which is then used to extract acoustic features to train the TTS system. We show that the second approach results in larger MCEP distortion but smaller F0 errors. Subjective evaluation shows that synthetic voices trained with data enhanced with this method were rated higher, with scores similar to those of voices trained with clean speech.},
  month = sep,
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/SSW9_Cassia_1.pdf},
  pages = {159--165}
}
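
Editor's sketch of the second approach in the entry above (illustrative only, and simplified to plain magnitude spectra rather than MCEPs): the magnitude spectrum is enhanced, recombined with the noisy phase, and overlap-added back to a waveform from which TTS features would then be extracted. enhance_mag() is a hypothetical stand-in for the trained RNN.

# Illustrative only: enhance the magnitude spectrum, keep the noisy phase,
# and resynthesise a waveform by overlap-add.
import numpy as np

def stft(x, n=512, hop=256):
    win = np.hanning(n)
    frames = np.stack([x[i:i+n] * win for i in range(0, len(x) - n + 1, hop)])
    return np.fft.rfft(frames, axis=1)

def istft(spec, n=512, hop=256):
    win = np.hanning(n)
    frames = np.fft.irfft(spec, n=n, axis=1) * win
    out = np.zeros((len(frames) - 1) * hop + n)
    norm = np.zeros_like(out)
    for i, f in enumerate(frames):
        out[i*hop:i*hop+n] += f
        norm[i*hop:i*hop+n] += win ** 2
    return out / np.maximum(norm, 1e-8)

def enhance_mag(mag):
    # Hypothetical placeholder for the learned noisy-to-clean mapping.
    return mag

noisy = np.random.default_rng(4).normal(size=16000)   # stand-in for noisy speech
spec = stft(noisy)
enhanced = enhance_mag(np.abs(spec)) * np.exp(1j * np.angle(spec))  # noisy phase
reconstructed = istft(enhanced)
print(reconstructed.shape)
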
@inproceedings{ronanki_slt2016,
  author = {Ronanki, Srikanth and Watts, Oliver and King, Simon and Henter, Gustav Eje},
  title = {{Median-Based Generation of Synthetic Speech Durations using a Non-Parametric Approach}},
  booktitle = {Proc. IEEE Workshop on Spoken Language Technology (SLT)},
  month = {December},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016mediandur.pdf},
  abstract = {This paper proposes a new approach to duration modelling for statistical parametric speech synthesis in which a recurrent statistical model is trained to output a phone transition probability at each timestep (acoustic frame). Unlike conventional approaches to duration modelling – which assume that duration distributions have a particular form (e.g., a Gaussian) and use the mean of that distribution for synthesis – our approach can in principle model any distribution supported on the non-negative integers. Generation from this model can be performed in many ways; here we consider output generation based on the median predicted duration. The median is more typical (more probable) than the conventional mean duration, is robust to training-data irregularities, and enables incremental generation. Furthermore, a frame-level approach to duration prediction is consistent with a longer-term goal of modelling durations and acoustic features together. Results indicate that the proposed method is competitive with baseline approaches in approximating the median duration of held-out natural speech.},
  categories = {text-to-speech, speech synthesis, duration modelling, non-parametric models, LSTMs}
}
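
Editor's sketch of median-based generation from per-frame transition probabilities (illustrative only; the probabilities below are made up, whereas in the paper they come from a recurrent network conditioned on linguistic input): the duration distribution implied by the per-frame probabilities is accumulated and the median is the smallest duration whose cumulative probability reaches 0.5.

# Illustrative only: median duration from per-frame phone transition
# probabilities p[t] = P(phone ends at frame t+1 | it has lasted t frames).
import numpy as np

def median_duration(p):
    p = np.asarray(p, dtype=float)
    survive = np.cumprod(1.0 - p)                 # P(duration > d)
    pmf = p * np.r_[1.0, survive[:-1]]            # P(duration = d), d = 1..len(p)
    cdf = np.cumsum(pmf)
    return int(np.searchsorted(cdf, 0.5) + 1)     # smallest d with CDF >= 0.5

p = np.r_[np.full(5, 0.05), np.full(10, 0.2), np.full(20, 0.6)]   # hypothetical
print(median_duration(p))   # median number of frames for this phone
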
@inproceedings{ronanki_demo_ssw2016,
  author = {Ronanki, Srikanth and Wu, Zhizheng and Watts, Oliver and King, Simon},
  title = {{A Demonstration of the Merlin Open Source Neural Network Speech Synthesis System}},
  booktitle = {Proc. Speech Synthesis Workshop (SSW9)},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Merlin_demo_paper.pdf},
  abstract = {This demonstration showcases our new Open Source toolkit for neural network-based speech synthesis, Merlin. We wrote Merlin because we wanted free, simple, maintainable code that we understood. No existing toolkits met all of those requirements. Merlin is designed for speech synthesis, but can be put to other uses. It has already been used for voice conversion, classification tasks, and for predicting head motion from speech.},
  categories = {Merlin, speech synthesis, deep learning}
}
@inproceedings{ali16_mgb2,
  author = {Ali, A. and Bell, P. and Glass, J. and Messaoui, Y. and Mubarak, H. and Renals, S. and Zhang, Y.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/mgb_2_challenge_1.pdf},
  booktitle = {Proc. SLT},
  year = {2016},
  abstract = {This paper describes the Arabic Multi-Genre Broadcast (MGB-2) Challenge for SLT-2016. Unlike last year’s English MGB Challenge, which focused on recognition of diverse TV genres, this year the challenge has an emphasis on handling the diversity in dialect in Arabic speech. Audio data comes from 19 distinct programmes from the Aljazeera Arabic TV channel between March 2005 and December 2015. Programmes are split into three groups: conversations, interviews, and reports. A total of 1,200 hours have been released with lightly supervised transcriptions for acoustic modelling. For language modelling, we made available over 110M words crawled from the Aljazeera Arabic website Aljazeera.net, covering the years 2000-2011. Two lexicons have been provided, one phoneme based and one grapheme based. Finally, two tasks were proposed for this year’s challenge: standard speech transcription, and word alignment. This paper describes the task data and evaluation process used in the MGB challenge, and summarises the results obtained.},
  title = {The {MGB-2} {C}hallenge: {A}rabic multi-dialect broadcast media recognition}
}
@inproceedings{klejch2016slt,
  author = {Klejch, Ondrej and Bell, Peter and Renals, Steve},
  title = {Punctuated transcription of multi-genre broadcasts using acoustic and lexical approaches},
  abstract = {In this paper we investigate the punctuated transcription of multi-genre broadcast media. We examine four systems, three of which are based on lexical features, the fourth of which uses acoustic features by integrating punctuation into the speech recognition acoustic models. We also explore the combination of these component systems using voting and log-linear interpolation. We performed experiments on the English language MGB Challenge data, which comprises about 1,600 hours of BBC television recordings. Our results indicate that a lexical system based on a neural machine translation approach is significantly better than the other systems, achieving an F-measure of 62.6% on reference text, with a relative degradation of 19% on ASR output. Our analysis of the results in terms of specific punctuation marks indicated that using longer context improves the prediction of question marks and that acoustic information improves the prediction of exclamation marks. Finally, we show that even though the systems are complementary, their straightforward combination does not yield better F-measures than a single system using neural machine translation.},
  year = {2016},
  month = {December},
  address = {San Diego, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/slt-2016.pdf},
  booktitle = {Proc. IEEE Workshop on Spoken Language Technology},
  categories = {punctuation, speech recognition, neural machine translation, rich transcription}
}
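
Editor's sketch of the two combination schemes mentioned above (illustrative only; the label set and interpolation weights are hypothetical): component systems are combined either by majority voting over their hard decisions or by log-linear interpolation of their per-token posteriors.

# Illustrative only: voting and log-linear combination of punctuation systems.
import numpy as np
from collections import Counter

LABELS = ["", ",", ".", "?", "!"]          # hypothetical label set

def vote(predictions):
    # predictions: one label sequence per system, all of equal length.
    return [Counter(col).most_common(1)[0][0] for col in zip(*predictions)]

def log_linear(posteriors, weights):
    # posteriors: array of shape (n_systems, n_tokens, n_labels).
    log_p = np.log(np.asarray(posteriors) + 1e-12)
    combined = np.tensordot(weights, log_p, axes=1)   # (n_tokens, n_labels)
    return [LABELS[i] for i in combined.argmax(axis=1)]

rng = np.random.default_rng(5)
post = rng.dirichlet(np.ones(len(LABELS)), size=(3, 10))   # 3 systems, 10 tokens
print(log_linear(post, weights=[0.5, 0.3, 0.2]))
print(vote([[LABELS[i] for i in p.argmax(axis=1)] for p in post]))
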
@inproceedings{tian2016recognizing,
  author = {Tian, Leimin and Moore, Johanna and Lai, Catherine},
  organization = {IEEE},
  title = {Recognizing emotions in spoken dialogue with hierarchically fused acoustic and lexical features},
  abstract = {Automatic emotion recognition is vital for building natural and engaging human-computer interaction systems. Combining information from multiple modalities typically improves emotion recognition performance. In previous work, features from different modalities have generally been fused at the same level with two types of fusion strategies: Feature-Level fusion, which concatenates feature sets before recognition; and Decision-Level fusion, which makes the final decision based on outputs of the unimodal models. However, different features may describe data at different time scales or have different levels of abstraction. Cognitive Science research also indicates that when perceiving emotions, humans use information from different modalities at different cognitive levels and time steps. Therefore, we propose a Hierarchical fusion strategy for multimodal emotion recognition, which incorporates global or more abstract features at higher levels of its knowledge-inspired structure. We build multimodal emotion recognition models combining state-of-the-art acoustic and lexical features to study the performance of the proposed Hierarchical fusion. Experiments on two emotion databases of spoken dialogue show that this fusion strategy consistently outperforms both Feature-Level and Decision-Level fusion. The multimodal emotion recognition models using the Hierarchical fusion strategy achieved state-of-the-art performance on recognizing emotions in both spontaneous and acted dialogue.},
  pages = {565--572},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/SLT2016_Leimin_final.pdf},
  booktitle = {Spoken Language Technology Workshop (SLT), 2016 IEEE},
  categories = {emotion recognition, modality fusion, LSTM, dialogue, human-computer interaction}
}
@inproceedings{Espic2016,
  author = {Espic, Felipe and Valentini-Botinhao, Cassia and Wu, Zhizheng and King, Simon},
  title = {Waveform generation based on signal reshaping for statistical parametric speech synthesis},
  booktitle = {Proc. Interspeech},
  address = {San Francisco, CA, USA},
  abstract = {We propose a new paradigm of waveform generation for Statistical Parametric Speech Synthesis that is based on neither source-filter separation nor sinusoidal modelling. We suggest that one of the main problems of current vocoding techniques is that they perform an extreme decomposition of the speech signal into source and filter, which is an underlying cause of “buzziness”, “musical artifacts”, or “muffled sound” in the synthetic speech. The proposed method avoids making unnecessary assumptions and decompositions as far as possible, and uses only the spectral envelope and F0 as parameters. Prerecorded speech is used as a base signal, which is “reshaped” to match the acoustic specification predicted by the statistical model, without any source-filter decomposition. A detailed description of the method is presented, including implementation details and adjustments. Subjective listening test evaluations of complete DNN-based text-to-speech systems were conducted for two voices: one female and one male. The results show that the proposed method tends to outperform the state-of-the-art standard vocoder STRAIGHT, whilst using fewer acoustic parameters.},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/0487.PDF},
  pages = {2263--2267},
  categories = {speech synthesis, waveform generation, vocoding, statistical parametric speech synthesis}
}
@inproceedings{Villavicencio+2016,
  author = {Villavicencio, Fernando and Yamagishi, Junichi and Bonada, Jordi and Espic, Felipe},
  doi = {10.21437/Interspeech.2016-305},
  title = {Applying Spectral Normalisation and Efficient Envelope Estimation and Statistical Transformation for the Voice Conversion Challenge 2016},
  url = {http://hdl.handle.net/10230/32891},
  booktitle = {Interspeech},
  address = {San Francisco, USA},
  abstract = {In this work we present our entry for the Voice Conversion Challenge 2016, introducing new features with respect to our previous work on GMM-based voice conversion. We incorporate frequency warping and pitch transposition strategies to perform a normalisation of the spectral conditions, with benefits confirmed by objective and perceptual means. Moreover, the results of the challenge showed our entry among the highest performing systems in terms of perceived naturalness while maintaining the target similarity performance of GMM-based conversion.},
  month = sep,
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/0305.PDF},
  pages = {1657--1661},
  categories = {voice conversion, speech synthesis, statistical spectral transformation, spectral envelope modeling}
}
@inproceedings{yoshimura2016hierarchical,
  author = {Yoshimura, Takenori and Henter, {Gustav Eje} and Watts, Oliver and Wester, Mirjam and Yamagishi, Junichi and Tokuda, Keiichi},
  publisher = {International Speech Communication Association},
  doi = {10.21437/Interspeech.2016-847},
  title = {A Hierarchical Predictor of Synthetic Speech Naturalness Using Neural Networks},
  abstract = {A problem when developing and tuning speech synthesis systems is that there is no well-established method of automatically rating the quality of the synthetic speech. This research attempts to obtain a new automated measure which is trained on the results of large-scale subjective evaluations employing many human listeners, i.e., the Blizzard Challenge. To exploit the data, we experiment with linear regression, feed-forward and convolutional neural network models, and combinations of them to regress from synthetic speech to the perceptual scores obtained from listeners. The biggest improvements were seen when combining stimulus- and system-level predictions.},
  month = sep,
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/0847.PDF},
  booktitle = {Interspeech 2016},
  pages = {342--346}
}
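
Editor's sketch of combining stimulus- and system-level predictions (illustrative only; the data, the linear-regression predictor and the 50/50 blending weights are hypothetical stand-ins for the models compared in the paper).

# Illustrative only: regress from per-stimulus features to listener scores,
# form a system-level prediction by averaging, then blend the two levels.
import numpy as np

rng = np.random.default_rng(6)
n_stim, n_feat, n_systems = 200, 20, 10
feats = rng.normal(size=(n_stim, n_feat))
system_of = rng.integers(0, n_systems, size=n_stim)   # which system made each stimulus
scores = rng.normal(loc=3.0, size=n_stim)             # listener scores per stimulus

# Linear regression with a bias term as the simplest stimulus-level predictor.
X = np.c_[feats, np.ones(n_stim)]
w, *_ = np.linalg.lstsq(X, scores, rcond=None)
stim_pred = X.dot(w)

# System-level prediction: average the stimulus-level predictions per system.
sys_pred = np.array([stim_pred[system_of == s].mean() for s in range(n_systems)])

# Blend stimulus- and system-level predictions for each stimulus.
blended = 0.5 * stim_pred + 0.5 * sys_pred[system_of]
print(blended[:5])
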
@inproceedings{wu2016merlin,
  author = {Wu, Zhizheng and Watts, Oliver and King, Simon},
  title = {Merlin: An Open Source Neural Network Speech Synthesis System},
  abstract = {We introduce the Merlin speech synthesis toolkit for neural network-based speech synthesis. The system takes linguistic features as input, and employs neural networks to predict acoustic features, which are then passed to a vocoder to produce the speech waveform. Various neural network architectures are implemented, including a standard feedforward neural network, mixture density neural network, recurrent neural network (RNN), and long short-term memory (LSTM) recurrent neural network, amongst others. The toolkit is Open Source, written in Python, and is extensible. This paper briefly describes the system, and provides some benchmarking results on a freely available corpus.},
  month = sep,
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/master_2.pdf},
  booktitle = {9th ISCA Speech Synthesis Workshop (2016)},
  pages = {218--223}
}
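
Editor's sketch of the pipeline described in the entry above, i.e. linguistic features passed through a neural network to predict acoustic features, which are then handed to a vocoder. This is not Merlin's actual API: the layer sizes are hypothetical and fake_vocoder() is a placeholder for a real vocoder such as WORLD or STRAIGHT.

# Illustrative only: linguistic features -> neural network -> acoustic
# features -> vocoder.
import numpy as np

rng = np.random.default_rng(7)

def feedforward(x, sizes):
    # Minimal feedforward acoustic model with random (untrained) weights.
    for d_in, d_out in zip(sizes[:-1], sizes[1:]):
        x = np.tanh(x.dot(rng.normal(scale=0.1, size=(d_in, d_out))))
    return x

def fake_vocoder(acoustic, hop=80):
    # Placeholder: a real system would synthesise a waveform from the
    # predicted spectral, aperiodicity and F0 parameters here.
    return np.zeros(len(acoustic) * hop)

T, ling_dim, ac_dim = 300, 425, 187     # hypothetical frame count and dimensions
linguistic = rng.normal(size=(T, ling_dim))
acoustic = feedforward(linguistic, [ling_dim, 512, 512, ac_dim])
waveform = fake_vocoder(acoustic)
print(acoustic.shape, waveform.shape)
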