The Centre for Speech Technology Research, The University of Edinburgh

Publications by Srikanth Ronanki

s1432486.bib

@inproceedings{henter2016robust,
  author = {Henter, Gustav Eje and Ronanki, Srikanth and Watts, Oliver and Wester, Mirjam and Wu, Zhizheng and King, Simon},
  title = {Robust {TTS} duration modelling using {DNN}s},
  url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7472655},
  abstract = {Accurate modelling and prediction of speech-sound durations is an important component in generating more natural synthetic speech. Deep neural networks (DNNs) offer a powerful modelling paradigm, and large, found corpora of natural and expressive speech are easy to acquire for training them. Unfortunately, found datasets are seldom subject to the quality-control that traditional synthesis methods expect. Common issues likely to affect duration modelling include transcription errors, reductions, filled pauses, and forced-alignment inaccuracies. To combat this, we propose to improve modelling and prediction of speech durations using methods from robust statistics, which are able to disregard ill-fitting points in the training material. We describe a robust fitting criterion based on the density power divergence (the beta-divergence) and a robust generation heuristic using mixture density networks (MDNs). Perceptual tests indicate that subjects prefer synthetic speech generated using robust models of duration over the baselines.},
  address = {Shanghai, China},
  month = {March},
  volume = {41},
  year = {2016},
  keywords = {Speech synthesis, duration modelling, robust statistics},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/henter2016robust.pdf},
  booktitle = {Proc. ICASSP},
  pages = {5130--5134},
  categories = {Speech synthesis, duration modelling, robust statistics}
}
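For reference, a minimal sketch of the robust fitting criterion named in the abstract above: the density power divergence (beta-divergence) between the data density g and the model density f_theta, in its standard form due to Basu et al. Whether the paper uses exactly this parameterisation is an assumption; only the general definition is shown.

% Density power divergence (beta-divergence), beta > 0, between the data
% density g and the model density f_\theta. As beta -> 0 it approaches the
% KL divergence, recovering maximum likelihood; larger beta downweights
% points with small model density (outliers). Standard form; the paper's
% exact parameterisation may differ.
\[
  d_\beta(g, f_\theta) = \int \Big( f_\theta(x)^{1+\beta}
    - \tfrac{1+\beta}{\beta}\, g(x)\, f_\theta(x)^{\beta}
    + \tfrac{1}{\beta}\, g(x)^{1+\beta} \Big)\, dx .
\]
% The last term does not depend on \theta, and the middle term can be
% estimated by an average over training points, giving a practical
% empirical objective.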
@inproceedings{ronanki2016dnn,
  author = {Ronanki, Srikanth and Reddy, Siva and Bollepalli, Bajibabu and King, Simon},
  title = {{DNN-based Speech Synthesis for Indian Languages from ASCII text}},
  booktitle = {Proc. 9th ISCA Speech Synthesis Workshop (SSW9)},
  year = {2016},
  month = {September},
  address = {Sunnyvale, CA, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016ilsynth.pdf},
  abstract = {Text-to-Speech synthesis in Indian languages has seen a lot of progress over the past decade, partly due to the annual Blizzard challenges. These systems assume the text to be written in Devanagari or Dravidian scripts, which have nearly phonemic orthographies. However, the most common form of computer interaction among Indians is ASCII written transliterated text. Such text is generally noisy with many variations in spelling for the same word. In this paper we evaluate three approaches to synthesize speech from such noisy ASCII text: a naive Uni-Grapheme approach, a Multi-Grapheme approach, and a supervised Grapheme-to-Phoneme (G2P) approach. These methods first convert the ASCII text to a phonetic script, and then learn a Deep Neural Network to synthesize speech from that. We train and test our models on Blizzard Challenge datasets that were transliterated to ASCII using crowdsourcing. Our experiments on Hindi, Tamil and Telugu demonstrate that our models generate speech of competitive quality from ASCII text compared to the speech synthesized from the native scripts. All the accompanying transliterated datasets are released for public access.},
  categories = {Indian Languages, Speech Synthesis, Deep Neural Networks, ASCII transliteration}
}
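As a rough illustration of the first two approaches named in the abstract, the Python sketch below contrasts a naive uni-grapheme tokenisation of ASCII transliterated text with a greedy multi-grapheme tokenisation. The digraph inventory and function names are hypothetical examples, not the symbol sets used in the paper.

# Illustrative only: DIGRAPHS is a made-up inventory, not the paper's.
DIGRAPHS = {"aa", "ee", "oo", "th", "dh", "sh", "ch", "ng"}

def uni_graphemes(word):
    """Naive uni-grapheme tokenisation: one symbol per ASCII character."""
    return list(word.lower())

def multi_graphemes(word):
    """Greedy multi-grapheme tokenisation: prefer known two-letter units."""
    word = word.lower()
    tokens, i = [], 0
    while i < len(word):
        if word[i:i + 2] in DIGRAPHS:
            tokens.append(word[i:i + 2])
            i += 2
        else:
            tokens.append(word[i])
            i += 1
    return tokens

print(uni_graphemes("dhanyavaad"))    # ['d', 'h', 'a', 'n', 'y', 'a', 'v', 'a', 'a', 'd']
print(multi_graphemes("dhanyavaad"))  # ['dh', 'a', 'n', 'y', 'a', 'v', 'aa', 'd']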
@inproceedings{ronanki2016template,
  author = {Ronanki, Srikanth and Henter, Gustav Eje and Wu, Zhizheng and King, Simon},
  title = {A template-based approach for speech synthesis intonation generation using {LSTM}s},
  booktitle = {Proc. Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016template.pdf},
  abstract = {The absence of convincing intonation makes current parametric speech synthesis systems sound dull and lifeless, even when trained on expressive speech data. Typically, these systems use regression techniques to predict the fundamental frequency (F0) frame-by-frame. This approach leads to overly-smooth pitch contours and fails to construct an appropriate prosodic structure across the full utterance. In order to capture and reproduce larger-scale pitch patterns, this paper proposes a template-based approach for automatic F0 generation, where per-syllable pitch-contour templates (from a small, automatically learned set) are predicted by a recurrent neural network (RNN). The use of syllable templates mitigates the over-smoothing problem and is able to reproduce pitch patterns observed in the data. The use of an RNN, paired with connectionist temporal classification (CTC), enables the prediction of structure in the pitch contour spanning the entire utterance. This novel F0 prediction system is used alongside separate LSTMs for predicting phone durations and the other acoustic features, to construct a complete text-to-speech system. We report the results of objective and subjective tests on an expressive speech corpus of children's audiobooks, and include comparisons to a conventional baseline that predicts F0 directly at the frame level.},
  categories = {speech synthesis, intonation modelling, F0 templates, LSTM, CTC}
}
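The abstract above describes predicting per-syllable pitch-contour templates drawn from a small, automatically learned set. A minimal sketch of one way such a template set could be learned is given below, assuming per-syllable F0 contours are resampled to a fixed length and clustered with k-means; the function names are illustrative and the paper's exact learning procedure may differ.

# Sketch only: fixed-length resampling + k-means is one plausible way to
# obtain a small per-syllable pitch-template codebook.
import numpy as np
from sklearn.cluster import KMeans

def resample_contour(f0, num_points=10):
    """Linearly resample one syllable's F0 values to a fixed length."""
    f0 = np.asarray(f0, dtype=float)
    x_old = np.linspace(0.0, 1.0, len(f0))
    x_new = np.linspace(0.0, 1.0, num_points)
    return np.interp(x_new, x_old, f0)

def learn_templates(syllable_contours, num_templates=8):
    """Cluster fixed-length, level-normalised contours into pitch templates."""
    X = np.stack([resample_contour(c) for c in syllable_contours])
    X = X - X.mean(axis=1, keepdims=True)      # remove per-syllable pitch level
    km = KMeans(n_clusters=num_templates, n_init=10, random_state=0).fit(X)
    return km.cluster_centers_, km.labels_     # templates, per-syllable class ids

# The per-syllable class ids could then serve as targets for a recurrent
# classifier; the paper pairs an RNN with CTC for that prediction step.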
@inproceedings{cstr2016blizzard,
  author = {Merritt, Thomas and Ronanki, Srikanth and Wu, Zhizheng and Watts, Oliver},
  title = {The {CSTR} entry to the {Blizzard Challenge} 2016},
  booktitle = {Proc. Blizzard Challenge},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Cstr2016BlizzardEntry.pdf},
  abstract = {This paper describes the text-to-speech system entered by The Centre for Speech Technology Research into the 2016 Blizzard Challenge. This system is a hybrid synthesis system which uses output from a recurrent neural network to drive a unit selection synthesiser. The annual Blizzard Challenge conducts side-by-side testing of a number of speech synthesis systems trained on a common set of speech data. The task of the 2016 Blizzard Challenge is to train on expressively-read children’s storybooks, and to synthesise speech in the same domain. The Challenge therefore presents an opportunity to test the effectiveness of several techniques we have developed when applied to expressive speech data.},
  categories = {hybrid synthesis, statistical parametric speech synthesis, deep neural network, recurrent neural network, unit selection}
}
@inproceedings{ronanki_interspeech17,
  author = {Ronanki, Srikanth and Watts, Oliver and King, Simon},
  title = {{A Hierarchical Encoder-Decoder Model for Statistical Parametric Speech Synthesis}},
  booktitle = {Proc. Interspeech 2017},
  month = {August},
  year = {2017},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2017/interspeech_2017_paper.pdf},
  abstract = {Current approaches to statistical parametric speech synthesis using Neural Networks generally require input at the same temporal resolution as the output, typically a frame every 5ms, or in some cases at waveform sampling rate. It is therefore necessary to fabricate highly-redundant frame-level (or sample-level) linguistic features at the input. This paper proposes the use of a hierarchical encoder-decoder model to perform the sequence-to-sequence regression in a way that takes the input linguistic features at their original timescales, and preserves the relationships between words, syllables and phones. The proposed model is designed to make more effective use of supra-segmental features than conventional architectures, as well as being computationally efficient. Experiments were conducted on prosodically-varied audiobook material because the use of supra-segmental features is thought to be particularly important in this case. Both objective measures and results from subjective listening tests, which asked listeners to focus on prosody, show that the proposed method performs significantly better than a conventional architecture that requires the linguistic input to be at the acoustic frame rate. We provide code and a recipe to enable our system to be reproduced using the Merlin toolkit.},
  categories = {Speech synthesis, prosody modeling, hierarchical models and sequence modeling}
}
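For context, the sketch below illustrates the conventional input layout the abstract argues against: phone-level linguistic features repeated ("fabricated") at the 5 ms acoustic frame rate, with a within-phone position feature appended. The feature contents, position encoding and function name are illustrative assumptions, not the paper's recipe.

# Minimal sketch of frame-rate input fabrication for a conventional DNN/RNN
# acoustic model (the redundancy the hierarchical encoder-decoder avoids).
import numpy as np

def upsample_to_frames(phone_feats, durations_in_frames):
    """Repeat each phone's feature vector for its duration, appending a
    coarse frame-position-within-phone feature."""
    rows = []
    for feats, dur in zip(phone_feats, durations_in_frames):
        for t in range(dur):
            position = (t + 0.5) / dur          # fractional position in phone
            rows.append(np.append(feats, position))
    return np.stack(rows)

# Example: 3 phones with 4-dim linguistic features and durations of
# 20/35/12 frames yield a (67, 5) input matrix of mostly repeated rows.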
@inproceedings{ronanki_blizzard2017,
  author = {Ronanki, Srikanth and Ribeiro, {Manuel Sam} and Espic, Felipe and Watts, Oliver},
  title = {{The CSTR entry to the Blizzard Challenge 2017}},
  booktitle = {Proc. Blizzard Challenge Workshop (Interspeech Satellite)},
  year = {2017},
  month = {August},
  key = {ronanki_blizzard2017},
  address = {Stockholm, Sweden},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2017/CSTR_Blizzard2017.pdf},
  abstract = {The annual Blizzard Challenge conducts side-by-side testing of a number of speech synthesis systems trained on a common set of speech data. Similar to the 2016 Blizzard Challenge, the task for this year is to train on expressively-read children's storybooks, and to synthesise speech in the same domain. The Challenge therefore presents an opportunity to investigate the effectiveness of several techniques we have developed when applied to expressive and prosodically-varied audiobook data. This paper describes the text-to-speech system entered by The Centre for Speech Technology Research into the 2017 Blizzard Challenge. The current system is a hybrid synthesis system which drives a unit selection synthesiser using the output from a neural network based acoustic and duration model. We assess the performance of our system by reporting the results from formal listening tests provided by the challenge.},
  categories = {Merlin, hybrid speech synthesis, unit selection, deep neural networks}
}
@inproceedings{ronanki_slt2016,
  author = {Ronanki, Srikanth and Watts, Oliver and King, Simon and Henter, Gustav Eje},
  title = {{Median-Based Generation of Synthetic Speech Durations using a Non-Parametric Approach}},
  booktitle = {Proc. IEEE Workshop on Spoken Language Technology (SLT)},
  month = {December},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ronanki2016mediandur.pdf},
  abstract = {This paper proposes a new approach to duration modelling for statistical parametric speech synthesis in which a recurrent statistical model is trained to output a phone transition probability at each timestep (acoustic frame). Unlike conventional approaches to duration modelling – which assume that duration distributions have a particular form (e.g., a Gaussian) and use the mean of that distribution for synthesis – our approach can in principle model any distribution supported on the non-negative integers. Generation from this model can be performed in many ways; here we consider output generation based on the median predicted duration. The median is more typical (more probable) than the conventional mean duration, is robust to training-data irregularities, and enables incremental generation. Furthermore, a frame-level approach to duration prediction is consistent with a longer-term goal of modelling durations and acoustic features together. Results indicate that the proposed method is competitive with baseline approaches in approximating the median duration of held-out natural speech.},
  categories = {text-to-speech, speech synthesis, duration modelling, non-parametric models, LSTMs}
}
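A minimal sketch of how a median duration can be read off from per-frame phone-transition probabilities, assuming the model's per-frame output is treated as a hazard (end-of-phone) probability; the survival-product reading below is one standard way to do this and may differ in detail from the paper. The function name is illustrative.

# Sketch, assuming the recurrent model emits p_t = P(phone ends at frame t
# | phone still ongoing) for t = 1, 2, ...
def median_duration(transition_probs):
    """Return the first frame count at which the probability that the phone
    has already ended reaches 0.5, i.e. the median of the implied
    duration distribution."""
    survival = 1.0                       # P(duration > t) so far
    for t, p in enumerate(transition_probs, start=1):
        survival *= (1.0 - p)
        if survival <= 0.5:
            return t
    return len(transition_probs)         # fall back to the prediction horizon

# Example: hazards [0.1, 0.2, 0.4, 0.6] give survival 0.9, 0.72, 0.432,
# so the median duration is 3 frames.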
@inproceedings{ronanki_demo_ssw2016,
  author = {Ronanki, Srikanth and Wu, Zhizheng and Watts, Oliver and King, Simon},
  title = {{A Demonstration of the Merlin Open Source Neural Network Speech Synthesis System}},
  booktitle = {Proc. Speech Synthesis Workshop (SSW9)},
  month = {September},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Merlin_demo_paper.pdf},
  abstract = {This demonstration showcases our new Open Source toolkit for neural network-based speech synthesis, Merlin. We wrote Merlin because we wanted free, simple, maintainable code that we understood. No existing toolkits met all of those requirements. Merlin is designed for speech synthesis, but can be put to other uses. It has already also been used for voice conversion, classification tasks, and for predicting head motion from speech.},
  categories = {Merlin, speech synthesis, deep learning}
}
@inproceedings{watts_blizzard2015,
  author = {Watts, Oliver and Ronanki, Srikanth and Wu, Zhizheng and Raitio, Tuomo and Suni, Antti},
  title = {{The NST--GlottHMM entry to the Blizzard Challenge 2015}},
  booktitle = {Proc. Blizzard Challenge Workshop (Interspeech Satellite)},
  year = {2015},
  month = {September},
  key = {watts_blizzard2015},
  address = {Berlin, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/bc2015_nst.pdf},
  abstract = {We describe the synthetic voices forming the joint entry into the 2015 Blizzard Challenge of the Natural Speech Technology consortium, Helsinki University, and Aalto University. The 2015 Blizzard Challenge presents an opportunity to test and benchmark some of the tools we have developed to address the problem of how to produce systems in arbitrary new languages with minimal annotated data and language-specific expertise on the part of the system builders. We here explain how our tools were used to address these problems on the different tasks of the challenge, and provide some discussion of the evaluation results. Some additions to the system used to build voices for the previous Challenge are described: acoustic modelling using deep neural networks with jointly-trained duration model, and an unsupervised approach for handling the phenomenon of inherent vowel deletion which occurs in 3 of the 6 target languages.},
  categories = {statistical parametric speech synthesis, unsupervised learning, vector space model, glottal inverse filtering, deep neural network, glottal flow pulse library, schwa-deletion}
}