The Centre for Speech Technology Research, The University of Edinburgh

Publications by Rob Clark

robert.bib

@inproceedings{anderssonetal2010,
  author = {Andersson, Sebastian and Georgila, Kallirroi and Traum, David and Aylett, Matthew and Clark, Robert},
  title = {Prediction and Realisation of Conversational Characteristics by Utilising Spontaneous Speech for Unit Selection},
  booktitle = {Speech Prosody 2010},
  month = {May},
  year = {2010},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2010/100116.pdf},
  abstract = {Unit selection speech synthesis has reached high levels of naturalness and intelligibility for neutral read aloud speech. However, synthetic speech generated using neutral read aloud data lacks all the attitude, intention and spontaneity associated with everyday conversations. Unit selection is heavily data dependent and thus in order to simulate human conversational speech, or create synthetic voices for believable virtual characters, we need to utilise speech data with examples of how people talk rather than how people read. In this paper we included carefully selected utterances from spontaneous conversational speech in a unit selection voice. Using this voice and by automatically predicting type and placement of lexical fillers and filled pauses we can synthesise utterances with conversational characteristics. A perceptual listening test showed that it is possible to make synthetic speech sound more conversational without degrading naturalness.},
  categories = {speech synthesis, unit selection, conversation, spontaneous speech, lexical fillers, filled pauses}
}
@inproceedings{oliverclark_interspeech05,
  author = {Oliver, Dominika and Clark, Robert A. J.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2005/oliverclark_interspeech05.pdf},
  booktitle = {Proc. Interspeech 2005},
  year = {2005},
  categories = {speech synthesis, prosody, intonation, festival, Polish},
  title = {Modelling pitch accent types for {P}olish speech synthesis}
}
@inproceedings{anderssoncabral09,
  author = {Andersson, J. Sebastian and Cabral, Joao P. and Badino, Leonardo and Yamagishi, Junichi and Clark, Robert A.J.},
  title = {Glottal Source and Prosodic Prominence Modelling in {HMM}-based Speech Synthesis for the {B}lizzard {C}hallenge 2009},
  booktitle = {The Blizzard Challenge 2009},
  year = {2009},
  month = {September},
  address = {Edinburgh, U.K.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2009/cstr_Blizzard2009.pdf},
  abstract = {This paper describes the CSTR entry for the Blizzard Challenge 2009. The work focused on modifying two parts of the Nitech 2005 HTS speech synthesis system to improve naturalness and contextual appropriateness. The first part incorporated an implementation of the Liljencrants-Fant (LF) glottal source model. The second part focused on improving synthesis of prosodic prominence, including emphasis, through context dependent phonemes. Emphasis was assigned to the synthesised test sentences based on a handful of theory-based rules. The two parts (LF-model and prosodic prominence) were not combined and were hence evaluated separately. The results on naturalness for the LF-model showed that it is not yet perceived as natural as the Benchmark HTS system for neutral speech. The results for the prosodic prominence modelling showed that it was perceived as being as contextually appropriate as the Benchmark HTS system, despite a low naturalness score. The Blizzard Challenge evaluation has provided valuable information on the status of our work, and continued work will begin with analysing why our modifications resulted in reduced naturalness compared to the Benchmark HTS system.},
  categories = {HMM, HTS, speech synthesis, LF-model, glottal source, prosodic prominence, emphasis}
}
@article{Andersson2012175,
  author = {Andersson, Sebastian and Yamagishi, Junichi and Clark, Robert A.J.},
  doi = {10.1016/j.specom.2011.08.001},
  title = {Synthesis and evaluation of conversational characteristics in {HMM}-based speech synthesis},
  url = {http://www.sciencedirect.com/science/article/pii/S0167639311001178},
  journal = {Speech Communication},
  issn = {0167-6393},
  number = {2},
  abstract = {Spontaneous conversational speech has many characteristics that are currently not modelled well by HMM-based speech synthesis, and in order to build synthetic voices that can give an impression of someone partaking in a conversation, we need to utilise data that exhibits more of the speech phenomena associated with conversations than the more generally used carefully read aloud sentences. In this paper we show that synthetic voices built with HMM-based speech synthesis techniques from conversational speech data preserved segmental and prosodic characteristics of frequent conversational speech phenomena. An analysis of an evaluation investigating the perception of quality and speaking style of HMM-based voices confirms that speech with conversational characteristics is instrumental for listeners to perceive successful integration of conversational speech phenomena in synthetic speech. The achieved synthetic speech quality provides an encouraging start for the continued use of conversational speech in HMM-based speech synthesis.},
  volume = {54},
  year = {2012},
  keywords = {Speech synthesis, HMM, Conversation, Spontaneous speech, Filled pauses, Discourse marker},
  pages = {175--188}
}
@inproceedings{clark_gala97,
  author = {Clark, Robert A. J.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1997/clark_gala97.ps},
  title = {Language Acquisition and Implication for Language Change: A Computational Model},
  booktitle = {Proceedings of the {GALA} 97 Conference on Language Acquisition},
  year = {1997},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1997/clark_gala97.pdf},
  pages = {322--326},
  categories = {lm}
}
@inproceedings{leo_07-1,
  author = {Badino, Leonardo and Clark, Robert A.J.},
  title = {Issues of Optionality in Pitch Accent Placement},
  booktitle = {Proc. 6th ISCA Speech Synthesis Workshop},
  year = {2007},
  address = {Bonn, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2007/ssw6_252.pdf},
  abstract = {When comparing the prosodic realization of different English speakers reading the same text, a significant disagreement is usually found amongst the pitch accent patterns of the speakers. Assuming that such disagreement is due to a partial optionality of pitch accent placement, it has recently been proposed to evaluate pitch accent predictors by comparing them with multi-speaker reference data. In this paper we address the issue of pitch accent optionality at different levels. First we propose a simple mathematical definition of intra-speaker optionality, which allows us to introduce a function for evaluating pitch accent predictors that we show to be more accurate and robust than those used in previous works. Subsequently we compare a pitch accent predictor trained on single speaker data with a predictor trained on multi-speaker data in order to point out the large overlap between intra-speaker and inter-speaker optionality. Finally, we show our successful results in predicting intra-speaker optionality and suggest how this achievement could be exploited to improve the performance of a unit selection text-to-speech synthesis (TTS) system.}
}
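
As a rough illustration of the evaluation idea sketched in the abstract above (a minimal sketch only, not the evaluation function actually defined in the paper): a predictor's accent decision for a word can be counted as correct when it matches at least one of several reference speakers, so that inter-speaker disagreement is treated as optionality rather than error. The function and data below are invented for illustration.

# Illustrative only: scoring a pitch accent predictor against
# multi-speaker reference data, treating speaker disagreement as
# optionality. NOT the metric defined in Badino & Clark (2007).
def accent_agreement(predicted, references):
    """predicted: one 0/1 accent decision per word.
    references: one 0/1 pattern per reference speaker.
    Returns the fraction of words whose prediction matches
    at least one reference speaker."""
    assert all(len(r) == len(predicted) for r in references)
    hits = sum(any(r[i] == p for r in references)
               for i, p in enumerate(predicted))
    return hits / len(predicted)

# Speakers disagree on the third word, so either decision is accepted there.
refs = [[1, 0, 1, 0], [1, 0, 0, 0]]
print(accent_agreement([1, 0, 0, 0], refs))  # 1.0
print(accent_agreement([0, 0, 0, 1], refs))  # 0.5
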
@inproceedings{janska_clark:2010a,
  author = {Janska, Anna C. and Clark, Robert A. J.},
  title = {Native and Non-Native Speaker Judgements on the Quality of Synthesized Speech},
  booktitle = {Proc. Interspeech},
  abstract = {The difference between native speakers' and non-native speakers' naturalness judgements of synthetic speech is investigated. Similarity/difference judgements are analysed via a multidimensional scaling analysis and compared to mean opinion scores. It is shown that although the two groups generally behave in a similar manner, the variance of non-native speaker judgements is generally higher. While both groups of subjects can clearly distinguish natural speech from the best synthetic examples, the groups' responses to different artefacts present in the synthetic speech can vary.},
  year = {2010},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2010/janskaclark_interspeech2010.pdf},
  pages = {1121--1124}
}
@inproceedings{clark:podsiadlo:mayo:king:blizzard2007,
  author = {Clark, Robert A. J. and Podsiadlo, Monika and Fraser, Mark and Mayo, Catherine and King, Simon},
  title = {Statistical Analysis of the {B}lizzard {C}hallenge 2007 Listening Test Results},
  booktitle = {Proc. Blizzard 2007 (in Proc. Sixth {ISCA} Workshop on Speech Synthesis)},
  year = {2007},
  month = {August},
  address = {Bonn, Germany},
  keywords = {Blizzard},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2007/blz3_003.pdf},
  abstract = {Blizzard 2007 is the third Blizzard Challenge, in which participants build voices from a common dataset. A large listening test is conducted which allows comparison of systems in terms of naturalness and intelligibility. New sections were added to the listening test for 2007 to test the perceived similarity of the speaker's identity between natural and synthetic speech. In this paper, we present the results of the listening test and the subsequent statistical analysis.},
  categories = {blizzard,listening test}
}
@article{white_clark_moore:2010,
  author = {White, Michael and Clark, Robert A. J. and Moore, Johanna D.},
  doi = {10.1162/coli.09-023-R1-08-002},
  title = {Generating Tailored, Comparative Descriptions with Contextually Appropriate Intonation},
  journal = {Computational Linguistics},
  number = {2},
  abstract = {Generating responses that take user preferences into account requires adaptation at all levels of the generation process. This article describes a multi-level approach to presenting user-tailored information in spoken dialogues which brings together for the first time multi-attribute decision models, strategic content planning, surface realization that incorporates prosody prediction, and unit selection synthesis that takes the resulting prosodic structure into account. The system selects the most important options to mention and the attributes that are most relevant to choosing between them, based on the user model. Multiple options are selected when each offers a compelling trade-off. To convey these trade-offs, the system employs a novel presentation strategy which straightforwardly lends itself to the determination of information structure, as well as the contents of referring expressions. During surface realization, the prosodic structure is derived from the information structure using Combinatory Categorial Grammar in a way that allows phrase boundaries to be determined in a flexible, data-driven fashion. This approach to choosing pitch accents and edge tones is shown to yield prosodic structures with significantly higher acceptability than baseline prosody prediction models in an expert evaluation. These prosodic structures are then shown to enable perceptibly more natural synthesis using a unit selection voice that aims to produce the target tunes, in comparison to two baseline synthetic voices. An expert evaluation and f0 analysis confirm the superiority of the generator-driven intonation and its contribution to listeners' ratings.},
  volume = {36},
  year = {2010},
  pages = {159--201}
}
@mastersthesis{clark_msc96,
  author = {Clark, Robert A.J.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1996/clark_msc96.ps},
  school = {University of Edinburgh},
  title = {Internal and External Factors Affecting Language Change: A Computational Model},
  year = {1996},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1996/clark_msc96.pdf},
  categories = {lm}
}
@inproceedings{richmond_interspeech2010,
  author = {Richmond, Korin and Clark, Robert and Fitt, Sue},
  title = {On Generating {C}ombilex Pronunciations via Morphological Analysis},
  booktitle = {Proc. Interspeech},
  year = {2010},
  abstract = {Combilex is a high-quality lexicon that has been developed specifically for speech technology purposes and recently released by CSTR. Combilex benefits from many advanced features. This paper explores one of these: the ability to generate fully-specified transcriptions for morphologically derived words automatically. This functionality was originally implemented to encode the pronunciations of derived words in terms of their constituent morphemes, thus accelerating lexicon development and ensuring a high level of consistency. In this paper, we propose that this method of modelling pronunciations can be exploited further by combining it with a morphological parser, thus yielding a method to generate full transcriptions for unknown derived words. Not only could this accelerate adding new derived words to Combilex, but it could also serve as an alternative to conventional letter-to-sound rules. This paper presents preliminary work indicating this is a promising direction.},
  month = {September},
  address = {Makuhari, Japan},
  keywords = {combilex lexicon, letter-to-sound rules, grapheme-to-phoneme conversion, morphological decomposition},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2010/IS100683.pdf},
  pages = {1974--1977}
}
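
The morpheme-based derivation described in the abstract above can be pictured with a toy sketch. The mini-lexicon, phone notation and naive concatenation rule are invented for illustration; Combilex's actual morphological encoding also handles stress shifts, allomorphy and boundary effects.

# Toy sketch: assembling a derived word's transcription from the
# transcriptions of its constituent morphemes. Lexicon and notation
# are invented; real lexica also need stress and boundary rules.
MORPH_LEX = {
    "un-": "uh n",
    "happy": "h a1 p ii",
    "-ness": "n @ s",
}

def derived_pronunciation(morphemes):
    """Concatenate morpheme transcriptions in order."""
    return " ".join(MORPH_LEX[m] for m in morphemes)

print(derived_pronunciation(["un-", "happy", "-ness"]))
# uh n h a1 p ii n @ s
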
@inproceedings{clarkrichmondking_interspeech05,
  author = {Clark, Robert A.J. and Richmond, Korin and King, Simon},
  title = {Multisyn voices from {ARCTIC} data for the {B}lizzard challenge},
  booktitle = {Proc. Interspeech 2005},
  month = {September},
  year = {2005},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2005/clarkrichmondking_interspeech05.pdf},
  abstract = {This paper describes the process of building unit selection voices for the Festival Multisyn engine using four ARCTIC datasets, as part of the Blizzard evaluation challenge. The build process is almost entirely automatic, with very little need for human intervention. We discuss the difference in the evaluation results for each voice and evaluate the suitability of the ARCTIC datasets for building this type of voice.},
  categories = {speech synthesis, festival, evaluation}
}
@inproceedings{janska_clark:2010b,
  author = {Janska, Anna C. and Clark, Robert A. J.},
  title = {Further exploration of the possibilities and pitfalls of multidimensional scaling as a tool for the evaluation of the quality of synthesized speech},
  booktitle = {The 7th ISCA Tutorial and Research Workshop on Speech Synthesis},
  abstract = {Multidimensional scaling (MDS) has been suggested as a useful tool for the evaluation of the quality of synthesized speech. However, it has not yet been extensively tested for its application in this specific area of evaluation. In a series of experiments based on data from the Blizzard Challenge 2008, the relation between Weighted Euclidean Distance Scaling and Simple Euclidean Distance Scaling is investigated to understand how aggregating data affects the MDS configuration. These results are compared to those collected as mean opinion scores (MOS). The ranks correspond, and MOS can be predicted from an object's position in the MDS-generated stimulus space. The big advantage of MDS over MOS is its diagnostic value: dimensions along which stimuli vary are not correlated, as they are in modular evaluation using MOS. Finally, we attempt to generalize from the MDS representations of the thoroughly tested subset to the aggregated data of the larger-scale Blizzard Challenge.},
  year = {2010},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2010/janskaclark_ssw7.pdf},
  pages = {142--147}
}
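
For readers unfamiliar with the technique, the MDS analysis style used in the two Janska & Clark papers above can be sketched in a few lines with scikit-learn. The dissimilarity matrix below is invented; in practice it would be aggregated from listeners' pairwise judgements of synthetic speech stimuli.

# Sketch: metric MDS over an (invented) listener dissimilarity matrix.
import numpy as np
from sklearn.manifold import MDS

stimuli = ["natural", "sysA", "sysB", "sysC"]
dissim = np.array([
    [0.0, 0.9, 0.8, 0.7],
    [0.9, 0.0, 0.3, 0.5],
    [0.8, 0.3, 0.0, 0.4],
    [0.7, 0.5, 0.4, 0.0],
])

mds = MDS(n_components=2, dissimilarity="precomputed", random_state=0)
coords = mds.fit_transform(dissim)  # one 2-D point per stimulus
for name, (x, y) in zip(stimuli, coords):
    print(f"{name}: ({x:+.2f}, {y:+.2f})")
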
@inproceedings{clarkrichmondking_ssw504,
  author = {Clark, Robert A.J. and Richmond, Korin and King, Simon},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2004/clarkrichmondking_ssw504.ps},
  title = {Festival 2 -- build your own general purpose unit selection speech synthesiser},
  booktitle = {Proc. 5th {ISCA} workshop on speech synthesis},
  year = {2004},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2004/clarkrichmondking_ssw504.pdf},
  abstract = {This paper describes version 2 of the Festival speech synthesis system. Festival 2 provides a development environment for concatenative speech synthesis, and now includes a general purpose unit selection speech synthesis engine. We discuss various aspects of unit selection speech synthesis, focusing on the research issues that relate to voice design and the automation of the voice development process.},
  categories = {synthesis, festival, unitselection}
}
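
The core of a unit selection engine such as Multisyn is a Viterbi search over candidate units that minimises the sum of target and join costs. The following is a minimal sketch of that search with toy numeric "units" and cost functions; Festival's real cost functions and pruning are far richer.

# Minimal Viterbi unit selection: pick one candidate per target position
# minimising total target cost + join cost. Toy costs, not Festival's.
def select_units(targets, candidates, target_cost, join_cost):
    # best[i][j] = (cost of best path ending at candidate j, backpointer)
    best = [[(target_cost(targets[0], u), None) for u in candidates[0]]]
    for i in range(1, len(targets)):
        row = []
        for u in candidates[i]:
            tc = target_cost(targets[i], u)
            cost, bp = min(
                (best[i - 1][k][0] + join_cost(prev, u) + tc, k)
                for k, prev in enumerate(candidates[i - 1]))
            row.append((cost, bp))
        best.append(row)
    j = min(range(len(best[-1])), key=lambda k: best[-1][k][0])
    path = []
    for i in range(len(targets) - 1, -1, -1):
        path.append(candidates[i][j])
        j = best[i][j][1]
    return list(reversed(path))

# Toy usage: costs are absolute differences; the join prefers steps of 1.0.
print(select_units([1.0, 2.0], [[0.9, 1.5], [1.8, 2.6]],
                   target_cost=lambda t, u: abs(t - u),
                   join_cost=lambda a, b: abs((b - a) - 1.0)))  # [0.9, 1.8]
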
@inproceedings{badinoclark_interspeech12,
  author = {Badino, Leonardo and Clark, Robert A.J. and Wester, Mirjam},
  title = {Towards Hierarchical Prosodic Prominence Generation in {TTS} Synthesis},
  booktitle = {Proc. Interspeech},
  year = {2012},
  address = {Portland, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2012/badinoclark_IS_2012.pdf},
  categories = {speech synthesis, prosody}
}
@inproceedings{bakerclarkwhite_ssw504,
  author = {Baker, Rachel and Clark, Robert A.J. and White, Michael},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2004/bakerclarkwhite_ssw504.ps},
  title = {Synthesising Contextually Appropriate Intonation in Limited Domains},
  booktitle = {Proc. 5th {ISCA} workshop on speech synthesis},
  year = {2004},
  address = {Pittsburgh, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2004/bakerclarkwhite_ssw504.pdf},
  categories = {synthesis, prosody, intonation, festival}
}
@inproceedings{clark_icphs99,
  author = {Clark, Robert A. J.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1999/clark_icphs99.ps},
  title = {Using Prosodic Structure to Improve Pitch Range Variation in Text to Speech Synthesis},
  booktitle = {Proc. {XIV}th International Congress of Phonetic Sciences},
  volume = {1},
  year = {1999},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1999/clark_icphs99.pdf},
  pages = {69--72},
  categories = {synthesis, prosody, intonation, festival}
}
@inproceedings{leo_09-1,
  author = {Badino, Leonardo and Andersson, J. Sebastian and Yamagishi, Junichi and Clark, Robert A.J.},
  title = {Identification of Contrast and Its Emphatic Realization in {HMM}-based Speech Synthesis},
  booktitle = {Proc. Interspeech 2009},
  year = {2009},
  month = {September},
  address = {Brighton, U.K.},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2009/IS090749.PDF},
  abstract = {The work presented in this paper proposes to identify contrast in the form of contrastive word pairs and prosodically signal it with emphatic accents in a Text-to-Speech (TTS) application using a Hidden Markov Model (HMM) based speech synthesis system. We first describe a novel method to automatically detect contrastive word pairs using textual features only and report its performance on a corpus of spontaneous conversations in English. Subsequently we describe the set of features selected to train an HMM-based speech synthesis system aimed at properly controlling prosodic prominence (including emphasis). Results from a large scale perceptual test show that in the majority of cases listeners judge emphatic contrastive word pairs to be as acceptable as their non-emphatic counterparts, while emphasis on non-contrastive pairs is almost never acceptable.}
}
@inproceedings{strom:etal:interspeech2007,
  author = {Strom, Volker and Nenkova, Ani and Clark, Robert and Vazquez-Alvarez, Yolanda and Brenier, Jason and King, Simon and Jurafsky, Dan},
  title = {Modelling Prominence and Emphasis Improves Unit-Selection Synthesis},
  booktitle = {Proc. Interspeech 2007},
  year = {2007},
  month = {August},
  address = {Antwerp, Belgium},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2007/p540.pdf},
  abstract = {We describe the results of large scale perception experiments showing improvements in synthesising two distinct kinds of prominence: standard pitch-accent and strong emphatic accents. Previously prominence assignment has been mainly evaluated by computing accuracy on a prominence-labelled test set. By contrast we integrated an automatic pitch-accent classifier into the unit selection target cost and showed that listeners preferred these synthesised sentences. We also describe an improved recording script for collecting emphatic accents, and show that generating emphatic accents leads to further improvements in the fiction genre over incorporating pitch accent only. Finally, we show differences in the effects of prominence between child-directed speech and news and fiction genres.},
  categories = {speech synthesis}
}
@inproceedings{clark_blizzard2006,
  author = {Clark, R. and Richmond, K. and Strom, V. and King, S.},
  title = {Multisyn Voices for the {B}lizzard {C}hallenge 2006},
  booktitle = {Proc. Blizzard Challenge Workshop (Interspeech Satellite)},
  year = {2006},
  month = {September},
  note = {(http://festvox.org/blizzard/blizzard2006.html)},
  key = {clark_blizzard2006},
  address = {Pittsburgh, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2006/cstr_blizzard2006.pdf},
  abstract = {This paper describes the process of building unit selection voices for the Festival Multisyn engine using the ATR dataset provided for the Blizzard Challenge 2006. We begin by discussing recent improvements that we have made to the Multisyn voice building process, prompted by our participation in the Blizzard Challenge 2006. We then go on to discuss our interpretation of the results observed. Finally, we conclude with some comments and suggestions for the formulation of future Blizzard Challenges.},
  categories = {tts, blizzard, multisyn, unit selection}
}
@inproceedings{clarkdusterhoff_eurospeech99,
  author = {Clark, Robert A. J. and Dusterhoff, Kurt E.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1999/clarkdusterhoff_eurospeech99.ps},
  title = {Objective Methods for Evaluating Synthetic Intonation},
  booktitle = {Proc. {E}urospeech 1999},
  volume = {4},
  year = {1999},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/1999/clarkdusterhoff_eurospeech99.pdf},
  pages = {1623--1626},
  categories = {synthesis, prosody, intonation}
}
@inproceedings{clark_king:proc:2006,
  author = {Clark, Robert A. J. and King, Simon},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2006/clarkking_interspeech_2006.ps},
  title = {Joint Prosodic and Segmental Unit Selection Speech Synthesis},
  booktitle = {Proc. Interspeech 2006},
  year = {2006},
  month = {September},
  address = {Pittsburgh, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2006/clarkking_interspeech_2006.pdf},
  abstract = {We describe a unit selection technique for text-to-speech synthesis which jointly searches the space of possible diphone sequences and the space of possible prosodic unit sequences in order to produce synthetic speech with more natural prosody. We demonstrate that this search, although currently computationally expensive, can achieve improved intonation compared to a baseline in which only the space of possible diphone sequences is searched. We discuss ways in which the search could be made sufficiently efficient for use in a real-time system.}
}
@incollection{Pipe_etal:2011,
  editor = {Bar-Cohen, Yoseph},
  author = {Pipe, A. G. and Vaidyanathan, R. and Melhuish, C. and Bremner, P. and Robinson, P. and Clark, R. A. J. and Lenz, A. and Eder, K. and Hawes, N. and Ghahramani, Z. and Fraser, M. and Mermehdi, M. and Healey, P. and Skachek, S.},
  chapter = {15},
  publisher = {Taylor and Francis},
  booktitle = {Biomimetics: Nature-Based Innovation},
  year = {2011},
  title = {Affective Robotics: Human Motion and Behavioural Inspiration for Cooperation between Humans and Assistive Robots}
}
@inproceedings{janskaetal_interspeech12,
  author = {Janska, Anna C. and Schröger, Erich and Jacobsen, Thomas and Clark, Robert A. J.},
  title = {Asymmetries in the perception of synthesized speech},
  booktitle = {Proc. Interspeech},
  year = {2012},
  address = {Portland, USA},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2012/janskaeral_IS_2012.pdf},
  categories = {speech synthesis, evaluation}
}
@inproceedings{richmond2009a,
  author = {Richmond, K. and Clark, R. and Fitt, S.},
  title = {Robust {LTS} rules with the {Combilex} speech technology lexicon},
  booktitle = {Proc. Interspeech},
  year = {2009},
  abstract = {Combilex is a high quality pronunciation lexicon aimed at speech technology applications that has recently been released by CSTR. Combilex benefits from several advanced features. This paper evaluates one of these: the explicit alignment of phones to graphemes in a word. This alignment can help to rapidly develop robust and accurate letter-to-sound (LTS) rules, without needing to rely on automatic alignment methods. To evaluate this, we used Festival's LTS module, comparing its standard automatic alignment with Combilex's explicit alignment. Our results show using Combilex's alignment improves LTS accuracy: 86.50\% words correct as opposed to 84.49\%, with our most general form of lexicon. In addition, building LTS models is greatly accelerated, as the need to list allowed alignments is removed. Finally, loose comparison with other studies indicates Combilex is a superior quality lexicon in terms of consistency and size.},
  month = {September},
  address = {Brighton, UK},
  keywords = {combilex, letter-to-sound rules, grapheme-to-phoneme conversion},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2009/IS090308.pdf},
  pages = {1295--1298}
}
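
The benefit of an explicitly aligned lexicon, as described above, is that LTS training reduces to ordinary classification over letter-context windows, with no automatic alignment step. A minimal sketch under invented data, using a decision tree in place of Festival's CART-based LTS module:

# Sketch: letter-to-sound from a lexicon whose letters are already
# aligned one-to-one with phones (as Combilex provides). Invented data;
# a decision tree stands in for Festival's CART LTS module.
from sklearn.tree import DecisionTreeClassifier

aligned = [("cat", ["k", "a", "t"]),
           ("city", ["s", "i", "t", "ii"]),
           ("cot", ["k", "o", "t"])]

def windows(word, width=1):
    """A (prev, this, next) letter window for each letter."""
    padded = "#" * width + word + "#" * width
    return [tuple(padded[i:i + 2 * width + 1]) for i in range(len(word))]

X = [[ord(c) for c in w] for word, _ in aligned for w in windows(word)]
y = [p for _, phones in aligned for p in phones]
tree = DecisionTreeClassifier(random_state=0).fit(X, y)

# Predict phones for an unseen word, letter by letter.
print([tree.predict([[ord(c) for c in w]])[0] for w in windows("cit")])
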
@phdthesis{clark_phd03,
  author = {Clark, Robert A. J.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2003/clark_phd03.ps.gz},
  school = {The University of Edinburgh},
  title = {Generating Synthetic Pitch Contours Using Prosodic Structure},
  year = {2003},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2003/clark_phd03.pdf},
  categories = {speech synthesis, prosody, intonation, festival}
}
@inproceedings{leo_08-2,
  author = {Badino, Leonardo and Clark, Robert A.J. and Strom, Volker},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2008/IS080159.ps},
  title = {Including Pitch Accent Optionality in Unit Selection Text-to-Speech Synthesis},
  booktitle = {Proc.~Interspeech},
  year = {2008},
  address = {Brisbane},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2008/IS080159.pdf},
  abstract = {A significant variability in pitch accent placement is found when comparing the patterns of prosodic prominence realized by different English speakers reading the same sentences. In this paper we describe a simple approach to incorporate this variability to synthesize prosodic prominence in unit selection text-to-speech synthesis. The main motivation of our approach is that by taking into account the variability of accent placements we enlarge the set of prosodically acceptable speech units, thus increasing the chances of selecting a good quality sequence of units, both in prosodic and segmental terms. Results on a large scale perceptual test show the benefits of our approach and indicate directions for further improvements.},
  categories = {speech synthesis, unit selection, prosodic prominence, pitch accents}
}
@inproceedings{strom06,
  author = {Strom, Volker and Clark, Robert and King, Simon},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2006/strom06.ps},
  title = {Expressive Prosody for Unit-selection Speech Synthesis},
  booktitle = {Proc.~Interspeech},
  year = {2006},
  address = {Pittsburgh},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2006/strom06.pdf},
  abstract = {Current unit selection speech synthesis voices cannot produce emphasis or interrogative contours because of a lack of the necessary prosodic variation in the recorded speech database. A method of recording script design is proposed which addresses this shortcoming. Appropriate components were added to the target cost function of the Festival Multisyn engine, and a perceptual evaluation showed a clear preference over the baseline system.}
}
@inproceedings{morgan:08,
  author = {Morgan, Maggie and McGee-Lennon, Marilyn R. and Hine, Nick and Arnott, John and Martin, Chris and Clark, Julia S. and Wolters, Maria},
  booktitle = {Proc. 26th Conference on Computer-Human Interaction},
  address = {Florence, Italy},
  year = {2008},
  title = {Requirements Gathering with Diverse User Groups and Stakeholders}
}
@inproceedings{mayoclarkking-isp05,
  author = {Mayo, C. and Clark, R. A. J. and King, S.},
  title = {Multidimensional Scaling of Listener Responses to Synthetic Speech},
  booktitle = {Proc. Interspeech 2005},
  year = {2005},
  month = {September},
  address = {Lisbon, Portugal},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2005/ie-speech-2005.pdf}
}
@inproceedings{clark_icphs03,
  author = {Clark, Robert A. J.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2003/clark_icphs03.ps},
  title = {Modelling Pitch Accents for Concept-to-Speech Synthesis.},
  booktitle = {Proc. XVth International Congress of Phonetic Sciences},
  volume = {2},
  year = {2003},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2003/clark_icphs03.pdf},
  pages = {1141--1144},
  categories = {speech synthesis, prosody, intonation, festival}
}
@inproceedings{karaiskos:king:clark:mayo:blizzard2008,
  author = {Karaiskos, Vasilis and King, Simon and Clark, Robert A. J. and Mayo, Catherine},
  title = {The Blizzard Challenge 2008},
  booktitle = {Proc. Blizzard Challenge Workshop},
  year = {2008},
  month = {September},
  address = {Brisbane, Australia},
  keywords = {Blizzard},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2008/summary_Blizzard2008.pdf},
  abstract = {The Blizzard Challenge 2008 was the fourth annual Blizzard Challenge. This year, participants were asked to build two voices from a UK English corpus and one voice from a Mandarin Chinese corpus. This is the first time that a language other than English has been included and also the first time that a large UK English corpus has been available. In addition, the English corpus contained somewhat more expressive speech than that found in corpora used in previous Blizzard Challenges. To assist participants with limited resources or limited experience in UK-accented English or Mandarin, unaligned labels were provided for both corpora and for the test sentences. Participants could use the provided labels or create their own. An accent-specific pronunciation dictionary was also available for the English speaker. A set of test sentences was released to participants, who were given a limited time in which to synthesise them and submit the synthetic speech. An online listening test was conducted to evaluate naturalness, intelligibility and degree of similarity to the original speaker.}
}
@article{clarkrichmondking_specom2007,
  author = {Clark, Robert A. J. and Richmond, Korin and King, Simon},
  doi = {10.1016/j.specom.2007.01.014},
  title = {Multisyn: Open-domain unit selection for the {F}estival speech synthesis system},
  journal = {Speech Communication},
  number = {4},
  abstract = {We present the implementation and evaluation of an open-domain unit selection speech synthesis engine designed to be flexible enough to encourage further unit selection research and allow rapid voice development by users with minimal speech synthesis knowledge and experience. We address the issues of automatically processing speech data into a usable voice using automatic segmentation techniques and how the knowledge obtained at labelling time can be exploited at synthesis time. We describe target cost and join cost implementation for such a system and describe the outcome of building voices with a number of different sized datasets. We show that, in a competitive evaluation, voices built using this technology compare favourably to other systems.},
  volume = {49},
  year = {2007},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2007/clarkrichmondking_specom2007.pdf},
  pages = {317--330},
  categories = {speech synthesis, festival, multisyn, unitselection}
}
@article{mayo:clark:king:10,
  author = {Mayo, C. and Clark, R. A. J. and King, S.},
  doi = {10.1016/j.specom.2010.10.003},
  title = {Listeners' Weighting of Acoustic Cues to Synthetic Speech Naturalness: A Multidimensional Scaling Analysis},
  journal = {Speech Communication},
  number = {3},
  abstract = {The quality of current commercial speech synthesis systems is now so high that system improvements are being made at subtle sub- and supra-segmental levels. Human perceptual evaluation of such subtle improvements requires a highly sophisticated level of perceptual attention to specific acoustic characteristics or cues. However, it is not well understood what acoustic cues listeners attend to by default when asked to evaluate synthetic speech. It may, therefore, be potentially quite difficult to design an evaluation method that allows listeners to concentrate on only one dimension of the signal, while ignoring others that are perceptually more important to them. The aim of the current study was to determine which acoustic characteristics of unit-selection synthetic speech are most salient to listeners when evaluating the naturalness of such speech. This study made use of multidimensional scaling techniques to analyse listeners' pairwise comparisons of synthetic speech sentences. Results indicate that listeners place a great deal of perceptual importance on the presence of artifacts and discontinuities in the speech, somewhat less importance on aspects of segmental quality, and very little importance on stress/intonation appropriateness. These relative differences in importance will impact on listeners' ability to attend to these different acoustic characteristics of synthetic speech, and should therefore be taken into account when designing appropriate methods of synthetic speech evaluation.},
  volume = {53},
  year = {2011},
  keywords = {Speech synthesis; Evaluation; Speech perception; Acoustic cue weighting; Multidimensional scaling},
  pages = {311--326}
}
@inproceedings{richmond2007b,
  author = {Richmond, K. and Strom, V. and Clark, R. and Yamagishi, J. and Fitt, S.},
  title = {Festival Multisyn Voices for the 2007 Blizzard Challenge},
  booktitle = {Proc. Blizzard Challenge Workshop (in Proc. SSW6)},
  year = {2007},
  month = {August},
  key = {richmond2007b},
  address = {Bonn, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2007/blizzard2007paper.pdf},
  abstract = {This paper describes selected aspects of the Festival Multisyn entry to the Blizzard Challenge 2007. We provide an overview of the process of building the three required voices from the speech data provided. This paper focuses on new features of Multisyn which are currently under development and which have been employed in the system used for this Blizzard Challenge. These differences are the application of a more flexible phonetic lattice representation during forced alignment labelling and the use of a pitch accent target cost component. Finally, we also examine aspects of the speech data provided for this year's Blizzard Challenge and raise certain issues for discussion concerning the aim of comparing voices made with differing subsets of the data provided.},
  categories = {tts, blizzard, multisyn, unit selection}
}
@inproceedings{leo_08-1,
  author = {Badino, Leonardo and Clark, Robert A.J.},
  title = {Automatic labeling of contrastive word pairs from spontaneous spoken English},
  booktitle = {Proc. 2008 IEEE/ACL Workshop on Spoken Language Technology},
  year = {2008},
  address = {Goa, India},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2008/0000101.pdf},
  abstract = {This paper addresses the problem of automatically labeling contrast in spontaneous speech, where contrast is meant as a relation that ties two words that explicitly contrast with each other. Detection of contrast is certainly relevant in the analysis of discourse and information structure and also, because of the prosodic correlates of contrast, could play an important role in speech applications, such as text-to-speech synthesis, that need an accurate and discourse-context-related modeling of prosody. With this prospect we investigate the feasibility of automatic contrast labeling by training and evaluating on the Switchboard corpus a novel contrast tagger, based on Support Vector Machines (SVM), that combines lexical features, syntactic dependencies and WordNet semantic relations.}
}
@inproceedings{hofer-eurosp05,
  author = {Hofer, G. and Richmond, K. and Clark, R.},
  ps = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2005/hofer_emosyn.ps},
  title = {Informed Blending of Databases for Emotional Speech Synthesis},
  booktitle = {Proc. Interspeech},
  month = {September},
  year = {2005},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2005/hofer_emosyn.pdf},
  abstract = {The goal of this project was to build a unit selection voice that could portray emotions with varying intensities. A suitable definition of an emotion was developed along with a descriptive framework that supported the work carried out. A single speaker was recorded portraying happy and angry speaking styles. Additionally a neutral database was also recorded. A target cost function was implemented that chose units according to emotion mark-up in the database. The Dictionary of Affect supported the emotional target cost function by providing an emotion rating for words in the target utterance. If a word was particularly 'emotional', units from that emotion were favoured. In addition, intensity could be varied, which resulted in a bias to select a greater number of emotional units. A perceptual evaluation was carried out and subjects were able to reliably recognise emotions with varying amounts of emotional units present in the target utterance.},
  categories = {speech synthesis,emotion,edinburgh}
}
@inproceedings{anderssonetal2010_ssw7,
  author = {Andersson, Sebastian and Yamagishi, Junichi and Clark, Robert},
  title = {Utilising Spontaneous Conversational Speech in {HMM}-Based Speech Synthesis},
  booktitle = {The 7th ISCA Tutorial and Research Workshop on Speech Synthesis},
  month = {September},
  year = {2010},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2010/ssw7_paper.pdf},
  abstract = {Spontaneous conversational speech has many characteristics that are currently not well modelled in unit selection and HMM-based speech synthesis. But in order to build synthetic voices more suitable for interaction we need data that exhibits more conversational characteristics than the generally used read aloud sentences. In this paper we show how carefully selected utterances from a spontaneous conversation were instrumental in building an HMM-based synthetic voice with more natural sounding conversational characteristics than a voice based on carefully read aloud sentences. We also investigated a style blending technique as a solution to the inherent problem of phonetic coverage in spontaneous speech data. But the lack of an appropriate representation of spontaneous speech phenomena probably contributed to results showing that we could not yet compete with the speech quality achieved for grammatical sentences.},
  categories = {HMM, speech synthesis, spontaneous speech, conversation, lexical fillers, filled pauses}
}
@inproceedings{CalzadaClark2013,
  author = {Defez, Àngel Calzada and Carrié, Joan Claudi Socoró and Clark, Robert},
  title = {Parametric model for vocal effort interpolation with Harmonics Plus Noise Models},
  booktitle = {Proc. 8th ISCA Speech Synthesis Workshop},
  abstract = {It is known that voice quality plays an important role in expressive speech. In this paper, we present a methodology for modifying vocal effort level, which can be applied by text-to-speech (TTS) systems to provide the flexibility needed to improve the naturalness of synthesized speech. This extends previous work using low order Linear Prediction Coefficients (LPC) where the flexibility was constrained by the number of vocal effort levels available in the corpora. The proposed methodology overcomes these limitations by replacing the low order LPC by ninth order polynomials to allow not only vocal effort to be modified towards the available templates, but also to allow the generation of intermediate vocal effort levels between levels available in training data. This flexibility comes from the combination of Harmonics plus Noise Models and using a parametric model to represent the spectral envelope. The conducted perceptual tests demonstrate the effectiveness of the proposed technique in performing vocal effort interpolations while maintaining the signal quality in the final synthesis. The proposed technique can be used in unit-selection TTS systems to reduce corpus size while increasing its flexibility, and the techniques could potentially be employed by HMM based speech synthesis systems if appropriate acoustic features are being used.},
  year = {2013},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS1-1_Calzada_Defez.pdf},
  pages = {25--30},
  categories = {vocal effort interpolation, harmonics plus noise model, expressive speech synthesis}
}
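
The interpolation mechanism described in the abstract above can be pictured as follows: fit a ninth order polynomial to the log-magnitude spectral envelope of each vocal effort level, then blend the coefficients to reach intermediate levels. The envelopes below are synthetic stand-ins, and the real method operates inside a Harmonics plus Noise Model analysis/synthesis framework, so this is a shape-of-the-idea sketch only.

# Sketch: ninth-order polynomial envelopes for a "soft" and a "loud"
# frame, blended to synthesise an intermediate vocal effort level.
# Envelopes are invented; the paper works within an HNM framework.
import numpy as np

freqs = np.linspace(0.0, 1.0, 64)             # normalised frequency axis
soft_env = -20.0 * freqs + np.sin(6 * freqs)  # invented log-magnitude envelopes
loud_env = -8.0 * freqs + 1.5 * np.sin(6 * freqs)

p_soft = np.polyfit(freqs, soft_env, deg=9)   # ninth-order fits
p_loud = np.polyfit(freqs, loud_env, deg=9)

def interpolate_effort(alpha):
    """alpha=0 -> soft, alpha=1 -> loud, in between -> intermediate."""
    return np.polyval((1.0 - alpha) * p_soft + alpha * p_loud, freqs)

print(interpolate_effort(0.5)[:4])            # an intermediate envelope
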
@inproceedings{Mamiya_SSW8,
  author = {Mamiya, Yoshitaka and Stan, Adriana and Yamagishi, Junichi and Bell, Peter and Watts, Oliver and Clark, Robert and King, Simon},
  title = {Using Adaptation to Improve Speech Transcription Alignment in Noisy and Reverberant Environments},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {When using data retrieved from the internet to create new speech databases, the recording conditions can often be highly variable within and between sessions. This variance influences the overall performance of any automatic speech and text alignment techniques used to process this data. In this paper we discuss the use of speaker adaptation methods to address this issue. Starting from a baseline system for automatic sentence-level segmentation and speech and text alignment based on GMMs and grapheme HMMs, respectively, we employ Maximum A Posteriori (MAP) and Constrained Maximum Likelihood Linear Regression (CMLLR) techniques to model the variation in the data in order to increase the amount of confidently aligned speech. We tested 29 different scenarios, which include reverberation, 8 talker babble noise and white noise, each in various combinations and SNRs. Results show that the MAP-based segmentation's performance is very much influenced by the noise type, as well as the presence or absence of reverberation. On the other hand, the CMLLR adaptation of the acoustic models gives an average 20\% increase in the aligned data percentage for the majority of the studied scenarios.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_PS1-4_Mamiya.pdf},
  pages = {61--66}
}
@inproceedings{Watts_SSW8,
  author = {Watts, Oliver and Stan, Adriana and Clark, Rob and Mamiya, Yoshitaka and Giurgiu, Mircea and Yamagishi, Junichi and King, Simon},
  title = {Unsupervised and lightly-supervised learning for rapid construction of {TTS} systems in multiple languages from 'found' data: evaluation and analysis},
  booktitle = {8th ISCA Workshop on Speech Synthesis},
  year = {2013},
  abstract = {This paper presents techniques for building text-to-speech front-ends in a way that avoids the need for language-specific expert knowledge, but instead relies on universal resources (such as the Unicode character database) and unsupervised learning from unannotated data to ease system development. The acquisition of expert language-specific knowledge and expert annotated data is a major bottleneck in the development of corpus-based TTS systems in new languages. The methods presented here side-step the need for such resources as pronunciation lexicons, phonetic feature sets, part of speech tagged data, etc. The paper explains how the techniques introduced are applied to the 14 languages of a corpus of `found' audiobook data. Results of an evaluation of the intelligibility of the systems resulting from applying these novel techniques to this data are presented.},
  month = {August},
  address = {Barcelona, Spain},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/ssw8_OS2-3_Watts.pdf},
  pages = {121--126}
}
@inproceedings{Stan_IS13,
  author = {Stan, Adriana and Watts, Oliver and Mamiya, Yoshitaka and Giurgiu, Mircea and Clark, Rob and Yamagishi, Junichi and King, Simon},
  title = {{TUNDRA: A Multilingual Corpus of Found Data for TTS Research Created with Light Supervision}},
  booktitle = {Proc. Interspeech},
  year = {2013},
  month = {August},
  address = {Lyon, France},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/IS131055.pdf},
  abstract = {Simple4All Tundra (version 1.0) is the first release of a standardised multilingual corpus designed for text-to-speech research with imperfect or found data. The corpus consists of approximately 60 hours of speech data from audiobooks in 14 languages, as well as utterance-level alignments obtained with a lightly-supervised process. Future versions of the corpus will include finer-grained alignment and prosodic annotation, all of which will be made freely available. This paper gives a general outline of the data collected so far, as well as a detailed description of how this has been done, emphasizing the minimal language-specific knowledge and manual intervention used to compile the corpus. To demonstrate its potential use, text-to-speech systems have been built for all languages using unsupervised or lightly supervised methods, also briefly presented in the paper.}
}
@inproceedings{Mamiya_13a,
  author = {Mamiya, Yoshitaka and Yamagishi, Junichi and Watts, Oliver and Clark, Robert A.J. and King, Simon and Stan, Adriana},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2013/0007987.pdf},
  booktitle = {Proc. ICASSP},
  year = {2013},
  abstract = {Audiobooks have been focused on as promising data for training Text-to-Speech (TTS) systems. However, they usually do not have a correspondence between audio and text data. Moreover, they are usually divided only into chapter units. In practice, we have to make a correspondence of audio and text data before we use them for building TTS synthesisers. However aligning audio and text data is time-consuming and involves manual labor. It also requires persons skilled in speech processing. Previously, we have proposed to use graphemes for automatically aligning speech and text data. This paper further integrates a lightly supervised voice activity detection (VAD) technique to detect sentence boundaries as a pre-processing step before the grapheme approach. This lightly supervised technique requires time stamps of speech and silence only for the first fifty sentences. Combining those, we can semi-automatically build TTS systems from audiobooks with minimum manual intervention. From subjective evaluations we analyse how the grapheme-based aligner and/or the proposed VAD technique impact the quality of HMM-based speech synthesisers trained on audiobooks.},
  title = {Lightly Supervised {GMM} {VAD} to Use Audiobook for Speech Synthesiser}
}
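
The lightly supervised VAD idea in the entry above (time stamps for only the first fifty sentences) amounts to training one GMM for speech and one for silence on the small labelled portion, then labelling the rest by likelihood comparison. A minimal sketch with invented log-energy features:

# Sketch: lightly supervised GMM VAD. Two GMMs trained on the labelled
# portion of the data, then frame-wise likelihood comparison. Features
# here are invented log-energies; real systems use richer features.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
speech = rng.normal(-2.0, 1.0, (500, 1))   # labelled speech frames (invented)
silence = rng.normal(-8.0, 1.0, (500, 1))  # labelled silence frames (invented)

gmm_speech = GaussianMixture(n_components=2, random_state=0).fit(speech)
gmm_silence = GaussianMixture(n_components=2, random_state=0).fit(silence)

def is_speech(frames):
    """True where the speech GMM out-scores the silence GMM."""
    return gmm_speech.score_samples(frames) > gmm_silence.score_samples(frames)

print(is_speech(np.array([[-1.5], [-7.5], [-3.0]])))  # [ True False  True]
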
@article{mayo_gibbon_clark_jslhr13,
  author = {Mayo, Catherine and Gibbon, Fiona and Clark, Robert A. J.},
  doi = {10.1044/1092-4388(2012/10-0280)},
  title = {Phonetically Trained and Untrained Adults' Transcription of Place of Articulation for Intervocalic Lingual Stops With Intermediate Acoustic Cues},
  journal = {Journal of Speech, Language and Hearing Research},
  abstract = {Purpose: In this study, the authors aimed to investigate how listener training and the presence of intermediate acoustic cues influence transcription variability for conflicting cue speech stimuli. Method: Twenty listeners with training in transcribing disordered speech, and 26 untrained listeners, were asked to make forced-choice labeling decisions for synthetic vowel–consonant–vowel (VCV) sequences "a doe" and "a go". Both the VC and CV transitions in these stimuli ranged through intermediate positions, from appropriate for /d/ to appropriate for /g/. Results: Both trained and untrained listeners gave more weight to the CV transitions than to the VC transitions. However, listener behavior was not uniform: The results showed a high level of inter- and intratranscriber inconsistency, with untrained listeners showing a nonsignificant tendency to be more influenced than trained listeners by CV transitions. Conclusions: Listeners do not assign consistent categorical labels to the type of intermediate, conflicting transitional cues that were present in the stimuli used in the current study and that are also present in disordered articulations. Although listener inconsistency in assigning labels to intermediate productions is not increased as a result of phonetic training, neither is it reduced by such training.},
  volume = {56},
  year = {2013},
  keywords = {speech perception, intermediate acoustic cues, phonetic transcription, multilevel logistic regression},
  pages = {779--791}
}
@inproceedings{abelman_clark_sp2014,
  author = {Abelman, David and Clark, Robert},
  title = {Altering Speech Synthesis Prosody Through Real Time Natural Gestural Control},
  booktitle = {Proc. Speech Prosody 2014},
  address = {Dublin, Ireland},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/AblemanClark2014.pdf},
  abstract = {This paper investigates the usage of natural gestural controls to alter synthesised speech prosody in real time (for example, recognising a one-handed beat as a cue to emphasise a certain word in a synthesised sentence). A user's gestures are recognised using a Microsoft Kinect sensor, and synthesised speech prosody is altered through a series of hand-crafted rules running through a modified HTS engine (pHTS, developed at Université de Mons). Two sets of preliminary experiments are carried out. Firstly, it is shown that users can control the device to a moderate level of accuracy, though this is projected to improve further as the system is refined. Secondly, it is shown that the prosody of the altered output is significantly preferred to that of the baseline pHTS synthesis. Future work is recommended to focus on learning gestural and prosodic rules from data, and on using an updated version of the underlying pHTS engine. The reader is encouraged to watch a short video demonstration of the work at http://tinyurl.com/gesture-prosody.},
  categories = {speech synthesis, prosody}
}
@inproceedings{zhang_clark_wang:2014,
  author = {Zhang, Wei and Clark, Robert A. J. and Wang, Yongyuan},
  title = {Unsupervised Language Filtering using the Latent {D}irichlet Allocation},
  booktitle = {Proc. Interspeech},
  abstract = {To automatically build, from scratch, the language processing component for a speech synthesis system in a new language, a purified text corpus is needed where any words and phrases from other languages are clearly identified or excluded. When using found data and where there is no inherent linguistic knowledge of the language/languages contained in the data, identifying the pure data is a difficult problem. We propose an unsupervised language identification approach based on Latent Dirichlet Allocation where we take the raw n-gram count as features without any smoothing, pruning or interpolation. The Latent Dirichlet Allocation topic model is reformulated for the language identification task and Collapsed Gibbs Sampling is used to train an unsupervised language identification model. We show that such a model is highly capable of identifying the primary language in a corpus and filtering out other languages present.},
  month = {September},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/zhang2014.pdf},
  pages = {1268--1272},
  categories = {Language Filtering, Language Purification, Language Identification}
}
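
A rough analogue of the filtering approach above can be put together with scikit-learn: treat each sentence as a bag of raw character n-gram counts and fit a two-topic LDA, hoping the topics separate by language. Note that scikit-learn's LDA uses variational inference rather than the Collapsed Gibbs Sampling used in the paper, and the sentences below are invented, so this shows only the shape of the idea.

# Rough sketch of LDA-based language filtering over raw character
# trigram counts (no smoothing or pruning). Variational LDA here,
# not the paper's Collapsed Gibbs Sampling; data invented.
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

sentences = [
    "the cat sat on the mat",
    "a dog barked at the cat",
    "le chat dort sur le tapis",
    "le chien aboie dans le jardin",
]

counts = CountVectorizer(analyzer="char", ngram_range=(3, 3)).fit_transform(sentences)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(counts)

# Each row: the sentence's mixture over the two inferred "languages";
# sentences dominated by the minority topic would be filtered out.
print(lda.transform(counts).round(2))
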
@inproceedings{palmazlopezpelaez_clark:2014,
  author = {López-Peláez, Susana Palmaz and Clark, Robert A. J.},
  title = {Speech synthesis reactive to dynamic noise environmental conditions},
  booktitle = {Proc. Interspeech},
  abstract = {This paper addresses the issue of generating synthetic speech in changing noise conditions. We investigate the potential improvements that can be introduced by using a speech synthesiser that is able to modulate between a normal speech style and a speech style produced in a noisy environment according to a changing level of noise. We demonstrate that an adaptive system where the speech style is changed to suit the noise conditions maintains intelligibility and improves naturalness compared to traditional systems.},
  month = {September},
  year = {2014},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/PalmaLopezPelaez2014.pdf},
  pages = {2927--2931},
  categories = {Reactive Speech synthesis, Lombard Speech}
}
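
The reactive behaviour described above reduces, at its simplest, to mapping a running noise-level estimate to a blend weight between the normal and the noise-adapted (Lombard) speech style. The dB thresholds below are invented; in the paper the actual style modulation happens inside the synthesiser.

# Sketch: noise level (dB) -> blend weight between normal (0.0) and
# Lombard (1.0) speaking styles. Thresholds are invented.
def lombard_weight(noise_db, quiet_db=40.0, loud_db=75.0):
    w = (noise_db - quiet_db) / (loud_db - quiet_db)
    return min(1.0, max(0.0, w))

for level in (35, 55, 80):
    print(level, "dB ->", round(lombard_weight(level), 2))
# 35 dB -> 0.0,  55 dB -> 0.43,  80 dB -> 1.0
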
@inproceedings{garner2014translation,
  author = {Garner, Philip N. and Clark, Rob and Goldman, Jean-Philippe and Honnet, Pierre-Edouard and Ivanova, Maria and Lazaridis, Alexandros and Liang, Hui and Pfister, Beat and Ribeiro, Manuel Sam and Wehrli, Eric and others},
  title = {Translation and Prosody in Swiss Languages},
  booktitle = {Nouveaux cahiers de linguistique française, 31. 3rd Swiss Workshop on Prosody},
  year = {2014},
  month = {September},
  address = {Geneva, Switzerland},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2014/Garner:14.pdf},
  abstract = {The SIWIS project aims to investigate spoken language translation, where both the speaker characteristics and prosody are translated. This means the translation carries not only spoken content, but also speaker identification, emotion and intent. We describe the background of the project, and present some initial approaches and results. These include the design and collection of a Swiss bilingual database that both enables research in Swiss accented speech processing, and facilitates reliable evaluation.},
  categories = {automatic speech recognition, text-to-speech synthesis, speech-to-speech translation, prosody}
}
@inproceedings{ribeiro2015multilevel,
  author = {Ribeiro, Manuel Sam and Clark, Robert A. J.},
  title = {A Multi-Level Representation of f0 using the Continuous Wavelet Transform and the Discrete Cosine Transform},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP},
  year = {2015},
  month = {April},
  address = {Brisbane, Australia},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ribeiro_and_clark_icassp15.pdf},
  abstract = {We propose a representation of f0 using the Continuous Wavelet Transform (CWT) and the Discrete Cosine Transform (DCT). The CWT decomposes the signal into various scales of selected frequencies, while the DCT compactly represents complex contours as a weighted sum of cosine functions. The proposed approach has the advantage of combining signal decomposition and higher-level representations, thus modeling low-frequencies at higher levels and high-frequencies at lower-levels. Objective results indicate that this representation improves f0 prediction over traditional short-term approaches. Subjective results show that improvements are seen over the typical MSD-HMM and are comparable to the recently proposed CWT-HMM, while using less parameters. These results are discussed and future lines of research are proposed.},
  categories = {prosody, HMM-based synthesis, f0 modeling, continuous wavelet transform, discrete cosine transform}
}
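The f0 representation sketched in this abstract is straightforward to prototype. Below is a minimal Python sketch, not the authors' implementation: it assumes a continuous (interpolated) f0 contour, uses PyWavelets' Mexican hat CWT over dyadic scales, and keeps only the leading DCT coefficients of each scale; the paper's exact wavelet, scales, and coefficient counts may differ.

    import numpy as np
    import pywt
    from scipy.fft import dct

    def cwt_dct_representation(f0, num_scales=5, num_dct=10):
        # Decompose the contour into dyadic CWT scales (a hypothetical choice),
        # then compactly represent each scale by its low-order DCT coefficients.
        scales = 2.0 ** np.arange(1, num_scales + 1)
        coeffs, _ = pywt.cwt(f0, scales, 'mexh')  # shape: (num_scales, len(f0))
        # Low-order cosines capture the broad shape of each scale's contour.
        return np.stack([dct(c, norm='ortho')[:num_dct] for c in coeffs])

A lower-dimensional parameterisation falls out directly: num_scales * num_dct coefficients per contour instead of one value per frame.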
@inproceedings{ribeiro2015perceptual,
  author = {Ribeiro, Manuel Sam and Yamagishi, Junichi and Clark, Robert A. J.},
  title = {A Perceptual Investigation of Wavelet-based Decomposition of f0 for Text-to-Speech Synthesis},
  booktitle = {Proc. Interspeech},
  year = {2015},
  month = {September},
  address = {Dresden, Germany},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2015/ribeiro_et_al_IS15.pdf},
  abstract = {The Continuous Wavelet Transform (CWT) has been recently proposed to model f0 in the context of speech synthesis. It was shown that systems using signal decomposition with the CWT tend to outperform systems that model the signal directly. The f0 signal is typically decomposed into various scales of differing frequency. In these experiments, we reconstruct f0 with selected frequencies and ask native listeners to judge the naturalness of synthesized utterances with respect to natural speech. Results indicate that HMM-generated f0 is comparable to the CWT low frequencies, suggesting it mostly generates utterances with neutral intonation. Middle frequencies achieve very high levels of naturalness, while very high frequencies are mostly noise.},
  categories = {speech synthesis, prosody, f0 modeling, continuous wavelet transform, perceptual experiments}
}
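The reconstruction experiment described above can be caricatured in the same framework. The sketch below sums a chosen subset of Mexican hat CWT scales, using the (i + 2.5)^(-5/2) weighting often used for approximate CWT reconstruction in this line of f0 work; both the scale set and the weighting are assumptions here, not the paper's exact recipe.

    import numpy as np
    import pywt

    def reconstruct_from_scales(f0, keep, num_scales=10):
        # Decompose into CWT scales, then rebuild the contour from the
        # subset of scale indices in `keep` (0 = highest frequency here).
        scales = 2.0 ** np.arange(1, num_scales + 1)
        coeffs, _ = pywt.cwt(f0, scales, 'mexh')
        weights = (np.arange(num_scales) + 2.5) ** (-5.0 / 2.0)  # assumed heuristic
        return sum(coeffs[i] * weights[i] for i in keep)

Keeping only the low-frequency scales approximates the neutral-intonation condition the abstract mentions, while the middle scales carried most of the perceived naturalness in the listening tests.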
@article{stan-2016,
  author = {Stan, Adriana and Mamiya, Yoshitaka and Yamagishi, Junichi and Bell, Peter and Watts, Oliver and Clark, Rob and King, Simon},
  doi = {10.1016/j.csl.2015.06.006},
  title = {{ALISA}: An automatic lightly supervised speech segmentation and alignment tool},
  url = {http://www.sciencedirect.com/science/article/pii/S0885230815000650},
  journal = {Computer Speech and Language},
  issn = {0885-2308},
  abstract = {This paper describes the ALISA tool, which implements a lightly supervised method for sentence-level alignment of speech with imperfect transcripts. Its intended use is to enable the creation of new speech corpora from a multitude of resources in a language-independent fashion, thus avoiding the need to record or transcribe speech data. The method is designed so that it requires minimum user intervention and expert knowledge, and it is able to align data in languages which employ alphabetic scripts. It comprises a GMM-based voice activity detector and a highly constrained grapheme-based speech aligner. The method is evaluated objectively against a gold standard segmentation and transcription, as well as subjectively through building and testing speech synthesis systems from the retrieved data. Results show that on average, 70% of the original data is correctly aligned, with a word error rate of less than 0.5%. In one case, subjective listening tests show a small but statistically significant preference for voices built on the gold transcript; in the other tests, no statistically significant differences are found between systems built from the fully supervised training data and those built with the proposed method.},
  volume = {35},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/stan-2016.pdf},
  pages = {116--133},
  categories = {Speech segmentation, speech and text alignment, grapheme acoustic models, lightly supervised system, imperfect transcripts}
}
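ALISA's first stage, the GMM-based voice activity detector, is easy to sketch. The toy version below fits a two-component GMM to per-frame log-energies and labels the higher-mean component as speech; the real tool uses richer features and further constraints, so treat every name and choice here as illustrative.

    import numpy as np
    from sklearn.mixture import GaussianMixture

    def gmm_vad(frame_log_energies):
        # Fit a 2-component GMM to scalar frame energies; the component
        # with the larger mean is taken to model speech frames.
        X = np.asarray(frame_log_energies).reshape(-1, 1)
        gmm = GaussianMixture(n_components=2, random_state=0).fit(X)
        speech_component = int(np.argmax(gmm.means_.ravel()))
        return gmm.predict(X) == speech_component  # boolean mask per frame

The second stage, the constrained grapheme-based aligner, then only has to decide where the detected speech regions line up with the imperfect transcript.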
@inproceedings{merritt2016hybrid,
  author = {Merritt, Thomas and Clark, Robert A. J. and Wu, Zhizheng and Yamagishi, Junichi and King, Simon},
  title = {Deep neural network-guided unit selection synthesis},
  booktitle = {Proc. ICASSP},
  year = {2016},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/Merritt_ICASSP2016.pdf},
  abstract = {Vocoding of speech is a standard part of statistical parametric speech synthesis systems. It imposes an upper bound on the naturalness that can possibly be achieved. Hybrid systems using parametric models to guide the selection of natural speech units can combine the benefits of robust statistical models with the high level of naturalness of waveform concatenation. Existing hybrid systems use Hidden Markov Models (HMMs) as the statistical model. This paper demonstrates that the superiority of Deep Neural Network (DNN) acoustic models over HMMs in conventional statistical parametric speech synthesis also carries over to hybrid synthesis. We compare various DNN and HMM hybrid configurations, guiding the selection of waveform units in either the vocoder parameter domain or in the domain of embeddings (bottleneck features).},
  categories = {speech synthesis, hybrid synthesis, deep neural networks, embedding, unit selection}
}
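The guiding idea in this abstract, scoring natural units against a parametric model's predictions, reduces to a target cost in whichever domain the DNN outputs. The sketch below computes a Euclidean target cost in the embedding (bottleneck) domain; the function names and the choice of metric are assumptions for illustration, and the paper compares several such configurations.

    import numpy as np

    def target_cost(predicted_embedding, candidate_embeddings):
        # Distance between the DNN's predicted embedding for a target
        # context and each candidate unit's stored embedding.
        diffs = candidate_embeddings - predicted_embedding  # (units, dim)
        return np.sqrt((diffs ** 2).sum(axis=1))

    # e.g. best_unit = np.argmin(target_cost(pred, candidates)); a full
    # system would combine this with join costs in a Viterbi search.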
@inproceedings{ribeiro2016wavelet,
  author = {Ribeiro, Manuel Sam and Watts, Oliver and Yamagishi, Junichi and Clark, Robert A. J.},
  title = {Wavelet-based decomposition of f0 as a secondary task for {DNN-based} speech synthesis with multi-task learning},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year = {2016},
  month = {March},
  address = {Shanghai, China},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/ribeiro-et-al-icassp16.pdf},
  abstract = {We investigate two wavelet-based decomposition strategies of the f0 signal and their usefulness as a secondary task for speech synthesis using multi-task deep neural networks (MTL-DNNs). The first decomposition strategy uses a static set of scales for all utterances in the training data. We propose a second strategy, where the scale of the mother wavelet is dynamically adjusted to the rate of each utterance. This approach is able to capture f0 variations related to the syllable, word, clitic-group, and phrase units. This method also constrains the wavelet components to be within the frequency range that previous experiments have shown to be more natural. Both strategies are evaluated as secondary tasks in the MTL-DNNs. Results indicate that on an expressive dataset there is a strong preference for the systems using multi-task learning when compared to the baseline system.},
  categories = {speech synthesis, f0 modelling, deep neural network, multi-task learning, continuous wavelet transform}
}
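A minimal version of the multi-task network this abstract evaluates might look as follows. This is a PyTorch sketch under stated assumptions (layer sizes, activations, and the loss weighting are illustrative, not the paper's configuration): a shared trunk predicts acoustic parameters as the primary task and CWT scales of f0 as the secondary task, with the secondary head used only during training.

    import torch
    import torch.nn as nn

    class MultiTaskAcousticModel(nn.Module):
        def __init__(self, in_dim, acoustic_dim, wavelet_dim, hidden=512):
            super().__init__()
            # Shared trunk over linguistic input features.
            self.trunk = nn.Sequential(
                nn.Linear(in_dim, hidden), nn.Tanh(),
                nn.Linear(hidden, hidden), nn.Tanh(),
            )
            self.acoustic_head = nn.Linear(hidden, acoustic_dim)  # primary task
            self.wavelet_head = nn.Linear(hidden, wavelet_dim)    # secondary task

        def forward(self, x):
            h = self.trunk(x)
            return self.acoustic_head(h), self.wavelet_head(h)

    # Training minimises mse(acoustic) + lambda * mse(wavelet); only the
    # acoustic head is needed at synthesis time.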
@inproceedings{goldman2016siwis,
  author = {Goldman, Jean-Philippe and Honnet, Pierre-Edouard and Clark, Rob and Garner, Philip N. and Ivanova, Maria and Lazaridis, Alexandros and Liang, Hui and Macedo, Tiago and Pfister, Beat and Ribeiro, Manuel Sam and others},
  title = {{The SIWIS database: a multilingual speech database with acted emphasis}},
  booktitle = {Proc. Interspeech},
  year = {2016},
  month = {September},
  address = {San Francisco, United States},
  pdf = {http://www.cstr.inf.ed.ac.uk/downloads/publications/2016/1003.PDF},
  abstract = {We describe here a collection of speech data of bilingual and trilingual speakers of English, French, German and Italian. In the context of speech-to-speech translation (S2ST), this database is designed for several purposes and studies: training CLSA systems (cross-language speaker adaptation), conveying emphasis through S2ST systems, and evaluating TTS systems. More precisely, 36 speakers judged as accentless (22 bilingual and 14 trilingual speakers) were recorded for a set of 171 prompts in two or three languages, amounting to a total of 24 hours of speech. These sets of prompts include 100 sentences from news, 25 sentences from Europarl, the same 25 sentences with one acted emphasised word, 20 semantically unpredictable sentences, and finally a 240-word long text. All in all, this yielded 64 bilingual session pairs covering the six possible combinations of the four languages. The database is freely available for non-commercial use and scientific research purposes.},
  categories = {speech-to-speech translation, speech corpus, bilingual speakers, emphasis}
}