dc.contributor.author | Cabral, Joao | |
dc.contributor.author | Székely, Éva | |
dc.contributor.author | Ahmed, Zeeshan | |
dc.contributor.author | Carson-Berndsen, Julie | |
dc.date.accessioned | 2020-02-25T11:17:11Z | |
dc.date.available | 2020-02-25T11:17:11Z | |
dc.date.created | 7 June | en |
dc.date.issued | 2012 | |
dc.date.submitted | 2012 | en |
dc.identifier.citation | Székely, É., Ahmed, Z., Cabral, J.P. & Carson-Berndsen, J., WinkTalk: A Demonstration of a Multimodal Speech Synthesis Platform Linking Facial Expressions to Expressive Synthetic Voices, the Third Workshop on Speech and Language Processing for Assistive Technologies, Montreal, Canada, 7 June, Association for Computational Linguistics, 2012, 5 - 8 | en |
dc.identifier.other | Y | |
dc.description.abstract | This paper describes a demonstration of the
WinkTalk system, which is a speech synthesis
platform using expressive synthetic voices.
With the help of a webcamera and facial expression
analysis, the system allows the user
to control the expressive features of the synthetic
speech for a particular utterance with
their facial expressions. Based on a personalised
mapping between three expressive synthetic
voices and the user's facial expressions,
the system selects a voice that matches their
face at the moment of sending a message.
The WinkTalk system is an early research prototype
that aims to demonstrate that facial
expressions can be used as a more intuitive
control over expressive speech synthesis than
manual selection of voice types, thereby contributing
to an improved communication experience
for users of speech generating devices. | en |
dc.format.extent | 5 | en |
dc.format.extent | 8 | en |
dc.language.iso | en | en |
dc.publisher | Association for Computational Linguistics | en |
dc.rights | Y | en |
dc.subject | Speech synthesis | en |
dc.subject | Synthetic voices | en |
dc.subject | Facial expression analysis | en |
dc.title | WinkTalk: A Demonstration of a Multimodal Speech Synthesis Platform Linking Facial Expressions to Expressive Synthetic Voices | en |
dc.title.alternative | the Third Workshop on Speech and Language Processing for Assistive Technologies | en |
dc.type | Conference Paper | en |
dc.type.supercollection | scholarly_publications | en |
dc.type.supercollection | refereed_publications | en |
dc.identifier.peoplefinderurl | http://people.tcd.ie/cabralj | |
dc.identifier.rssinternalid | 213003 | |
dc.rights.ecaccessrights | openAccess | |
dc.subject.TCDTheme | Creative Technologies | en |
dc.subject.TCDTheme | Digital Engagement | en |
dc.subject.TCDTag | Multimodal System | en |
dc.subject.TCDTag | Speech synthesis | en |
dc.status.accessible | N | en |
dc.identifier.uri | https://www.aclweb.org/anthology/W12-2902/ | |
dc.identifier.uri | http://hdl.handle.net/2262/91618 | |