dc.contributor.author | Roddy, Stephen | |
dc.contributor.author | Furlong, Dermot | |
dc.contributor.editor | Dr Raymond Bond, Prof Maurice Mulvenna, Prof Jonathan Wallace, Dr Michaela Black | en |
dc.date.accessioned | 2022-02-09T16:50:02Z | |
dc.date.available | 2022-02-09T16:50:02Z | |
dc.date.created | 4 - 6 July 2018 | en |
dc.date.issued | 2018 | |
dc.date.submitted | 2018 | en |
dc.identifier.citation | Roddy, Stephen & Furlong, Dermot, Vowel Formant Profiles and Image Schemata and Auditory Display, Proceedings of the 32nd International BCS Human Computer Interaction Conference (HCI 2018) (HCI), Belfast, 4 - 6 July 2018 | en |
dc.identifier.other | Y | |
dc.description.abstract | This paper presents two evaluations intended to examine if listeners are more likely to associate
certain vowel formant profiles with specific data types in an auditory display context. The data
types and sounds chosen to reflect those data types are informed by findings from the field of
cognitive science. The results of the evaluations suggest that to a limited degree, listeners
associate certain vowel formant profiles with strength, largeness of size, darkness and tension.
The results further suggest that the amount of noise present in the vocal gesture affects the
listeners' perception of the tension represented in the sound. These results have implications for
the field of auditory display. | en |
dc.language.iso | en | en |
dc.rights | Y | en |
dc.title | Vowel Formant Profiles and Image Schemata and Auditory Display | en |
dc.title.alternative | Proceedings of the 32nd International BCS Human Computer Interaction Conference (HCI 2018) (HCI) | en |
dc.title.alternative | International BCS Human Computer Interaction Conference (HCI 2018) | en |
dc.type | Conference Paper | en |
dc.type.supercollection | scholarly_publications | en |
dc.type.supercollection | refereed_publications | en |
dc.identifier.peoplefinderurl | http://people.tcd.ie/stroddy | |
dc.identifier.rssinternalid | 237944 | |
dc.identifier.doi | 10.14236/ewic/HCI2018.109 | en |
dc.rights.ecaccessrights | openAccess | |
dc.subject.TCDTheme | Creative Arts Practice | en |
dc.subject.TCDTheme | Creative Technologies | en |
dc.subject.TCDTheme | Digital Engagement | en |
dc.subject.TCDTheme | Digital Humanities | en |
dc.subject.TCDTheme | Inclusive Society | en |
dc.subject.TCDTheme | Smart & Sustainable Planet | en |
dc.subject.TCDTheme | Telecommunications | en |
dc.subject.TCDTag | Auditory Display | en |
dc.subject.TCDTag | Human-Computer Interaction | en |
dc.subject.TCDTag | SPEECH PROSODY | en |
dc.subject.TCDTag | Sonification | en |
dc.identifier.orcid_id | 0000-0001-8491-3031 | |
dc.subject.darat_thematic | Accessibility | en |
dc.subject.darat_thematic | Home and community living | en |
dc.status.accessible | N | en |
dc.contributor.sponsor | Science Foundation Ireland (SFI) | en |
dc.contributor.sponsorGrantNumber | 13/RC/2077 | en |
dc.contributor.sponsor | Irish Research Council (IRC) | en |
dc.identifier.uri | http://hdl.handle.net/2262/98071 | |