@inproceedings{41c32abc229d4e4abec41ce1724f38a5,
  title         = {Audio-Visual Spontaneous Emotion Recognition},
  author        = {Zeng, Zhihong and Hu, Yuxiao and Roisman, Glenn I. and Wen, Zhen and Fu, Yun and Huang, Thomas S.},
  booktitle     = {Artifical Intelligence for Human Computing, ICMI 2006 and IJCAI 2007 International Workshops, Banff, Canada, November 3, 2006 and Hyderabad, India, January 6, 2007, Revised Selected and Invited Papers},
  series        = {Lecture Notes in Computer Science},
  volume        = {4451},
  publisher     = {Springer},
  address       = {Berlin, Heidelberg},
  pages         = {72--90},
  year          = {2007},
  doi           = {10.1007/978-3-540-72348-6_4},
  isbn          = {3540723463},
  language      = {English (US)},
  keywords      = {Affect recognition, Affective computing, Emotion recognition, Multimodal human-computer interaction},
  abstract      = {Automatic multimodal recognition of spontaneous emotional expressions is a largely unexplored and challenging problem. In this paper, we explore audio-visual emotion recognition in a realistic human conversation setting-the Adult Attachment Interview (AAI). Based on the assumption that facial expression and vocal expression are at the same coarse affective states, positive and negative emotion sequences are labeled according to Facial Action Coding System. Facial texture in visual channel and prosody in audio channel are integrated in the framework of Adaboost multi-stream hidden Markov model (AdaMHMM) in which the Adaboost learning scheme is used to build component HMM fusion. Our approach is evaluated in AAI spontaneous emotion recognition experiments.},
  note          = {Copyright: Copyright 2008 Elsevier B.V., All rights reserved.; 20th International Joint Conference on Artificial Intelligence, IJCAI 2007 - Workshop on Artifical Intelligence for Human Computing ; Conference date: 06-01-2007 Through 06-01-2007},
  internal-note = {Spelling "Artifical" in booktitle/note matches the published Springer volume title (LNCS 4451); do not "correct" it.},
}