@inproceedings{2e45a2fc98ea480394f225bf0cc2ff9f,
title = "Blendshapes from commodity RGB-D sensors",
abstract = "Creating and animating a realistic 3D human face is an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimating it quickly will find many applications in games, training simulations, and interactive 3D graphics. We demonstrate a system to capture photorealistic 3D faces and generate blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single depth sensor, such as a Microsoft Kinect version 1, and requires no artistic expertise to process those scans. The system takes only a matter of seconds to capture and produce a 3D facial pose and only a few minutes of processing time to transform it into a blendshape-compatible model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing different human subjects and puppeteering their 3D faces in an animation system with real-time facial performance retargeting.",
keywords = "Blendshapes, Depth sensors, Face animation, RGB-D",
author = "Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro",
year = "2015",
month = jul,
day = "31",
doi = "10.1145/2775280.2792540",
language = "English (US)",
series = "ACM SIGGRAPH 2015 Talks, SIGGRAPH 2015",
publisher = "Association for Computing Machinery, Inc",
booktitle = "ACM SIGGRAPH 2015 Talks, SIGGRAPH 2015",
note = "International Conference on Computer Graphics and Interactive Techniques, SIGGRAPH 2015; Conference date: 09-08-2015 through 13-08-2015",
}