@comment{Normalized: brace-delimited fields, authors in unambiguous "Last, First"
form, title stored in Title Case (styles downcase as needed), consistent field
order and alignment. Citation key and all data values preserved.}
@article{5f1d9eb4d1ac4875ae4fab6d9dcda81a,
  author    = {Masai, Katsutoshi and Kunze, Kai and Sugiura, Yuta and Ogata, Masa and Inami, Masahiko and Sugimoto, Maki},
  title     = {Evaluation of Facial Expression Recognition by a Smart Eyewear for Facial Direction Changes, Repeatability, and Positional Drift},
  journal   = {ACM Transactions on Interactive Intelligent Systems},
  year      = {2017},
  month     = dec,
  volume    = {7},
  number    = {4},
  doi       = {10.1145/3012941},
  issn      = {2160-6455},
  publisher = {Association for Computing Machinery (ACM)},
  language  = {English},
  keywords  = {Affective computing, Eyewear computing, Facial expression, Wearable},
  abstract  = {This article presents a novel smart eyewear that recognizes the wearer's facial expressions in daily scenarios. Our device uses embedded photo-reflective sensors and machine learning to recognize the wearer's facial expressions. Our approach focuses on skin deformations around the eyes that occur when the wearer changes his or her facial expressions. With small photo-reflective sensors, we measure the distances between the skin surface on the face and the 17 sensors embedded in the eyewear frame. A Support Vector Machine (SVM) algorithm is then applied to the information collected by the sensors. The sensors can cover various facial muscle movements. In addition, they are small and light enough to be integrated into daily-use glasses. Our evaluation of the device shows the robustness to the noises from the wearer's facial direction changes and the slight changes in the glasses' position, as well as the reliability of the device's recognition capacity. The main contributions of our work are as follows: (1) We evaluated the recognition accuracy in daily scenes, showing 92.8% accuracy regardless of facial direction and removal/remount. Our device can recognize facial expressions with 78.1% accuracy for repeatability and 87.7% accuracy in case of its positional drift. (2)We designed and implemented the device by taking usability and social acceptability into account. The device looks like a conventional eyewear so that users can wear it anytime, anywhere. (3) Initial field trials in a daily life setting were undertaken to test the usability of the device. Our work is one of the first attempts to recognize and evaluate a variety of facial expressions with an unobtrusive wearable device.},
  note      = {Funding Information: This work is supported by JST CREST grant number JPMJCR14E1. Authors{\textquoteright} addresses: K. Masai, Y. Sugiura, and M. Sugimoto, Interactive Media Lab, School of Science for Open and Enviromental Systems, Graduate School of Science and Technology, Keio University, 223-8522, Japan, emails: {masai, sugiura, sugimoto}@imlab.ics.keio.ac.jp; K. Kunze, Graduate School of Media Design, Keio University, 223-0061, Japan, email: kai@kmd.keio.ac.jp; M. Ogata, Media Interaction Group, Information Technology Research Institute (ITRI), National Institute of Advanced Industrial Science and Technology, 305-8560, Japan, email: masa@masaoagta.com; M. Inami, Research Center for Advanced Science and Technology, The University of Tokyo, 153-8904, Japan, email: inami@inami.info. Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from Permissions@acm.org. {\textcopyright} 2017 ACM 2160-6455/2017/12-ART15 $15.00 https://doi.org/10.1145/3012941 Publisher Copyright: {\textcopyright} 2017 ACM.},
}