@inproceedings{39a81b8f6e4b4cf29a4adb8ce38f7032,
  title     = {{VoLearn}: An Operable Motor Learning System with Auditory Feedback},
  abstract  = {Previous motor learning systems rely on a vision-based workflow both from feed-forward and feedback process, which limits the application requirement and scenario. In this demo, we presented a novel cross-modal motor learning system named VoLearn. The novice is able to interact with desired motion through a virtual 3D interface and obtain the audio feedback based on a personal smartphone. Both interactivity and user accessibility of the designed system contribute to a wider range of applications and reduce the limitations in the applied space as well.},
  keywords  = {Cross-modality, feedback, motor learning, virtual avatar},
  author    = {Xia, Chengshuo and Fang, Xinrui and Sugiura, Yuta},
  note      = {Funding Information: This work was supported by JST PRESTO Grant Number JPMJPR17J4 and JST AIP-PRISM Grant Number JPMJCR18Y2. Publisher Copyright: {\textcopyright} 2021 Owner/Author.; 34th Annual ACM Symposium on User Interface Software and Technology, UIST 2021 ; Conference date: 10-10-2021 Through 14-10-2021},
  year      = {2021},
  month     = oct,
  day       = {10},
  doi       = {10.1145/3474349.3480186},
  language  = {English},
  series    = {Adjunct Publication of the 34th Annual ACM Symposium on User Interface Software and Technology, UIST 2021},
  publisher = {Association for Computing Machinery, Inc},
  pages     = {103--105},
  booktitle = {Adjunct Publication of the 34th Annual ACM Symposium on User Interface Software and Technology, UIST 2021},
}