@article{Chihiro:2016:0736-2935:5109,
  title = "Kansei Modeling for Multimodal User Experience (Visual Expectation Effect on Product Sound Perception)",
  journal = "INTER-NOISE and NOISE-CON Congress and Conference Proceedings",
  parent_itemid = "infobike://ince/incecp",
  publishercode = "ince",
  year = "2016",
  volume = "253",
  number = "3",
  publication date = "2016-08-21T00:00:00",
  pages = "5109-5118",
  itemtype = "ARTICLE",
  issn = "0736-2935",
  url = "https://ince.publisher.ingentaconnect.com/content/ince/incecp/2016/00000253/00000003/art00033",
  author = "Chihiro, Miyazaki and Nakano, Sohya",
  abstract = "Multiple senses interact with a product over the time series of a user experience. We assume that a user's evaluation of product sound is affected not only by the auditory sense but also by other senses, such as vision and touch. In this paper, we propose a new Kansei modeling method that considers multimodal user experiences. In this method, we structure the user's Kansei as a cognitive process involving four layers: physical quantity, perceived feature, meanings, and attitude. We extract this layered structure for each scene of the user experience, where each scene consists of the user's senses and actions. With this modeling method, we can extract cognitive components involving multimodal integration from comprehensive cognitive structures of the user experience. To verify the contextual cross-modal effect in a scene transition of the user experience, we carried out a sensory evaluation experiment with participants, using a hairdryer as a product sample. In the experiment, we manipulated the size of the product as a visual expectation cue and the loudness level of the product sound. We presented combinations of different sizes and loudness levels to participants and asked them to evaluate the sound for each. With the experimental results, we demonstrate how visual expectation affects sound cognition, such as loudness acceptance and the feeling of powerfulness.",
}