publications([{ "lang": "fr", "publisher": "ACM", "doi": "https://doi.org/10.1145/3450522.3451326", "title": "Exploring the Physicality of Widgets for Head-Based Interaction: the Case of Menu in Mixed Reality", "url": "https://hal.archives-ouvertes.fr/hal-03567060", "abstract": "Mixed Reality with a Head-Mounted Display (HMD) offers unique perspectives for head-based interaction with virtual content and widgets. Besides virtual widgets, physical objects can be anchors (mixed widgets) or directly materialised widgets (physical widgets). The physicality (virtual-mixed-physical) of widgets defines a new dimension for Mixed Reality (MR) interaction that extends existing taxonomies of widgets in MR. As a first step to explore this new dimension, we focus on a commonly used widget: a menu. We thus evaluate the performance and usability of head pointing to a virtual, a mixed and a physical menu. Results suggest that pointing to a physical menu was on average 2s faster than pointing to a mixed or a virtual menu and preferred by participants. Virtual and mixed menus led to similar performances, but 11 participants over 15 preferred mixed menus over virtual ones. Based on our findings, we provide recommendations (benefits/limitations) for virtual, mixed and physical menus in MR.", "authors": { "1": { "first_name": "Charles", "last_name": "Bailly" }, "2": { "first_name": "François", "last_name": "Leitner" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2021, "uri": "http://iihm.imag.fr/publication/BLN21a/", "pages": "11:1-11", "bibtype": "inproceedings", "id": 940, "abbr": "BLN21a", "address": "Virtual Event, France", "date": "2021-04-13", "type": "Conférences nationales avec comité de lecture sur texte complet", "booktitle": "Actes de la 32e conférence francophone sur l'Interaction Humain-Machine (IHM'20.21)", "type_publi": "colcomlec" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3399715.3399842", "title": "Bring2Me: Bringing Virtual Widgets Back to the User's Field of View in Mixed Reality", "url": "https://hal.archives-ouvertes.fr/hal-02960599", "abstract": "Current Mixed Reality (MR) Head-Mounted Displays (HMDs) offer a limited Field Of View (FOV) of the mixed environment. Turning the head is thus necessary to visually perceive the virtual objects that are placed within the real world. However, turning the head also means loosing the initial visual context. This limitation is critical in contexts like augmented surgery where surgeons need to visually focus on the operative field. To address this limitation we propose to bring virtual objects/widgets back to the users' FOV instead of forcing the users to turn their head. We carry an initial investigation to demonstrate the approach by designing and evaluating three new menu techniques to first bring the menu back to the users' FOV before selecting an item. Results show that our three menu techniques are 1.5s faster on average than the baseline head-motion menu technique and are largely preferred by participants.", "authors": { "1": { "first_name": "Charles", "last_name": "Bailly" }, "2": { "first_name": "François", "last_name": "Leitner" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2020, "uri": "http://iihm.imag.fr/publication/BLN20a/", "pages": "1-9", "bibtype": "inproceedings", "id": 885, "abbr": "BLN20a", "address": "Ischia Island, Italy", "date": "2020-09-28", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "AVI '20: International Conference on Advanced Visual Interfaces", "type_publi": "icolcomlec" }, { "lang": "fr", "type_publi": "these", "title": "Interacting in Mixed Reality with a Head-Mounted Display : Application to Augmented Surgery", "url": "https://tel.archives-ouvertes.fr/tel-03185064", "abstract": "This thesis is in the field of Human-Computer Interaction (HCI) and specifically focuses on user interaction with Mixed Reality systems that rely on Head-Mounted Displays. Mixed Reality (MR) enables us to create mixed environments that combine virtual content with physical content. The entire workspace, including both physical and virtual objects, can then be used to interact with the system. Unlike traditional graphical interfaces, MR combines the advantages of the physical environment while providing the flexibility of virtual elements. The fields of application are many and varied, ranging from industrial manufacturing to medicine.Surgery is an application context in which MR is particularly promising. The ability to view medical data directly on the patient's body and around the operating table is a crucial benefit for surgeons. However, the existing techniques for interacting with this virtual information are not suited to the many constraints of the operating room. New approaches are needed to enable surgeons to view, manipulate and edit virtual content during an operation.Our research is driven by this need for MR interaction techniques for augmented surgery. Thanks to our partnership with the company Aesculap for this CIFRE thesis, our work is dedicated to knee prosthesis operations. We focus on the use of Head-Mounted Displays (HMDs), through which the augmented field of view is directly related to the position of the head.We start by providing a design space for MR head-based interaction. We explore this design space by devising new interaction techniques with menus. These techniques involve the entire reality-virtuality continuum by considering different physicalities for targets and implementing transitions between mixed reality and virtual reality. Our goal is to fulfill the surgery constraints by taking advantage of the objects already present in the operating room.After exploring head-based techniques, we broaden our area of research by considering surgical stages where surgeons have at least one hand available to interact with the system. Our contributions on this aspect are twofold. We first continue our study of menu techniques by considering a key aspect of the surgeon's work: visual attention to the operating field. We thus propose three new techniques allowing the surgeon to bring virtual widgets back into his field of vision without having to look away from the operating field. We then study the exploration of MR 3D scenes with multiple views. For instance, multiple views of the knee are displayed during the surgical planning step. The originality of the work lies in the different nature of the multiple views: virtual views and mixed views. We propose two new complementary interaction techniques that allow users to control the level of coupling between these different views and implement transitions between virtual reality and mixed reality.", "year": 2020, "uri": "http://iihm.imag.fr/publication/B20a/", "id": 893, "bibtype": "phdthesis", "abbr": "B20a", "authors": { "1": { "first_name": "Charles", "last_name": "Bailly" } }, "date": "2020-12-16", "type": "Thèses et habilitations", "pages": "186" }, { "lang": "en", "publisher": "Springer", "doi": "https://doi.org/10.1007/978-3-030-29390-1_22", "title": "Head-controlled Menu in Mixed Reality with a HMD", "abstract": "We present a design-space and three new techniques for head-based interaction with menus in Mixed Reality (MR) with a Head-Mounted Display (HMD). Usual input modalities such as hand gestures and voice commands are not suitable in noisy MR contexts where the users have both hands occupied as in augmented surgery and machine maintenance. To address these two issues of noisy MR contexts and hand-free interaction, we systematically explore the design space of head-controlled menu interaction by considering two design factors: 1) head-controlled menu versus head-controlled cursor 2) virtual targets versus mixed targets anchored on physical objects. Based on the design space, we present three novel menu techniques that we compared with a baseline head-controlled cursor technique. Experimental results suggest that head-controlled menu and head-controlled cursor techniques offer similar performance. In addition, the study found that mixed targets do not impact ultimate user performance when users are trained enough, but improve the learning phase. When using virtual targets, users still progressed after the training phase by reducing their mean selection time by 0.84s. When using mixed targets, the improvement was limited to 0.3s.", "year": 2019, "uri": "http://iihm.imag.fr/publication/BLN19a/", "pages": "395-415", "bibtype": "inproceedings", "id": 852, "abbr": "BLN19a", "authors": { "1": { "first_name": "Charles", "last_name": "Bailly" }, "2": { "first_name": "François", "last_name": "Leitner" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "date": "2019-09-02", "document": "http://iihm.imag.fr/publs/2019/cameraReady_submission1166_.pdf", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Human-Computer Interaction – INTERACT 2019. INTERACT 2019. Lecture Notes in Computer Science, vol 11749", "type_publi": "icolcomlec" }]);