publications([{ "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3746059.3747701", "title": "Efficient Finger Model and Accurate Tracking for Hover-and-Touch, Mid-air and Microgesture Interaction", "url": "https://hal.science/hal-05247942", "abstract": "Bare-handed gestural interaction with computer systems is widespread, whether with touchscreens or Augmented Reality headsets. Various forms of gestural interaction exist including hover-and-touch, mid-air and microgesture interaction. Studying the full benefits of these gestural interactions, and their combinations, is currently not possible due to the inadequate performances of the existing tracking solutions. To address this problem, we propose a marker-based visual tracking algorithm with a novel finger model, and its open source implementation. A key contribution is the simplicity of the finger and fingertip model (i.e. cylinders and a sphere respectively). This simple model leads to low computational cost (600 microseconds), high precision (0.02 mm) and accurate (one millimeter) fingertip tracking, without impeding finger movement. We illustrate the benefits of the proposed tracking approach with a demonstration application combining hover-and-touch, mid-air and microgesture interactions for editing a 3D point cloud.", "authors": { "1": { "first_name": "Quentin", "last_name": "Zoppis" }, "2": { "first_name": "Sergi", "last_name": "Pujades" }, "3": { "first_name": "Laurence", "last_name": "Nigay" }, "4": { "first_name": "François", "last_name": "Bérard" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/ZPN+25a/", "pages": "1-14", "bibtype": "inproceedings", "id": 966, "abbr": "ZPN+25a", "address": "Busan, South Korea", "date": "2025-09-28", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "UIST 2025 - 38th Annual ACM Symposium on User Interface Software and Technology", "type_publi": "icolcomlec" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3689050.3707690", "title": "The not-so-masochist teapot", "url": "https://hal.science/hal-04854127", "abstract": "The not-so-masochist teapot challenges the human capacity to go beyond the first impression. The not-so-masochist teapot appears as an unusable teapot at first with its spout above the handle. Yet, if participants go beyond this first impression and start to make tea, the spout rotate to a usable location, i.e. opposite of the handle, just in time for the tea to be ready. 
With the not-so-masochist teapot, we question the unusability of objects and the capacity to go beyond first impressions.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Jasper", "last_name": "Flügge" }, "3": { "first_name": "Eric", "last_name": "Chaffangeon" }, "4": { "first_name": "Katrin", "last_name": "Wolf" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CFC+25a/", "id": 996, "bibtype": "inproceedings", "abbr": "CFC+25a", "address": "Bordeaux, France", "date": "2025-02-04", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Nineteenth International Conference on Tangible, Embedded, and Embodied Interaction" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765726", "title": "Sensitive Pen: An Open-Source And Low-Cost Digital Pen For Diagnosing Children With Dysgraphia", "url": "https://hal.science/hal-05294824", "abstract": "Handwriting is a complex motor activity. Handwriting disorders, known as dysgraphia, have a considerable impact on an individual’s academic and professional success. To facilitate the diagnosis of dysgraphia, we propose an open-source and low-cost digital pen, called Sensitive Pen. In a first study, we evaluate its ease of use, usefulness and acceptability with psychomotor therapists. In a second study, we test the Sensitive Pen with children aged 6-10 to assess its ability to diagnose dysgraphia using machine learning. Our results show that psychomotor therapists would be ready and interested in using such a tool. Moreover, on a sample of 20 children, using the pen’s kinematic and angle data, we obtained a true positive rate (i.e. correctly identified dysgraphic children) of 100% and an overall accuracy of 65%.", "authors": { "1": { "first_name": "Ana", "last_name": "Phelippeau" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Adrien", "last_name": "Husson" }, "4": { "first_name": "Joël", "last_name": "Chevrier" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/PCH+25a/", "id": 997, "bibtype": "inproceedings", "abbr": "PCH+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765723", "title": "Couplage et contrôle de points de vue en réalité mixte collaborative : la notation graphique (Point de Vue)* - PV*", "url": "https://hal.science/hal-05294806", "abstract": "Mixed Reality (MR) is increasingly being studied for synchronous remote collaboration in various fields, from industrial maintenance to education. It allows combining virtual reality, augmented reality, and 2D interfaces to visualize and share real and virtual content. The notion of coupling of collaborative activities, central to studies of Computer-Supported Cooperative Work (CSCW) applications, takes various forms with MR due to its 3D and immersive characteristics, as well as the heterogeneity of interactive technologies. To cope with this diversity, we propose a graphical notation, PV*, that precisely describes the coupling relationships between multiple viewpoints on physical or virtual views, as well as their control by collaborators. 
We study the descriptive power of PV* by describing several collaborative situations from the literature, and we discuss its generative power by exploring several design possibilities described with PV*.", "authors": { "1": { "first_name": "Thomaz", "last_name": "Fèvre" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Cédric", "last_name": "Fleury" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/FCF+25a/", "id": 998, "bibtype": "inproceedings", "abbr": "FCF+25a", "address": "Toulouse, France", "date": "2025-11-04", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "", "title": "Viewpoint control techniques for mixed reality collaboration generated with the (Point of View)* - PV* graphical notation", "url": "https://hal.science/hal-05312071", "abstract": "Mixed Reality (MR) is increasingly being used for synchronous remote collaboration in various fields, from industrial maintenance to education. It allows combining virtual reality, augmented reality, and 2D interfaces to visualize and share real and virtual content. The notion of coupling of collaborative activities, central to studies of Computer-Supported Cooperative Work (CSCW) applications, takes various forms with MR due to its 3D and immersive characteristics, as well as the heterogeneity of interactive technologies. To cope with this diversity, we have proposed a graphical notation, PV*, that precisely describes the coupling relationships between multiple viewpoints on physical or virtual views, as well as their control by collaborators. In this demonstration, we present different viewpoint control techniques designed with PV*, illustrating the generative power of our notation.", "authors": { "1": { "first_name": "Thomaz", "last_name": "Fèvre" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Cédric", "last_name": "Fleury" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/FCF+25b/", "id": 999, "bibtype": "inproceedings", "abbr": "FCF+25b", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "", "title": "Microgesture Interaction in Context: demonstrations of the ANR MIC project", "url": "https://hal.science/hal-05311866", "abstract": "We present demonstrations from the ANR MIC project. MIC aims to study and promote microgesture-based interaction by putting it into practice in real use situations. The demonstrations show interaction techniques based on microgestures, or on the combination of microgestures with another modality including haptic feedback, as well as mechanisms that support the discoverability and learnability of microgestures. The demonstrations illustrate three different contexts of use: 1) Augmented/Virtual Reality, because microgesture interaction does not require holding any external device and is less physically demanding than mid-air interaction. 
2) Car driving, because microgestures may be performed in parallel with other tasks: they require only a few seconds and a single hand. 3) Eyes-free interaction (e.g. for users with visual impairments), because users can perform microgestures by relying on proprioception alone, without looking at their hand.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Vincent", "last_name": "Lambert" }, "5": { "first_name": "Laurence", "last_name": "Nigay" }, "6": { "first_name": "Charles", "last_name": "Bailly" }, "7": { "first_name": "Julien", "last_name": "Castet" }, "8": { "first_name": "Michael", "last_name": "Ortega" }, "9": { "first_name": "Zoé", "last_name": "Lacroux" }, "10": { "first_name": "Céline", "last_name": "Lemercier" }, "11": { "first_name": "Pierre-Vincent", "last_name": "Paubel" }, "12": { "first_name": "Sandra", "last_name": "Bardot" }, "13": { "first_name": "Christophe", "last_name": "Jouffrais" }, "14": { "first_name": "Suliac", "last_name": "Lavenant" }, "15": { "first_name": "Sylvain", "last_name": "Malacria" }, "16": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CCG+25a/", "id": 1000, "bibtype": "inproceedings", "abbr": "CCG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3706598.3713350", "title": "An Evaluation of Spatial Anchoring to position AR Guidance in Arthroscopic Surgery", "url": "https://hal.science/hal-05095338", "abstract": "
This work examines spatial anchoring strategies to position augmented reality guidance during surgery. We consider three strategies: anchoring to the Patient, the surgical Tool, and the Surgeon's head. These strategies were evaluated in a first experiment involving 24 non-professional participants, using two guidance techniques: 3D Trajectory and 2D Crosshair. For 3D Trajectory, Patient and Tool anchoring were more precise than Surgeon anchoring, and Patient anchoring was the most preferred. For 2D Crosshair, no significant effect of anchoring strategies on precision was observed. However, participants preferred Patient and Surgeon anchoring. A second experiment with 6 surgeons confirmed the first experiment's results. For 3D Trajectory, Tool anchoring proved more precise than Patient anchoring, despite surgeons' preference for Patient anchoring. These findings contribute empirical evidence for the design of surgical AR guidance, with potential applications for similar, less critical tasks.