publications([{ "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3746059.3747701", "title": "Efficient Finger Model and Accurate Tracking for Hover-and-Touch, Mid-air and Microgesture Interaction", "url": "https://hal.science/hal-05247942", "abstract": "Bare-handed gestural interaction with computer systems is widespread, whether with touchscreens or Augmented Reality headsets. Various forms of gestural interaction exist, including hover-and-touch, mid-air and microgesture interaction. Studying the full benefits of these gestural interactions, and their combinations, is currently not possible due to the inadequate performance of existing tracking solutions. To address this problem, we propose a marker-based visual tracking algorithm with a novel finger model, and its open source implementation. A key contribution is the simplicity of the finger and fingertip model (i.e. cylinders and a sphere, respectively). This simple model leads to low computational cost (600 microseconds), high precision (0.02 mm) and accurate (one millimeter) fingertip tracking, without impeding finger movement. We illustrate the benefits of the proposed tracking approach with a demonstration application combining hover-and-touch, mid-air and microgesture interactions for editing a 3D point cloud.", "authors": { "1": { "first_name": "Quentin", "last_name": "Zoppis" }, "2": { "first_name": "Sergi", "last_name": "Pujades" }, "3": { "first_name": "Laurence", "last_name": "Nigay" }, "4": { "first_name": "François", "last_name": "Bérard" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/ZPN+25a/", "pages": "1-14", "bibtype": "inproceedings", "id": 966, "abbr": "ZPN+25a", "address": "Busan, South Korea", "date": "2025-09-28", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "UIST 2025 - 38th Annual ACM Symposium on User Interface Software and Technology", "type_publi": "icolcomlec" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3689050.3707690", "title": "The not-so-masochist teapot", "url": "https://hal.science/hal-04854127", "abstract": "The not-so-masochist teapot challenges the human capacity to go beyond the first impression. The not-so-masochist teapot appears at first as an unusable teapot, with its spout above the handle. Yet, if participants go beyond this first impression and start to make tea, the spout rotates to a usable location, i.e. opposite the handle, just in time for the tea to be ready. 
With the not-so-masochist teapot, we question the unusability of objects and the capacity to go beyond first impressions.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Jasper", "last_name": "Flügge" }, "3": { "first_name": "Eric", "last_name": "Chaffangeon" }, "4": { "first_name": "Katrin", "last_name": "Wolf" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CFC+25a/", "id": 996, "bibtype": "inproceedings", "abbr": "CFC+25a", "address": "Bordeaux, France", "date": "2025-02-04", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "Nineteenth International Conference on Tangible, Embedded, and Embodied Interaction" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765726", "title": "Sensitive Pen: An Open-Source And Low-Cost Digital Pen For Diagnosing Children With Dysgraphia", "url": "https://hal.science/hal-05294824", "abstract": "Handwriting is a complex motor activity. Handwriting disorders, known as dysgraphia, have a considerable impact on an individual’s academic and professional success. To facilitate the diagnosis of dysgraphia, we propose an open-source and low-cost digital pen, called Sensitive Pen. In a first study, we evaluate its ease of use, usefulness and acceptability with psychomotor therapists. In a second study, we test the Sensitive Pen with children aged 6-10 to assess its ability to diagnose dysgraphia using machine learning. Our results show that psychomotor therapists would be ready and interested in using such a tool. Moreover, on a sample of 20 children, using the pen’s kinematic and angle data, we obtained a true positive rate (i.e. correctly identified dysgraphic children) of 100%, and an overall accuracy of 65%.", "authors": { "1": { "first_name": "Ana", "last_name": "Phelippeau" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Adrien", "last_name": "Husson" }, "4": { "first_name": "Joël", "last_name": "Chevrier" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/PCH+25a/", "id": 997, "bibtype": "inproceedings", "abbr": "PCH+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765723", "title": "Couplage et contrôle de points de vue en réalité mixte collaborative : la notation graphique (Point de Vue)* - PV*", "url": "https://hal.science/hal-05294806", "abstract": "Mixed Reality (MR) is increasingly being studied for synchronous remote collaboration in various fields, from industrial maintenance to education. It allows combining virtual reality, augmented reality, and 2D interfaces to visualize and share real and virtual content. The notion of coupling of collaborative activities, central to studies of Computer-Supported Cooperative Work (CSCW) applications, takes various forms with MR due to its 3D and immersive characteristics, as well as the heterogeneity of interactive technologies. To cope with this diversity, we propose a graphical notation, PV*, that precisely describes the coupling relationships between multiple viewpoints on physical or virtual views, as well as their control by collaborators. 
We study the descriptive power of PV* by describing several collaborative situations from the literature and discuss its generative power by exploring several design possibilities described with PV*.", "authors": { "1": { "first_name": "Thomaz", "last_name": "Fèvre" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Cédric", "last_name": "Fleury" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/FCF+25a/", "id": 998, "bibtype": "inproceedings", "abbr": "FCF+25a", "address": "Toulouse, France", "date": "2025-11-04", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Viewpoint control techniques for mixed reality collaboration generated with the (Point of View)* - PV* graphical notation", "url": "https://hal.science/hal-05312071", "abstract": "Mixed Reality (MR) is increasingly being used for synchronous remote collaboration in various fields, from industrial maintenance to education. It allows combining virtual reality, augmented reality, and 2D interfaces to visualize and share real and virtual content. The notion of coupling of collaborative activities, central to studies of Computer-Supported Cooperative Work (CSCW) applications, takes various forms with MR due to its 3D and immersive characteristics, as well as the heterogeneity of interactive technologies. To cope with this diversity, we have proposed a graphical notation, PV*, that precisely describes the coupling relationships between multiple viewpoints on physical or virtual views, as well as their control by collaborators. In this demonstration, we present different viewpoint control techniques designed with PV*, illustrating the generative power of our notation.", "authors": { "1": { "first_name": "Thomaz", "last_name": "Fèvre" }, "2": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "3": { "first_name": "Cédric", "last_name": "Fleury" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/FCF+25b/", "id": 999, "bibtype": "inproceedings", "abbr": "FCF+25b", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/None", "title": "Microgesture Interaction in Context: demonstrations of the ANR MIC project", "url": "https://hal.science/hal-05311866", "abstract": "We present demonstrations from the ANR MIC project. MIC aims to study and promote microgesture-based interaction by putting it into practice in use situations. The demonstrations show interaction techniques based on microgestures or on the combination of microgestures with another modality, including haptic feedback, as well as mechanisms that support the discoverability and learnability of microgestures. The demonstrations illustrate three different contexts of use: 1) Augmented/Virtual Reality, because microgesture interaction does not require holding any external device and is less physically demanding than mid-air interaction. 
2) Car driving, because microgestures may be performed in parallel with other tasks: they require only a few seconds and a single hand. 3) Eyes-free interaction (i.e. for users with visual impairments), because users can perform microgestures by relying on proprioception only, without looking at their hand.", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Aurélien", "last_name": "Conil" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Vincent", "last_name": "Lambert" }, "5": { "first_name": "Laurence", "last_name": "Nigay" }, "6": { "first_name": "Charles", "last_name": "Bailly" }, "7": { "first_name": "Julien", "last_name": "Castet" }, "8": { "first_name": "Michael", "last_name": "Ortega" }, "9": { "first_name": "Zoé", "last_name": "Lacroux" }, "10": { "first_name": "Céline", "last_name": "Lemercier" }, "11": { "first_name": "Pierre-Vincent", "last_name": "Paubel" }, "12": { "first_name": "Sandra", "last_name": "Bardot" }, "13": { "first_name": "Christophe", "last_name": "Jouffrais" }, "14": { "first_name": "Suliac", "last_name": "Lavenant" }, "15": { "first_name": "Sylvain", "last_name": "Malacria" }, "16": { "first_name": "Thomas", "last_name": "Pietrzak" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/CCG+25a/", "id": 1000, "bibtype": "inproceedings", "abbr": "CCG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3706598.3713350", "title": "An Evaluation of Spatial Anchoring to position AR Guidance in Arthroscopic Surgery", "url": "https://hal.science/hal-05095338", "abstract": "This work examines spatial anchoring strategies to position augmented reality guidance during surgery. We consider three strategies: anchoring to the Patient, the surgical Tool, and the Surgeon's head. These strategies were evaluated in a first experiment involving 24 non-professional participants, using two guidance techniques: 3D Trajectory and 2D Crosshair. For 3D Trajectory, Patient and Tool anchoring were more precise than Surgeon anchoring, and Patient anchoring was the most preferred. For 2D Crosshair, no significant effect of anchoring strategies on precision was observed. However, participants preferred Patient and Surgeon anchoring. A second experiment with 6 surgeons confirmed the first experiment's results. For 3D Trajectory, Tool anchoring proved more precise than Patient anchoring, despite surgeons' preference for Patient anchoring. These findings contribute empirical evidence for the design of surgical AR guidance, with potential applications for similar, less critical tasks.", "authors": { "1": { "first_name": "Chaymae", "last_name": "Acherki" }, "2": { "first_name": "Laurence", "last_name": "Nigay" }, "3": { "first_name": "Quentin", "last_name": "Roy" }, "4": { "first_name": "Thibault", "last_name": "Salque" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/ANR+25a/", "pages": "1-17", "bibtype": "inproceedings", "id": 1001, "abbr": "ANR+25a", "address": "Yokohama, Japan", "date": "2025-04-25", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "CHI 2025: CHI Conference on Human Factors in Computing Systems", "type_publi": "icolcomlec" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765725", "title": "Effect of Robotic Modular Interface Assistance Type on Sense of Agency", "url": "https://hal.science/hal-05294835", "abstract": "Robotic modular interfaces, increasingly studied in Human-Computer Interaction, offer assistance to support users in their tasks. However, this assistance can harm the sense of agency (i.e., feeling of control), leading to non-use of the interface or a diminishing sense of responsibility regarding the consequences of users' actions. The impact of robotic modular interface assistance, during a cooperative task, on the user's sense of agency has not yet been studied. In this article, we propose to remedy this through the use of swarm robotic interfaces. We focus on nine levels of assistance, varying system autonomy and module coordination. Our study shows that: (1) the higher the autonomy, the lower the sense of agency; (2) coordination seems to have an impact on the sense of agency; and (3) three types of sense of agency emerge depending on the coordination of the modules.", "authors": { "1": { "first_name": "Ophélie", "last_name": "Jobert" }, "2": { "first_name": "Thibaut", "last_name": "Leone" }, "3": { "first_name": "Alix", "last_name": "Goguey" }, "4": { "first_name": "Bruno", "last_name": "Berberian" }, "5": { "first_name": "Julien", "last_name": "Bourgeois" }, "6": { "first_name": "Céline", "last_name": "Coutrix" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/JLG+25a/", "id": 1002, "bibtype": "inproceedings", "abbr": "JLG+25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "fr", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3765712.3765729", "title": "Impact of Primary Tasks on Soft Shape-Change Perception on a Smartwatch", "url": "https://hal.science/hal-05294809", "abstract": "Smartwatches have become integral to receiving notifications during everyday tasks, helping users stay connected, reduce anxiety about missing important information, and support their well-being. While emerging technologies offer novel notification experiences, how effectively they perform when users are engaged in primary tasks remains unclear. This study examines the notification performance of a recently developed pneumatic soft curvature-changing smartwatch. Through an experimental investigation, we measured users’ absolute detection threshold (ADT) for notifications while they performed cognitive and physical tasks. 
Results show an ADT of 5.70 psi (round curvature) and 6.22 psi (pointy curvature) during a cognitive task and an ADT of 8.07 psi (round curvature) and 10 psi (pointy curvature) during a physical task. These findings provide valuable insights for designing future shape-changing notification smartwatches to improve user experience and enhance task compatibility.", "authors": { "1": { "first_name": "Fouad", "last_name": "Gasmi" }, "2": { "first_name": "Zhuzhi", "last_name": "Fan" }, "3": { "first_name": "Céline", "last_name": "Coutrix" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/GFC25a/", "id": 1003, "bibtype": "inproceedings", "abbr": "GFC25a", "address": "Toulouse, France", "date": "2025-11-03", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IHM'25 - 36e Conférence Internationale Francophone sur l'Interaction Humain-Machine" }, { "lang": "en", "publisher": "Springer", "doi": "https://doi.org/None", "title": "PLM Meta-UI: Toward an End-User Solution for Personalizing Product Lifecycle Management applications", "url": "https://hal.science/hal-05329315", "abstract": "Like other applications, Product Lifecycle Management (PLM) applications need to be adapted to meet individual needs. However, PLM applications are not built to be adapted. This article proposes to enable PLM customization using a meta-UI, which aims to allow users to adapt their environments themselves. Here, the meta-UI concept is applied to web-based PLM applications. A proof of concept of a PLM Meta-UI has been designed and built. It has been evaluated through a user experiment with 20 professionals. The results tend to show a perceived usefulness of the meta-UI for customization. The participants also highlighted the advantages of using the meta-UI for their job. In particular, using the meta-UI seems to improve the understanding of concepts underlying PLM applications.", "authors": { "1": { "first_name": "Eliott", "last_name": "Dutronc" }, "2": { "first_name": "Sophie", "last_name": "Dupuy-Chessa" }, "3": { "first_name": "Jean-Marie", "last_name": "Favre" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/DDF25a/", "id": 1005, "bibtype": "inproceedings", "abbr": "DDF25a", "address": "Sevilla, Spain", "date": "2025-07-07", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "IFIP 22nd International Conference on Product Lifecycle Management (PLM)", "type_publi": "icolcomlec" }, { "lang": "en", "type_publi": "icolcomlec", "doi": "https://doi.org/10.1145/3731406.3734975", "title": "A lead for web-based generic PLM personalization with Meta-UI proof-of-concept", "url": "https://hal.science/hal-05329281", "abstract": "Personalization is a well-known need in human-computer interaction. The concept of meta-UI has been proposed to allow end-users to adapt their systems. However, the implementations of this concept are, so far, specific to one application, which has been developed with the meta-UI. To ease meta-UI expansion, this paper proposes to add a meta-UI to a type of application used by workers in their daily work: web-based Product Lifecycle Management (PLM) applications. The proposed meta-UI takes advantage of the similarity in the structure of web-based PLM applications to be woven into the PLM. It has been designed with classic human-computer interaction design techniques by considering the PLM professional context of use. 
A proof-of-concept has been implemented on top of one PLM in JavaScript, using the injection mechanism.", "authors": { "1": { "first_name": "Eliott", "last_name": "Dutronc" }, "2": { "first_name": "Sophie", "last_name": "Dupuy-Chessa" }, "3": { "first_name": "Jean-Marie", "last_name": "Favre" } }, "year": 2025, "uri": "http://iihm.imag.fr/publication/DDF25b/", "pages": "21-26", "bibtype": "inproceedings", "id": 1006, "abbr": "DDF25b", "address": "Trier, Germany", "date": "2025-06-23", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "The 17th ACM SIGCHI Symposium on Engineering Interactive Computing Systems" }, { "lang": "en", "publisher": "Elsevier", "type_publi": "irevcomlec", "bibtype": "article", "title": "Microgesture + Grasp: A journey from human capabilities to interaction with microgestures", "url": "https://hal.science/hal-04801105", "abstract": "Microgestures, i.e. fast and subtle finger movements, have shown a high potential for ubiquitous interaction. However, work to date either focuses on grasp contexts (holding an object) or on the free-hand context (no held object). These two contexts influence microgesture feasibility. Researchers have created sets of microgestures feasible across the entire taxonomy of everyday grasps. However, those sets include a limited number of microgestures as compared to those for the free-hand context, for which microgestures are distinguished according to fine characteristics such as the part of the finger being touched or the number of fingers used. We present the first study on microgesture feasibility across free-hand and grasp contexts. We also study, for the first time, the use of a finer characteristic of a microgesture in a grasp context: surface area. Then, we present a set of rules to determine the feasibility of a microgesture in a given context without the need for time-consuming feasibility studies. In both studies, some microgestures were not feasible across all considered contexts. We therefore explore different ways of defining a set of microgestures compatible with free-hand and grasping contexts.", "year": 2025, "uri": "http://iihm.imag.fr/publication/CGN25a/", "id": 972, "volume": 195, "abbr": "CGN25a", "authors": { "1": { "first_name": "Adrien", "last_name": "Chaffangeon Caillet" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Laurence", "last_name": "Nigay" } }, "date": "2025-01-01", "type": "Revues internationales avec comité de lecture", "journal": "International Journal of Human-Computer Studies" }, { "lang": "en", "publisher": "Springer Verlag", "type_publi": "irevcomlec", "title": "Writing words on paper and phone", "url": "https://hal.science/hal-05248569", "abstract": "Word writing requires retrieving letter components and producing movements to transform letters into graphic outputs. Nowadays, word writing is also prevalent on smartphones, but little is known about how the cognitive processes underlying writing on this device operate. This exploratory study investigated the timing of orthographic retrieval and movement execution in smartphone writing by comparing it with handwriting production. Young French adults wrote words of varying length on a smartphone and a digitizer that recorded fine-grained data on the timing of movement production. 
The results revealed longer latencies in phonewriting than handwriting, indicating that the writing medium affected orthographic processing before starting to write. During letter execution, motor production was slower in handwriting than phonewriting. Furthermore, the latency data revealed that the participants took more time to start writing short words than long ones. During letter production, long words yielded longer durations than short ones. Short and long words seem to require two different strategies. In short words, most orthographic processing is concluded before starting to write. Long words seem to be segmented into sub-lexical letter chunks, with the first chunks being processed orthographically before starting to write and the rest of the chunks processed on-line, simultaneously with the execution of the first letters of the word. Overall, the data revealed similar functioning patterns in phonewriting and handwriting, suggesting that despite differences in execution dynamics, the two writing modalities share common underlying processes. The results support the cascaded perspective in which central and peripheral processes are active simultaneously during movement production.", "authors": { "1": { "first_name": "Anna", "last_name": "Anastaseni" }, "2": { "first_name": "Quentin", "last_name": "Roy" }, "3": { "first_name": "Cyril", "last_name": "Perret" }, "4": { "first_name": "Antonio", "last_name": "Romano" }, "5": { "first_name": "Sonia", "last_name": "Kandel" } }, "date": "2025-09-10", "type": "Revues internationales avec comité de lecture", "journal": "Reading and Writing" }]);