publications([{ "lang": "fr", "type_publi": "these", "title": "Increasing input interaction expressiveness using eyes-free multi-finger interaction", "url": "https://hal.science/tel-04948639", "abstract": "In our visually-oriented societies, people with visual impairments (PVI) face significant challenges in their daily lives. For PVI, assistive technologies (AT) play a crucial role in facilitating their interaction with their environment and accessing various content. However, abandonment rates and non-usage of traditional AT are high due to factors such as availability, cost, and social acceptance. This is why mainstream touchscreen devices (MTDs) like smartphones and tablets are increasingly utilized by PVI due to their affordability and widespread availability. Besides, MTDs offer diverse functionalities and can replace the need for multiple specialized AT.Nevertheless, the accessibility of MTDs presents issues due to their reliance on visual input. Without vision, it becomes challenging to target elements within a graphical user interface. In such situations, interactions are limited to simple gestures that can be executed without visual feedback, such as taps and directional swipes, which reduces expressiveness. Additionally, for PVI, MTD accessibility relies on applications known as \"screen readers\", which audibly present the screen's 2D visual content as a 1D auditory list of elements. Although this allows PVI to access digital content and interact with it, each element must be processed one by one, increasing the time and interactions needed to decipher the content. Performing complex actions like copy-paste becomes even more time-consuming as users must navigate step-by-step through menus to find the desired functions.To address these issues, we explore the possibility of introducing an additional means of interaction: Thumb-To-Finger (TTF) microgestures (µG), aimed at enhancing MTD expressiveness in situations where visual feedback is absent. For instance, by combining four directional swipes with just two TTF µG (thumb touching the index or middle finger), it is possible to generate 12 commands: four swipes without the thumb touching any finger, four swipes with the thumb touching the index, and four swipes with the thumb touching the middle finger. Similar to keyboard shortcuts on a computer, TTF µG have the potential to streamline interactions for common tasks.Hence, our research focuses on the feasibility and utility of TTF µG when used in conjunction with MTDs and without visual feedback. Initially, we conducted a study to assess the feasibility of 33 commonly mentioned TTF µG in the literature. Our results identified 8 particularly effective TTF µG that can be used concurrently with a MTD in a eyes-free situation.Subsequently, we demonstrate, through three practical usage scenarios, how these eight TTF µG can effectively address common challenges faced by PVI when using MTDs:The first scenario revolves around exploring an audio-tactile document, using TTF µG to trigger commands and provide localized audio feedback without interrupting contact with the MTD's surface, thereby enhancing exploration fluidity.The second scenario involves rearranging a grid of icons. Here, TTF µG serve as commands to swiftly access copy-paste functionalities, thus shortening interaction paths.Lastly, the third scenario pertains to text selection, a cumbersome task for PVI due to hierarchical menus imposed by accessibility tools. TTF µG enhance MTD expressiveness, simplifying text selection and bypassing menu usage.Our results indicate that TTF µG show promise in improving the efficiency and accessibility of interactions on MTDs in the absence of visual feedback. By enhancing expressiveness, they reduce the time and actions required to perform various tasks that are typically avoided by PVI due to their complexity when using default accessibility tools.", "year": 2024, "uri": "http://iihm.imag.fr/publication/F24a/", "bibtype": "phdthesis", "abbr": "F24a", "authors": { "1": { "first_name": "Gauthier", "last_name": "Faisandaz" } }, "date": "2024-11-18", "type": "Thèses et habilitations", "id": 973 }, { "lang": "en", "publisher": "ACM: Association for Computing Machinery, New York", "doi": "https://doi.org/10.1145/3577190.3614131", "title": "µGeT: Multimodal eyes-free text selection technique combining touch interaction and microgestures", "url": "https://hal.science/hal-04353214", "abstract": "We present μGeT, a novel multimodal eyes-free text selection technique. μGeT combines touch interaction with microgestures. μGeT is especially suited for People with Visual Impairments (PVI) by expanding the input bandwidth of touchscreen devices, thus shortening the interaction paths for routine tasks. To do so, μGeT extends touch interaction (left/right and up/down flicks) using two simple microgestures: thumb touching either the index or the middle finger. For text selection, the multimodal technique allows us to directly modify the positioning of the two selection handles and the granularity of text selection. Two user studies, one with 9 PVI and one with 8 blindfolded sighted people, compared μGeT with a baseline common technique (VoiceOver like on iPhone). Despite a large variability in performance, the two user studies showed that μGeT is globally faster and yields fewer errors than VoiceOver. A detailed analysis of the interaction trajectories highlights the different strategies adopted by the participants. Beyond text selection, this research shows the potential of combining touch interaction and microgestures for improving the accessibility of touchscreen devices for PVI.", "authors": { "1": { "first_name": "Gauthier", "last_name": "Faisandaz" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Christophe", "last_name": "Jouffrais" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2023, "uri": "http://iihm.imag.fr/publication/FGJ+23a/", "pages": "594-603", "bibtype": "inproceedings", "id": 958, "abbr": "FGJ+23a", "address": "Paris, France", "date": "2023-10-09", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "25th ACM International Conference on Multimodal Interaction Paris (ICMI 2023)", "type_publi": "icolcomlec" }, { "lang": "en", "publisher": "ACM", "doi": "https://doi.org/10.1145/3536221.3556589", "title": "Keep in Touch: Combining Touch Interaction with Thumb-to-Finger µGestures for People with Visual Impairment", "url": "https://hal.archives-ouvertes.fr/hal-03778999", "abstract": "We present a set of 8 thumb-to-finger microgestures (TTF μGestures) that can be used as an additional modality to enrich touch interaction in eyes-free situations. TTF μGestures possess characteristics especially suited for people with visual impairment (PVI). They have never been studied specifically for PVI to improve accessibility of touchscreen devices. We studied a set of 33 common TTF μGestures to determine which are feasible and usable without seeing while the index is touching a surface. We found that the constrained position of the hand and the absence of vision prevent participants from being able to efficiently target a specific phalanx. Thus, we propose a set of 8 TTF μGestures (6 taps, 2 swipes) balancing resiliency (i.e., low error-rate) and expressivity (i.e., number of possible inputs): as a dimension combined with the touch modality, it would realistically multiply the touch command space by eight. Within our set of 8 TTF μGestures, we chose a subset of 4 μGestures (2 taps and 2 swipes) and implemented an exploration scenario of an audio-tactile map with a raised-line overlay on a touchscreen and tested it with 7 PVI. Their feedback was positive on the potential benefits of TTF μGestures in enhancing the touch modality and supporting PVI interaction with touchscreen devices", "authors": { "1": { "first_name": "Gauthier", "last_name": "Faisandaz" }, "2": { "first_name": "Alix", "last_name": "Goguey" }, "3": { "first_name": "Christophe", "last_name": "Jouffrais" }, "4": { "first_name": "Laurence", "last_name": "Nigay" } }, "year": 2022, "uri": "http://iihm.imag.fr/publication/FGJ+22a/", "pages": "105–116", "bibtype": "inproceedings", "id": 945, "abbr": "FGJ+22a", "address": "Bengaluru (Bangalore), India", "date": "2022-11-07", "type": "Conférences internationales de large diffusion avec comité de lecture sur texte complet", "booktitle": "24th ACM International Conference on Multimodal Interaction (ICMI 2022)", "type_publi": "icolcomlec" }]);