Open Access Publications
From research on the visual systems of turtles, to the perception of faces with or without makeup, to transaccadic perception and perceptual cycles in the brain — VPixx hardware and software solutions have supported research in vision science and beyond for over 20 years. We are immensely proud of the discoveries and accomplishments of our customers across the world.
On this page you will find a non-exhaustive list of peer-reviewed, open access publications citing VPixx tools dating back to 2003. Browse the list or use the tag filter to search for specific products. Note that we report the device used in the paper according to the authors; this may not accurately reflect the specific model of device used (e.g., VIEWPixx vs. VIEWPixx /3D). Nor do we guarantee the accuracy of published content. Please contact our team at [email protected] if you have any questions about a specific paper.
Curious about a specific application of our tools? Can’t find what you are looking for? Our staff scientists are happy to discuss paradigms and protocols using our equipment by email or video chat. Please contact us with your questions.
Want to have your work added to our library? Send us a message at [email protected] and we will add it. Your article must be peer-reviewed, open access, and it must indicate VPixx products were used in the research.
Use the search tool below to search for specific terms among the titles, authors and abstracts in our library.
Balestrieri, Elio; Michel, René; Busch, Niko A.
Alpha-Band Lateralization and Microsaccades Elicited by Exogenous Cues Do Not Track Attentional Orienting Journal Article
In: eneuro, vol. 11, no. 2, pp. ENEURO.0076–23.2023, 2024, ISSN: 2373-2822.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{balestrieri_alpha-band_2024,
title = {Alpha-Band Lateralization and Microsaccades Elicited by Exogenous Cues Do Not Track Attentional Orienting},
author = {Balestrieri, Elio and Michel, René and Busch, Niko A.},
url = {https://www.eneuro.org/lookup/doi/10.1523/ENEURO.0076-23.2023},
doi = {10.1523/ENEURO.0076-23.2023},
issn = {2373-2822},
year = {2024},
date = {2024-02-01},
urldate = {2024-05-09},
journal = {eNeuro},
volume = {11},
number = {2},
pages = {ENEURO.0076-23.2023},
abstract = {We explore the world by constantly shifting our focus of attention toward salient stimuli and then disengaging from them in search of new ones. The alpha rhythm (8–13 Hz) has been suggested as a pivotal neural substrate of these attentional shifts, due to its local synchronization and desynchronization that suppress irrelevant cortical areas and facilitate relevant areas, a phenomenon called alpha lateralization. Whether alpha lateralization tracks the focus of attention from orienting toward a salient stimulus to disengaging from it is still an open question. We addressed it by leveraging the phenomenon of inhibition of return (IOR), consisting of an initial facilitation in response times (RTs) for stimuli appearing at an exogenously cued location, followed by a suppression of that location. Our behavioral data from human participants showed a typical IOR effect with both early facilitation and subsequent inhibition. In contrast, alpha lateralized in the cued direction after the behavioral facilitation effect and never re-lateralized compatibly with the behavioral inhibition. Furthermore, we analyzed the interaction between alpha lateralization and microsaccades: while alpha was lateralized toward the cued location, microsaccades were mostly oriented away from it. Crucially, the two phenomena showed a significant positive correlation. These results indicate that alpha lateralization reflects primarily the processing of salient stimuli, challenging the view that alpha lateralization is directly involved in exogenous attentional orienting per se. We discuss the relevance of the present findings for an oculomotor account of alpha lateralization as a modulator of cortical excitability in preparation of a saccade.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Huber-Huber, Christoph; Melcher, David
Saccade execution increases the preview effect with faces: An EEG and eye-tracking coregistration study Journal Article
In: Attention, Perception, & Psychophysics, 2023, ISSN: 1943-393X.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{huber-huber_saccade_2023,
title = {Saccade execution increases the preview effect with faces: An {EEG} and eye-tracking coregistration study},
author = {Huber-Huber, Christoph and Melcher, David},
url = {https://doi.org/10.3758/s13414-023-02802-5},
doi = {10.3758/s13414-023-02802-5},
issn = {1943-393X},
year = {2023},
date = {2023-11-01},
urldate = {2023-12-21},
journal = {Attention, Perception, \& Psychophysics},
abstract = {Under naturalistic viewing conditions, humans conduct about three to four saccadic eye movements per second. These dynamics imply that in real life, humans rarely see something completely new; there is usually a preview of the upcoming foveal input from extrafoveal regions of the visual field. In line with results from the field of reading research, we have shown with EEG and eye-tracking coregistration that an extrafoveal preview also affects postsaccadic visual object processing and facilitates discrimination. Here, we ask whether this preview effect in the fixation-locked N170, and in manual responses to the postsaccadic target face (tilt discrimination), requires saccade execution. Participants performed a gaze-contingent experiment in which extrafoveal face images could change their orientation during a saccade directed to them. In a control block, participants maintained stable gaze throughout the experiment and the extrafoveal face reappeared foveally after a simulated saccade latency. Compared with this no-saccade condition, the neural and the behavioral preview effects were much larger in the saccade condition. We also found shorter first fixation durations after an invalid preview, which is in contrast to reading studies. We interpret the increased preview effect under saccade execution as the result of the additional sensorimotor processes that come with gaze behavior compared with visual perception under stable fixation. In addition, our findings call into question whether EEG studies with fixed gaze capture key properties and dynamics of active, natural vision.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Greene, Michelle R.; Hansen, Bruce C.
Shared spatiotemporal category representations in biological and artificial deep neural networks Journal Article
In: PLOS Computational Biology, vol. 14, no. 7, pp. e1006327, 2018, ISSN: 1553-7358, (Publisher: Public Library of Science).
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{greene_shared_2018,
title = {Shared spatiotemporal category representations in biological and artificial deep neural networks},
author = {Greene, Michelle R. and Hansen, Bruce C.},
url = {https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006327},
doi = {10.1371/journal.pcbi.1006327},
issn = {1553-7358},
year = {2018},
date = {2018-07-01},
urldate = {2024-01-17},
journal = {PLOS Computational Biology},
volume = {14},
number = {7},
pages = {e1006327},
publisher = {Public Library of Science},
abstract = {Visual scene category representations emerge very rapidly, yet the computational transformations that enable such invariant categorizations remain elusive. Deep convolutional neural networks (CNNs) perform visual categorization at near human-level accuracy using a feedforward architecture, providing neuroscientists with the opportunity to assess one successful series of representational transformations that enable categorization in silico. The goal of the current study is to assess the extent to which sequential scene category representations built by a CNN map onto those built in the human brain as assessed by high-density, time-resolved event-related potentials (ERPs). We found correspondence both over time and across the scalp: earlier (0–200 ms) ERP activity was best explained by early CNN layers at all electrodes. Although later activity at most electrode sites corresponded to earlier CNN layers, activity in right occipito-temporal electrodes was best explained by the later, fully-connected layers of the CNN around 225 ms post-stimulus, along with similar patterns in frontal electrodes. Taken together, these results suggest that the emergence of scene category representations develop through a dynamic interplay between early activity over occipital electrodes as well as later activity over temporal and frontal electrodes.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Turatto, Massimo; Bonetti, Francesca; Pascucci, David
Filtering visual onsets via habituation: A context-specific long-term memory of irrelevant stimuli Journal Article
In: Psychonomic Bulletin & Review, vol. 25, no. 3, pp. 1028–1034, 2018, ISSN: 1531-5320.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{turatto_filtering_2018,
title = {Filtering visual onsets via habituation: A context-specific long-term memory of irrelevant stimuli},
author = {Turatto, Massimo and Bonetti, Francesca and Pascucci, David},
url = {https://doi.org/10.3758/s13423-017-1320-x},
doi = {10.3758/s13423-017-1320-x},
issn = {1531-5320},
year = {2018},
date = {2018-06-01},
urldate = {2024-01-03},
journal = {Psychonomic Bulletin \& Review},
volume = {25},
number = {3},
pages = {1028--1034},
abstract = {The fact that we are often immediately attracted by sudden visual onsets provides a clear advantage for our survival. However, how can we resist from being continuously distracted by irrelevant repetitive onsets? Since the seminal work of Sokolov (1963), habituation of the orienting of attention has long been proposed to be a possible filtering mechanism. Here, in two experiments, we provide novel evidence showing that (a) habituation of capture of focused visual attention relies on a stored representation of the distractor onsets in relation to their context, and (b) that once formed such representation endures unchanged for weeks without any further exposure to the distractors. In agreement with the proposal of Wagner (1979) concerning the associative nature of habituation, the results of Experiment 1 suggest that habituation of attentional capture is context specific. Furthermore, the results of Experiment 2 show that to filter visual distractors our cognitive system uses long-lasting memories of the irrelevant information. Although distractor filtering can be implemented via top-down inhibitory control, neural and cognitive mechanisms underlying habituation provide a straightforward explanation for the reduced distraction obtained with training, thus working like an automatic filter that prevents irrelevant recurring stimuli from gaining access to higher stages of analysis.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Sheldon, Sarah S.; Mathewson, Kyle E.
Does 10-Hz Cathodal Oscillating Current of the Parieto-Occipital Lobe Modulate Target Detection? Journal Article
In: Frontiers in Neuroscience, vol. 12, pp. 83, 2018, ISSN: 1662-4548.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{sheldon_does_2018,
  author    = {Sarah S. Sheldon and Kyle E. Mathewson},
  title     = {Does 10-Hz Cathodal Oscillating Current of the Parieto-Occipital Lobe Modulate Target Detection?},
  journal   = {Frontiers in Neuroscience},
  volume    = {12},
  pages     = {83},
  year      = {2018},
  date      = {2018-02-01},
  doi       = {10.3389/fnins.2018.00083},
  issn      = {1662-4548},
  url       = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5827548/},
  urldate   = {2024-01-02},
  abstract  = {The phase of alpha (8–12 Hz) brain oscillations have been associated with moment to moment changes in visual attention and awareness. Previous work has demonstrated that endogenous oscillations and subsequent behavior can be modulated by oscillating transcranial current stimulation (otCS). The purpose of the current study is to establish the efficacy of cathodal otCS for modulation of the ongoing alpha brain oscillations, allowing for modulation of individual's visual perception. Thirty-six participants performed a target detection with sham and 10-Hz cathodal otCS. Each participant had two practice and two experimental sets composed of three blocks of 128 trials per block. Stimulating electrodes were placed on the participant's head with the anode electrode at Cz and the cathode electrode at Oz. A 0.5 mA current was applied every 100 ms (10 Hz frequency) during the otCS condition. The same current and frequency was applied for the first 10–20 s of the sham condition, after which the current was turned off. Target detection rates were compared between the sham and otCS experimental conditions in order to test for effects of otCS phase on target detection. We found no significant difference in target detection rates between the sham and otCS conditions, and discuss potential reasons for the apparent inability of cathodal otCS to effectively modulate visual perception.},
  keywords  = {VIEWPixxEEG},
  pubstate  = {published},
  tppubtype = {article}
}
Wittevrongel, Benjamin; Wolputte, Elia Van; Hulle, Marc M. Van
Code-modulated visual evoked potentials using fast stimulus presentation and spatiotemporal beamformer decoding Journal Article
In: Scientific Reports, vol. 7, no. 1, pp. 15037, 2017, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{wittevrongel_code-modulated_2017,
title = {Code-modulated visual evoked potentials using fast stimulus presentation and spatiotemporal beamformer decoding},
author = {Wittevrongel, Benjamin and {Van Wolputte}, Elia and {Van Hulle}, Marc M.},
url = {https://www.nature.com/articles/s41598-017-15373-x},
doi = {10.1038/s41598-017-15373-x},
issn = {2045-2322},
year = {2017},
date = {2017-11-01},
urldate = {2023-12-22},
journal = {Scientific Reports},
volume = {7},
number = {1},
pages = {15037},
publisher = {Nature Publishing Group},
abstract = {When encoding visual targets using various lagged versions of a pseudorandom binary sequence of luminance changes, the EEG signal recorded over the viewer’s occipital pole exhibits so-called code-modulated visual evoked potentials (cVEPs), the phase lags of which can be tied to these targets. The cVEP paradigm has enjoyed interest in the brain-computer interfacing (BCI) community for the reported high information transfer rates (ITR, in bits/min). In this study, we introduce a novel decoding algorithm based on spatiotemporal beamforming, and show that this algorithm is able to accurately identify the gazed target. Especially for a small number of repetitions of the coding sequence, our beamforming approach significantly outperforms an optimised support vector machine (SVM)-based classifier, which is considered state-of-the-art in cVEP-based BCI. In addition to the traditional 60 Hz stimulus presentation rate for the coding sequence, we also explore the 120 Hz rate, and show that the latter enables faster communication, with a maximal median ITR of 172.87 bits/min. Finally, we also report on a transition effect in the EEG signal following the onset of the stimulus sequence, and recommend to exclude the first 150 ms of the trials from decoding when relying on a single presentation of the stimulus sequence.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Silvestre, Daphné; Cavanagh, Patrick; Arleo, Angelo; Allard, Rémy
Adding temporally localized noise can enhance the contribution of target knowledge on contrast detection Journal Article
In: Journal of Vision, vol. 17, no. 2, pp. 5, 2017, ISSN: 1534-7362.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{silvestre_adding_2017,
  author    = {Daphné Silvestre and Patrick Cavanagh and Angelo Arleo and Rémy Allard},
  title     = {Adding temporally localized noise can enhance the contribution of target knowledge on contrast detection},
  journal   = {Journal of Vision},
  volume    = {17},
  number    = {2},
  pages     = {5},
  year      = {2017},
  date      = {2017-02-01},
  doi       = {10.1167/17.2.5},
  issn      = {1534-7362},
  url       = {https://doi.org/10.1167/17.2.5},
  urldate   = {2023-12-21},
  abstract  = {External noise paradigms are widely used to characterize sensitivity by comparing the effect of a variable on contrast threshold when it is limited by internal versus external noise. A basic assumption of external noise paradigms is that the processing properties are the same in low and high noise. However, recent studies (e.g., Allard & Cavanagh, 2011; Allard & Faubert, 2014b) suggest that this assumption could be violated when using spatiotemporally localized noise (i.e., appearing simultaneously and at the same location as the target) but not when using spatiotemporally extended noise (i.e., continuously displayed, full-screen, dynamic noise). These previous findings may have been specific to the crowding and 0D noise paradigms that were used, so the purpose of the current study is to test if this violation of noise-invariant processing also occurs in a standard contrast detection task in white noise. The rationale of the current study is that local external noise triggers the use of recognition rather than detection and that a recognition process should be more affected by uncertainty about the shape of the target than one involving detection. To investigate the contribution of target knowledge on contrast detection, the effect of orientation uncertainty was evaluated for a contrast detection task in the absence of noise and in the presence of spatiotemporally localized or extended noise. A larger orientation uncertainty effect was observed with temporally localized noise than with temporally extended noise or with no external noise, indicating a change in the nature of the processing for temporally localized noise. We conclude that the use of temporally localized noise in external noise paradigms risks triggering a shift in process, invalidating the noise-invariant processing required for the paradigm. If, instead, temporally extended external noise is used to match the properties of internal noise, no such processing change occurs.},
  keywords  = {VIEWPixxEEG},
  pubstate  = {published},
  tppubtype = {article}
}
Higashi, Hiroshi; Minami, Tetsuto; Nakauchi, Shigeki
Variation in Event-Related Potentials by State Transitions Journal Article
In: Frontiers in Human Neuroscience, vol. 11, 2017, ISSN: 1662-5161.
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{higashi_variation_2017,
title = {Variation in Event-Related Potentials by State Transitions},
author = {Higashi, Hiroshi and Minami, Tetsuto and Nakauchi, Shigeki},
url = {https://www.frontiersin.org/articles/10.3389/fnhum.2017.00075},
doi = {10.3389/fnhum.2017.00075},
issn = {1662-5161},
year = {2017},
date = {2017-01-01},
urldate = {2024-01-18},
journal = {Frontiers in Human Neuroscience},
volume = {11},
abstract = {The probability of an event's occurrence affects event-related potentials (ERPs) on electroencephalograms. The relation between probability and potentials has been discussed by using a quantity called surprise that represents the self-information that humans receive from the event. Previous studies have estimated surprise based on the probability distribution in a stationary state. Our hypothesis is that state transitions also play an important role in the estimation of surprise. In this study, we compare the effects of surprise on the ERPs based on two models that generate an event sequence: a model of a stationary state and a model with state transitions. To compare these effects, we generate the event sequences with Markov chains to avoid a situation that the state transition probability converges with the stationary probability by the accumulation of the event observations. Our trial-by-trial model-based analysis showed that the stationary probability better explains the P3b component and the state transition probability better explains the P3a component. The effect on P3a suggests that the internal model, which is constantly and automatically generated by the human brain to estimate the probability distribution of the events, approximates the model with state transitions because Bayesian surprise, which represents the degree of updating of the internal model, is highly reflected in P3a. The global effect reflected in P3b, however, may not be related to the internal model because P3b depends on the stationary probability distribution. The results suggest that an internal model can represent state transitions and the global effect is generated by a different mechanism than the one for forming the internal model.},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}
Nittono, Hiroshi; Ohashi, Akane
Considering Cuteness Enhances Smiling Responses to Infant Faces Journal Article
In: Japanese Psychological Research, vol. n/a, no. n/a, 0000, ISSN: 1468-5884, (_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/jpr.12514).
Abstract | Links | BibTeX | Tags: VIEWPixxEEG
@article{nittono_considering_nodate,
title = {Considering Cuteness Enhances Smiling Responses to Infant Faces},
author = {Nittono, Hiroshi and Ohashi, Akane},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/jpr.12514},
doi = {10.1111/jpr.12514},
issn = {1468-5884},
urldate = {2024-04-03},
journal = {Japanese Psychological Research},
abstract = {Laboratory and field research has reported that the appearance of infants causes observers to smile. The current study examined whether this smiling response is modulated by the observer's task and evaluative dimension. Thirty-nine young nulliparous women were asked to rate the cuteness or beauty levels of 6-month-old infants' faces using a 7-point scale in different blocks. Facial electromyograms (EMGs) were recorded of participants' facial muscles related to both smiling (the zygomaticus major [ZM] and orbicularis oculi [OO]) and frowning (the corrugator supercilii [CS]). The results showed that cuteness and beauty ratings were highly correlated with each other (r = .90), indicating that these evaluations were based on similar attractiveness-related physical features. Facial EMG responses on the smiling muscle sites, ZM and OO, increased significantly from the baseline when participants rated the faces' cuteness, and the responses were larger than when participants rated the faces' beauty. CS activity was not found to have any effect. The perceived cuteness level of the infant faces did not affect the facial EMG responses. Moreover, the magnitude of the smiling response was shown to be much smaller than that associated with voluntary smiling. These findings suggest that facial expressions while viewing infant faces do not exhibit a fixed pattern but are modulated by observers' tasks and that considering cuteness, which is based on more affective evaluations than beauty, can enhance smiling responses.},
note = {Advance online publication},
keywords = {VIEWPixxEEG},
pubstate = {published},
tppubtype = {article}
}