Open Access Publications
From research on the visual systems of turtles, to the perception of faces with or without makeup, to transaccadic perception and perceptual cycles in the brain — VPixx hardware and software solutions have supported research in vision science and beyond for over 20 years. We are immensely proud of the discoveries and accomplishments of our customers across the world.
On this page you will find a non-exhaustive list of peer-reviewed, open access publications citing VPixx tools dating back to 2003. Browse the list or use the tag filter to search for specific products. Note that we report the device used in the paper according to the authors; this may not accurately reflect the specific model of device used (e.g., VIEWPixx vs. VIEWPixx /3D). Nor do we guarantee the accuracy of published content. Please contact our team at [email protected] if you have any questions about a specific paper.
Curious about a specific application of our tools? Can’t find what you are looking for? Our staff scientists are happy to discuss paradigms and protocols using our equipment by email or video chat. Please contact us with your questions.
Want to have your work added to our library? Send us a message at [email protected] and we will add it. Your article must be peer-reviewed, open access, and it must indicate VPixx products were used in the research.
Use the search tool below to search for specific terms among the titles, authors and abstracts in our library.
Saurels, Blake W.; Peluso, Natalie; Taubert, Jessica
A behavioral advantage for the face pareidolia illusion in peripheral vision Journal Article
In: Scientific Reports, vol. 14, no. 1, pp. 10040, 2024, ISSN: 2045-2322, (Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: TRACKPixx3, VIEWPixx
@article{saurels_behavioral_2024,
  title     = {A behavioral advantage for the face pareidolia illusion in peripheral vision},
  author    = {Blake W. Saurels and Natalie Peluso and Jessica Taubert},
  url       = {https://www.nature.com/articles/s41598-024-60892-z},
  doi       = {10.1038/s41598-024-60892-z},
  issn      = {2045-2322},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-09},
  journal   = {Scientific Reports},
  volume    = {14},
  number    = {1},
  pages     = {10040},
  abstract  = {Investigation of visual illusions helps us understand how we process visual information. For example, face pareidolia, the misperception of illusory faces in objects, could be used to understand how we process real faces. However, it remains unclear whether this illusion emerges from errors in face detection or from slower, cognitive processes. Here, our logic is straightforward; if examples of face pareidolia activate the mechanisms that rapidly detect faces in visual environments, then participants will look at objects more quickly when the objects also contain illusory faces. To test this hypothesis, we sampled continuous eye movements during a fast saccadic choice task—participants were required to select either faces or food items. During this task, pairs of stimuli were positioned close to the initial fixation point or further away, in the periphery. As expected, the participants were faster to look at face targets than food targets. Importantly, we also discovered an advantage for food items with illusory faces but, this advantage was limited to the peripheral condition. These findings are among the first to demonstrate that the face pareidolia illusion persists in the periphery and, thus, it is likely to be a consequence of erroneous face detection.},
  note      = {Publisher: Nature Publishing Group},
  keywords  = {TRACKPixx3, VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Schmitz, Inka; Strauss, Hanna; Reinel, Ludwig; Einhäuser, Wolfgang
Attentional cueing: Gaze is harder to override than arrows Journal Article
In: PLOS ONE, vol. 19, no. 3, pp. e0301136, 2024, ISSN: 1932-6203, (Publisher: Public Library of Science).
Abstract | Links | BibTeX | Tags: RESPONSEPixx, VIEWPixx
@article{schmitz_attentional_2024,
  title     = {Attentional cueing: Gaze is harder to override than arrows},
  author    = {Inka Schmitz and Hanna Strauss and Ludwig Reinel and Wolfgang Einhäuser},
  url       = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0301136},
  doi       = {10.1371/journal.pone.0301136},
  issn      = {1932-6203},
  year      = {2024},
  date      = {2024-03-01},
  urldate   = {2024-04-02},
  journal   = {PLOS ONE},
  volume    = {19},
  number    = {3},
  pages     = {e0301136},
  abstract  = {Gaze is an important and potent social cue to direct others’ attention towards specific locations. However, in many situations, directional symbols, like arrows, fulfill a similar purpose. Motivated by the overarching question how artificial systems can effectively communicate directional information, we conducted two cueing experiments. In both experiments, participants were asked to identify peripheral targets appearing on the screen and respond to them as quickly as possible by a button press. Prior to the appearance of the target, a cue was presented in the center of the screen. In Experiment 1, cues were either faces or arrows that gazed or pointed in one direction, but were non-predictive of the target location. Consistent with earlier studies, we found a reaction time benefit for the side the arrow or the gaze was directed to. Extending beyond earlier research, we found that this effect was indistinguishable between the vertical and the horizontal axis and between faces and arrows. In Experiment 2, we used 100% “counter-predictive” cues; that is, the target always occurred on the side opposite to the direction of gaze or arrow. With cues without inherent directional meaning (color), we controlled for general learning effects. Despite the close quantitative match between non-predictive gaze and non-predictive arrow cues observed in Experiment 1, the reaction-time benefit for counter-predictive arrows over neutral cues is more robust than the corresponding benefit for counter-predictive gaze. This suggests that–if matched for efficacy towards their inherent direction–gaze cues are harder to override or reinterpret than arrows. This difference can be of practical relevance, for example, when designing cues in the context of human-machine interaction.},
  note      = {Publisher: Public Library of Science},
  keywords  = {RESPONSEPixx, VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Neumann, Antonia; Leube, Alexander; Nabawi, Najma; Sauer, Yannick; Essig, Peter; Breher, Katharina; Wahl, Siegfried
Short-Term Peripheral Contrast Reduction Affects Central Chromatic and Achromatic Contrast Sensitivity Journal Article
In: Photonics, vol. 9, no. 3, pp. 123, 2022, ISSN: 2304-6732, (Number: 3 Publisher: Multidisciplinary Digital Publishing Institute).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{neumann_short-term_2022,
  title     = {Short-Term Peripheral Contrast Reduction Affects Central Chromatic and Achromatic Contrast Sensitivity},
  author    = {Antonia Neumann and Alexander Leube and Najma Nabawi and Yannick Sauer and Peter Essig and Katharina Breher and Siegfried Wahl},
  url       = {https://www.mdpi.com/2304-6732/9/3/123},
  doi       = {10.3390/photonics9030123},
  issn      = {2304-6732},
  year      = {2022},
  date      = {2022-03-01},
  urldate   = {2023-12-21},
  journal   = {Photonics},
  volume    = {9},
  number    = {3},
  pages     = {123},
  abstract  = {Peripheral retinal contrast reduction is suggested as a potential myopia control strategy. However, the underlying mechanism is yet unknown. Therefore, this study investigated the influence of peripheral contrast reduction on central chromatic and achromatic contrast sensitivity (CS). A total of 19 participants were included. Peripheral contrast reduction was induced via Bangerter foils of 0.4 and 0.8 density, each with a clear central zone of 8.0 mm diameter. Central achromatic and chromatic (for S-, M-, and L-cone types) CS was measured at 3 and 12 cpd in a 2-IFC psychophysical procedure. CS was tested monocularly at 0, 30, and 90 min of adaptation time, while the fellow eye was covered by an infrared filter. With the filter in place, pupil size was controlled to be smaller than the clear central aperture. Data were analyzed using linear mixed models. Cone-type CS showed significant differences among each other (all p < 0.05), except for the achromatic and L-cone type (p = 0.87). The minimum sensitivity was found with the S-cone type and the maximum with the M-cone type. Central achromatic and chromatic CS were equally affected by diffusion. The level of peripheral diffusion also influenced CS, while the 0.8 Bangerter foil led to a higher reduction in CS compared to the 0.4 Bangerter foil (p = 0.0008) and the control condition (p = 0.05). A significant reduction in CS occurred between 30 and 90 min of adaptation time (p < 0.0001). The current study found that peripheral contrast reduction impacted central achromatic and chromatic CS equally. It further showed that the amplitude of reduction was influenced by the level of diffusion, with the reduction becoming more pronounced over time.},
  note      = {Number: 3
Publisher: Multidisciplinary Digital Publishing Institute},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Shenfield, Lucienne; Beanland, Vanessa; Apthorp, Deborah
Temporal predictability does not impact attentional blink performance: effects of fixed vs. random inter-trial intervals Journal Article
In: PeerJ, vol. 8, pp. e8677, 2020, ISSN: 2167-8359.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{shenfield_temporal_2020,
title = {Temporal predictability does not impact attentional blink performance: effects of fixed vs. random inter-trial intervals},
author = {Lucienne Shenfield and Vanessa Beanland and Deborah Apthorp},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7060903/},
doi = {10.7717/peerj.8677},
issn = {2167-8359},
year = {2020},
date = {2020-03-01},
urldate = {2024-01-18},
journal = {PeerJ},
volume = {8},
pages = {e8677},
abstract = {Background
Does the inclusion of a randomized inter-trial interval (ITI) impact performance on an Attentional Blink (AB) task? The AB phenomenon is often used as a test of transient attention (); however, it is unclear whether incorporating aspects of sustained attention, by implementing a randomized ITI, would impact task performance. The current research sought to investigate this, by contrasting a standard version of the AB task with a random ITI version to determine whether performance changed, reflecting a change in difficulty, engagement, or motivation.
Method Thirty university students (21 female; age range 18–57},
internal-note = {NOTE(review): abstract is truncated mid-sentence ("age range 18-57") and the empty "()" marks a citation dropped during export -- restore the full abstract from https://doi.org/10.7717/peerj.8677},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Does the inclusion of a randomized inter-trial interval (ITI) impact performance on an Attentional Blink (AB) task? The AB phenomenon is often used as a test of transient attention (); however, it is unclear whether incorporating aspects of sustained attention, by implementing a randomized ITI, would impact task performance. The current research sought to investigate this, by contrasting a standard version of the AB task with a random ITI version to determine whether performance changed, reflecting a change in difficulty, engagement, or motivation.
Method Thirty university students (21 female; age range 18–57
Bonmassar, Claudia; Pavani, Francesco; Zoest, Wieske
The role of eye movements in manual responses to social and nonsocial cues Journal Article
In: Attention, Perception, & Psychophysics, vol. 81, no. 5, pp. 1236–1252, 2019, ISSN: 1943-393X.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{bonmassar_role_2019,
title = {The role of eye movements in manual responses to social and nonsocial cues},
author = {Claudia Bonmassar and Francesco Pavani and Wieske Zoest},
url = {https://doi.org/10.3758/s13414-019-01669-9},
doi = {10.3758/s13414-019-01669-9},
issn = {1943-393X},
year = {2019},
date = {2019-07-01},
urldate = {2024-01-18},
journal = {Attention, Perception, \& Psychophysics},
volume = {81},
number = {5},
pages = {1236--1252},
abstract = {Gaze and arrow cues cause covert attention shifts even when they are uninformative. Nonetheless, it is unclear to what extent oculomotor behavior influences manual responses to social and nonsocial stimuli. In two experiments, we tracked the gaze of participants during the cueing task with nonpredictive gaze and arrow cues. In Experiment 1, the discrimination task was easy and eye movements were not necessary, whereas in Experiment 2 they were instrumental in identifying the target. Validity effects on manual response time (RT) were similar for the two cues in Experiment 1 and in Experiment 2, though in the presence of eye movements observers were overall slower to respond to the arrow cue compared with the gaze cue. Cue direction had an effect on saccadic performance before the discrimination was presented and throughout the duration of the trial. Furthermore, we found evidence of a distinct impact of the type of cue on diverse oculomotor components. While saccade latencies were affected by the type of cue, both before and after the target onset, saccade landing positions were not. Critically, the manual validity effect was predicted by the landing position of the initial eye movement. This work suggests that the relationship between eye movements and attention is not straightforward. In the presence of overt selection, saccade latency related to the overall speed of manual response, while eye movements landing position was closely related to manual performance in response to different cues.},
internal-note = {NOTE(review): third author looks like "van Zoest, Wieske" with the "van" particle dropped by the export -- confirm against the paper},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Salinas, Emilio; Steinberg, Benjamin R; Sussman, Lauren A; Fry, Sophia M; Hauser, Christopher K; Anderson, Denise D; Stanford, Terrence R
Voluntary and involuntary contributions to perceptually guided saccadic choices resolved with millisecond precision Journal Article
In: eLife, vol. 8, pp. e46359, 2019, ISSN: 2050-084X, (Publisher: eLife Sciences Publications, Ltd).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{salinas_voluntary_2019,
title = {Voluntary and involuntary contributions to perceptually guided saccadic choices resolved with millisecond precision},
author = {Emilio Salinas and Benjamin R Steinberg and Lauren A Sussman and Sophia M Fry and Christopher K Hauser and Denise D Anderson and Terrence R Stanford},
editor = {Daeyeol Lee and Timothy E Behrens},
url = {https://doi.org/10.7554/eLife.46359},
doi = {10.7554/eLife.46359},
issn = {2050-084X},
year = {2019},
date = {2019-06-01},
urldate = {2024-01-18},
journal = {eLife},
volume = {8},
pages = {e46359},
abstract = {In the antisaccade task, which is considered a sensitive assay of cognitive function, a salient visual cue appears and the participant must look away from it. This requires sensory, motor-planning, and cognitive neural mechanisms, but what are their unique contributions to performance, and when exactly are they engaged? Here, by manipulating task urgency, we generate a psychophysical curve that tracks the evolution of the saccadic choice process with millisecond precision, and resolve the distinct contributions of reflexive (exogenous) and voluntary (endogenous) perceptual mechanisms to antisaccade performance over time. Both progress extremely rapidly, the former driving the eyes toward the cue early on (∼100 ms after cue onset) and the latter directing them away from the cue ∼40 ms later. The behavioral and modeling results provide a detailed, dynamical characterization of attentional and oculomotor capture that is not only qualitatively consistent across participants, but also indicative of their individual perceptual capacities.},
note = {Publisher: eLife Sciences Publications, Ltd},
internal-note = {NOTE(review): removed duplicate "Daeyeol Lee" from the editor list (export listed him twice)},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Beevers, Christopher G.; Mullarkey, Michael C.; Dainer-Best, Justin; Stewart, Rochelle A.; Labrada, Jocelyn; Allen, John J. B.; McGeary, John E.; Shumake, Jason
Association between negative cognitive bias and depression: A symptom-level approach Journal Article
In: Journal of abnormal psychology, vol. 128, no. 3, pp. 212–227, 2019, ISSN: 0021-843X.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{beevers_association_2019,
title = {Association between negative cognitive bias and depression: A symptom-level approach},
author = {Christopher G. Beevers and Michael C. Mullarkey and Justin Dainer-Best and Rochelle A. Stewart and Jocelyn Labrada and John J. B. Allen and John E. McGeary and Jason Shumake},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6449499/},
doi = {10.1037/abn0000405},
issn = {0021-843X},
year = {2019},
date = {2019-04-01},
urldate = {2023-12-21},
journal = {Journal of Abnormal Psychology},
volume = {128},
number = {3},
pages = {212--227},
abstract = {Cognitive models of depression posit that negatively biased self-referent processing and attention have important roles in the disorder. However, depression is a heterogeneous collection of symptoms and all symptoms are unlikely to be associated with these negative cognitive biases. The current study involved 218 community adults whose depression ranged from no symptoms to clinical levels of depression. Random forest machine learning was used to identify the most important depression symptom predictors of each negative cognitive bias. Depression symptoms were measured with the Beck Depression Inventory – II. Model performance was evaluated using predictive R-squared (Rpred2), the expected variance explained in data not used to train the algorithm, estimated by 10 repetitions of 10-fold cross-validation. Using the Self-Referent Encoding Task (SRET), depression symptoms explained 34% to 45% of the variance in negative self-referent processing. The symptoms of sadness, self-dislike, pessimism, feelings of punishment, and indecision were most important. Notably, many depression symptoms made virtually no contribution to this prediction. In contrast, for attention bias for sad stimuli, measured with the dot-probe task using behavioral reaction time and eye gaze metrics, no reliable symptom predictors were identified. Findings indicate that a symptom-level approach may provide new insights into which symptoms, if any, are associated with negative cognitive biases in depression., This study finds that many symptoms of depression are not strongly associated with thinking negatively about oneself or attending to negative information. This implies that negative cognitive biases may not be strongly associated with depression per se, but may instead contribute to the maintenance of specific depression symptoms, such as sadness, self-dislike, pessimism, feelings of punishment, and indecision.},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Gloriani, Alejandro H.; Schütz, Alexander C.
Humans Trust Central Vision More Than Peripheral Vision Even in the Dark Journal Article
In: Current Biology, vol. 29, no. 7, pp. 1206–1210.e4, 2019, ISSN: 0960-9822.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{gloriani_humans_2019,
title = {Humans Trust Central Vision More Than Peripheral Vision Even in the Dark},
author = {Alejandro H. Gloriani and Alexander C. Schütz},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6453110/},
doi = {10.1016/j.cub.2019.02.023},
issn = {0960-9822},
year = {2019},
date = {2019-04-01},
urldate = {2024-01-03},
journal = {Current Biology},
volume = {29},
number = {7},
pages = {1206--1210.e4},
abstract = {Two types of photoreceptors in the human retina support vision across a wide range of luminances: cones are active under bright daylight illumination (photopic viewing) and rods under dim illumination at night (scotopic viewing). These photoreceptors are distributed inhomogeneously across the retina []: cone-receptor density peaks at the center of the visual field (i.e., the fovea) and declines toward the periphery, allowing for high-acuity vision at the fovea in daylight. Rod receptors are absent from the fovea, leading to a functional foveal scotoma in night vision. In order to make optimal perceptual decisions, the visual system requires knowledge about its own properties and the relative reliability of signals arriving from different parts of the visual field []. Since cone and rod signals converge on the same pathways [], and their cortical processing is similar except for the foveal scotoma [], it is unclear if humans can take into account the differences between scotopic and photopic vision when making perceptual decisions. Here, we show that the scotopic foveal scotoma is filled in with information from the immediate surround and that humans trust this inferred information more than veridical information from the periphery of the visual field. We observed a similar preference under daylight illumination, indicating that humans have a default preference for information from the fovea even if this information is not veridical, like in night vision. This suggests that filling-in precedes the estimation of confidence, thereby shielding awareness from the foveal scotoma with respect to its contents and its properties.},
internal-note = {NOTE(review): removed publisher "Highlights" bullets and eTOC summary that the export had appended inside the abstract; empty "[]" are citation markers lost in export -- restore from https://doi.org/10.1016/j.cub.2019.02.023},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
•
Veridical information from the fovea is preferred under photopic viewing
•
Information missing in the scotopic foveal scotoma is filled in from the surround
•
Inferred information from the fovea is preferred under scotopic viewing
•
Content and properties of the foveal scotopic scotoma are hidden from awareness
, Gloriani and Schütz show that the absence of rod photoreceptors in central vision is compensated by filling-in of information from the surround under dark adaptation. Human observers trust this inferred information more than veridical information from peripheral vision. This preference for central vision is also present under daylight conditions.
Bogadhi, Amarender R.; Bollimunta, Anil; Leopold, David A.; Krauzlis, Richard J.
Spatial Attention Deficits Are Causally Linked to an Area in Macaque Temporal Cortex Journal Article
In: Current Biology, vol. 29, no. 5, pp. 726–736.e4, 2019, ISSN: 09609822.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{bogadhi_spatial_2019,
title = {Spatial Attention Deficits Are Causally Linked to an Area in Macaque Temporal Cortex},
author = {Amarender R. Bogadhi and Anil Bollimunta and David A. Leopold and Richard J. Krauzlis},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0960982219300302},
doi = {10.1016/j.cub.2019.01.028},
issn = {0960-9822},
year = {2019},
date = {2019-03-01},
urldate = {2024-01-03},
journal = {Current Biology},
volume = {29},
number = {5},
pages = {726--736.e4},
abstract = {Spatial neglect is a common clinical syndrome involving disruption of the brain’s attention-related circuitry, including the dorsocaudal temporal cortex. In macaques, the attention deficits associated with neglect can be readily modeled, but the absence of evidence for temporal cortex involvement has suggested a fundamental difference from humans. To map the neurological expression of neglect-like attention deficits in macaques, we measured attention-related fMRI activity across the cerebral cortex during experimental induction of neglect through reversible inactivation of the superior colliculus and frontal eye fields. During inactivation, monkeys exhibited hallmark attentional deficits of neglect in tasks using either motion or non-motion stimuli. The behavioral deficits were accompanied by marked reductions in fMRI attentional modulation that were strongest in a small region on the floor of the superior temporal sulcus; smaller reductions were also found in frontal eye fields and dorsal parietal cortex. Notably, direct inactivation of the mid-superior temporal sulcus (STS) cortical region identified by fMRI caused similar neglect-like spatial attention deficits. These results identify a putative macaque homolog to temporal cortex structures known to play a central role in human neglect.},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Devaraju, Dhatri S; U, Ajith Kumar; Maruthy, Santosh
Comparison of McGurk Effect across Three Consonant-Vowel Combinations in Kannada Journal Article
In: Journal of Audiology & Otology, vol. 23, no. 1, pp. 39–48, 2019, ISSN: 2384-1621.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{devaraju_comparison_2019,
title = {Comparison of McGurk Effect across Three Consonant-Vowel Combinations in Kannada},
author = {Dhatri S Devaraju and Ajith Kumar U and Santosh Maruthy},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6348306/},
doi = {10.7874/jao.2018.00234},
issn = {2384-1621},
year = {2019},
date = {2019-01-01},
urldate = {2024-01-02},
journal = {Journal of Audiology \& Otology},
volume = {23},
number = {1},
pages = {39--48},
abstract = {Background and Objectives
The influence of visual stimulus on the auditory component in the perception of auditory-visual (AV) consonant-vowel syllables has been demonstrated in different languages. Inherent properties of unimodal stimuli are known to modulate AV integration. The present study investigated how the amount of McGurk effect (an outcome of AV integration) varies across three different consonant combinations in Kannada language. The importance of unimodal syllable identification on the amount of McGurk effect was also seen.
Subjects and Methods
Twenty-eight individuals performed an AV identification task with ba/ga, pa/ka and ma/ṇa consonant combinations in AV congruent, AV incongruent (McGurk combination), audio alone and visual alone condition. Cluster analysis was performed using the identification scores for the incongruent stimuli, to classify the individuals into two groups; one with high and the other with low McGurk scores. The differences in the audio alone and visual alone scores between these groups were compared.
Results
The results showed significantly higher McGurk scores for ma/ṇa compared to ba/ga and pa/ka combinations in both high and low McGurk score groups. No significant difference was noted between ba/ga and pa/ka combinations in either group. Identification of /ṇa/ presented in the visual alone condition correlated negatively with the higher McGurk scores.
Conclusions
The results suggest that the final percept following the AV integration is not exclusively explained by the unimodal identification of the syllables. But there are other factors which may also contribute to making inferences about the final percept.},
internal-note = {NOTE(review): restored the retroflex nasal "ṇa" that the export garbled as "n· a", and removed a stray space in "ba/ ga" -- confirm transliteration against the paper},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
The influence of visual stimulus on the auditory component in the perception of auditory-visual (AV) consonant-vowel syllables has been demonstrated in different languages. Inherent properties of unimodal stimuli are known to modulate AV integration. The present study investigated how the amount of McGurk effect (an outcome of AV integration) varies across three different consonant combinations in Kannada language. The importance of unimodal syllable identification on the amount of McGurk effect was also seen.
Subjects and Methods
Twenty-eight individuals performed an AV identification task with ba/ ga, pa/ka and ma/n· a consonant combinations in AV congruent, AV incongruent (McGurk combination), audio alone and visual alone condition. Cluster analysis was performed using the identification scores for the incongruent stimuli, to classify the individuals into two groups; one with high and the other with low McGurk scores. The differences in the audio alone and visual alone scores between these groups were compared.
Results
The results showed significantly higher McGurk scores for ma/n· a compared to ba/ga and pa/ka combinations in both high and low McGurk score groups. No significant difference was noted between ba/ga and pa/ka combinations in either group. Identification of /n· a/ presented in the visual alone condition correlated negatively with the higher McGurk scores.
Conclusions
The results suggest that the final percept following the AV integration is not exclusively explained by the unimodal identification of the syllables. But there are other factors which may also contribute to making inferences about the final percept.
Ölander, K.; Muukkonen, I.; Saarela, T. P.; Salmela, V. R.
Integration of facial features under memory load Journal Article
In: Scientific Reports, vol. 9, no. 1, pp. 892, 2019, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{olander_integration_2019,
  title     = {Integration of facial features under memory load},
  author    = {K. Ölander and I. Muukkonen and T. P. Saarela and V. R. Salmela},
  url       = {https://www.nature.com/articles/s41598-018-37596-2},
  doi       = {10.1038/s41598-018-37596-2},
  issn      = {2045-2322},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2024-01-08},
  journal   = {Scientific Reports},
  volume    = {9},
  number    = {1},
  pages     = {892},
  abstract  = {Simple visual items and complex real-world objects are stored into visual working memory as a collection of independent features, not as whole or integrated objects. Storing faces into memory might differ, however, since previous studies have reported perceptual and memory advantage for whole faces compared to other objects. We investigated whether facial features can be integrated in a statistically optimal fashion and whether memory maintenance disrupts this integration. The observers adjusted a probe – either a whole face or isolated features (eyes or mouth region) – to match the identity of a target while viewing both stimuli simultaneously or after a 1.5 second retention period. Precision was better for the whole face compared to the isolated features. Perceptual precision was higher than memory precision, as expected, and memory precision further declined as the number of memorized items was increased from one to four. Interestingly, the whole-face precision was better predicted by models assuming injection of memory noise followed by integration of features than by models assuming integration of features followed by the memory noise. The results suggest equally weighted or optimal integration of facial features and indicate that feature information is preserved in visual working memory while remembering faces.},
  note      = {Number: 1
Publisher: Nature Publishing Group},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Glatz, Christiane; Chuang, Lewis L.
The time course of auditory looming cues in redirecting visuo-spatial attention Journal Article
In: Scientific Reports, vol. 9, no. 1, pp. 743, 2019, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{glatz_time_2019,
title = {The time course of auditory looming cues in redirecting visuo-spatial attention},
author = {Christiane Glatz and Lewis L. Chuang},
url = {https://www.nature.com/articles/s41598-018-36033-8},
doi = {10.1038/s41598-018-36033-8},
issn = {2045-2322},
year = {2019},
date = {2019-01-01},
urldate = {2024-01-18},
journal = {Scientific Reports},
volume = {9},
number = {1},
pages = {743},
abstract = {By orienting attention, auditory cues can improve the discrimination of spatially congruent visual targets. Looming sounds that increase in intensity are processed preferentially by the brain. Thus, we investigated whether auditory looming cues can orient visuo-spatial attention more effectively than static and receding sounds. Specifically, different auditory cues could redirect attention away from a continuous central visuo-motor tracking task to peripheral visual targets that appeared occasionally. To investigate the time course of crossmodal cuing, Experiment 1 presented visual targets at different time-points across a 500 ms auditory cue’s presentation. No benefits were found for simultaneous audio-visual cue-target presentation. The largest crossmodal benefit occurred at early cue-target asynchrony onsets (i.e.},
internal-note = {NOTE(review): abstract is truncated mid-sentence at "(i.e." -- restore the full abstract from https://doi.org/10.1038/s41598-018-36033-8},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {VIEWPixx},
pubstate = {published},
tppubtype = {article}
}
Vilidaite, Greta; Norcia, Anthony M.; West, Ryan J. H.; Elliott, Christopher J. H.; Pei, Francesca; Wade, Alex R.; Baker, Daniel H.
Autism sensory dysfunction in an evolutionarily conserved system Journal Article
In: Proceedings of the Royal Society B: Biological Sciences, vol. 285, no. 1893, pp. 20182255, 2018, ISSN: 0962-8452.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{vilidaite_autism_2018,
  title     = {Autism sensory dysfunction in an evolutionarily conserved system},
  author    = {Greta Vilidaite and Anthony M. Norcia and Ryan J. H. West and Christopher J. H. Elliott and Francesca Pei and Alex R. Wade and Daniel H. Baker},
  url       = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6304042/},
  doi       = {10.1098/rspb.2018.2255},
  issn      = {0962-8452},
  year      = {2018},
  date      = {2018-12-01},
  urldate   = {2023-12-21},
  journal   = {Proceedings of the Royal Society B: Biological Sciences},
  volume    = {285},
  number    = {1893},
  pages     = {20182255},
  abstract  = {There is increasing evidence for a strong genetic basis for autism, with many genetic models being developed in an attempt to replicate autistic symptoms in animals. However, current animal behaviour paradigms rarely match the social and cognitive behaviours exhibited by autistic individuals. Here, we instead assay another functional domain—sensory processing—known to be affected in autism to test a novel genetic autism model in Drosophila melanogaster. We show similar visual response alterations and a similar development trajectory in Nhe3 mutant flies (total n = 72) and in autistic human participants (total n = 154). We report a dissociation between first- and second-order electrophysiological visual responses to steady-state stimulation in adult mutant fruit flies that is strikingly similar to the response pattern in human adults with ASD as well as that of a large sample of neurotypical individuals with high numbers of autistic traits. We explain this as a genetically driven, selective signalling alteration in transient visual dynamics. In contrast to adults, autistic children show a decrease in the first-order response that is matched by the fruit fly model, suggesting that a compensatory change in processing occurs during development. Our results provide the first animal model of autism comprising a differential developmental phenotype in visual processing.},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Eiber, Calvin D.; Rahman, Abrar S.; Pietersen, Alexander N. J.; Zeater, Natalie; Dreher, Bogdan; Solomon, Samuel G.; Martin, Paul R.
Receptive Field Properties of Koniocellular On/Off Neurons in the Lateral Geniculate Nucleus of Marmoset Monkeys Journal Article
In: Journal of Neuroscience, vol. 38, no. 48, pp. 10384–10398, 2018, ISSN: 0270-6474, 1529-2401, (Publisher: Society for Neuroscience Section: Research Articles).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{eiber_receptive_2018,
  title     = {Receptive Field Properties of Koniocellular On/Off Neurons in the Lateral Geniculate Nucleus of Marmoset Monkeys},
  author    = {Calvin D. Eiber and Abrar S. Rahman and Alexander N. J. Pietersen and Natalie Zeater and Bogdan Dreher and Samuel G. Solomon and Paul R. Martin},
  url       = {https://www.jneurosci.org/content/38/48/10384},
  doi       = {10.1523/JNEUROSCI.1679-18.2018},
  issn      = {0270-6474},
  year      = {2018},
  date      = {2018-11-01},
  urldate   = {2024-01-17},
  journal   = {Journal of Neuroscience},
  volume    = {38},
  number    = {48},
  pages     = {10384--10398},
  abstract  = {The koniocellular (K) layers of the primate dorsal lateral geniculate nucleus house a variety of visual receptive field types, not all of which have been fully characterized. Here we made single-cell recordings targeted to the K layers of diurnal New World monkeys (marmosets). A subset of recorded cells was excited by both increments and decrements of light intensity (on/off-cells). Histological reconstruction of the location of these cells confirmed that they are segregated to K layers; we therefore refer to these cells as K-on/off cells. The K-on/off cells show high contrast sensitivity, strong bandpass spatial frequency tuning, and their response magnitude is strongly reduced by stimuli larger than the excitatory receptive field (silent suppressive surrounds). Stationary counterphase gratings evoke unmodulated spike rate increases or frequency-doubled responses in K-on/off cells; such responses are largely independent of grating spatial phase. The K-on/off cells are not orientation or direction selective. Some (but not all) properties of K-on/off cells are consistent with those of local-edge-detector/impressed-by-contrast cells reported in studies of cat retina and geniculate, and broad-thorny ganglion cells recorded in macaque monkey retina. The receptive field properties of K-on/off cells and their preferential location in the ventral K layers (K1 and K2) make them good candidates for the direct projection from geniculate to extrastriate cortical area MT/V5. If so, they could contribute to visual information processing in the dorsal (“where” or “action”) visual stream.
SIGNIFICANCE STATEMENT We characterize cells in an evolutionary ancient part of the visual pathway in primates. The cells are located in the lateral geniculate nucleus (the main visual afferent relay nucleus), in regions called koniocellular layers that are known to project to extrastriate visual areas as well as primary visual cortex. The cells show high contrast sensitivity and rapid, transient responses to light onset and offset. Their properties suggest they could contribute to visual processing in the dorsal (“where” or “action”) visual stream.},
  note      = {Publisher: Society for Neuroscience
Section: Research Articles},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
SIGNIFICANCE STATEMENT We characterize cells in an evolutionary ancient part of the visual pathway in primates. The cells are located in the lateral geniculate nucleus (the main visual afferent relay nucleus), in regions called koniocellular layers that are known to project to extrastriate visual areas as well as primary visual cortex. The cells show high contrast sensitivity and rapid, transient responses to light onset and offset. Their properties suggest they could contribute to visual processing in the dorsal (“where” or “action”) visual stream.
Arcizet, Fabrice; Krauzlis, Richard J.
Covert spatial selection in primate basal ganglia Journal Article
In: PLOS Biology, vol. 16, no. 10, pp. e2005930, 2018, ISSN: 1545-7885, (Publisher: Public Library of Science).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{arcizet_covert_2018,
  title     = {Covert spatial selection in primate basal ganglia},
  author    = {Fabrice Arcizet and Richard J. Krauzlis},
  url       = {https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2005930},
  doi       = {10.1371/journal.pbio.2005930},
  issn      = {1545-7885},
  year      = {2018},
  date      = {2018-10-01},
  urldate   = {2024-01-02},
  journal   = {PLOS Biology},
  volume    = {16},
  number    = {10},
  pages     = {e2005930},
  abstract  = {The basal ganglia are important for action selection. They are also implicated in perceptual and cognitive functions that seem far removed from motor control. Here, we tested whether the role of the basal ganglia in selection extends to nonmotor aspects of behavior by recording neuronal activity in the caudate nucleus while animals performed a covert spatial attention task. We found that caudate neurons strongly select the spatial location of the relevant stimulus throughout the task even in the absence of any overt action. This spatially selective activity was dependent on task and visual conditions and could be dissociated from goal-directed actions. Caudate activity was also sufficient to correctly identify every epoch in the covert attention task. These results provide a novel perspective on mechanisms of attention by demonstrating that the basal ganglia are involved in spatial selection and tracking of behavioral states even in the absence of overt orienting movements.},
  note      = {Publisher: Public Library of Science},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Knöll, Jonas; Pillow, Jonathan W.; Huk, Alexander C.
Lawful tracking of visual motion in humans, macaques, and marmosets in a naturalistic, continuous, and untrained behavioral context Journal Article
In: Proceedings of the National Academy of Sciences, vol. 115, no. 44, pp. E10486–E10494, 2018, (Publisher: Proceedings of the National Academy of Sciences).
Abstract | Links | BibTeX | Tags: PROPixx, VIEWPixx
@article{knoll_lawful_2018,
  title     = {Lawful tracking of visual motion in humans, macaques, and marmosets in a naturalistic, continuous, and untrained behavioral context},
  author    = {Jonas Knöll and Jonathan W. Pillow and Alexander C. Huk},
  url       = {https://www.pnas.org/doi/10.1073/pnas.1807192115},
  doi       = {10.1073/pnas.1807192115},
  year      = {2018},
  date      = {2018-10-01},
  urldate   = {2024-01-12},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {115},
  number    = {44},
  pages     = {E10486--E10494},
  abstract  = {Much study of the visual system has focused on how humans and monkeys integrate moving stimuli over space and time. Such assessments of spatiotemporal integration provide fundamental grounding for the interpretation of neurophysiological data, as well as how the resulting neural signals support perceptual decisions and behavior. However, the insights supported by classical characterizations of integration performed in humans and rhesus monkeys are potentially limited with respect to both generality and detail: Standard tasks require extensive amounts of training, involve abstract stimulus–response mappings, and depend on combining data across many trials and/or sessions. It is thus of concern that the integration observed in classical tasks involves the recruitment of brain circuits that might not normally subsume natural behaviors, and that quantitative analyses have limited power for characterizing single-trial or single-session processes. Here we bridge these gaps by showing that three primate species (humans, macaques, and marmosets) track the focus of expansion of an optic flow field continuously and without substantial training. This flow-tracking behavior was volitional and reflected substantial temporal integration. Most strikingly, gaze patterns exhibited lawful and nuanced dependencies on random perturbations in the stimulus, such that repetitions of identical flow movies elicited remarkably similar eye movements over long and continuous time periods. These results demonstrate the generality of spatiotemporal integration in natural vision, and offer a means for studying integration outside of artificial tasks while maintaining lawful and highly reliable behavior.},
  note      = {Publisher: Proceedings of the National Academy of Sciences},
  keywords  = {PROPixx, VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Harrar, Vanessa; Trung, William Le; Malienko, Anton; Khan, Aarlenne Zein
A nonvisual eye tracker calibration method for video-based tracking Journal Article
In: Journal of Vision, vol. 18, no. 9, pp. 13, 2018, ISSN: 1534-7362.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{harrar_nonvisual_2018,
  title     = {A nonvisual eye tracker calibration method for video-based tracking},
  author    = {Vanessa Harrar and William Le Trung and Anton Malienko and Aarlenne Zein Khan},
  url       = {https://doi.org/10.1167/18.9.13},
  doi       = {10.1167/18.9.13},
  issn      = {1534-7362},
  year      = {2018},
  date      = {2018-09-01},
  urldate   = {2023-12-21},
  journal   = {Journal of Vision},
  volume    = {18},
  number    = {9},
  pages     = {13},
  abstract  = {Video-based eye trackers have enabled major advancements in our understanding of eye movements through their ease of use and their non-invasiveness. One necessity to obtain accurate eye recordings using video-based trackers is calibration. The aim of the current study was to determine the feasibility and reliability of alternative calibration methods for scenarios in which the standard visual-calibration is not possible. Fourteen participants were tested using the EyeLink 1000 Plus video-based eye tracker, and each completed the following 5-point calibration methods: 1) standard visual-target calibration, 2) described calibration where participants were provided with verbal instructions about where to direct their eyes (without vision of the screen), 3) proprioceptive calibration where participants were asked to look at their hidden finger, 4) replacement calibration, where the visual calibration was performed by 3 different people; the calibrators were temporary substitutes for the participants. Following calibration, participants performed a simple visually-guided saccade task to 16 randomly presented targets on a grid. We found that precision errors were comparable across the alternative calibration methods. In terms of accuracy, compared to the standard calibration, non-visual calibration methods (described and proprioception) led to significantly larger errors, whilst the replacement calibration method had much smaller errors. In conditions where calibration is not possible, for example when testing blind or visually impaired people who are unable to foveate the calibration targets, we suggest that using a single stand-in to perform the calibration is a simple and easy alternative calibration method, which should only cause a minimal decrease in accuracy.},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Bollimunta, Anil; Bogadhi, Amarender R.; Krauzlis, Richard J.
Comparing frontal eye field and superior colliculus contributions to covert spatial attention Journal Article
In: Nature Communications, vol. 9, no. 1, pp. 3553, 2018, ISSN: 2041-1723, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{bollimunta_comparing_2018,
  title     = {Comparing frontal eye field and superior colliculus contributions to covert spatial attention},
  author    = {Anil Bollimunta and Amarender R. Bogadhi and Richard J. Krauzlis},
  url       = {https://www.nature.com/articles/s41467-018-06042-2},
  doi       = {10.1038/s41467-018-06042-2},
  issn      = {2041-1723},
  year      = {2018},
  date      = {2018-09-01},
  urldate   = {2024-01-02},
  journal   = {Nature Communications},
  volume    = {9},
  number    = {1},
  pages     = {3553},
  abstract  = {The causal roles of the frontal eye fields (FEF) and superior colliculus (SC) in spatial selective attention have not been directly compared. Reversible inactivation is an established method for testing causality but comparing results between FEF and SC is complicated by differences in size and morphology of the two brain regions. Here we exploited the fact that inactivation of FEF and SC also changes the metrics of saccadic eye movements, providing an independent benchmark for the strength of the causal manipulation. Using monkeys trained to covertly perform a visual motion-change detection task, we found that inactivation of either FEF or SC could cause deficits in attention task performance. However, SC-induced attention deficits were found with saccade changes half the size needed to get FEF-induced attention deficits. Thus, performance in visual attention tasks is vulnerable to loss of signals from either structure, but suppression of SC activity has a more devastating effect.},
  note      = {Number: 1
Publisher: Nature Publishing Group},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Ronconi, Luca; Busch, Niko A.; Melcher, David
Alpha-band sensory entrainment alters the duration of temporal windows in visual perception Journal Article
In: Scientific Reports, vol. 8, no. 1, pp. 11810, 2018, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{ronconi_alpha-band_2018,
  title     = {Alpha-band sensory entrainment alters the duration of temporal windows in visual perception},
  author    = {Luca Ronconi and Niko A. Busch and David Melcher},
  url       = {https://www.nature.com/articles/s41598-018-29671-5},
  doi       = {10.1038/s41598-018-29671-5},
  issn      = {2045-2322},
  year      = {2018},
  date      = {2018-08-01},
  urldate   = {2023-12-21},
  journal   = {Scientific Reports},
  volume    = {8},
  number    = {1},
  pages     = {11810},
  abstract  = {The phase and frequency of neural oscillations in the alpha band (8–12 Hz) have been recently proposed as key parameters for the temporal resolution of visual perception. Here, we tested the possible causal links between these oscillatory features and temporal integration/segregation. The individual alpha frequency (IAF) peak as obtained from resting-state electroencephalography was used to set the frequency of sensory (audio-visual) entrainment for the lower (IAF − 2 Hz) and upper (IAF + 2 Hz) alpha. Entrainment at IAF ± 2 Hz was administered in the prestimulus interval to align oscillations to a faster or slower rhythm. We densely sampled in time the accuracy for integration/segregation by using identical stimuli with different instructions. The spectral peaks of performance fluctuations over time were found in the upper or lower alpha band for the IAF + 2 and IAF − 2 Hz entrainment, respectively, implying that faster entrainment resulted in faster behavioral fluctuations. Moreover, the entrainment frequency had opposite effects on temporal resolution: faster entrainment improved segregation while slower entrainment improved integration. Performance fluctuations were almost in anti-phase between the two tasks, such that highest integration performance coincided with lowest segregation performance. These findings provide evidence for a direct link between changes in the alpha band and the temporal resolution of perception.},
  note      = {Number: 1
Publisher: Nature Publishing Group},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}
Rideaux, Reuben; Baker, Emma; Edwards, Mark
Parallel consolidation into visual working memory results in reduced precision representations Journal Article
In: Vision Research, vol. 149, pp. 24–29, 2018, ISSN: 0042-6989.
Abstract | Links | BibTeX | Tags: VIEWPixx
@article{rideaux_parallel_2018,
  title     = {Parallel consolidation into visual working memory results in reduced precision representations},
  author    = {Reuben Rideaux and Emma Baker and Mark Edwards},
  url       = {https://www.sciencedirect.com/science/article/pii/S0042698918301007},
  doi       = {10.1016/j.visres.2018.06.005},
  issn      = {0042-6989},
  year      = {2018},
  date      = {2018-08-01},
  urldate   = {2024-01-17},
  journal   = {Vision Research},
  volume    = {149},
  pages     = {24--29},
  abstract  = {Information can be consolidated into visual working memory in parallel, i.e. two items can be consolidated in the same time required to consolidate one. However, while motion direction items consolidated in parallel are encoded at a reduced precision, no such reduction has been reported for colour. Here we examine two possible explanations for the inconsistency between the phenomena associated with consolidating these features in parallel: i) that reduced precision can only be detected when more than two colour items are consolidated in parallel, or ii) that the exposure duration used in previous studies was too long, allowing observers serially consolidate items. Our results show that (like motion direction) colour items consolidated in parallel are encoded at a reduced precision and the critical feature for detecting this phenomenon is the exposure duration. Furthermore, we demonstrate that this process is limited to two items. These findings indicate a general principle of consolidation into visual working memory, that is, a trade-off between the number of items consolidated in parallel and the precision at which they are encoded.},
  keywords  = {VIEWPixx},
  pubstate  = {published},
  tppubtype = {article}
}