2025
Li, Yitong; Ghahremani, Morteza; Wally, Youssef; Wachinger, Christian
DiaMond: Dementia Diagnosis with Multi-Modal Vision Transformers Using MRI and PET Conference
IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2025.
@conference{Li2024diamond,
title = {{DiaMond}: Dementia Diagnosis with Multi-Modal Vision Transformers Using {MRI} and {PET}},
author = {Yitong Li and Morteza Ghahremani and Youssef Wally and Christian Wachinger},
url = {https://arxiv.org/abs/2410.23219},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2024
Bongratz, Fabian; Karmann, Markus; Holz, Adrian; Bonhoeffer, Moritz; Neumaier, Viktor; Deli, Sarah; Schmitz-Koep, Benita; Zimmer, Claus; Sorg, Christian; Thalhammer, Melissa; Hedderich, Dennis M; Wachinger, Christian
MLV^2-Net: Rater-Based Majority-Label Voting for Consistent Meningeal Lymphatic Vessel Segmentation Proceedings Article
In: ML4H 2024, 2024.
@inproceedings{Bongratz2024mlv2net,
title = {{MLV$^2$-Net}: Rater-Based Majority-Label Voting for Consistent Meningeal Lymphatic Vessel Segmentation},
author = {Fabian Bongratz and Markus Karmann and Adrian Holz and Moritz Bonhoeffer and Viktor Neumaier and Sarah Deli and Benita Schmitz-Koep and Claus Zimmer and Christian Sorg and Melissa Thalhammer and Dennis M Hedderich and Christian Wachinger},
url = {https://arxiv.org/abs/2411.08537},
year = {2024},
date = {2024-12-15},
urldate = {2024-12-15},
booktitle = {ML4H 2024},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Jiajun; Ghahremani, Morteza; Li, Yitong; Ommer, Björn; Wachinger, Christian
Stable-Pose: Leveraging Transformers for Pose-Guided Text-to-Image Generation Conference
Conference on Neural Information Processing Systems (NeurIPS), 2024.
@conference{Wang2024,
title = {{Stable-Pose}: Leveraging Transformers for Pose-Guided Text-to-Image Generation},
author = {Jiajun Wang and Morteza Ghahremani and Yitong Li and Björn Ommer and Christian Wachinger},
url = {https://arxiv.org/abs/2406.02485},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-01},
booktitle = {Conference on Neural Information Processing Systems (NeurIPS)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Li, Yitong; Yakushev, Igor; Hedderich, Dennis M.; Wachinger, Christian
PASTA: Pathology-Aware MRI to PET Cross-Modal Translation with Diffusion Models Conference
International Conference On Medical Image Computing & Computer Assisted Intervention (MICCAI), 2024.
@conference{Li2024pasta,
title = {{PASTA}: Pathology-Aware {MRI} to {PET} Cross-Modal Translation with Diffusion Models},
author = {Yitong Li and Igor Yakushev and Dennis M. Hedderich and Christian Wachinger},
url = {https://arxiv.org/abs/2405.16942},
doi = {10.1007/978-3-031-72104-5_51},
year = {2024},
date = {2024-10-01},
urldate = {2024-10-01},
booktitle = {International Conference On Medical Image Computing \& Computer Assisted Intervention (MICCAI)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Li, Yitong; Wolf, Tom Nuno; Pölsterl, Sebastian; Yakushev, Igor; Hedderich, Dennis M.; Wachinger, Christian
From Barlow Twins to Triplet Training: Differentiating Dementia with Limited Data Conference
Medical Imaging with Deep Learning (MIDL), 2024.
@conference{Li2024,
title = {From {Barlow Twins} to Triplet Training: Differentiating Dementia with Limited Data},
author = {Yitong Li and Tom Nuno Wolf and Sebastian Pölsterl and Igor Yakushev and Dennis M. Hedderich and Christian Wachinger},
url = {https://arxiv.org/abs/2404.06253},
year = {2024},
date = {2024-07-03},
urldate = {2024-07-03},
booktitle = {Medical Imaging with Deep Learning (MIDL)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ghahremani, Morteza; Khateri, Mohammad; Jian, Bailiang; Wiestler, Benedikt; Adeli, Ehsan; Wachinger, Christian
H-ViT: A Hierarchical Vision Transformer for Deformable Image Registration Conference
2024.
@conference{ghahremani2024hvit,
title = {{H-ViT}: A Hierarchical Vision Transformer for Deformable Image Registration},
author = {Morteza Ghahremani and Mohammad Khateri and Bailiang Jian and Benedikt Wiestler and Ehsan Adeli and Christian Wachinger},
url = {https://openaccess.thecvf.com/content/CVPR2024/papers/Ghahremani_H-ViT_A_Hierarchical_Vision_Transformer_for_Deformable_Image_Registration_CVPR_2024_paper.pdf},
year = {2024},
date = {2024-07-01},
urldate = {2024-06-01},
booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Jian, Bailiang; Pan, Jiazhen; Ghahremani, Morteza; Rueckert, Daniel; Wachinger, Christian; Wiestler, Benedikt
Mamba? Catch The Hype Or Rethink What Really Helps for Image Registration Conference
2024.
@conference{Jian2024mamba,
title = {Mamba? Catch The Hype Or Rethink What Really Helps for Image Registration},
author = {Bailiang Jian and Jiazhen Pan and Morteza Ghahremani and Daniel Rueckert and Christian Wachinger and Benedikt Wiestler},
url = {https://link.springer.com/chapter/10.1007/978-3-031-73480-9_7},
doi = {10.1007/978-3-031-73480-9_7},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-01},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Wolf, Tom Nuno; Bongratz, Fabian; Rickmann, Anne-Marie; Pölsterl, Sebastian; Wachinger, Christian
Keep the Faith: Faithful Explanations in Convolutional Neural Networks for Case-Based Reasoning Proceedings Article Forthcoming
In: AAAI Conference on Artificial Intelligence, 2024, Forthcoming.
@inproceedings{Wolf2024faithful,
title = {Keep the Faith: Faithful Explanations in Convolutional Neural Networks for Case-Based Reasoning},
author = {Wolf, Tom Nuno and Bongratz, Fabian and Rickmann, Anne-Marie and Pölsterl, Sebastian and Wachinger, Christian},
url = {https://arxiv.org/abs/2312.09783},
year = {2024},
date = {2024-02-26},
urldate = {2024-02-26},
booktitle = {AAAI Conference on Artificial Intelligence},
abstract = {Explaining predictions of black-box neural networks is crucial when applied to decision-critical tasks. Thus, attribution maps are commonly used to identify important image regions, despite prior work showing that humans prefer explanations based on similar examples. To this end, ProtoPNet learns a set of class-representative feature vectors (prototypes) for case-based reasoning. During inference, similarities of latent features to prototypes are linearly classified to form predictions and attribution maps are provided to explain the similarity. In this work, we evaluate whether architectures for case-based reasoning fulfill established axioms required for faithful explanations using the example of ProtoPNet. We show that such architectures allow the extraction of faithful explanations. However, we prove that the attribution maps used to explain the similarities violate the axioms. We propose a new procedure to extract explanations for trained ProtoPNets, named ProtoPFaith. Conceptually, these explanations are Shapley values, calculated on the similarity scores of each prototype. They allow to faithfully answer which prototypes are present in an unseen image and quantify each pixel's contribution to that presence, thereby complying with all axioms. The theoretical violations of ProtoPNet manifest in our experiments on three datasets (CUB-200-2011, Stanford Dogs, RSNA) and five architectures (ConvNet, ResNet, ResNet50, WideResNet50, ResNeXt50). Our experiments show a qualitative difference between the explanations given by ProtoPNet and ProtoPFaith. Additionally, we quantify the explanations with the Area Over the Perturbation Curve, on which ProtoPFaith outperforms ProtoPNet on all experiments by a factor >10**3.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Bongratz, Fabian; Rickmann, Anne-Marie; Wachinger, Christian
Neural deformation fields for template-based reconstruction of cortical surfaces from MRI Journal Article
In: Medical Image Analysis, vol. 93, pp. 103093, 2024, ISSN: 1361-8415.
@article{BongratzV2CFlow2024,
title = {Neural deformation fields for template-based reconstruction of cortical surfaces from {MRI}},
author = {Fabian Bongratz and Anne-Marie Rickmann and Christian Wachinger},
url = {https://www.sciencedirect.com/science/article/pii/S1361841524000185},
doi = {10.1016/j.media.2024.103093},
issn = {1361-8415},
year = {2024},
date = {2024-01-26},
urldate = {2024-01-26},
journal = {Medical Image Analysis},
volume = {93},
pages = {103093},
abstract = {The reconstruction of cortical surfaces is a prerequisite for quantitative analyses of the cerebral cortex in magnetic resonance imaging (MRI). Existing segmentation-based methods separate the surface registration from the surface extraction, which is computationally inefficient and prone to distortions. We introduce Vox2Cortex-Flow (V2C-Flow), a deep mesh-deformation technique that learns a deformation field from a brain template to the cortical surfaces of an MRI scan. To this end, we present a geometric neural network that models the deformation-describing ordinary differential equation in a continuous manner. The network architecture comprises convolutional and graph-convolutional layers, which allows it to work with images and meshes at the same time. V2C-Flow is not only very fast, requiring less than two seconds to infer all four cortical surfaces, but also establishes vertex-wise correspondences to the template during reconstruction. In addition, V2C-Flow is the first approach for cortex reconstruction that models white matter and pial surfaces jointly, therefore avoiding intersections between them. Our comprehensive experiments on internal and external test data demonstrate that V2C-Flow results in cortical surfaces that are state-of-the-art in terms of accuracy. Moreover, we show that the established correspondences are more consistent than in FreeSurfer and that they can directly be utilized for cortex parcellation and group analyses of cortical thickness.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2023
Ghahremani, Morteza; Wachinger, Christian
RegBN: Batch Normalization of Multimodal Data with Regularization Conference
2023.
@conference{ghahremani2023regbn,
title = {{RegBN}: Batch Normalization of Multimodal Data with Regularization},
author = {Morteza Ghahremani and Christian Wachinger},
url = {https://arxiv.org/pdf/2310.00641.pdf},
year = {2023},
date = {2023-12-20},
urldate = {2023-12-20},
booktitle = {Advances in Neural Information Processing Systems (NeurIPS 2023)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Gaus, Richard; Pölsterl, Sebastian; Greimel, Ellen; Schulte-Körne, Gerd; Wachinger, Christian
Can we diagnose mental disorders in children? A large-scale assessment of machine learning on structural neuroimaging of 6916 children in the adolescent brain cognitive development study Journal Article
In: JCPP Adv, vol. 3, no. 4, pp. e12184, 2023, ISSN: 2692-9384.
@article{pmid38054056,
title = {Can we diagnose mental disorders in children? A large-scale assessment of machine learning on structural neuroimaging of 6916 children in the adolescent brain cognitive development study},
author = {Richard Gaus and Sebastian Pölsterl and Ellen Greimel and Gerd Schulte-Körne and Christian Wachinger},
doi = {10.1002/jcv2.12184},
issn = {2692-9384},
year = {2023},
date = {2023-12-01},
urldate = {2023-12-01},
journal = {JCPP Adv},
volume = {3},
number = {4},
pages = {e12184},
abstract = {BACKGROUND: Prediction of mental disorders based on neuroimaging is an emerging area of research with promising first results in adults. However, research on the unique demographic of children is underrepresented and it is doubtful whether findings obtained on adults can be transferred to children.
METHODS: Using data from 6916 children aged 9-10 in the multicenter Adolescent Brain Cognitive Development study, we extracted 136 regional volume and thickness measures from structural magnetic resonance images to rigorously evaluate the capabilities of machine learning to predict 10 different psychiatric disorders: major depressive disorder, bipolar disorder (BD), psychotic symptoms, attention deficit hyperactivity disorder (ADHD), oppositional defiant disorder, conduct disorder, post-traumatic stress disorder, obsessive-compulsive disorder, generalized anxiety disorder, and social anxiety disorder. For each disorder, we performed cross-validation and assessed whether models discovered a true pattern in the data via permutation testing.
RESULTS: Two of 10 disorders can be detected with statistical significance when using advanced models that (i) allow for non-linear relationships between neuroanatomy and disorder, (ii) model interdependencies between disorders, and (iii) avoid confounding due to sociodemographic factors: ADHD (AUROC = 0.567, p = 0.002) and BD (AUROC = 0.551, p = 0.002). In contrast, traditional models perform consistently worse and predict only ADHD with statistical significance (AUROC = 0.529, p = 0.002).
CONCLUSION: While the modest absolute classification performance does not warrant application in the clinic, our results provide empirical evidence that embracing and explicitly accounting for the complexities of mental disorders via advanced machine learning models can discover patterns that would remain hidden with traditional models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
METHODS: Using data from 6916 children aged 9-10 in the multicenter Adolescent Brain Cognitive Development study, we extracted 136 regional volume and thickness measures from structural magnetic resonance images to rigorously evaluate the capabilities of machine learning to predict 10 different psychiatric disorders: major depressive disorder, bipolar disorder (BD), psychotic symptoms, attention deficit hyperactivity disorder (ADHD), oppositional defiant disorder, conduct disorder, post-traumatic stress disorder, obsessive-compulsive disorder, generalized anxiety disorder, and social anxiety disorder. For each disorder, we performed cross-validation and assessed whether models discovered a true pattern in the data via permutation testing.
RESULTS: Two of 10 disorders can be detected with statistical significance when using advanced models that (i) allow for non-linear relationships between neuroanatomy and disorder, (ii) model interdependencies between disorders, and (iii) avoid confounding due to sociodemographic factors: ADHD (AUROC = 0.567, = 0.002) and BD (AUROC = 0.551, = 0.002). In contrast, traditional models perform consistently worse and predict only ADHD with statistical significance (AUROC = 0.529, = 0.002).
CONCLUSION: While the modest absolute classification performance does not warrant application in the clinic, our results provide empirical evidence that embracing and explicitly accounting for the complexities of mental disorders via advanced machine learning models can discover patterns that would remain hidden with traditional models.
Wachinger, Christian; Wolf, Tom Nuno; Pölsterl, Sebastian
Deep learning for the prediction of type 2 diabetes mellitus from neck-to-knee Dixon MRI in the UK biobank Journal Article
In: Heliyon, vol. 9, no. 11, 2023.
@article{Wachinger2023heliyon,
title = {Deep learning for the prediction of type 2 diabetes mellitus from neck-to-knee {Dixon} {MRI} in the {UK} biobank},
author = {Wachinger, Christian and Wolf, Tom Nuno and Pölsterl, Sebastian},
doi = {10.1016/j.heliyon.2023.e22239},
year = {2023},
date = {2023-11-10},
urldate = {2023-11-10},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e22239},
internal-note = {apparent duplicate of entry pmid38034698 (same title, journal, volume, number, doi) -- consider removing one},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wachinger, Christian; Wolf, Tom Nuno; Pölsterl, Sebastian
Deep learning for the prediction of type 2 diabetes mellitus from neck-to-knee Dixon MRI in the UK biobank Journal Article
In: Heliyon, vol. 9, no. 11, pp. e22239, 2023, ISSN: 2405-8440.
@article{pmid38034698,
title = {Deep learning for the prediction of type 2 diabetes mellitus from neck-to-knee {Dixon} {MRI} in the {UK} biobank},
author = {Christian Wachinger and Tom Nuno Wolf and Sebastian Pölsterl},
doi = {10.1016/j.heliyon.2023.e22239},
issn = {2405-8440},
year = {2023},
date = {2023-11-01},
urldate = {2023-11-01},
journal = {Heliyon},
volume = {9},
number = {11},
pages = {e22239},
abstract = {RATIONALE AND OBJECTIVES: We evaluate the automatic identification of type 2 diabetes from neck-to-knee, two-point Dixon MRI scans with 3D convolutional neural networks on a large, population-based dataset. To this end, we assess the best combination of MRI contrasts and stations for diabetes prediction, and the benefit of integrating risk factors.
MATERIALS AND METHODS: Subjects with type 2 diabetes mellitus have been identified in the prospective UK Biobank Imaging study, and a matched control sample has been created to avoid confounding bias. Five-fold cross-validation is used for the evaluation. All scans from the two-point Dixon neck-to-knee sequence have been standardized. A neural network that considers multi-channel MRI input was developed and integrates clinical information in tabular format. An ensemble strategy is used to combine multi-station MRI predictions. A subset with quantitative fat measurements is identified for comparison to prior approaches.
RESULTS: MRI scans from 3406 subjects (mean age, 66.2 years ± 7.1 [standard deviation]; 1128 women) were analyzed with 1703 diabetics. A balanced accuracy of 78.7 %, AUC ROC of 0.872, and an average precision of 0.878 was obtained for the classification of diabetes. The ensemble over multiple Dixon MRI stations yields better performance than selecting the individually best station. Moreover, combining fat and water scans as multi-channel inputs to the networks improves upon just using single contrasts as input. Integrating clinical information about known risk factors of diabetes in the network boosts the performance across all stations and the ensemble. The neural network achieved superior results compared to the prediction based on quantitative MRI measurements.
CONCLUSIONS: The developed deep learning model accurately predicted type 2 diabetes from neck-to-knee two-point Dixon MRI scans.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
MATERIALS AND METHODS: Subjects with type 2 diabetes mellitus have been identified in the prospective UK Biobank Imaging study, and a matched control sample has been created to avoid confounding bias. Five-fold cross-validation is used for the evaluation. All scans from the two-point Dixon neck-to-knee sequence have been standardized. A neural network that considers multi-channel MRI input was developed and integrates clinical information in tabular format. An ensemble strategy is used to combine multi-station MRI predictions. A subset with quantitative fat measurements is identified for comparison to prior approaches.
RESULTS: MRI scans from 3406 subjects (mean age, 66.2 years ± 7.1 [standard deviation]; 1128 women) were analyzed with 1703 diabetics. A balanced accuracy of 78.7 %, AUC ROC of 0.872, and an average precision of 0.878 was obtained for the classification of diabetes. The ensemble over multiple Dixon MRI stations yields better performance than selecting the individually best station. Moreover, combining fat and water scans as multi-channel inputs to the networks improves upon just using single contrasts as input. Integrating clinical information about known risk factors of diabetes in the network boosts the performance across all stations and the ensemble. The neural network achieved superior results compared to the prediction based on quantitative MRI measurements.
CONCLUSIONS: The developed deep learning model accurately predicted type 2 diabetes from neck-to-knee two-point Dixon MRI scans.
Feldmann, Lisa; Zsigo, Carolin; Mörtl, Isabelle; Bartling, Jürgen; Wachinger, Christian; Oort, Frans; Schulte-Körne, Gerd; Greimel, Ellen
Emotion regulation in adolescents with major depression - Evidence from a combined EEG and eye-tracking study Journal Article
In: J Affect Disord, vol. 340, pp. 899–906, 2023, ISSN: 1573-2517.
@article{pmid37591354,
title = {Emotion regulation in adolescents with major depression -- Evidence from a combined {EEG} and eye-tracking study},
author = {Lisa Feldmann and Carolin Zsigo and Isabelle Mörtl and Jürgen Bartling and Christian Wachinger and Frans Oort and Gerd Schulte-Körne and Ellen Greimel},
doi = {10.1016/j.jad.2023.08.087},
issn = {1573-2517},
year = {2023},
date = {2023-11-01},
urldate = {2023-11-01},
journal = {J Affect Disord},
volume = {340},
pages = {899--906},
abstract = {BACKGROUND: Adolescent major depression (MD) is characterized by deficits in emotion regulation (ER). Little is known about the neurophysiological correlates that are associated with these deficits. Moreover, the additional examination of visual attention during ER would allow a more in-depth understanding of ER deficits but has not yet been applied simultaneously.
METHODS: N = 33 adolescents with MD and n = 35 healthy controls (HCs) aged 12-18 years performed an ER task during which they either a) down-regulated their negative affective response to negative images via cognitive reappraisal or b) attended the images without changing their affective response. During the task, the Late Positive Potential (LPP), gaze fixations on emotional image aspects, and self-reported affective responses were collected simultaneously.
RESULTS: Compared to HCs, adolescents with MD demonstrated reduced ER success based on self-report but did not differ in LPP amplitudes. Participants in both groups showed increased amplitudes in the middle LPP window when they reappraised negative pictures compared to when they attended them. Only in the HC group, increased LPP amplitudes during reappraisal were paralleled by more positive affective responses.
LIMITATION: The applied stimuli were part of picture databases and might therefore have limited self-relevance.
CONCLUSIONS: Increased LPP amplitude during ER in both groups might be specific to adolescence and might suggest that ER at this age is challenging and requires a high amount of cognitive resources. These findings provide an important starting point for future interventional studies in youth MD.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
METHODS: N = 33 adolescents with MD and n = 35 healthy controls (HCs) aged 12-18 years performed an ER task during which they either a) down-regulated their negative affective response to negative images via cognitive reappraisal or b) attended the images without changing their affective response. During the task, the Late Positive Potential (LPP), gaze fixations on emotional image aspects, and self-reported affective responses were collected simultaneously.
RESULTS: Compared to HCs, adolescents with MD demonstrated reduced ER success based on self-report but did not differ in LPP amplitudes. Participants in both groups showed increased amplitudes in the middle LPP window when they reappraised negative pictures compared to when they attended them. Only in the HC group, increased LPP amplitudes during reappraisal were paralleled by more positive affective responses.
LIMITATION: The applied stimuli were part of picture databases and might therefore have limited self-relevance.
CONCLUSIONS: Increased LPP amplitude during ER in both groups might be specific to adolescence and might suggest that ER at this age is challenging and requires a high amount of cognitive resources. These findings provide an important starting point for future interventional studies in youth MD.
Bongratz, Fabian; Rickmann, Anne-Marie; Wachinger, Christian
Abdominal organ segmentation via deep diffeomorphic mesh deformations Journal Article
In: Scientific Reports, vol. 13, no. 1, 2023.
@article{BongratzAbdominal2023,
title = {Abdominal organ segmentation via deep diffeomorphic mesh deformations},
author = {Fabian Bongratz and Anne-Marie Rickmann and Christian Wachinger},
url = {https://arxiv.org/abs/2306.15515},
doi = {10.1038/s41598-023-45435-2},
year = {2023},
date = {2023-10-25},
urldate = {2023-10-25},
journal = {Scientific Reports},
volume = {13},
number = {1},
abstract = {Abdominal organ segmentation from CT and MRI is an essential prerequisite for surgical planning and computer-aided navigation systems. It is challenging due to the high variability in the shape, size, and position of abdominal organs. Three-dimensional numeric representations of abdominal shapes with point-wise correspondence to a template are further important for quantitative and statistical analyses thereof. Recently, template-based surface extraction methods have shown promising advances for direct mesh reconstruction from volumetric scans. However, the generalization of these deep learning-based approaches to different organs and datasets, a crucial property for deployment in clinical environments, has not yet been assessed. We close this gap and employ template-based mesh reconstruction methods for joint liver, kidney, pancreas, and spleen segmentation. Our experiments on manually annotated CT and MRI data reveal limited generalization capabilities of previous methods to organs of different geometry and weak performance on small datasets. We alleviate these issues with a novel deep diffeomorphic mesh-deformation architecture and an improved training scheme. The resulting method, UNetFlow, generalizes well to all four organs and can be easily fine-tuned on new data. Moreover, we propose a simple registration-based post-processing that aligns voxel and mesh outputs to boost segmentation accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rickmann, Anne-Marie; Bongratz, Fabian; Wachinger, Christian
Vertex Correspondence in Cortical Surface Reconstruction Conference
Medical Image Computing and Computer Assisted Intervention -- MICCAI 2023, vol. 14227, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-43993-3.
@conference{rickmann_v2cc_2023,
title = {Vertex Correspondence in Cortical Surface Reconstruction},
author = {Anne-Marie Rickmann and Fabian Bongratz and Christian Wachinger},
url = {https://link.springer.com/chapter/10.1007/978-3-031-43993-3_31},
doi = {10.1007/978-3-031-43993-3_31},
isbn = {978-3-031-43993-3},
year = {2023},
date = {2023-10-01},
booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2023},
volume = {14227},
pages = {318--327},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Mesh-based cortical surface reconstruction is a fundamental task in neuroimaging that enables highly accurate measurements of brain morphology. Vertex correspondence between a patient's cortical mesh and a group template is necessary for comparing cortical thickness and other measures at the vertex level. However, post-processing methods for generating vertex correspondence are time-consuming and involve registering and remeshing a patient's surfaces to an atlas. Recent deep learning methods for cortex reconstruction have neither been optimized for generating vertex correspondence nor have they analyzed the quality of such correspondence. In this work, we propose to learn vertex correspondence by optimizing an L1 loss on registered surfaces instead of the commonly used Chamfer loss. This results in improved inter- and intra-subject correspondence suitable for direct group comparison and atlas-based parcellation. We demonstrate that state-of-the-art methods provide insufficient correspondence for mapping parcellations, highlighting the importance of optimizing for accurate vertex correspondence.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Wolf, Tom Nuno; Pölsterl, Sebastian; Wachinger, Christian
Don't PANIC: Prototypical Additive Neural Network for Interpretable Classification of Alzheimer's Disease Proceedings Article
In: Information Processing in Medical Imaging (IPMI), 2023.
@inproceedings{Wolf2023panic,
title = {Don't {PANIC}: Prototypical Additive Neural Network for Interpretable Classification of {Alzheimer's} Disease},
author = {Tom Nuno Wolf and Sebastian Pölsterl and Christian Wachinger},
url = {https://arxiv.org/abs/2303.07125},
doi = {10.1007/978-3-031-34048-2_7},
year = {2023},
date = {2023-06-08},
urldate = {2023-06-08},
booktitle = {Information Processing in Medical Imaging (IPMI)},
abstract = {Alzheimer's disease (AD) has a complex and multifactorial etiology, which requires integrating information about neuroanatomy, genetics, and cerebrospinal fluid biomarkers for accurate diagnosis. Hence, recent deep learning approaches combined image and tabular information to improve diagnostic performance. However, the black-box nature of such neural networks is still a barrier for clinical applications, in which understanding the decision of a heterogeneous model is integral. We propose PANIC, a prototypical additive neural network for interpretable AD classification that integrates 3D image and tabular data. It is interpretable by design and, thus, avoids the need for post-hoc explanations that try to approximate the decision of a network. Our results demonstrate that PANIC achieves state-of-the-art performance in AD classification, while directly providing local and global explanations. Finally, we show that PANIC extracts biologically meaningful signatures of AD, and satisfies a set of desirable desiderata for trustworthy machine learning. Our implementation is available at https://github.com/ai-med/PANIC.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickmann, Anne-Marie; Xu, Murong; Wolf, Tom Nuno; Kovalenko, Oksana; Wachinger, Christian
HALOS: Hallucination-free Organ Segmentation after Organ Resection Surgery Proceedings Article
In: Information Processing in Medical Imaging (IPMI), 2023.
@inproceedings{Rickmann2023halos,
title = {{HALOS}: Hallucination-free Organ Segmentation after Organ Resection Surgery},
author = {Rickmann, Anne-Marie and Xu, Murong and Wolf, Tom Nuno and Kovalenko, Oksana and Wachinger, Christian},
url = {https://arxiv.org/pdf/2303.07717.pdf},
doi = {10.1007/978-3-031-34048-2_51},
year = {2023},
date = {2023-06-08},
urldate = {2023-06-19},
booktitle = {Information Processing in Medical Imaging (IPMI)},
abstract = {The wide range of research in deep learning-based medical image segmentation pushed the boundaries in a multitude of applications. A clinically relevant problem that received less attention is the handling of scans with irregular anatomy, e.g., after organ resection. State-of-the-art segmentation models often lead to organ hallucinations, i.e., false-positive predictions of organs, which cannot be alleviated by oversampling or post-processing. Motivated by the increasing need to develop robust deep learning models, we propose HALOS for abdominal organ segmentation in MR images that handles cases after organ resection surgery. To this end, we combine missing organ classification and multi-organ segmentation tasks into a multi-task model, yielding a classification-assisted segmentation pipeline. The segmentation network learns to incorporate knowledge about organ existence via feature fusion modules. Extensive experiments on a small labeled test set and large-scale UK Biobank data demonstrate the effectiveness of our approach in terms of higher segmentation Dice scores and near-to-zero false positive prediction rate.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
classification-assisted segmentation pipeline. The segmentation network learns to incorporate knowledge about organ existence via feature fusion
modules. Extensive experiments on a small labeled test set and large-scale UK Biobank data demonstrate the effectiveness of our approach in
terms of higher segmentation Dice scores and near-to-zero false positive prediction rate.
Zsigo, Carolin; Feldmann, Lisa; Oort, Frans; Piechaczek, Charlotte; Bartling, Jürgen; Schulte-Rüther, Martin; Wachinger, Christian; Schulte-Körne, Gerd; Greimel, Ellen
Emotion regulation training for adolescents with major depression: Results from a randomized controlled trial Journal Article
In: Emotion, 2023, ISSN: 1931-1516.
@article{pmid38060020,
title = {Emotion regulation training for adolescents with major depression: Results from a randomized controlled trial},
author = {Carolin Zsigo and Lisa Feldmann and Frans Oort and Charlotte Piechaczek and Jürgen Bartling and Martin Schulte-Rüther and Christian Wachinger and Gerd Schulte-Körne and Ellen Greimel},
doi = {10.1037/emo0001328},
issn = {1931-1516},
year = {2023},
date = {2023-06-02},
urldate = {2023-12-01},
journal = {Emotion},
abstract = {Difficulties in emotion regulation (ER) are thought to contribute to the development and maintenance of major depression (MD) in adolescents. In healthy adults, a task-based training of ER has previously proven effective to reduce stress, but no such studies are available for MD. It is also unclear whether findings can be generalized onto adolescent populations. The final sample consisted of N = 70 adolescents with MD, who were randomized to a task-based ER training (n = 36) or a control training (n = 34). Across four sessions, the ER group was trained to downregulate negative affect to negative images via reappraisal, while the control group was instructed to attend the images. Rumination, stress-, and affect-related measures were assessed as primary outcomes, behavioral and neurophysiological responses (late positive potential, LPP), as secondary outcomes. The trial was preregistered at clinicaltrials.gov (NCT03957850). While there was no significant differential effect of the ER training on primary outcomes, we found small to moderate effects on rumination in the ER group, but not the control group. During reappraisal (compared to attend), the ER group showed an unexpected increase of the LPP during the first, but not during later training sessions. Although replication in large, multicenter trials is needed, our findings on effect sizes suggest that ER training might be promising to decrease rumination in adolescent MD. The LPP increase at the first session may represent cognitive effort, which was successfully reduced over the sessions. Future studies should research whether training effects transfer to daily life and are durable over a longer time period. (PsycInfo Database Record (c) 2023 APA, all rights reserved).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bilic, Patrick; Christ, Patrick; Li, Hongwei Bran; Sarasua, Ignacio; Wachinger, Christian; others,
The Liver Tumor Segmentation Benchmark (LiTS) Journal Article
In: Med Image Anal, vol. 84, pp. 102680, 2023, ISSN: 1361-8423.
@article{pmid36481607,
title = {The Liver Tumor Segmentation Benchmark (LiTS)},
author = {Patrick Bilic and Patrick Christ and Hongwei Bran Li and Ignacio Sarasua and Christian Wachinger and others},
url = {https://www.sciencedirect.com/science/article/pii/S1361841522003085},
doi = {10.1016/j.media.2022.102680},
issn = {1361-8423},
year = {2023},
date = {2023-02-01},
urldate = {2023-02-01},
journal = {Med Image Anal},
volume = {84},
pages = {102680},
abstract = {In this work, we report the set-up and results of the Liver Tumor Segmentation Benchmark (LiTS), which was organized in conjunction with the IEEE International Symposium on Biomedical Imaging (ISBI) 2017 and the International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2017 and 2018. The image dataset is diverse and contains primary and secondary tumors with varied sizes and appearances with various lesion-to-background levels (hyper-/hypo-dense), created in collaboration with seven hospitals and research institutions. Seventy-five submitted liver and liver tumor segmentation algorithms were trained on a set of 131 computed tomography (CT) volumes and were tested on 70 unseen test images acquired from different patients. We found that not a single algorithm performed best for both liver and liver tumors in the three events. The best liver segmentation algorithm achieved a Dice score of 0.963, whereas, for tumor segmentation, the best algorithms achieved Dices scores of 0.674 (ISBI 2017), 0.702 (MICCAI 2017), and 0.739 (MICCAI 2018). Retrospectively, we performed additional analysis on liver tumor detection and revealed that not all top-performing segmentation algorithms worked well for tumor detection. The best liver tumor detection method achieved a lesion-wise recall of 0.458 (ISBI 2017), 0.515 (MICCAI 2017), and 0.554 (MICCAI 2018), indicating the need for further research. LiTS remains an active benchmark and resource for research, e.g., contributing the liver-related segmentation tasks in http://medicaldecathlon.com/. In addition, both data and online evaluation are accessible via https://competitions.codalab.org/competitions/17094.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Schmitz-Koep, Benita; Menegaux, Aurore; Zimmermann, Juliana; Thalhammer, Melissa; Neubauer, Antonia; Wendt, Jil; Schinz, David; Wachinger, Christian; Daamen, Marcel; Boecker, Henning; Zimmer, Claus; Priller, Josef; Wolke, Dieter; Bartmann, Peter; Sorg, Christian; Hedderich, Dennis M
Aberrant allometric scaling of cortical folding in preterm-born adults Journal Article
In: Brain Commun, vol. 5, no. 1, pp. fcac341, 2023, ISSN: 2632-1297.
@article{pmid36632185,
title = {Aberrant allometric scaling of cortical folding in preterm-born adults},
author = {Benita Schmitz-Koep and Aurore Menegaux and Juliana Zimmermann and Melissa Thalhammer and Antonia Neubauer and Jil Wendt and David Schinz and Christian Wachinger and Marcel Daamen and Henning Boecker and Claus Zimmer and Josef Priller and Dieter Wolke and Peter Bartmann and Christian Sorg and Dennis M Hedderich},
doi = {10.1093/braincomms/fcac341},
issn = {2632-1297},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Brain Commun},
volume = {5},
number = {1},
pages = {fcac341},
abstract = {A universal allometric scaling law has been proposed to describe cortical folding of the mammalian brain as a function of the product of cortical surface area and the square root of cortical thickness across different mammalian species, including humans. Since these cortical properties are vulnerable to developmental disturbances caused by preterm birth in humans and since these alterations are related to cognitive impairments, we tested (i) whether cortical folding in preterm-born adults follows this cortical scaling law and (ii) the functional relevance of potential scaling aberrances. We analysed the cortical scaling relationship in a large and prospectively collected cohort of 91 very premature-born adults (<32 weeks of gestation and/or birthweight <1500 g, very preterm and/or very low birth weight) and 105 full-term controls at 26 years of age based on the total surface area, exposed surface area and average cortical thickness measured with structural magnetic resonance imaging and surface-based morphometry. We found that the slope of the log-transformed cortical scaling relationship was significantly altered in adults (very preterm and/or very low birth weight: 1.24, full-term: 1.14, P = 0.018). More specifically, the slope was significantly altered in male adults (very preterm and/or very low birth weight: 1.24, full-term: 1.00, P = 0.031), while there was no significant difference in the slope of female adults (very preterm and/or very low birth weight: 1.27, full-term: 1.12, P = 0.225). Furthermore, offset was significantly lower compared with full-term controls in both male (very preterm and/or very low birth weight: -0.546, full-term: -0.538, P = 0.001) and female adults (very preterm and/or very low birth weight: -0.545, full-term: -0.538, P = 0.023), indicating a systematic shift of the regression line after preterm birth. 
Gestational age had a significant effect on the slope in very preterm and/or very low birth weight adults and more specifically in male very preterm and/or very low birth weight adults, indicating that the difference in slope is specifically related to preterm birth. The shape or tension term of the scaling law had no significant effect on cognitive performance, while the size of the cortex did. Results demonstrate altered scaling of cortical surface and cortical thickness in very premature-born adults. Data suggest altered mechanical forces acting on the cortex after preterm birth.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Roy, Abhijit Guha; Siddiqui, Shayan; Pölsterl, Sebastian; Farshad, Azade; Navab, Nassir; Wachinger, Christian
Few-shot segmentation of 3D medical images Book Section
In: Nguyen, Hien Van; Summers, Ronald; Chellappa, Rama (Ed.): Meta Learning With Medical Imaging and Health Informatics Applications, pp. 161-183, Academic Press, 2023, ISBN: 978-0-323-99851-2.
@incollection{GUHAROY2023161,
title = {Few-shot segmentation of 3D medical images},
author = {Abhijit Guha Roy and Shayan Siddiqui and Sebastian Pölsterl and Azade Farshad and Nassir Navab and Christian Wachinger},
editor = {Hien Van Nguyen and Ronald Summers and Rama Chellappa},
url = {https://www.sciencedirect.com/science/article/pii/B9780323998512000181},
doi = {10.1016/B978-0-32-399851-2.00018-1},
isbn = {978-0-323-99851-2},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Meta Learning With Medical Imaging and Health Informatics Applications},
pages = {161-183},
publisher = {Academic Press},
series = {The MICCAI Society book Series},
abstract = {Deep neural networks enable highly accurate image segmentation but require large amounts of manually annotated data for supervised training. Few-shot learning aims to overcome this weakness by learning a new class from a few annotated support samples. This chapter introduces our novel few-shot segmentation framework for volumetric medical images with only a few annotated slices. Compared to other related works in computer vision, the main challenges are the absence of pretrained networks and the volumetric nature of medical scans. We address these challenges by proposing a new architecture for few-shot segmentation that incorporates ‘squeeze & excite’ blocks. Our two-armed architecture consists of a conditioner arm, which processes the annotated support input and generates a task-specific representation. This representation is passed on to the segmenter arm that uses this information to segment the new query image. To facilitate efficient interaction between the conditioner and the segmenter arm, we propose to use ‘channel squeeze & spatial excitation’ blocks – a lightweight computational module – that enables heavy interaction between both the arms with negligible increase in model complexity. This contribution allows us to perform image segmentation without relying on a pretrained model, which generally is unavailable for medical scans. Furthermore, we propose an efficient strategy for volumetric segmentation by optimally pairing a few slices of the support volume to all the slices of the query volume. We perform experiments for organ segmentation on whole-body contrast-enhanced CT scans from the Visceral Dataset. Our proposed model outperforms multiple baselines and existing approaches in segmentation accuracy by a significant margin. The source code is available at https://github.com/abhi4ssj/few-shot-segmentation.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
2022
Pölsterl, Sebastian; Wachinger, Christian
Identification of causal effects of neuroanatomy on cognitive decline requires modeling unobserved confounders Journal Article
In: Alzheimer's & Dementia, 2022.
@article{Pölsterl2022-adj,
title = {Identification of causal effects of neuroanatomy on cognitive decline requires modeling unobserved confounders},
author = {Sebastian Pölsterl and Christian Wachinger},
url = {https://alz-journals.onlinelibrary.wiley.com/doi/full/10.1002/alz.12825},
doi = {10.1002/alz.12825},
year = {2022},
date = {2022-11-23},
urldate = {2022-11-23},
journal = {Alzheimer's \& Dementia},
abstract = {Abstract Introduction Carrying out a randomized controlled trial to estimate the causal effects of regional brain atrophy due to Alzheimer's disease (AD) is impossible. Instead, we must estimate causal effects from observational data. However, this generally requires knowing and having recorded all confounders, which is often unrealistic. Methods We provide an approach that leverages the dependencies among multiple neuroanatomical measures to estimate causal effects from observational neuroimaging data without the need to know and record all confounders. Results Our analyses of N=732 subjects from the Alzheimer's Disease Neuroimaging Initiative demonstrate that using our approach results in biologically meaningful conclusions, whereas ignoring unobserved confounding yields results that conflict with established knowledge on cognitive decline due to AD. Discussion The findings provide evidence that the impact of unobserved confounding can be substantial. To ensure trustworthy scientific insights, future AD research can account for unobserved confounding via the proposed approach.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rickmann, Anne-Marie; Bongratz, Fabian; Pölsterl, Sebastian; Sarasua, Ignacio; Wachinger, Christian
Joint Reconstruction and Parcellation of Cortical Surfaces Proceedings Article
In: International Workshop on Machine Learning in Clinical Neuroimaging, pp. 3–12, Springer, 2022.
@inproceedings{rickmann2022joint,
title = {Joint Reconstruction and Parcellation of Cortical Surfaces},
author = {Rickmann, Anne-Marie and Bongratz, Fabian and Pölsterl, Sebastian and Sarasua, Ignacio and Wachinger, Christian},
url = {http://arxiv.org/abs/2210.01772},
year = {2022},
date = {2022-09-18},
urldate = {2022-09-18},
booktitle = {International Workshop on Machine Learning in Clinical Neuroimaging},
pages = {3--12},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickmann, Anne-Marie; Senapati, Jyotirmay; Kovalenko, Oksana; Peters, Annette; Bamberg, Fabian; Wachinger, Christian
AbdomenNet: deep neural network for abdominal organ segmentation in epidemiologic imaging studies Journal Article
In: BMC Medical Imaging, vol. 22, no. 168, 2022.
@article{rickmann2022abdomennet,
title = {AbdomenNet: deep neural network for abdominal organ segmentation in epidemiologic imaging studies},
author = {Rickmann, Anne-Marie and Senapati, Jyotirmay and Kovalenko, Oksana and Peters, Annette and Bamberg, Fabian and Wachinger, Christian},
doi = {10.1186/s12880-022-00893-4},
year = {2022},
date = {2022-09-17},
urldate = {2022-09-17},
journal = {BMC Medical Imaging},
volume = {22},
number = {168},
abstract = {Background
Whole-body imaging has recently been added to large-scale epidemiological studies providing novel opportunities for investigating abdominal organs. However, the segmentation of these organs is required beforehand, which is time consuming, particularly on such a large scale.
Methods
We introduce AbdomentNet, a deep neural network for the automated segmentation of abdominal organs on two-point Dixon MRI scans. A pre-processing pipeline enables to process MRI scans from different imaging studies, namely the German National Cohort, UK Biobank, and Kohorte im Raum Augsburg. We chose a total of 61 MRI scans across the three studies for training an ensemble of segmentation networks, which segment eight abdominal organs. Our network presents a novel combination of octave convolutions and squeeze and excitation layers, as well as training with stochastic weight averaging.
Results
Our experiments demonstrate that it is beneficial to combine data from different imaging studies to train deep neural networks in contrast to training separate networks. Combining the water and opposed-phase contrasts of the Dixon sequence as input channels, yields the highest segmentation accuracy, compared to single contrast inputs. The mean Dice similarity coefficient is above 0.9 for larger organs liver, spleen, and kidneys, and 0.71 and 0.74 for gallbladder and pancreas, respectively.
Conclusions
Our fully automated pipeline provides high-quality segmentations of abdominal organs across population studies. In contrast, a network that is only trained on a single dataset does not generalize well to other datasets.
},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Whole-body imaging has recently been added to large-scale epidemiological studies providing novel opportunities for investigating abdominal organs. However, the segmentation of these organs is required beforehand, which is time consuming, particularly on such a large scale.
Methods
We introduce AbdomentNet, a deep neural network for the automated segmentation of abdominal organs on two-point Dixon MRI scans. A pre-processing pipeline enables to process MRI scans from different imaging studies, namely the German National Cohort, UK Biobank, and Kohorte im Raum Augsburg. We chose a total of 61 MRI scans across the three studies for training an ensemble of segmentation networks, which segment eight abdominal organs. Our network presents a novel combination of octave convolutions and squeeze and excitation layers, as well as training with stochastic weight averaging.
Results
Our experiments demonstrate that it is beneficial to combine data from different imaging studies to train deep neural networks in contrast to training separate networks. Combining the water and opposed-phase contrasts of the Dixon sequence as input channels, yields the highest segmentation accuracy, compared to single contrast inputs. The mean Dice similarity coefficient is above 0.9 for larger organs liver, spleen, and kidneys, and 0.71 and 0.74 for gallbladder and pancreas, respectively.
Conclusions
Our fully automated pipeline provides high-quality segmentations of abdominal organs across population studies. In contrast, a network that is only trained on a single dataset does not generalize well to other datasets.
Narazani, Marla; Sarasua, Ignacio; Pölsterl, Sebastian; Lizarraga, Aldana; Yakushev, Igor; Wachinger, Christian
Is a PET All You Need? A Multi-modal Study for Alzheimer’s Disease Using 3D CNNs Conference
Medical Image Computing and Computer Assisted Intervention – MICCAI 2022, 2022.
@conference{Narazani-miccai-2022,
title = {Is a PET All You Need? A Multi-modal Study for Alzheimer’s Disease Using 3D CNNs},
author = {Narazani, Marla and Sarasua, Ignacio and Pölsterl, Sebastian and Lizarraga, Aldana and Yakushev, Igor and Wachinger, Christian},
doi = {10.1007/978-3-031-16431-6_7},
year = {2022},
date = {2022-09-15},
urldate = {2022-09-15},
booktitle = {Medical Image Computing and Computer Assisted Intervention – MICCAI 2022},
pages = {66--76},
abstract = {Alzheimer’s Disease (AD) is the most common form of dementia and often difficult to diagnose due to the multifactorial etiology of dementia. Recent works on neuroimaging-based computer-aided diagnosis with deep neural networks (DNNs) showed that fusing structural magnetic resonance images (sMRI) and fluorodeoxyglucose positron emission tomography (FDG-PET) leads to improved accuracy in a study population of healthy controls and subjects with AD. However, this result conflicts with the established clinical knowledge that FDG-PET better captures AD-specific pathologies than sMRI. Therefore, we propose a framework for the systematic evaluation of multi-modal DNNs and critically re-evaluate single- and multi-modal DNNs based on FDG-PET and sMRI for binary healthy vs. AD, and three-way healthy/mild cognitive impairment/AD classification. Our experiments demonstrate that a single-modality network using FDG-PET performs better than MRI (accuracy 0.91 vs 0.87) and does not show improvement when combined. This conforms with the established clinical knowledge on AD biomarkers, but raises questions about the true benefit of multi-modal DNNs. We argue that future work on multi-modal fusion should systematically assess the contribution of individual modalities following our proposed evaluation framework. Finally, we encourage the community to go beyond healthy vs. AD classification and focus on differential diagnosis of dementia, where fusing multi-modal image information conforms with a clinical need.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Sarasua, Ignacio; Pölsterl, Sebastian; Wachinger, Christian
CASHformer: Cognition Aware SHape Transformer for Longitudinal Analysis Conference
Medical Image Computing and Computer Assisted Intervention – MICCAI 2022, 2022.
@conference{Sarasua-miccai-2022,
title = {CASHformer: Cognition Aware SHape Transformer for Longitudinal Analysis},
author = {Sarasua, Ignacio and Pölsterl, Sebastian and Wachinger, Christian},
url = {https://arxiv.org/pdf/2207.02091.pdf},
year = {2022},
date = {2022-09-15},
urldate = {2022-09-15},
booktitle = {Medical Image Computing and Computer Assisted Intervention – MICCAI 2022},
pages = {44--54},
abstract = {Modeling temporal changes in subcortical structures is crucial for a better understanding of the progression of Alzheimer’s disease (AD). Given their flexibility to adapt to heterogeneous sequence lengths, mesh-based transformer architectures have been proposed in the past for predicting hippocampus deformations across time. However, one of the main limitations of transformers is the large amount of trainable parameters, which makes the application on small datasets very challenging. In addition, current methods do not include relevant non-image information that can help to identify AD-related patterns in the progression. To this end, we introduce CASHformer, a transformer-based framework to model longitudinal shape trajectories in AD. CASHformer incorporates the idea of pre-trained transformers as universal compute engines that generalize across a wide range of tasks by freezing most layers during fine-tuning. This reduces the number of parameters by over 90% with respect to the original model and therefore enables the application of large models on small datasets without overfitting. In addition, CASHformer models cognitive decline to reveal AD atrophy patterns in the temporal sequence. Our results show that CASHformer reduces the reconstruction error by 73% compared to previously proposed methods. Moreover, the accuracy of detecting patients progressing to AD increases by 3% with imputing missing longitudinal shape data.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Wolf, Tom Nuno; Pölsterl, Sebastian; Wachinger, Christian
DAFT: A Universal Module to Interweave Tabular Data and 3D Images in CNNs Journal Article
In: NeuroImage, pp. 119505, 2022.
@article{WOLF2022119505,
  title = {DAFT: A Universal Module to Interweave Tabular Data and 3D Images in CNNs},
  author = {Tom Nuno Wolf and Sebastian Pölsterl and Christian Wachinger},
  doi = {10.1016/j.neuroimage.2022.119505},
  year = {2022},
  date = {2022-07-22},
  urldate = {2022-07-22},
  journal = {NeuroImage},
  pages = {119505},
  abstract = {Prior work on Alzheimer’s Disease (AD) has demonstrated that convolutional neural networks (CNNs) can leverage the high-dimensional image information for diagnosing patients. Beside such data-driven approaches, many established biomarkers exist and are typically represented as tabular data, such as demographics, genetic alterations, or laboratory measurements from cerebrospinal fluid. However, little research has focused on the effective integration of tabular data into existing CNN architectures to improve patient diagnosis. We introduce the Dynamic Affine Feature Map Transform (DAFT), a general-purpose module for CNNs that incites or represses high-level concepts learned from a 3D image by conditioning feature maps of a convolutional layer on both a patient’s image and tabular clinical information. This is achieved by using an auxiliary neural network that outputs a scaling factor and offset to dynamically apply an affine transformation to the feature maps of a convolutional layer. In our experiments on AD diagnosis and time-to-dementia prediction, we show that the DAFT is highly effective in combining 3D image and tabular information by achieving a mean balanced accuracy of 0.622 for diagnosis, and mean c-index of 0.748 for time-to-dementia prediction, thus outperforming all baseline methods. Finally, our extensive ablation study and empirical experiments reveal that the performance improvement due to the DAFT is robust with respect to many design choices.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Antonelli, Michela; Reinke, Annika; Sarasua, Ignacio; Wachinger, Christian; others,
The Medical Segmentation Decathlon Journal Article
In: Nat Commun, vol. 13, no. 1, pp. 4128, 2022, ISSN: 2041-1723.
@article{pmid35840566,
  title = {The Medical Segmentation Decathlon},
  author = {Michela Antonelli and Annika Reinke and Ignacio Sarasua and Christian Wachinger and others},
  url = {https://www.nature.com/articles/s41467-022-30695-9},
  doi = {10.1038/s41467-022-30695-9},
  issn = {2041-1723},
  year = {2022},
  date = {2022-07-01},
  urldate = {2022-07-01},
  journal = {Nat Commun},
  volume = {13},
  number = {1},
  pages = {4128},
  abstract = {International challenges have become the de facto standard for comparative assessment of image analysis algorithms. Although segmentation is the most widely investigated medical image processing task, the various challenges have been organized to focus only on specific clinical tasks. We organized the Medical Segmentation Decathlon (MSD)-a biomedical image analysis challenge, in which algorithms compete in a multitude of both tasks and modalities to investigate the hypothesis that a method capable of performing well on multiple tasks will generalize well to a previously unseen task and potentially outperform a custom-designed solution. MSD results confirmed this hypothesis, moreover, MSD winner continued generalizing well to a wide range of other clinical problems for the next two years. Three main conclusions can be drawn from this study: (1) state-of-the-art image segmentation algorithms generalize well when retrained on unseen tasks; (2) consistent algorithmic performance across multiple tasks is a strong surrogate of algorithmic generalizability; (3) the training of accurate AI segmentation models is now commoditized to scientists that are not versed in AI model training.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Bongratz, Fabian; Rickmann, Anne-Marie; Pölsterl, Sebastian; Wachinger, Christian
Vox2Cortex: Fast Explicit Reconstruction of Cortical Surfaces from 3D MRI Scans with Geometric Deep Neural Networks Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.
@inproceedings{Bongratz_2022_CVPR,
  title = {Vox2Cortex: Fast Explicit Reconstruction of Cortical Surfaces from 3D MRI Scans with Geometric Deep Neural Networks},
  author = {Fabian Bongratz and Anne-Marie Rickmann and Sebastian Pölsterl and Christian Wachinger},
  url = {https://ai-med.github.io/Vox2Cortex/
https://openaccess.thecvf.com/content/CVPR2022/papers/Bongratz_Vox2Cortex_Fast_Explicit_Reconstruction_of_Cortical_Surfaces_From_3D_MRI_CVPR_2022_paper.pdf
https://github.com/ai-med/Vox2Cortex},
  year = {2022},
  date = {2022-06-01},
  urldate = {2022-06-01},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  abstract = {The reconstruction of cortical surfaces from brain magnetic resonance imaging (MRI) scans is essential for quantitative analyses of cortical thickness and sulcal morphology. Although traditional and deep learning-based algorithmic pipelines exist for this purpose, they have two major drawbacks: lengthy runtimes of multiple hours (traditional) or intricate post-processing, such as mesh extraction and topology correction (deep learning-based). In this work, we address both of these issues and propose Vox2Cortex, a deep learning-based algorithm that directly yields topologically correct, three-dimensional meshes of the boundaries of the cortex. Vox2Cortex leverages convolutional and graph convolutional neural networks to deform an initial template to the densely folded geometry of the cortex represented by an input MRI scan. We show in extensive experiments on three brain MRI datasets that our meshes are as accurate as the ones reconstructed by state-of-the-art methods in the field, without the need for time- and resource-intensive post-processing. To accurately reconstruct the tightly folded cortex, we work with meshes containing about 168,000 vertices at test time, scaling deep explicit reconstruction methods to a new level.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Sarasua, Ignacio; Pölsterl, Sebastian; Wachinger, Christian
Hippocampal Representations for Deep Learning on Alzheimer's Disease Journal Article
In: Scientific reports, vol. 12, no. 1, pp. 1-13, 2022, ISSN: 2045-2322.
@article{sarasua2022hippocampal,
title = {Hippocampal Representations for Deep Learning on Alzheimer's Disease},
author = {Ignacio Sarasua and Sebastian Pölsterl and Christian Wachinger},
url = {https://www.nature.com/articles/s41598-022-12533-6},
issn = {2045-2322},
year = {2022},
date = {2022-05-21},
urldate = {2022-05-21},
journal = {Scientific reports},
volume = {12},
number = {1},
pages = {1-13},
abstract = {Deep learning offers a powerful approach for analyzing hippocampal changes in Alzheimer's disease (AD) without relying on handcrafted features.
Nevertheless, an input format needs to be selected to pass the image information to the neural network, which has wide ramifications for the analysis, but has not been evaluated yet. We compare five hippocampal representations (and their respective tailored network architectures) that span from raw images to geometric representations like meshes and point clouds. We performed a thorough evaluation for the prediction of AD diagnosis and time-to-dementia prediction with experiments on an independent test dataset. In addition, we evaluated the ease of interpretability for each representation-network pair.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nevertheless, an input format needs to be selected to pass the image information to the neural network, which has wide ramifications for the analysis, but has not been evaluated yet. We compare five hippocampal representations (and their respective tailored network architectures) that span from raw images to geometric representations like meshes and point clouds. We performed a thorough evaluation for the prediction of AD diagnosis and time-to-dementia prediction with experiments on an independent test dataset. In addition, we evaluated the ease of interpretability for each representation-network pair.
2021
Ronge, Raphael; Nho, Kwangsik; Wachinger, Christian; Pölsterl, Sebastian
Alzheimer's Disease Diagnosis via Deep Factorization Machine Models Conference
Machine Learning in Medical Imaging (MLMI), 2021.
@conference{Ronge2021,
  title = {Alzheimer's Disease Diagnosis via Deep Factorization Machine Models},
  author = {Raphael Ronge and Kwangsik Nho and Christian Wachinger and Sebastian Pölsterl},
  url = {https://arxiv.org/abs/2108.05916},
  doi = {10.1007/978-3-030-87589-3_64},
  year = {2021},
  date = {2021-09-23},
  urldate = {2021-09-23},
  booktitle = {Machine Learning in Medical Imaging (MLMI)},
  pages = {624--633},
  abstract = {The current state-of-the-art deep neural networks (DNNs) for Alzheimer's Disease diagnosis use different biomarker combinations to classify patients, but do not allow extracting knowledge about the interactions of biomarkers. However, to improve our understanding of the disease, it is paramount to extract such knowledge from the learned model. In this paper, we propose a Deep Factorization Machine model that combines the ability of DNNs to learn complex relationships and the ease of interpretability of a linear model. The proposed model has three parts: (i) an embedding layer to deal with sparse categorical data, (ii) a Factorization Machine to efficiently learn pairwise interactions, and (iii) a DNN to implicitly model higher order interactions. In our experiments on data from the Alzheimer's Disease Neuroimaging Initiative, we demonstrate that our proposed model classifies cognitive normal, mild cognitive impaired, and demented patients more accurately than competing models. In addition, we show that valuable knowledge about the interactions among biomarkers can be obtained.},
  keywords = {},
  pubstate = {published},
  tppubtype = {conference}
}
Gröger, Fabian; Rickmann, Anne-Marie; Wachinger, Christian
STRUDEL: Self-Training with Uncertainty Dependent Label Refinement across Domains Conference
Machine Learning in Medical Imaging (MLMI), 2021.
@conference{Gröger2021,
  title     = {STRUDEL: Self-Training with Uncertainty Dependent Label Refinement across Domains},
  author    = {Fabian Gröger and Anne-Marie Rickmann and Christian Wachinger},
  url       = {https://arxiv.org/abs/2104.11596},
  doi       = {10.1007/978-3-030-87589-3_32},
  year      = {2021},
  date      = {2021-09-23},
  urldate   = {2021-09-23},
  booktitle = {Machine Learning in Medical Imaging (MLMI)},
  pages     = {306--316},
  abstract  = {We propose an unsupervised domain adaptation (UDA) approach for white matter hyperintensity (WMH) segmentation, which uses Self-TRaining with Uncertainty DEpendent Label refinement (STRUDEL). Self-training has recently been introduced as a highly effective method for UDA, which is based on self-generated pseudo labels. However, pseudo labels can be very noisy and therefore deteriorate model performance. We propose to predict the uncertainty of pseudo labels and integrate it in the training process with an uncertainty-guided loss function to highlight labels with high certainty. STRUDEL is further improved by incorporating the segmentation output of an existing method in the pseudo label generation that showed high robustness for WMH segmentation. In our experiments, we evaluate STRUDEL with a standard U-Net and a modified network with a higher receptive field. Our results on WMH segmentation across datasets demonstrate the significant improvement of STRUDEL with respect to standard self-training.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
light labels with high certainty. STRUDEL is further improved by incorporating the segmentation output of an existing method in the pseudo label generation that showed high robustness for WMH segmentation. In our experiments, we evaluate STRUDEL with a standard U-Net and a modified network with a higher receptive field. Our results on WMH segmentation across datasets demonstrate the significant improvement of STRUDEL with respect to standard self-training.
Sarasua, Ignacio; Pölsterl, Sebastian; Wachinger, Christian
TransforMesh: A Transformer Network for Longitudinal modeling of Anatomical Meshes Conference
Machine Learning in Medical Imaging (MLMI), 2021.
@conference{sarasua2021transformesh,
  title     = {TransforMesh: A Transformer Network for Longitudinal modeling of Anatomical Meshes},
  author    = {Ignacio Sarasua and Sebastian Pölsterl and Christian Wachinger},
  url       = {https://arxiv.org/pdf/2109.00532.pdf},
  doi       = {10.1007/978-3-030-87589-3_22},
  year      = {2021},
  date      = {2021-09-23},
  urldate   = {2021-09-23},
  booktitle = {Machine Learning in Medical Imaging (MLMI)},
  pages     = {209--218},
  abstract  = {The longitudinal modeling of neuroanatomical changes related to Alzheimer's disease (AD) is crucial for studying the progression of the disease. To this end, we introduce TransforMesh, a spatio-temporal network based on transformers that models longitudinal shape changes on 3D anatomical meshes.
While transformer and mesh networks have recently shown impressive performances in natural language processing and computer vision, their application to medical image analysis has been very limited.
To the best of our knowledge, this is the first work that combines transformer and mesh networks.
Our results show that TransforMesh can model shape trajectories better than other baseline architectures that do not capture temporal dependencies.
Moreover, we also explore the capabilities of TransforMesh in detecting structural anomalies of the hippocampus in patients developing AD.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
While transformer and mesh networks have recently shown impressive performances in natural language processing and computer vision, their application to medical image analysis has been very limited.
To the best of our knowledge, this is the first work that combines transformer and mesh networks.
Our results show that TransforMesh can model shape trajectories better than other baseline architectures that do not capture temporal dependencies.
Moreover, we also explore the capabilities of TransforMesh in detecting structural anomalies of the hippocampus in patients developing AD.
Pölsterl, Sebastian; Aigner, Christina; Wachinger, Christian
Scalable, Axiomatic Explanations of Deep Alzheimer's Diagnosis from Heterogeneous Data Conference
Medical Image Computing and Computer-Assisted Intervention (MICCAI), 2021.
@conference{Poelsterl2021-svehnn,
  title     = {Scalable, Axiomatic Explanations of Deep Alzheimer's Diagnosis from Heterogeneous Data},
  author    = {Sebastian Pölsterl and Christina Aigner and Christian Wachinger},
  url       = {https://arxiv.org/abs/2107.05997
https://github.com/ai-med/SVEHNN
https://youtu.be/87fTuJ_5tYg},
  doi       = {10.1007/978-3-030-87199-4_41},
  year      = {2021},
  date      = {2021-09-21},
  urldate   = {2021-09-21},
  booktitle = {Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
  pages     = {434--444},
  abstract  = {Deep Neural Networks (DNNs) have an enormous potential to learn from complex biomedical data. In particular, DNNs have been used to seamlessly fuse heterogeneous information from neuroanatomy, genetics, biomarkers, and neuropsychological tests for highly accurate Alzheimer’s disease diagnosis. On the other hand, their black-box nature is still a barrier for the adoption of such a system in the clinic, where interpretability is absolutely essential. We propose Shapley Value Explanation of Heterogeneous Neural Networks (SVEHNN) for explaining the Alzheimer’s diagnosis made by a DNN from the 3D point cloud of the neuroanatomy and tabular biomarkers. Our explanations are based on the Shapley value, which is the unique method that satisfies all fundamental axioms for local explanations previously established in the literature. Thus, SVEHNN has many desirable characteristics that previous work on interpretability for medical decision making is lacking. To avoid the exponential time complexity of the Shapley value, we propose to transform a given DNN into a Lightweight Probabilistic Deep Network without re-training, thus achieving a complexity only quadratic in the number of features. In our experiments on synthetic and real data, we show that we can closely approximate the exact Shapley value with a dramatically reduced runtime and can reveal the hidden knowledge the network has learned from the data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Pölsterl, Sebastian; Wolf, Tom Nuno; Wachinger, Christian
Combining 3D Image and Tabular Data via the Dynamic Affine Feature Map Transform Conference
Medical Image Computing and Computer-Assisted Intervention (MICCAI), 2021.
@conference{Poelsterl2021-daft,
  title     = {Combining 3D Image and Tabular Data via the Dynamic Affine Feature Map Transform},
  author    = {Sebastian Pölsterl and Tom Nuno Wolf and Christian Wachinger},
  url       = {https://arxiv.org/abs/2107.05990
https://github.com/ai-med/DAFT
https://youtu.be/WeBZeLqSgsM},
  doi       = {10.1007/978-3-030-87240-3_66},
  year      = {2021},
  date      = {2021-09-21},
  urldate   = {2021-09-21},
  booktitle = {Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
  pages     = {688--698},
  abstract  = {Prior work on diagnosing Alzheimer’s disease from magnetic resonance images of the brain established that convolutional neural networks (CNNs) can leverage the high-dimensional image information for classifying patients. However, little research focused on how these models can utilize the usually low-dimensional tabular information, such as patient demographics or laboratory measurements. We introduce the Dynamic Affine Feature Map Transform (DAFT), a general-purpose module for CNNs that dynamically rescales and shifts the feature maps of a convolutional layer, conditional on a patient’s tabular clinical information. We show that DAFT is highly effective in combining 3D image and tabular information for diagnosis and time-to-dementia prediction, where it outperforms competing CNNs with a mean balanced accuracy of 0.622 and mean c-index of 0.748, respectively. Our extensive ablation study provides valuable insights into the architectural properties of DAFT. Our implementation is available at https://github.com/ai-med/DAFT.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Sarasua, Ignacio; Lee, Jonwong; Wachinger, Christian
Geometric Deep Learning on Anatomical Meshes for the Prediction of Alzheimer's Disease Conference
ISBI: International Symposium on Biomedical Imaging 2021, 2021.
@conference{sarasua2021geometric,
  title     = {Geometric Deep Learning on Anatomical Meshes for the Prediction of Alzheimer's Disease},
  author    = {Ignacio Sarasua and Jonwong Lee and Christian Wachinger},
  url       = {https://arxiv.org/pdf/2104.10047.pdf},
  year      = {2021},
  date      = {2021-04-20},
  urldate   = {2021-04-20},
  booktitle = {ISBI: International Symposium on Biomedical Imaging 2021},
  abstract  = {Geometric deep learning can find representations that are optimal for a given task and therefore improve the performance over pre-defined representations. While current work has mainly focused on point representations, meshes also contain connectivity information and are therefore a more comprehensive characterization of the underlying anatomical surface. In this work, we evaluate four recent geometric deep learning approaches that operate on mesh representations. These approaches can be grouped into template-free and template-based approaches, where the template-based methods need a more elaborate pre-processing step with the definition of a common reference template and correspondences. We compare the different networks for the prediction of Alzheimer's disease based on the meshes of the hippocampus. Our results show advantages for template-based methods in terms of accuracy, number of learnable parameters, and training speed. While the template creation may be limiting for some applications, neuroimaging has a long history of building templates with automated tools readily available. Overall, working with meshes is more involved than working with simplistic point clouds, but they also offer new avenues for designing geometric deep learning architectures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Pölsterl, Sebastian; Wachinger, Christian
Estimation of Causal Effects in the Presence of Unobserved Confounding in the Alzheimer's Continuum Conference
IPMI: International Conference on Information Processing in Medical Imaging 2021, 2021.
@conference{Poelsterl2021-causal-effects-in-ad,
  title     = {Estimation of Causal Effects in the Presence of Unobserved Confounding in the Alzheimer's Continuum},
  author    = {Sebastian Pölsterl and Christian Wachinger},
  url       = {http://ai-med.de/wp-content/uploads/2021/04/2006.13135.pdf
https://github.com/ai-med/causal-effects-in-alzheimers-continuum},
  doi       = {10.1007/978-3-030-78191-0_4},
  year      = {2021},
  date      = {2021-04-12},
  urldate   = {2021-04-12},
  booktitle = {IPMI: International Conference on Information Processing in Medical Imaging 2021},
  pages     = {45--57},
  abstract  = {Studying the relationship between neuroanatomy and cognitive decline due to Alzheimer's has been a major research focus in the last decade. However, to infer cause-effect relationships rather than simple associations from observational data, we need to (i) express the causal relationships leading to cognitive decline in a graphical model, and (ii) ensure the causal effect of interest is identifiable from the collected data. We derive a causal graph from the current clinical knowledge on cause and effect in the Alzheimer's disease continuum, and show that identifiability of the causal effect requires all confounders to be known and measured. However, in complex neuroimaging studies, we neither know all potential confounders nor do we have data on them. To alleviate this requirement, we leverage the dependencies among multiple causes by deriving a substitute confounder via a probabilistic latent factor model. In our theoretical analysis, we prove that using the substitute confounder enables identifiability of the causal effect of neuroanatomy on cognition. We quantitatively evaluate the effectiveness of our approach on semi-synthetic data, where we know the true causal effects, and illustrate its use on real data on the Alzheimer's disease continuum, where it reveals important causes that otherwise would have been missed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2020
Wachinger, Christian; Rieckmann, Anna; Pölsterl, Sebastian
Detect and correct bias in multi-site neuroimaging datasets Journal Article
In: Medical Image Analysis, vol. 67, pp. 101879, 2020.
@article{wachinger2020detect,
  title     = {Detect and correct bias in multi-site neuroimaging datasets},
  author    = {Christian Wachinger and Anna Rieckmann and Sebastian Pölsterl},
  url       = {http://ai-med.de/wp-content/uploads/2021/01/Bias_MedIA.pdf
https://github.com/ai-med/Dataset-Bias},
  year      = {2020},
  date      = {2020-12-03},
  urldate   = {2020-12-03},
  journal   = {Medical Image Analysis},
  volume    = {67},
  pages     = {101879},
  publisher = {Elsevier},
  abstract  = {The desire to train complex machine learning algorithms and to increase the statistical power in association studies drives neuroimaging research to use ever-larger datasets. The most obvious way to increase sample size is by pooling scans from independent studies. However, simple pooling is often ill-advised as selection, measurement, and confounding biases may creep in and yield spurious correlations. In this work, we combine 35,320 magnetic resonance images of the brain from 17 studies to examine bias in neuroimaging. In the first experiment, Name That Dataset, we provide empirical evidence for the presence of bias by showing that scans can be correctly assigned to their respective dataset with 71.5% accuracy. Given such evidence, we take a closer look at confounding bias, which is often viewed as the main shortcoming in observational studies. In practice, we neither know all potential confounders nor do we have data on them. Hence, we model confounders as unknown, latent variables. Kolmogorov complexity is then used to decide whether the confounded or the causal model provides the simplest factorization of the graphical model. Finally, we present methods for dataset harmonization and study their ability to remove bias in imaging features. In particular, we propose an extension of the recently introduced ComBat algorithm to control for global variation across image features, inspired by adjusting for unknown population stratification in genetics. Our results demonstrate that harmonization can reduce dataset-specific information in image features. Further, confounding bias can be reduced and even turned into a causal relationship. However, harmonization also requires caution as it can easily remove relevant subject-specific information. Code is available at https://github.com/ai-med/Dataset-Bias.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Pölsterl, Sebastian; Wachinger, Christian
Adversarial Learned Molecular Graph Inference and Generation Proceedings Article
In: European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML-PKDD), 2020.
@inproceedings{Poelsterl2020-almgig,
  title     = {Adversarial Learned Molecular Graph Inference and Generation},
  author    = {Sebastian Pölsterl and Christian Wachinger},
  url       = {https://arxiv.org/pdf/1905.10310
https://github.com/ai-med/almgig
https://www.youtube.com/watch?v=ZpQtcEauT3U},
  year      = {2020},
  date      = {2020-06-22},
  urldate   = {2020-06-22},
  booktitle = {European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML-PKDD)},
  abstract  = {Recent methods for generating novel molecules use graph representations of molecules and employ various forms of graph convolutional neural networks for inference. However, training requires solving an expensive graph isomorphism problem, which previous approaches do not address or solve only approximately. In this work, we propose ALMGIG, a likelihood-free adversarial learning framework for inference and de novo molecule generation that avoids explicitly computing a reconstruction loss. Our approach extends generative adversarial networks by including an adversarial cycle-consistency loss to implicitly enforce the reconstruction property. To capture properties unique to molecules, such as valence, we extend the Graph Isomorphism Network to multi-graphs. To quantify the performance of models, we propose to compute the distance between distributions of physicochemical properties with the 1-Wasserstein distance. We demonstrate that ALMGIG more accurately learns the distribution over the space of molecules than all baselines. Moreover, it can be utilized for drug discovery by efficiently searching the space of molecules using molecules' continuous latent representation. Our code is available at https://github.com/ai-med/almgig},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Richards, Rose; Greimel, Ellen; Kliemann, Dorit; Koerte, Inga K; Schulte-Körne, Gerd; Reuter, Martin; Wachinger, Christian
Increased Hippocampal Shape Asymmetry and Volumetric Ventricular Asymmetry in Autism Journal Article
In: NeuroImage: Clinical, pp. 102207, 2020.
@article{richards2020increased,
  title     = {Increased Hippocampal Shape Asymmetry and Volumetric Ventricular Asymmetry in Autism},
  author    = {Rose Richards and Ellen Greimel and Dorit Kliemann and Inga K Koerte and Gerd Schulte-Körne and Martin Reuter and Christian Wachinger},
  url       = {https://www.sciencedirect.com/science/article/pii/S2213158220300449},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {NeuroImage: Clinical},
  pages     = {102207},
  publisher = {Elsevier},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rickmann, Anne-Marie; Roy, Abhijit Guha; Sarasua, Ignacio; Wachinger, Christian
Recalibrating 3D ConvNets with Project & Excite Journal Article
In: IEEE Transactions on Medical Imaging, 2020.
@article{rickmann2020recalibrating,
  title     = {Recalibrating 3D ConvNets with Project \& Excite},
  author    = {Anne-Marie Rickmann and Abhijit Guha Roy and Ignacio Sarasua and Christian Wachinger},
  url       = {http://ai-med.de/wp-content/uploads/2020/02/PE_tmi1.pdf},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {IEEE Transactions on Medical Imaging},
  publisher = {IEEE},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gutmann, Daniel AP; Rospleszcz, Susanne; Rathmann, Wolfgang; Schlett, Christopher L; Peters, Annette; Wachinger, Christian; Gatidis, Sergios; Bamberg, Fabian
MRI-Derived Radiomics Features of Hepatic Fat Predict Metabolic States in Individuals without Cardiovascular Disease Journal Article
In: Academic Radiology, 2020.
@article{gutmann2020mri,
  title     = {MRI-Derived Radiomics Features of Hepatic Fat Predict Metabolic States in Individuals without Cardiovascular Disease},
  author    = {Daniel AP Gutmann and Susanne Rospleszcz and Wolfgang Rathmann and Christopher L Schlett and Annette Peters and Christian Wachinger and Sergios Gatidis and Fabian Bamberg},
  url       = {https://www.sciencedirect.com/science/article/abs/pii/S1076633220304086},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {Academic Radiology},
  publisher = {Elsevier},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Reuter, Martin; Wachinger, Christian; Lombaert, Hervé; Paniagua, Beatriz; Goksel, Orcun; Rekik, Islem
Shape in Medical Imaging Miscellaneous
2020.
@misc{reutershape,
  title     = {Shape in Medical Imaging},
  author    = {Martin Reuter and Christian Wachinger and Hervé Lombaert and Beatriz Paniagua and Orcun Goksel and Islem Rekik},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  publisher = {Springer},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {misc}
}
Özgün, Sinan; Rickmann, Anne-Marie; Roy, Abhijit Guha; Wachinger, Christian
Importance Driven Continual Learning for Segmentation Across Domains Proceedings Article
In: Liu, Mingxia; Yan, Pingkun; Lian, Chunfeng; Cao, Xiaohuan (Ed.): Machine Learning in Medical Imaging, pp. 423–433, Springer International Publishing, Cham, 2020, ISBN: 978-3-030-59861-7.
@inproceedings{10.1007/978-3-030-59861-7_43,
  title     = {Importance Driven Continual Learning for Segmentation Across Domains},
  author    = {Sinan Özgün and Anne-Marie Rickmann and Abhijit Guha Roy and Christian Wachinger},
  editor    = {Mingxia Liu and Pingkun Yan and Chunfeng Lian and Xiaohuan Cao},
  url       = {https://arxiv.org/pdf/2005.00079},
  isbn      = {978-3-030-59861-7},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Machine Learning in Medical Imaging},
  pages     = {423--433},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {The ability of neural networks to continuously learn and adapt to new tasks while retaining prior knowledge is crucial for many applications. However, current neural networks tend to forget previously learned tasks when trained on new ones, i.e., they suffer from Catastrophic Forgetting (CF). The objective of Continual Learning (CL) is to alleviate this problem, which is particularly relevant for medical applications, where it may not be feasible to store and access previously used sensitive patient data. In this work, we propose a Continual Learning approach for brain segmentation, where a single network is consecutively trained on samples from different domains. We build upon an importance driven approach and adapt it for medical image segmentation. Particularly, we introduce a learning rate regularization to prevent the loss of the network's knowledge. Our results demonstrate that directly restricting the adaptation of important network parameters clearly reduces Catastrophic Forgetting for segmentation across domains. Our code is publicly available on https://github.com/ai-med/MAS-LR.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Senapati, Jyotirmay; Roy, Abhijit Guha; Pölsterl, Sebastian; Gutmann, Daniel; Gatidis, Sergios; Schlett, Christopher; Peters, Anette; Bamberg, Fabian; Wachinger, Christian
Bayesian Neural Networks for Uncertainty Estimation of Imaging Biomarkers Proceedings Article
In: Liu, Mingxia; Yan, Pingkun; Lian, Chunfeng; Cao, Xiaohuan (Ed.): Machine Learning in Medical Imaging, pp. 270–280, Springer International Publishing, Cham, 2020, ISBN: 978-3-030-59861-7.
@inproceedings{10.1007/978-3-030-59861-7_28,
  title     = {Bayesian Neural Networks for Uncertainty Estimation of Imaging Biomarkers},
  author    = {Jyotirmay Senapati and Abhijit Guha Roy and Sebastian Pölsterl and Daniel Gutmann and Sergios Gatidis and Christopher Schlett and Anette Peters and Fabian Bamberg and Christian Wachinger},
  editor    = {Mingxia Liu and Pingkun Yan and Chunfeng Lian and Xiaohuan Cao},
  url       = {https://arxiv.org/pdf/2008.12680},
  isbn      = {978-3-030-59861-7},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Machine Learning in Medical Imaging},
  pages     = {270--280},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {Image segmentation enables to extract quantitative measures from scans that can serve as imaging biomarkers for diseases. However, segmentation quality can vary substantially across scans, and therefore yield unfaithful estimates in the follow-up statistical analysis of biomarkers. The core problem is that segmentation and biomarker analysis are performed independently. We propose to propagate segmentation uncertainty to the statistical analysis to account for variations in segmentation confidence. To this end, we evaluate four Bayesian neural networks to sample from the posterior distribution and estimate the uncertainty. We then assign confidence measures to the biomarker and propose statistical models for its integration in group analysis and disease classification. Our results for segmenting the liver in patients with diabetes mellitus clearly demonstrate the improvement of integrating biomarker uncertainty in the statistical inference.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sarasua, Ignacio; Poelsterl, Sebastian; Wachinger, Christian
Recalibration of Neural Networks for Point Cloud Analysis Proceedings Article
In: 3DV, 2020.
@inproceedings{sarasua2020recalibration,
  title     = {Recalibration of Neural Networks for Point Cloud Analysis},
  author    = {Ignacio Sarasua and Sebastian Poelsterl and Christian Wachinger},
  url       = {https://arxiv.org/pdf/2011.12888},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {International Conference on 3D Vision (3DV)},
  publisher = {IEEE},
  abstract  = {Spatial and channel re-calibration have become powerful concepts in computer vision. Their ability to capture long-range dependencies is especially useful for those networks that extract local features, such as CNNs. While re-calibration has been widely studied for image analysis, it has not yet been used on shape representations. In this work, we introduce re-calibration modules on deep neural networks for 3D point clouds. We propose a set of re-calibration blocks that extend Squeeze and Excitation blocks and that can be added to any network for 3D point cloud analysis that builds a global descriptor by hierarchically combining features from multiple local neighborhoods. We run two sets of experiments to validate our approach. First, we demonstrate the benefit and versatility of our proposed modules by incorporating them into three state-of-the-art networks for 3D point cloud analysis: PointNet++, DGCNN, and RSCNN. We evaluate each network on two tasks: object classification on ModelNet40, and object part segmentation on ShapeNet. Our results show an improvement of up to 1% in accuracy for ModelNet40 compared to the baseline method. In the second set of experiments, we investigate the benefits of re-calibration blocks on Alzheimer's Disease (AD) diagnosis. Our results demonstrate that our proposed methods yield a 2% increase in accuracy for diagnosing AD and a 2.3% increase in concordance index for predicting AD onset with time-to-event analysis. Concluding, re-calibration improves the accuracy of point cloud architectures, while only minimally increasing the number of parameters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gutierrez-Becker, Benjamin; Sarasua, Ignacio; Wachinger, Christian
Discriminative and generative models for anatomical shape analysis on point clouds with deep neural networks Journal Article
In: Medical Image Analysis, vol. 67, pp. 101852, 2020.
@article{gutierrez2020discriminative,
  title     = {Discriminative and generative models for anatomical shape analysis on point clouds with deep neural networks},
  author    = {Gutierrez-Becker, Benjamin and Sarasua, Ignacio and Wachinger, Christian},
  url       = {https://arxiv.org/pdf/2010.00820},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {Medical Image Analysis},
  volume    = {67},
  pages     = {101852},
  publisher = {Elsevier},
  abstract  = {We introduce deep neural networks for the analysis of anatomical shapes that learn a low-dimensional shape representation from the given task, instead of relying on hand-engineered representations. Our framework is modular and consists of several computing blocks that perform fundamental shape processing tasks. The networks operate on unordered point clouds and provide invariance to similarity transformations, avoiding the need to identify point correspondences between shapes. Based on the framework, we assemble a discriminative model for disease classification and age regression, as well as a generative model for the accurate reconstruction of shapes.
In particular, we propose a conditional generative model, where the condition vector provides a mechanism to control the generative process. For instance, it enables to assess shape variations specific to a particular diagnosis, when passing it as side information.
Next to working on single shapes, we introduce an extension for the joint analysis of multiple anatomical structures, where the simultaneous modeling of multiple structures can lead to a more compact encoding and a better understanding of disorders.
We demonstrate the advantages of our framework in comprehensive experiments on real and synthetic data. The key insights are that (i) learning a shape representation specific to the given task yields higher performance than alternative shape descriptors, (ii) multi-structure analysis is both more efficient and more accurate than single-structure analysis, and (iii) point clouds generated by our model capture morphological differences associated to Alzheimer’s disease, to the point that they can be used to train a discriminative model for disease classification. Our framework naturally scales to the analysis of large datasets, giving it the potential to learn characteristic variations in large populations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
In particular, we propose a conditional generative model, where the condition vector provides a mechanism to control the generative process. For instance, it enables to assess shape variations specific to a particular diagnosis, when passing it as side information.
Next to working on single shapes, we introduce an extension for the joint analysis of multiple anatomical structures, where the simultaneous modeling of multiple structures can lead to a more compact encoding and a better understanding of disorders.
We demonstrate the advantages of our framework in comprehensive experiments on real and synthetic data. The key insights are that (i) learning a shape representation specific to the given task yields higher performance than alternative shape descriptors, (ii) multi-structure analysis is both more efficient and more accurate than single-structure analysis, and (iii) point clouds generated by our model capture morphological differences associated to Alzheimer’s disease, to the point that they can be used to train a discriminative model for disease classification. Our framework naturally scales to the analysis of large datasets, giving it the potential to learn characteristic variations in large populations.
2019
Chauvin, L.; Kumar, K.; Wachinger, C.; Vangel, M.; de Guise, J.; Desrosiers, C.; Wells, W.; Toews, M.
Neuroimage signature from salient keypoints is highly specific to individuals and shared by close relatives Journal Article
In: NeuroImage, 2019.
@article{Chauvin2019,
  title     = {Neuroimage signature from salient keypoints is highly specific to individuals and shared by close relatives},
  author    = {L. Chauvin and K. Kumar and C. Wachinger and M. Vangel and J. de Guise and C. Desrosiers and W. Wells and M. Toews},
  url       = {http://www.matthewtoews.com/papers/Chauvin_Neuroimage2020.pdf},
  year      = {2019},
  date      = {2019-11-01},
  urldate   = {2019-11-01},
  journal   = {NeuroImage},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Roy, Abhijit Guha; Siddiqui, Shayan; Pölsterl, Sebastian; Navab, Nassir; Wachinger, Christian
'Squeeze & Excite' Guided Few-Shot Segmentation of Volumetric Images Journal Article
In: Medical Image Analysis, 2019.
@article{few_shot_agr_2019,
  title     = {'Squeeze \& Excite' Guided Few-Shot Segmentation of Volumetric Images},
  author    = {Abhijit Guha Roy and Shayan Siddiqui and Sebastian Pölsterl and Nassir Navab and Christian Wachinger},
  url       = {https://arxiv.org/abs/1902.01314},
  year      = {2019},
  date      = {2019-10-11},
  urldate   = {2019-10-11},
  journal   = {Medical Image Analysis},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}