@article {7071, title = {Characterizing Generalization under Out-Of-Distribution Shifts in Deep Metric Learning}, year = {2021}, url = {https://arxiv.org/abs/2107.09562}, author = {Timo Milbich and Karsten Roth and Samarth Sinha and Ludwig Schmidt and Marzyeh Ghassemi and Bj{\"o}rn Ommer} } @conference {Kluger2020, title = {CONSAC: Robust Multi-Model Fitting by Conditional Sample Consensus}, booktitle = {CVPR 2020}, year = {2020}, month = {01/2020}, abstract = {We present a robust estimator for fitting multiple parametric models of the same form to noisy measurements. Applications include finding multiple vanishing points in man-made scenes, fitting planes to architectural imagery, or estimating multiple rigid motions within the same sequence. In contrast to previous works, which resorted to hand-crafted search strategies for multiple model detection, we learn the search strategy from data. A neural network conditioned on previously detected models guides a RANSAC estimator to different subsets of all measurements, thereby finding model instances one after another. We train our method supervised as well as self-supervised. For supervised training of the search strategy, we contribute a new dataset for vanishing point estimation. Leveraging this dataset, the proposed algorithm is superior with respect to other robust estimators as well as to designated vanishing point estimation algorithms. For self-supervised learning of the search, we evaluate the proposed algorithm on multi-homography estimation and demonstrate an accuracy that is superior to state-of-the-art methods.}, url = {http://arxiv.org/abs/2001.02643}, author = {Kluger, Florian and Brachmann, Eric and Ackermann, Hanno and Carsten Rother and Yang, Michael Ying and Rosenhahn, Bodo} } @article {Kleesiek2019, title = {Can Virtual Contrast Enhancement in Brain MRI Replace Gadolinium?: A Feasibility Study}, journal = {Investigative Radiology}, volume = {54}, year = {2019}, pages = {653{\textendash}660}, abstract = {Objectives Gadolinium-based contrast agents (GBCAs) have become an integral part in daily clinical decision making in the last 3 decades. However, there is a broad consensus that GBCAs should be exclusively used if no contrast-free magnetic resonance imaging (MRI) technique is available to reduce the amount of applied GBCAs in patients. In the current study, we investigate the possibility of predicting contrast enhancement from noncontrast multiparametric brain MRI scans using a deep-learning (DL) architecture. Materials and Methods A Bayesian DL architecture for the prediction of virtual contrast enhancement was developed using 10-channel multiparametric MRI data acquired before GBCA application. The model was quantitatively and qualitatively evaluated on 116 data sets from glioma patients and healthy subjects by comparing the virtual contrast enhancement maps to the ground truth contrast-enhanced T1-weighted imaging. Subjects were split in 3 different groups: Enhancing tumors (n = 47), nonenhancing tumors (n = 39), and patients without pathologic changes (n = 30). The tumor regions were segmented for a detailed analysis of subregions. The influence of the different MRI sequences was determined. Results Quantitative results of the virtual contrast enhancement yielded a sensitivity of 91.8\% and a specificity of 91.2\%. T2-weighted imaging, followed by diffusion-weighted imaging, was the most influential sequence for the prediction of virtual contrast enhancement. 
Analysis of the whole brain showed a mean area under the curve of 0.969 {\textpm} 0.019, a peak signal-to-noise ratio of 22.967 {\textpm} 1.162 dB, and a structural similarity index of 0.872 {\textpm} 0.031. Enhancing and nonenhancing tumor subregions performed worse (except for the peak signal-to-noise ratio of the nonenhancing tumors). The qualitative evaluation by 2 raters using a 4-point Likert scale showed good to excellent (3-4) results for 91.5\% of the enhancing and 92.3\% of the nonenhancing gliomas. However, despite the good scores and ratings, there were visual deviations between the virtual contrast maps and the ground truth, including a more blurry, less nodular-like ring enhancement, few low-contrast false-positive enhancements of nonenhancing gliomas, and a tendency to omit smaller vessels. These "features" were also exploited by 2 trained radiologists when performing a Turing test, allowing them to discriminate between real and virtual contrast-enhanced images in 80\% and 90\% of the cases, respectively. Conclusions The introduced model for virtual gadolinium enhancement demonstrates a very good quantitative and qualitative performance. Future systematic studies in larger patient collectives with varying neurological disorders need to evaluate if the introduced virtual contrast enhancement might reduce GBCA exposure in clinical practice.}, keywords = {Bayesian uncertainty, deep learning, gadolinium-based contrast agents, glioma, multiparametric MRI}, issn = {15360210}, doi = {10.1097/RLI.0000000000000583}, author = {Kleesiek, Jens and Morshuis, Jan Nikolas and Isensee, Fabian and Deike-Hofmann, Katerina and Paech, Daniel and Kickingereder, Philipp and K{\"o}the, Ullrich and Rother, Carsten and Forsting, Michael and Wick, Wolfgang and Bendszus, Martin and Schlemmer, Heinz Peter and Radbruch, Alexander} } @conference {Mackowiak2019, title = {CEREALS - Cost-Effective REgion-based Active Learning for Semantic Segmentation}, booktitle = {British Machine Vision Conference 2018, BMVC 2018}, year = {2019}, abstract = {State of the art methods for semantic image segmentation are trained in a supervised fashion using a large corpus of fully labeled training images. However, gathering such a corpus is expensive, due to human annotation effort, in contrast to gathering unlabeled data. We propose an active learning-based strategy, called CEREALS, in which a human only has to hand-label a few, automatically selected, regions within an unlabeled image corpus. This minimizes human annotation effort while maximizing the performance of a semantic image segmentation method. The automatic selection procedure is achieved by: a) using a suitable information measure combined with an estimate about human annotation effort, which is inferred from a learned cost model, and b) exploiting the spatial coherency of an image. The performance of CEREALS is demonstrated on Cityscapes, where we are able to reduce the annotation effort to 17\%, while keeping 95\% of the mean Intersection over Union (mIoU) of a model that was trained with the fully annotated training set of Cityscapes.}, author = {Mackowiak, Radek and Lenz, Philip and Ghori, Omair and Ferran Diego and Lange, Oliver and Carsten Rother} } @conference {6322, title = {Content and Style Disentanglement for Artistic Style Transfer}, booktitle = {Proceedings of the Intl. Conf. 
on Computer Vision (ICCV)}, year = {2019}, author = {Dmytro Kotovenko and Sanakoyeu, Artsiom and Sabine Lang and Bj{\"o}rn Ommer} } @article {Savarino:2019ab, title = {Continuous-Domain Assignment Flows}, journal = {preprint: arXiv}, year = {2019}, url = {https://arxiv.org/abs/1910.07287}, author = {Savarino, F. and Schn{\"o}rr, C.} } @inbook {6287, title = {Computer Vision und Kunstgeschichte {\textemdash} Dialog zweier Bildwissenschaften}, booktitle = {Computing Art Reader: Einf{\"u}hrung in die digitale Kunstgeschichte, P. Kuroczyński et al. (ed.)}, year = {2018}, author = {Bell, P. and Bj{\"o}rn Ommer} } @article {Arnab2018, title = {Conditional Random Fields Meet Deep Neural Networks for Semantic Segmentation}, journal = {Cvpr}, year = {2018}, pages = {1{\textendash}15}, abstract = {Semantic Segmentation is the task of labelling every pixel in an image with a pre-defined object category. It has numerous applications in scenarios where the detailed understanding of an image is required, such as in autonomous vehicles and medical diagnosis. This problem has traditionally been solved with probabilistic models known as Conditional Random Fields (CRFs) due to their ability to model the relationships between the pixels being predicted. However, Deep Neural Networks (DNNs) have recently been shown to excel at a wide range of computer vision problems due to their ability to learn rich feature representations automatically from data, as opposed to traditional hand-crafted features. The idea of combining CRFs and DNNs have achieved state-of-the-art results in a number of domains. We review the literature on combining the modelling power of CRFs with the representation-learning ability of DNNs, ranging from early work that combines these two techniques as independent stages of a common pipeline to recent approaches that embed inference of probabilistic models directly in the neural network itself. Finally, we summarise future research directions.}, keywords = {conditional random fields, deep learning, semantic segmentation}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.308.8889\&rep=rep1\&type=pdf}, author = {Arnab, Anurag and Zheng, Shuai and Jayasumana, Sadeep and Romera-paredes, Bernardino and Kirillov, Alexander and Savchynskyy, Bogdan and Carsten Rother and Kahl, Fredrik and Torr, Philip} } @conference {sayed:GCPR:2018, title = {Cross and Learn: Cross-Modal Self-Supervision}, booktitle = {German Conference on Pattern Recognition (GCPR) (Oral)}, year = {2018}, address = {Stuttgart, Germany}, abstract = {In this paper we present a self-supervised method to learn feature representations for different modalities. Based on the observation that cross-modal information has a high semantic meaning we propose a method to effectively exploit this signal. For our method we utilize video data since it is available on a large scale and provides easily accessible modalities given by RGB and optical flow. We demonstrate state-of-the-art performance on highly contested action recognition datasets in the context of self-supervised learning. We also show the transferability of our feature representations and conduct extensive ablation studies to validate our core contributions.}, keywords = {action recognition, cross-modal, image understanding, unsupervised learning}, url = {https://arxiv.org/abs/1811.03879v1}, author = {Sayed, N.
and Biagio Brattoli and Bj{\"o}rn Ommer} } @mastersthesis {6177, title = {Cluster Resolving for Animal Tracking: Multi Hypotheses Tracking with Part Based Model for Object Hypotheses Generation and Pose Estimation}, year = {2017}, school = {University of Heidelberg}, author = {Brosowsky, M} } @conference {Dalitz2017, title = {Compressed Motion Sensing}, booktitle = {Proc. SSVM}, series = {LNCS}, volume = {10302}, year = {2017}, publisher = {Springer}, organization = {Springer}, author = {Dalitz, R. and Petra, S. and Schn{\"o}rr, C.} } @mastersthesis {6326, title = {Correlation of Performance and Entropy in Active Learning with Convolutional Neural Networks}, year = {2017}, school = {Heidelberg University}, author = {Krause, G} } @proceedings {6204, title = {Cost-efficient Gradient Boosting}, year = {2017}, author = {Peter, S. and Ferran Diego and Fred A. Hamprecht and Nadler, B} } @conference {Schlesinger2017, title = {Crowd sourcing image segmentation with iaSTAPLE}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, year = {2017}, pages = {401{\textendash}405}, abstract = {We propose a novel label fusion technique as well as a crowdsourcing protocol to efficiently obtain accurate epithelial cell segmentations from non-expert crowd workers. Our label fusion technique simultaneously estimates the true segmentation, the performance levels of individual crowd workers, and an image segmentation model in the form of a pairwise Markov random field. We term our approach image-aware STAPLE (iaSTAPLE) since our image segmentation model seamlessly integrates into the well-known and widely used STAPLE approach. In an evaluation on a light microscopy dataset containing more than 5000 membrane labeled epithelial cells of a fly wing, we show that iaSTAPLE outperforms STAPLE in terms of segmentation accuracy as well as in terms of the accuracy of estimated crowd worker performance levels, and is able to correctly segment 99\% of all cells when compared to expert segmentations. These results show that iaSTAPLE is a highly useful tool for crowd sourcing image segmentation.}, keywords = {Crowdsourcing, Epithelial cell segmentation, IaSTAPLE, Markovian Random Fields}, isbn = {9781509011711}, issn = {19458452}, doi = {10.1109/ISBI.2017.7950547}, author = {Schlesinger, Dmitrij and Jug, Florian and Myers, Gene and Carsten Rother and Kainmueller, Dagmar} } @conference {Mustikovela2016, title = {Can ground truth label propagation from video help semantic segmentation?}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {9915 LNCS}, year = {2016}, pages = {804{\textendash}820}, abstract = {For state-of-the-art semantic segmentation task, training convolutional neural networks (CNNs) requires dense pixelwise ground truth (GT) labeling, which is expensive and involves extensive human effort. In this work, we study the possibility of using auxiliary ground truth, so-called pseudo ground truth (PGT) to improve the performance. The PGT is obtained by propagating the labels of a GT frame to its subsequent frames in the video using a simple CRF-based, cue integration framework. Our main contribution is to demonstrate the use of noisy PGT along with GT to improve the performance of a CNN. We perform a systematic analysis to find the right kind of PGT that needs to be added along with the GT for training a CNN. 
In this regard, we explore three aspects of PGT which influence the learning of a CNN: (i) the PGT labeling has to be of good quality; (ii) the PGT images have to be different compared to the GT images; (iii) the PGT has to be trusted differently than GT. We conclude that PGT which is diverse from GT images and has good quality of labeling can indeed help improve the performance of a CNN. Also, when PGT is multiple folds larger than GT, weighing down the trust on PGT helps in improving the accuracy. Finally, we show that using PGT along with GT, the performance of Fully Convolutional Network (FCN) on Camvid data is increased by 2.7\% on IoU accuracy. We believe such an approach can be used to train CNNs for semantic video segmentation where sequentially labeled image frames are needed. To this end, we provide recommendations for using PGT strategically for semantic segmentation and hence bypass the need for extensive human efforts in labeling.}, isbn = {9783319494081}, issn = {16113349}, doi = {10.1007/978-3-319-49409-8_66}, author = {Mustikovela, Siva Karthik and Yang, Michael Ying and Carsten Rother} } @mastersthesis {6093, title = {Cell Tracking With Graphical Model Using Higher Order Features On Track Segments}, year = {2016}, school = {University of Heidelberg}, author = {Wolf, S.} } @conference {arXiv:1608.08792, title = {CliqueCNN: Deep Unsupervised Exemplar Learning}, booktitle = {Proceedings of the Conference on Advances in Neural Information Processing Systems (NIPS)}, year = {2016}, publisher = {MIT Press}, organization = {MIT Press}, address = {Barcelona}, abstract = {Exemplar learning is a powerful paradigm for discovering visual similarities in an unsupervised manner. In this context, however, the recent breakthrough in deep learning could not yet unfold its full potential. With only a single positive sample, a great imbalance between one positive and many negatives, and unreliable relationships between most samples, training of Convolutional Neural networks is impaired. Given weak estimates of local distance we propose a single optimization problem to extract batches of samples with mutually consistent relations. Conflicting relations are distributed over different batches and similar samples are grouped into compact cliques. Learning exemplar similarities is framed as a sequence of clique categorization tasks. The CNN then consolidates transitivity relations within and between cliques and learns a single representation for all samples without the need for labels. The proposed unsupervised approach has shown competitive performance on detailed posture analysis and object classification.}, url = {https://arxiv.org/abs/1608.08792}, author = {Miguel Bautista and Sanakoyeu, A. and Sutter, E.
and Bj{\"o}rn Ommer} } @article {baust2016combined, title = {Combined Tensor Fitting and TV Regularization in Diffusion Tensor Imaging based on a Riemannian Manifold Approach}, journal = {IEEE Transactions on Medical Imaging}, volume = {35}, year = {2016}, pages = {1972{\textendash}1989}, doi = {10.1109/TMI.2016.2528820}, author = {Maximilian Baust and Andreas Weinmann and Matthias Wieczorek and Tobias Lasser and Martin Storath and Nassir Navab} } @conference {Royer2016, title = {Convexity shape constraints for image segmentation}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, volume = {2016-Decem}, year = {2016}, month = {sep}, pages = {402{\textendash}410}, abstract = {Segmenting an image into multiple components is a central task in computer vision. In many practical scenarios, prior knowledge about plausible components is available. Incorporating such prior knowledge into models and algorithms for image segmentation is highly desirable, yet can be non-trivial. In this work, we introduce a new approach that allows, for the first time, to constrain some or all components of a segmentation to have convex shapes. Specifically, we extend the Minimum Cost Multicut Problem by a class of constraints that enforce convexity. To solve instances of this NP-hard integer linear program to optimality, we separate the proposed constraints in the branch-and-cut loop of a state-of-the-art ILP solver. Results on photographs and micrographs demonstrate the effectiveness of the approach as well as its advantages over the state-of-the-art heuristic.}, isbn = {9781467388504}, issn = {10636919}, doi = {10.1109/CVPR.2016.50}, url = {http://arxiv.org/abs/1509.02122}, author = {Royer, Loic A. and Richmond, David L. and Carsten Rother and Bj{\"o}rn Andres and Kainmueller, Dagmar} } @conference {DBLP:conf/isvc/GussefeldHK16, title = {Creating Feasible Reflectance Data for Synthetic Optical Flow Datasets}, booktitle = {Advances in Visual Computing - 12th International Symposium, {ISVC} 2016, Las Vegas, NV, USA, December 12-14, 2016, Proceedings, Part {I}}, year = {2016}, abstract = {Optical flow ground truth generated by computer graphics has many advantages. For example, we can systematically vary scene parameters to understand algorithm sensitivities. But is synthetic ground truth realistic enough? Appropriate material models have been established as one of the major challenges for the creation of synthetic datasets: previous research has shown that highly sophisticated reflectance field acquisition methods yield results, which various optical flow methods cannot distinguish from real scenes. However, such methods are costly both in acquisition and rendering time and thus infeasible for large datasets. In this paper we find the simplest reflectance models (RM) for different groups of materials which still provide sufficient accuracy for optical flow performance analysis. It turns out that a spatially varying Phong RM is sufficient for simple materials. Normal estimation combined with Anisotropic RM can handle even very complex materials.}, doi = {10.1007/978-3-319-50835-1_8}, author = {G{\"u}ssefeld, Burkhard and Katrin Honauer and Daniel Kondermann} } @proceedings {kandemir_15_cell, title = {Cell event detection in phase-contrast microscopy sequences from few annotations}, volume = {LNCS 9351}, year = {2015}, pages = {316-323}, publisher = {Springer}, doi = {10.1007/978-3-319-24574-4_38}, author = {Kandemir, M. and Fred A. 
Hamprecht} } @article {krall2015, title = {Comparative heat and gas exchange measurements in the Heidelberg Aeolotron, a large annular wind-wave tank}, journal = {Ocean Sci.}, volume = {11}, year = {2015}, pages = {111--120}, doi = {10.5194/os-11-111-2015}, author = {Leila Nagel and Kerstin Ellen Krall and Bernd J{\"a}hne} } @article {kappes_15_comparative, title = {A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems}, journal = {International Journal of Computer Vision}, year = {2015}, note = {1}, pages = {1-30}, doi = {10.1007/s11263-015-0809-x}, author = {J{\"o}rg H. Kappes and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, S. and Dhruv Batra and Kim, S. and Bernhard X. Kausler and Thorben Kr{\"o}ger and Lellmann, J. and Komodakis, N. and Savchynskyy, B. and Carsten Rother} } @article {Kappes2015, title = {A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems}, journal = {International Journal of Computer Vision}, volume = {115}, number = {2}, year = {2015}, pages = {155{\textendash}184}, abstract = {Szeliski et al. published an influential study in 2006 on energy minimization methods for Markov random fields. This study provided valuable insights in choosing the best optimization technique for certain classes of problems. While these insights remain generally useful today, the phenomenal success of random field models means that the kinds of inference problems that have to be solved changed significantly. Specifically, the models today often include higher order interactions, flexible connectivity structures, large label-spaces of different cardinalities, or learned energy tables. To reflect these changes, we provide a modernized and enlarged study. We present an empirical comparison of more than 27 state-of-the-art optimization techniques on a corpus of 2453 energy minimization instances from diverse applications in computer vision. To ensure reproducibility, we evaluate all methods in the OpenGM 2 framework and report extensive results regarding runtime and solution quality. Key insights from our study agree with the results of Szeliski et al. for the types of models they studied. However, on new and challenging types of models our findings disagree and suggest that polyhedral methods and integer programming solvers are competitive in terms of runtime and solution quality over a large range of model types.}, keywords = {Benchmark, Combinatorial optimization, Discrete graphical models}, issn = {15731405}, doi = {10.1007/s11263-015-0809-x}, url = {http://hci.iwr.uni-heidelberg.de/opengm2/}, author = {Kappes, J{\"o}rg H and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, Sebastian and Dhruv Batra and Kim, Sungwoong and Kausler, Bernhard X and Kr{\"o}ger, Thorben and Lellmann, Jan and Komodakis, Nikos and Savchynskyy, Bogdan and Carsten Rother} } @article {Rathke2015, title = {A Computational Approach to Log-Concave Density Estimation}, journal = {An. St. Univ. Ovidius Constanta}, volume = {23}, number = {3}, year = {2015}, pages = {151-166}, author = {Rathke, F. and Christoph Schn{\"o}rr} } @conference {Silvestri2015, title = {A Convex Relaxation Approach to the Affine Subspace Clustering Problem}, booktitle = {Proc.~GCPR}, year = {2015}, author = {Silvestri, F. and Reinelt, G. and Christoph Schn{\"o}rr} } @conference {maierhein2014, title = {Can masses of non-experts train highly accurate image classifiers? A crowdsourcing approach to instrument segmentation in laparoscopic images}, booktitle = {MICCAI}, year = {2014}, author = {Lena Maier-Hein and Sven Mersmann and Daniel Kondermann and Bodenstedt, S. and Sanchez, A. and C. Stock and Kenngott, H. and Eisenmann, M. and Speidel, S.} } @conference {welbl_14_casting, title = {Casting Random Forests as Artificial Neural Networks (and Profiting from It)}, booktitle = {GCPR. Proceedings}, number = {8753}, year = {2014}, note = {1}, pages = {765-771}, doi = {10.1007/978-3-319-11752-2_66}, author = {Welbl, J.} } @conference {zhang_14_cell, title = {Cell detection and segmentation using correlation clustering}, booktitle = {MICCAI. Proceedings}, number = {8673}, year = {2014}, note = {1}, pages = {9-16}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-319-10404-1_2}, author = {Zhang, C. and Julian Yarkony and Fred A. Hamprecht} } @article {nagel2014a, title = {Comparative heat and gas exchange measurements in the Heidelberg Aeolotron, a large annular wind-wave tank}, journal = {Ocean Sci. Discuss.}, volume = {11}, year = {2014}, pages = {1691--1718}, doi = {10.5194/osd-11-1691-2014}, author = {Leila Nagel and Kerstin Ellen Krall and Bernd J{\"a}hne} } @article {kappes_14_comparative, title = {A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems}, journal = {CoRR}, year = {2014}, note = {1}, url = {http://arxiv.org/abs/1404.0533}, author = {J{\"o}rg H. Kappes and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, S. and Dhruv Batra and Kim, S. and Bernhard X. Kausler and Thorben Kr{\"o}ger and Lellmann, J. and Komodakis, N. and Savchynskyy, B.
and Carsten Rother} } @article {kappes-1014-bench-arxiv, title = {A Comparative Study of Modern Inference Techniques for Structured Discrete Energy Minimization Problems}, journal = {CoRR}, volume = {abs/1404.0533}, year = {2014}, url = {http://hci.iwr.uni-heidelberg.de/opengm2/}, author = {J{\"o}rg H. Kappes and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, Sebastian and Dhruv Batra and Kim, Sungwoong and Bernhard X. Kausler and Thorben Kr{\"o}ger and Lellmann, Jan and Komodakis, Nikos and Savchynskyy, Bogdan and Carsten Rother} } @article {kandemir_14_computer-aided, title = {Computer-aided diagnosis from weak supervision: A benchmarking study}, journal = {Computerized Medical Imaging and Graphics}, volume = {42}, year = {2014}, note = {1}, pages = {44-50}, doi = {10.1016/j.compmedimag.2014.11.010}, author = {Kandemir, M. and Fred A. Hamprecht} } @phdthesis {meister2014, title = {On Creating Reference Data for Performance Analysis in Image Processing}, year = {2014}, publisher = {IWR, Fakult{\"a}t f{\"u}r Physik und Astronomie, Univ. Heidelberg}, doi = {10.11588/heidok.00016193}, url = {http://www.ub.uni-heidelberg.de/archiv/16193}, author = {Stephan Meister} } @conference {maierhein2014a, title = {Crowdsourcing for reference correspondence generation in endoscopic images}, booktitle = {MICCAI}, year = {2014}, author = {Lena Maier-Hein and Sven Mersmann and Daniel Kondermann and C. Stock and Kenngott, H. and Sanchez, A. and Wagner, M. and Preukschas, A. and Wekerle, A. -L. and Helfert, S. and Bodenstedt, S. and Speidel, S.} } @conference {beier_14_cut, title = {Cut, Glue and Cut: A Fast, Approximate Solver for Multicut Partitioning}, booktitle = {2014 {IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR} 2014, Columbus, OH, USA, June 23-28, 2014}, year = {2014}, doi = {10.1109/CVPR.2014.17}, url = {http://dx.doi.org/10.1109/CVPR.2014.17}, author = {Thorsten Beier and Thorben Kr{\"o}ger and J{\"o}rg H. Kappes and Ullrich K{\"o}the and Fred A. Hamprecht} } @article {mersmann2013, title = {Calibration of time-of-flight cameras for accurate intraoperative surface reconstruction}, journal = {Med. Phys.}, volume = {40}, year = {2013}, pages = {082701}, doi = {10.1118/1.4812889}, author = {Sven Mersmann and Alexander Seitel and Michael Erz and Bernd J{\"a}hne and Nickel, Felix and Mieth, Markus and Mehrabi, Arianeb and Lena Maier-Hein} } @article {lenzen_13_class, title = {A Class of Quasi-Variational Inequalities for Adaptive Image Denoising and Decomposition}, journal = {Computational Optimization and Applications (COAP)}, volume = {54 (2)}, year = {2013}, note = {1}, pages = {371-398}, doi = {10.1007/s10589-012-9456-0}, author = {Frank Lenzen and Florian Becker and Lellmann, J.
and Stefania Petra and Christoph Schn{\"o}rr} } @article {Lenzen-et-al-13, title = {A class of quasi-variational inequalities for adaptive image denoising and decomposition}, journal = {Computational Optimization and Applications}, volume = {54}, number = {2}, year = {2013}, pages = {371-398}, publisher = {Springer Netherlands}, issn = {0926-6003}, url = {http://dx.doi.org/10.1007/s10589-012-9456-0}, author = {Frank Lenzen and Florian Becker and Lellmann, Jan and Stefania Petra and Christoph Schn{\"o}rr} } @article {Breitenreicher2012, title = {COAL: a generic modelling and prototyping framework for convex optimization problems of variational image analysis}, journal = {Optimization Methods and Software}, volume = {28}, number = {5}, year = {2013}, note = {Projectpage: http://sourceforge.net/projects/coalproject/}, pages = {1081-1094}, doi = {10.1080/10556788.2012.672571}, url = {http://www.tandfonline.com/doi/abs/10.1080/10556788.2012.672571}, author = {Breitenreicher, Dirk and Lellmann, Jan and Christoph Schn{\"o}rr} } @conference {kappes_13_comparative, title = {A Comparative Study of Modern Inference Techniques for Discrete Energy Minimization Problems}, booktitle = {CVPR 2013. Proceedings}, year = {2013}, note = {1}, doi = {10.1109/CVPR.2013.175}, author = {J{\"o}rg H. Kappes and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, S. and Dhruv Batra and Sungwoong, K. and Bernhard X. Kausler and Lellmann, J. and Komodakis, N. and Carsten Rother} } @conference {Kappes-2013-benchmark, title = {A Comparative Study of Modern Inference Techniques for Discrete Energy Minimization Problem}, booktitle = {CVPR}, year = {2013}, author = {J{\"o}rg H. Kappes and Bj{\"o}rn Andres and Fred A. Hamprecht and Christoph Schn{\"o}rr and Nowozin, Sebastian and Dhruv Batra and Kim, Sungwoong and Bernhard X. Kausler and Lellmann, Jan and Komodakis, Nikos and Carsten Rother} } @conference {schiegg_13_conservation, title = {Conservation Tracking}, booktitle = {ICCV 2013. Proceedings}, year = {2013}, note = {1}, pages = {2928--2935}, doi = {10.1109/ICCV.2013.364}, author = {Schiegg, M. and Hanslovsky, P. and Bernhard X. Kausler and Hufnagel, L. and Fred A. Hamprecht} } @booklet {SchmitzerSchnoerr-ShapeMeasures2013, title = {Contour Manifolds and Optimal Transport}, year = {2013}, note = {preprint}, author = {Bernhard Schmitzer and Christoph Schn{\"o}rr} } @article {SwobodaSchnoerr2013, title = {Convex Variational Image Restoration with Histogram Priors}, journal = {SIAM J.~Imag.~Sci.}, volume = {6}, number = {3}, year = {2013}, pages = {1719-1735}, author = {Swoboda, P. and Christoph Schn{\"o}rr} } @article {maco_13_correlative, title = {Correlative in vivo 2 photon and focused ion beam scanning electron microscopy of cortical neurons}, journal = {PloS one}, volume = {8 (2)}, year = {2013}, doi = {10.1371/journal.pone.0057405}, author = {Maco, B. and Holtmaat, A. and Cantoni, M. and Anna Kreshuk and Christoph N. Straehle and Fred A. Hamprecht and G. W. 
Knott} } @phdthesis {meister_13_on, title = {On Creating Reference Data for Performance Analysis in Image Processing}, year = {2013}, note = {1}, publisher = {University of Heidelberg}, author = {Stephan Meister} } @article {Petra2013, title = {Critical Parameter Values and Reconstruction Properties of Discrete Tomography: Application to Experimental Fluid Dynamics}, journal = {Fundamenta Informaticae}, volume = {125}, year = {2013}, pages = {285--312}, author = {Stefania Petra and Christoph Schn{\"o}rr and Schr{\"o}der, A.} } @article {geese_12_cnn, title = {CNN Based Dark Signal Non-Uniformity Estimation}, journal = {CNNA}, year = {2012}, pages = {1-6}, doi = {10.1109/CNNA.2012.6331408}, author = {Geese, M. and Bernd J{\"a}hne and Paul Ruhnau} } @conference {geese2012, title = {CNN based dark signal non-uniformity estimation}, booktitle = {Cellular Nanoscale Networks and Their Applications (CNNA), 2012 13th International Workshop on}, year = {2012}, pages = {1--6}, doi = {10.1109/CNNA.2012.6331408}, author = {Geese, M. and Paul Ruhnau and Bernd J{\"a}hne} } @booklet {Petra-Schnoerr-12b, title = {Critical Parameter Values and Reconstruction Properties of Discrete Tomography: Application to Experimental Fluid Dynamics}, year = {2012}, month = {September 19}, url = {http://arxiv.org/abs/1209.4316}, author = {Petra, S. and Schn{\"o}rr, C. and Schr{\"o}der, A.} } @conference {Shekhovtsov2012, title = {Curvature prior for MRF-based segmentation and shape inpainting}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {7476 LNCS}, year = {2012}, pages = {41{\textendash}51}, abstract = {Most image labeling problems such as segmentation and image reconstruction are fundamentally ill-posed and suffer from ambiguities and noise. Higher-order image priors encode high-level structural dependencies between pixels and are key to overcoming these problems. However, in general these priors lead to computationally intractable models. This paper addresses the problem of discovering compact representations of higher-order priors which allow efficient inference. We propose a framework for solving this problem that uses a recently proposed representation of higher-order functions which are encoded as lower envelopes of linear functions. Maximum a Posterior inference on our learned models reduces to minimizing a pairwise function of discrete variables. We show that our framework can learn a compact representation that approximates a low curvature shape prior and demonstrate its effectiveness in solving shape inpainting and image segmentation problems. {\textcopyright} 2012 Springer-Verlag.}, isbn = {9783642327162}, issn = {03029743}, doi = {10.1007/978-3-642-32717-9_5}, url = {http://arxiv.org/abs/1109.1480}, author = {Shekhovtsov, Alexander and Kohli, Pushmeet and Carsten Rother} } @conference {straehle_11_carving, title = {Carving: Scalable Interactive Segmentation of Neural Volume Electron Microscopy Images}, booktitle = {MICCAI 2011, Proceedings.}, volume = {6891}, year = {2011}, note = {1}, pages = {653-660}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-23623-5_82}, author = {Christoph N. Straehle and Ullrich K{\"o}the and G. W. Knott and Fred A. Hamprecht} } @phdthesis {erz2011, title = {Charakterisierung von Laufzeitkamerasystemen f{\"u}r Lumineszenzlebensdauermessungen}, volume = {Dissertation}, year = {2011}, publisher = {IWR, Fakult{\"a}t f{\"u}r Physik und Astronomie, Univ.
Heidelberg}, url = {http://www.ub.uni-heidelberg.de/archiv/11598}, author = {Michael Erz} } @conference {rocholz2011a, title = {Combined visualization of wind waves and water surface temperature}, booktitle = {Gas Transfer at Water Surfaces 2010}, year = {2011}, pages = {496--506}, doi = {10.5281/zenodo.14957}, url = {http://hdl.handle.net/2433/156156}, author = {Roland Rocholz and Sven Wanner and Uwe Schimpf and Bernd J{\"a}hne}, editor = {S. Komori and W. R. McGillis and R. Kurose} } @conference {kraeuter2011, title = {A comparative lab study of transfer velocities of volatile tracers with widely varying solubilities}, booktitle = {DPG Fr{\"u}hjahrstagung Dresden, Fachverband Umweltphysik}, year = {2011}, abstract = {The solubility of a volatile substance in water has a decisive influence on the gas exchange between the ocean and the atmosphere. For substances with a very high solubility, the exchange is controlled by diffusion in the air-side boundary layer; for those with a very low solubility, by the water-side boundary layer. For many environmentally relevant substances (e.g. acetone, acetaldehyde, acetonitrile), however, it is an interplay of both processes. This combination of processes has not yet been investigated experimentally, and only simple models exist that account for the intermittency of the processes. In a first laboratory experiment at the Aeolotron, an annular wind-wave tank, the transfer resistances of many gases with different solubilities were determined at various wind speeds (1.4 m/s to 8.4 m/s). The dimensionless solubilities of the gases used covered a range of 5 orders of magnitude. The gas concentrations were measured by FTIR spectroscopy (Fourier Transform Infrared Spectroscopy) and with a PTR-MS (Proton Transfer Reaction - Mass Spectrometer). The partitioning of the transfer resistance of gases of intermediate solubility into an air-side and a water-side part could be demonstrated.}, doi = {10.5281/zenodo.12327}, url = {http://www.dpg-verhandlungen.de/year/2011/conference/dresden/part/up/session/1/contribution/29}, author = {Christine Kr{\"a}uter and Kerstin E. Richter and Bernd J{\"a}hne and Evridiki Mesarchaki and Jonathan Williams} } @conference {kaster_11_comparative, title = {Comparative Validation of Graphical Models for Learning Tumor Segmentations from Noisy Manual Annotations}, booktitle = {LNCS}, volume = {LNCS 6533}, year = {2011}, note = {1}, pages = {74-85}, publisher = {Springer, Heidelberg}, organization = {Springer, Heidelberg}, doi = {10.1007/978-3-642-18421-5_8}, author = {F. O. Kaster and M.-A. Weber and Fred A. Hamprecht}, editor = {Bjoern H. Menze and Langs, G. and Criminisi, A. and Tu, Z.} } @article {Lellmann-Schnoerr-SIIMS-11, title = {Continuous Multiclass Labeling Approaches and Algorithms}, journal = {SIAM J.~Imag.~Sci.}, volume = {4}, number = {4}, year = {2011}, pages = {1049-1096}, author = {Lellmann, J. and Christoph Schn{\"o}rr} } @article {Lellmann2011corr, title = {Continuous Multiclass Labeling Approaches and Algorithms}, journal = {CoRR}, volume = {abs/1102.5448}, year = {2011}, url = {http://arxiv.org/abs/1102.5448}, author = {Lellmann, J. and Schn{\"o}rr, C.} } @conference {schlecht:BMVC:2011, title = {Contour-based Object Detection}, booktitle = {BMVC}, year = {2011}, pages = {1--9}, author = {Schlecht, J.
and Bj{\"o}rn Ommer} } @techreport {rocholz2010b, title = {Calibration of the 2010-CISG Setup at the Aeolotron}, year = {2010}, institution = {Institute of Environmental Physics, University of Heidelberg}, author = {Roland Rocholz} } @phdthesis {hanselmann_10_computational, title = {Computational Methods for the Analysis of Mass Spectrometry Images}, year = {2010}, note = {1}, publisher = {University of Heidelberg}, author = {Hanselmann, M.} } @article {kirchner_10_computational, title = {Computational Protein Profile Similarity Screening for Quantitative Mass Spectrometry Experiments}, journal = {Bioinformatics}, volume = {26 (1)}, year = {2010}, note = {1}, pages = {77-83}, doi = {10.1093/bioinformatics/btp607}, author = {Kirchner, M. and B. Y. Renard and Ullrich K{\"o}the and Pappin, D. J. and Fred A. Hamprecht and Judith A. J. Steen and Steen, H.} } @mastersthesis {nair_10_construction, title = {Construction and analysis of random tree ensembles}, year = {2010}, school = {University of Heidelberg}, author = {Nair, R.} } @techreport {Lellmann2010, title = {Continuous Multiclass Labeling Approaches and Algorithms}, year = {2010}, month = {Feb.}, institution = {Univ. of Heidelberg}, type = {Tech. Rep.}, url = {http://www.ub.uni-heidelberg.de/archiv/10460/}, author = {Lellmann, J. and Schn{\"o}rr, C.} } @conference {Vicente2010, title = {Cosegmentation revisited: Models and optimization}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {6312 LNCS}, number = {PART 2}, year = {2010}, pages = {465{\textendash}479}, abstract = {The problem of cosegmentation consists of segmenting the same object (or objects of the same class) in two or more distinct images. Recently a number of different models have been proposed for this problem. However, no comparison of such models and corresponding optimization techniques has been done so far. We analyze three existing models: the L1 norm model of Rother et al. [1], the L2 norm model of Mukherjee et al. [2] and the "reward" model of Hochbaum and Singh [3]. We also study a new model, which is a straightforward extension of the Boykov-Jolly model for single image segmentation [4]. In terms of optimization, we use a Dual Decomposition (DD) technique in addition to optimization methods in [1,2]. Experiments show a significant improvement of DD over published methods. Our main conclusion, however, is that the new model is the best overall because it: (i) has fewest parameters; (ii) is most robust in practice, and (iii) can be optimized well with an efficient EM-style procedure. {\textcopyright} 2010 Springer-Verlag.}, isbn = {3642155510}, issn = {16113349}, doi = {10.1007/978-3-642-15552-9_34}, author = {Vicente, Sara and Kolmogorov, Vladimir and Carsten Rother} } @conference {kaster_09_classification, title = {Classification of Spectroscopic Images in the DIROlab Environment}, booktitle = {World Congress on Medical Physics and Biomedical Engineering, September 7 - 12, 2009, Munich, Germany}, volume = {25/V}, number = {25050252}, year = {2009}, pages = {252--255}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-03904-1_70}, author = {F. O. Kaster and B. Michael Kelm and C. M. Zechmann and M.-A. Weber and Fred A. 
Hamprecht and Nix, O.}, editor = {D{\"o}ssel, Olaf and Schlegel, Wolfgang C.} } @article {menze_09_comparison, title = {A Comparison of Random Forest and its Gini Importance with Standard Chemometric Methods for the Feature Selection and Classification of Spectral Data}, journal = {BMC Bioinformatics}, volume = {10:213}, year = {2009}, note = {1}, doi = {10.1186/1471-2105-10-213}, author = {Bjoern H. Menze and B. Michael Kelm and Masuch, R. and Himmelreich, U. and Bachert, P. and Petrich, W. and Fred A. Hamprecht} } @conference {keraenen:2009, title = {Computational Analysis of Quantitative Changes in Gene Expression and Embryo Morphology between Species}, booktitle = {Evolution-The Molecular Landscape}, year = {2009}, author = {Ker{\"a}nen, S. V. E. and DePace, A. and Luengo Hendriks, C. L. and Fowlkes, C. and Arbelaez, P. and Bj{\"o}rn Ommer and Brox, T. and Henriquez, C. and Wunderlich, Z. and Eckenrode, K. and B. Fischer and Hammonds, A. and Celniker, S. E.} } @conference {lauer_09_continuous, title = {A Continuous Optimization Framework for Hybrid System Identification}, booktitle = {submitted to Automatica}, year = {2009}, author = {Lauer, F. and Bloch, G. and Vidal, R.} } @phdthesis {Gosch-09, title = {Contour Methods for View Point Tracking}, year = {2009}, publisher = {University of Heidelberg}, type = {phd}, url = {http://www.ub.uni-heidelberg.de/archiv/9684/}, author = {Christian Gosch} } @article {Yuan-et-al-JMIV-09, title = {Convex Hodge Decomposition and Regularization of Image Flows}, journal = {J.~Math.~Imag.~Vision}, volume = {33}, number = {2}, year = {2009}, pages = {169-177}, author = {Yuan, Jing and Christoph Schn{\"o}rr and Steidl, Gabriele} } @conference {lellmann_09_convex2, title = {Convex Multi-Class Image Labeling by Simplex-Constrained Total Variation}, booktitle = {Scale Space and Variational Methods in Computer Vision (SSVM 2009)}, volume = {5567}, year = {2009}, note = {1}, pages = {150-162}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-642-02256-2_13}, author = {Lellmann, J. and J{\"o}rg H. Kappes and Yuan, J. and Florian Becker and Christoph Schn{\"o}rr and M{\'o}rken, K. and Lysaker, M.}, editor = {Tai, X.-C. and Lie, K.-A.} } @conference {Lellmann-et-al-09a, title = {Convex Multi-Class Image Labeling by Simplex-Constrained Total Variation}, booktitle = {Scale Space and Variational Methods in Computer Vision (SSVM 2009)}, volume = {5567}, year = {2009}, pages = {150-162}, publisher = {Springer}, organization = {Springer}, author = {Lellmann, J. and J{\"o}rg H. Kappes and Yuan, J. and Florian Becker and Christoph Schn{\"o}rr}, editor = {Tai, X.-C. and M{\'o}rken, K. and Lie, K.-A. and Lysaker, M.} } @conference {lellmann_09_convex, title = {Convex Optimization for Multi-Class Image Labeling with a Novel Family of Total Variation Based Regularizers}, booktitle = {Proceedings of the IEEE Conference on Computer Vision (ICCV 09) Kyoto, Japan}, year = {2009}, note = {1}, pages = {646-653}, doi = {10.1109/ICCV.2009.5459176}, author = {Lellmann, J. and Florian Becker and Christoph Schn{\"o}rr} } @conference {Lellmann2009a, title = {Convex Optimization for Multi-Class Image Labeling with a Novel Family of Total Variation Based Regularizers}, booktitle = {IEEE International Conference on Computer Vision (ICCV)}, year = {2009}, pages = {646 -- 653}, author = {Lellmann, J. 
and Florian Becker and Christoph Schn{\"o}rr} } @article {hering2009, title = {Correlated speckle noise in white-light interferometry: theoretical analysis of measurement uncertainty}, journal = {Appl. Optics}, volume = {48}, year = {2009}, pages = {525--538}, abstract = {The partial coherent illumination of the specimen, which is required for white-light interferometric measurements of optically rough surfaces, directly leads to speckle. The electric field of such speckle patterns strongly fluctuates in amplitude and phase. This spatially correlated noise influences the accuracy of the measuring device. Although a variety of noise sources in white-light interferometry has been studied in recent years, they do not account for spatial correlation and, hence, they cannot be applied to speckle noise. Thus, we derive a new model enabling quantitative predictions for measurement uncertainty caused by speckle. The model reveals that the accuracy can be attributed mainly to the degree of spatial correlation, i.e., the average size of a speckle, and to the coherence length of the light source. The same parameters define the signal-to-noise ratio in the spectral domain. The model helps to design filter functions that are perfectly adapted to the noise characteristics of the respective device, thus improving the accuracy of postprocessing algorithms for envelope detection. The derived expressions are also compared to numerical simulations and experimental data of two different types of interferometers. These results are a first validation of the theoretical considerations of this article.}, doi = {10.1364/AO.48.000525}, author = {Marco Hering and Klaus K{\"o}rner and Bernd J{\"a}hne} } @conference {fehr_09_cross-correlation, title = {Cross-Correlation and Rotation Estimation of Local 3D Vector FieldPatches}, booktitle = {Proceedings of the ISVC 2009, Part I}, volume = {5875}, number = {1}, year = {2009}, note = {1}, pages = {287-296}, publisher = {Springer}, organization = {Springer}, author = {Fehr, J. and Reisert, M. and Burkhardt, H.}, editor = {G. Bebis and et al.} } @article {Szeliski2008, title = {A comparative study of energy minimization methods for Markov random fields with smoothness-based priors}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {30}, number = {6}, year = {2008}, pages = {1068{\textendash}1080}, publisher = {Springer-Verlag}, abstract = {Among the most exciting advances in early vision has been the development of efficient energy minimization algorithms for pixel-labeling tasks such as depth or texture computation. It has been known for decades that such problems can be elegantly expressed as Markov random fields, yet the resulting energy minimization problems have been widely viewed as intractable. Recently, algorithms such as graph cuts and loopy belief propagation (LBP) have proven to be very powerful: for example, such methods form the basis for almost all the top-performing stereo methods. However, the tradeoffs among different energy minimization algorithms are still not well understood. In this paper we describe a set of energy minimization benchmarks and use them to compare the solution quality and running time of several common energy minimization algorithms. We investigate three promising recent methods graph cuts, LBP, and tree-reweighted message passing in addition to the well-known older iterated conditional modes (ICM) algorithm. 
Our benchmark problems are drawn from published energy functions used for stereo, image stitching, interactive segmentation, and denoising. We also provide a general-purpose software interface that allows vision researchers to easily switch between optimization methods. Benchmarks, code, images, and results are available at http://vision.middlebury.edu/ MRF/. {\textcopyright} 2008 IEEE.}, keywords = {Belief propagation, Global optimization, Graph Cuts, Markov random fields, Performance evaluation}, issn = {01628828}, doi = {10.1109/TPAMI.2007.70844}, url = {http://vision.middlebury.edu/MRF.}, author = {Szeliski, Richard and Zabih, Ramin and Scharstein, Daniel and Veksler, Olga and Kolmogorov, Vladimir and Agarwala, Aseem and Tappen, Marshall and Carsten Rother} } @article {Szeliski2008a, title = {A comparative study of energy minimization methods for Markov random fields with smoothness-based priors}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {30}, number = {6}, year = {2008}, month = {jun}, pages = {1068{\textendash}1080}, abstract = {Among the most exciting advances in early vision has been the development of efficient energy minimization algorithms for pixel-labeling tasks such as depth or texture computation. It has been known for decades that such problems can be elegantly expressed as Markov random fields, yet the resulting energy minimization problems have been widely viewed as intractable. Recently, algorithms such as graph cuts and loopy belief propagation (LBP) have proven to be very powerful: for example, such methods form the basis for almost all the top-performing stereo methods. However, the tradeoffs among different energy minimization algorithms are still not well understood. In this paper we describe a set of energy minimization benchmarks and use them to compare the solution quality and running time of several common energy minimization algorithms. We investigate three promising recent methods graph cuts, LBP, and tree-reweighted message passing in addition to the well-known older iterated conditional modes (ICM) algorithm. Our benchmark problems are drawn from published energy functions used for stereo, image stitching, interactive segmentation, and denoising. We also provide a general-purpose software interface that allows vision researchers to easily switch between optimization methods. Benchmarks, code, images, and results are available at http://vision.middlebury.edu/ MRF/. {\textcopyright} 2008 IEEE.}, keywords = {Belief propagation, Global optimization, Graph Cuts, Markov random fields, Performance evaluation}, issn = {01628828}, doi = {10.1109/TPAMI.2007.70844}, author = {Szeliski, Richard and Zabih, Ramin and Scharstein, Daniel and Veksler, Olga and Kolmogorov, Vladimir and Agarwala, Aseem and Tappen, Marshall and Carsten Rother} } @conference {haja2008b, title = {A Comparison of Region Detectors for Tracking}, booktitle = {Pattern Recognition, Proceedings 30th DAGM Symposium, Munich, Germany, June 2008}, volume = {5096}, year = {2008}, pages = {112--121}, abstract = {In this work, the performance of five popular region detectors is compared in the context of tracking. Firstly, conventional nearest-neighbor matching based on the similarity of region descriptors is used to assemble trajectories from unique region-to-region correspondences. 
Based on carefully estimated homographies between planar object surfaces in neighboring frames of an image sequence, both their localization accuracy and length, as well as the percentage of successfully tracked regions, are evaluated and compared. The evaluation results serve as a supplement to existing studies and facilitate the selection of appropriate detectors suited to the requirements of a specific application. Secondly, a novel tracking method is presented, which integrates for each region all potential matches into directed multi-edge graphs. From these, trajectories are extracted using Dijkstra{\textquoteright}s algorithm. It is shown that the resulting localization error is significantly lower than with nearest-neighbor matching while, at the same time, the percentage of tracked regions is increased.}, doi = {10.1007/978-3-540-69321-5_12}, author = {Andreas Haja and Steffen Abraham and Bernd J{\"a}hne}, editor = {Gerhard Rigoll} } @article {hanselmann_08_concise, title = {Concise Representation of MS Images by Probabilistic Latent Semantic Analysis}, journal = {Analytical Chemistry}, volume = {80}, number = {24}, year = {2008}, note = {1}, pages = {9649-9658}, doi = {10.1021/ac801303x}, author = {Hanselmann, M. and Kirchner, M. and B. Y. Renard and Amstalden, E. R. and Glunde, K. and Heeren, R. M. A. and Fred A. Hamprecht} } @conference {Fundana-Heyden-Gosch-Schnoerr-2008, title = {Continuous Graph Cuts for Prior-Based Object Segmentation}, booktitle = {19th Int.~Conf.~Patt.~Recog.~(ICPR)}, year = {2008}, pages = {1--4}, author = {Fundana, Ketut and Heyden, Anders and Gosch, Christian and Christoph Schn{\"o}rr} } @conference {Yuan-et-al-2008b, title = {Convex Hodge Decomposition of Image Flows}, booktitle = {Pattern Recognition -- 30th DAGM Symposium}, volume = {5096}, year = {2008}, pages = {416--425}, publisher = {Springer Verlag}, organization = {Springer Verlag}, author = {Yuan, Jing and Steidl, Gabriele and Christoph Schn{\"o}rr} } @techreport {Lellmann2008, title = {Convex Multi-Class Image Labeling by Simplex-Constrained Total Variation}, year = {2008}, institution = {IWR, University of Heidelberg}, url = {http://www.ub.uni-heidelberg.de/archiv/8759/}, author = {Lellmann, J. and J{\"o}rg H. Kappes and Yuan, J. and Florian Becker and Christoph Schn{\"o}rr} } @conference {menze_07_classification, title = {Classification of multispectral ASTER imagery in the archaeological survey for settlement sites of the Near East}, booktitle = {Proc 10th International Symposium on Physical Measurements and Signature in Remote Sensing (ISPMRS 07), Davos, Switzerland}, year = {2007}, note = {1}, publisher = {International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences}, organization = {International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences}, author = {Bjoern H. Menze and Ur, J. A.} } @conference {Kannan2007, title = {Clustering appearance and shape by learning jigsaws}, booktitle = {Advances in Neural Information Processing Systems}, year = {2007}, pages = {657{\textendash}664}, abstract = {Patch-based appearance models are used in a wide range of computer vision applications. To learn such models it has previously been necessary to specify a suitable set of patch sizes and shapes by hand. In the jigsaw model presented here, the shape, size and appearance of patches are learned automatically from the repeated structures in a set of training images.
By learning such irregularly shaped {\textquoteright}jigsaw pieces{\textquoteright}, we are able to discover both the shape and the appearance of object parts without supervision. When applied to face images, for example, the learned jigsaw pieces are surprisingly strongly associated with face parts of different shapes and scales such as eyes, noses, eyebrows and cheeks, to name a few. We conclude that learning the shape of the patch not only improves the accuracy of appearance-based part detection but also allows for shape-based part detection. This enables parts of similar appearance but different shapes to be distinguished; for example, while foreheads and cheeks are both skin colored, they have markedly different shapes.}, isbn = {9780262195683}, issn = {10495258}, doi = {10.7551/mitpress/7503.003.0087}, author = {Kannan, Anitha and Winn, John and Carsten Rother} } @article {weber_07_comparison, title = {Comparison of correctness of manual and automatic evaluation of MR-spectrum with prostate cancer}, journal = {Der Urologe}, volume = {46}, number = {9}, year = {2007}, pages = {1252}, doi = {10.1007/s00120-007-1488-1}, author = {C. Weber and C. M. Zechmann and B. Michael Kelm and Zamecnik, R. and Hendricks, D. and Waldherr, R. and Fred A. Hamprecht and Delorme, S. and Bachert, P. and Ikinger, U.} } @incollection {jaehne2007b, title = {Complex motion in environmental physics and live sciences}, volume = {3417}, year = {2007}, pages = {92--105}, publisher = {Springer}, abstract = {Image sequence processing techniques are an essential tool for the experimental investigation of dynamical processes such as exchange, growth, and transport processes. These processes constitute much more complex motions than normally encountered in computer vision. In this paper, optical flow based motion analysis is extended into a generalized framework to estimate the motion field and the parameters of dynamic processes simultaneously.
Examples from environmental physics and live sciences illustrate how this framework helps to tackle some key scientific questions that could not be solved without taking and analyzing image sequences.}, doi = {10.1007/978-3-540-69866-1_8}, author = {Bernd J{\"a}hne} } @book {jaehne2007a, title = {Complex Motion, Proceedings of the 1st Workshop, G{\"u}nzburg, October 2004}, volume = {3417}, year = {2007}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/978-3-540-69866-1}, author = {Bernd J{\"a}hne}, editor = {Mester, R. and Hanno Scharr and Erhardt Barth} } @conference {ommer:EMMCVPR:2007, title = {Compositional Object Recognition, Segmentation, and Tracking in Video}, booktitle = {Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition}, volume = {4679}, year = {2007}, pages = {318--333}, publisher = {Springer}, organization = {Springer}, author = {Bj{\"o}rn Ommer and J. M. Buhmann} } @conference {zechmann_06_can, title = {Can man still beat the machine? Automated vs. manual pattern recognition of 3D MRSI data of prostate cancer patients}, booktitle = {Proceedings of the 16th ISMRM}, year = {2006}, author = {C. M. Zechmann and B. Michael Kelm and Zamecnik, P. and Ikinger, U. and Waldherr, R. and R{\"o}ll, S. and Delorme, S. and Fred A. Hamprecht and Bachert, P.} } @conference {kelm_06_claret, title = {CLARET: a tool for fully automated evaluation of MRSI with pattern recognition methods.}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2006 - Algorithmen, Systeme, Anwendungen}, year = {2006}, note = {1}, pages = {51-55}, publisher = {Springer}, organization = {Springer}, doi = {10.1007/3-540-32137-3_7}, url = {http://www.efmi-wg-mip.net/service/bvm2006}, author = {B. Michael Kelm and Bjoern H. Menze and Neff, T. and C. M. Zechmann and Fred A. Hamprecht}, editor = {H. Handels and G. Bebis and et al.} } @inbook {carlsohn_06_spectral, title = {Color image processing}, volume = {7(17)}, year = {2006}, pages = {393-419}, publisher = {CRC Press}, organization = {CRC Press}, chapter = {Spectral Imaging and Applications}, author = {Carlsohn, M. F. and Bjoern H. Menze and B. Michael Kelm and Fred A. Hamprecht and Kercek, A. and Leitner, R. and Polder, G.}, editor = {Lukac, R. and Plataniotis, K.N.} } @conference {kelm_06_combining, title = {Combining Generative and Discriminative Methods for Pixel Classification with Multi-Conditional Learning.}, booktitle = {ICPR 2006}, volume = {2}, year = {2006}, note = {1}, pages = {828-832}, doi = {10.1109/ICPR.2006.384}, author = {B. Michael Kelm and Pal, C. and McCallum, A.} } @conference {Kolmogorov2006a, title = {Comparison of energy minimization algorithms for highly connected graphs}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {3952 LNCS}, year = {2006}, pages = {1{\textendash}15}, abstract = {Algorithms for discrete energy minimization play a fundamental role for low-level vision. Known techniques include graph cuts, belief propagation (BP) and recently introduced tree-reweighted message passing (TRW). So far, the standard benchmark for their comparison has been a 4-connected grid-graph arising in pixel-labelling stereo. This minimization problem, however, has been largely solved: recent work shows that for many scenes TRW finds the global optimum.
Furthermore, it is known that a 4-connected grid-graph is a poor stereo model since it does not take occlusions into account. We propose the problem of stereo with occlusions as a new test bed for minimization algorithms. This is a more challenging graph since it has much larger connectivity, and it also serves as a better stereo model. An attractive feature of this problem is that increased connectivity does not result in increased complexity of message passing algorithms. Indeed, one contribution of this paper is to show that sophisticated implementations of BP and TRW have the same time and memory complexity as that of 4-connected grid-graph stereo. The main conclusion of our experimental study is that for our problem graph cut outperforms both TRW and BP considerably. TRW achieves consistently a lower energy than BP. However, as connectivity increases the speed of convergence of TRW becomes slower. Unlike 4-connected grids, the difference between the energy of the best optimization method and the lower bound of TRW appears significant. This shows the hardness of the problem and motivates future research. {\textcopyright} Springer-Verlag Berlin Heidelberg 2006.}, isbn = {3540338349}, issn = {16113349}, doi = {10.1007/11744047_1}, author = {Kolmogorov, Vladimir and Carsten Rother} } @conference {Heiler-Schnoerr-06a, title = {Controlling Sparseness in Non-negative Tensor Factorization}, booktitle = {Computer Vision -- ECCV 2006}, volume = {3951}, year = {2006}, pages = {56-67}, publisher = {Springer}, organization = {Springer}, author = {Heiler, M. and Christoph Schn{\"o}rr} } @conference {Rother2006, title = {Cosegmentation of image pairs by histogram matching - Incorporating a global constraint into MRFs}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, volume = {1}, year = {2006}, pages = {994{\textendash}1000}, abstract = {We introduce the term cosegmentation which denotes the task of segmenting simultaneously the common parts of an image pair. A generative model for cosegmentation is presented. Inference in the model leads to minimizing an energy with an MRF term encoding spatial coherency and a global constraint which attempts to match the appearance histograms of the common parts. This energy has not been proposed previously and its optimization is challenging and NP-hard. For this problem a novel optimization scheme which we call trust region graph cuts is presented. We demonstrate that this framework has the potential to improve a wide range of research: Object driven image retrieval, video tracking and segmentation, and interactive image editing. The power of the framework lies in its generality: the common part can be a rigid/non-rigid object (or scene), observed from different viewpoints or even similar objects of the same class. {\textcopyright} 2006 IEEE.}, isbn = {0769525970}, issn = {10636919}, doi = {10.1109/CVPR.2006.91}, url = {http://research.microsoft.com/vision/cambridge/}, author = {Carsten Rother and Kolmogorov, Vladimir and Minka, Tom and Blake, Andrew} } @article {jaehne2005a, title = {Combined optical slope/height measurements of short wind waves: principles and calibration}, journal = {Meas. Sci. Technol.}, volume = {16}, number = {10}, year = {2005}, pages = {1937--1944}, abstract = {A novel short wave imaging technique is described. For the first time, it is capable of measuring the wave height and wave slope simultaneously with unprecedented accuracy. A telecentric optical system is used to image the waves so that the image magnification does not change with the wave height and the slope calibration is much less dependent on the position of the image. A telecentric illumination system contains an area-extended LED light source that is placed in the focal plane of a second lens below the water channel. In this way, the wave slope can be coded by the position-dependent intensity of the light source. LEDs with two different wavelengths in the red and near infrared part of the spectrum are used. Because the water column absorbs the two wavelengths differently, the difference in the observed intensities gives the wave height. The paper details the principle of the technique and the calibration procedures.}, issn = {0957-0233}, doi = {10.1088/0957-0233/16/10/008}, author = {Bernd J{\"a}hne and Schmidt, M. and Roland Rocholz} } @article {Neumann-et-al-ML, title = {Combined SVM-based Feature Selection and Classification}, journal = {Machine Learning}, volume = {61}, year = {2005}, pages = {129-150}, author = {Neumann, J. and Schn{\"o}rr, C.
and Steidl, G.} } @incollection {hamprecht_04_classification, title = {Classification}, year = {2004}, pages = {509-519}, publisher = {CRC Press}, edition = {2nd}, author = {Fred A. Hamprecht}, editor = {Bernd J{\"a}hne} } @conference {menze_04_classification, title = {Classification of in vivo magnetic resonance spectra}, booktitle = {Classification in ubiquitous challenge: Proceedings of the GfKl 2004}, year = {2004}, pages = {362-369}, publisher = {Springer}, organization = {Springer}, author = {Bjoern H. Menze and Wormit, M. and Bachert, P. and M. P. Lichy and Schlemmer, H.-P. and Fred A. Hamprecht} } @conference {Yuan-et-al-04b, title = {Convex Set-Based Estimation of Image Flows}, booktitle = {ICPR 2004 {\textendash} 17th Int. Conf. on Pattern Recognition}, volume = {1}, year = {2004}, month = {Aug. 23-26}, pages = {124-127}, publisher = {IEEE}, organization = {IEEE}, address = {Cambridge, UK}, author = {Yuan, J. and Schn{\"o}rr, C. and Kohlberger, T. and Ruhnau, P.} } @conference {ommer:EMMCVPR:2003, title = {A Compositionality Architecture for Perceptual Feature Grouping}, booktitle = {Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition}, volume = {2683}, year = {2003}, pages = {275--290}, publisher = {Springer}, organization = {Springer}, author = {Bj{\"o}rn Ommer and J. M. Buhmann} } @article {hamprecht_02_chemical, title = {Chemical library subset selection algorithms: a unified derivation using spatial statistics}, journal = {Journal of Chemical Information and Computer Sciences}, volume = {42}, year = {2002}, pages = {414-428}, author = {Fred A. Hamprecht and Thiel, W. and van Gunsteren, W. F.} } @conference {long2002, title = {A closer look at short waves generated by wave interactions with adverse currents}, booktitle = {Gas Transfer at Water Surfaces}, volume = {127}, year = {2002}, pages = {121--128}, publisher = {American Geophysical Union}, organization = {American Geophysical Union}, author = {S. R. Long and J. Klinke}, editor = {E. S. Saltzman and M. A. Donelan and R. Wanninkhof and W. M. Drennan} } @conference {Bruhn-et-al-02b, title = {Combining the Advantages of Local and Global Optic Flow Methods}, booktitle = {Pattern Recognition, Proc. 24th DAGM Symposium}, series = {lncs}, volume = {2449}, year = {2002}, pages = {454{\textendash}462}, publisher = {Springer}, organization = {Springer}, address = {Z{\"u}rich, Switzerland}, author = {Bruhn, Andr{\'e}s and Weickert, Joachim and Christoph Schn{\"o}rr}, editor = {van Gool, L.} } @article {haussecker2001, title = {Computing optical flow with physical models of brightness variation}, journal = {IEEE Trans. Pattern Analysis Machine Intelligence}, volume = {23}, number = {6}, year = {2001}, pages = {661--673}, abstract = {Although most optical flow techniques presume brightness constancy, it is well-known that this constraint is often violated, producing poor estimates of image motion. This paper describes a generalized formulation of optical flow estimation based on models of brightness variations that are caused by time-dependent physical processes. These include changing surface orientation with respect to a directional illuminant, motion of the illuminant, and physical models of heat transport in infrared images. With these models, we simultaneously estimate the 2D image motion and the relevant physical parameters of the brightness change model. The estimation problem is formulated using total least squares (TLS), with confidence bounds on the parameters. 
Experiments in four domains, with both synthetic and natural inputs, show how this formulation produces superior estimates of the 2D image motion.}, doi = {10.1109/34.927465}, author = {Horst Hau{\ss}ecker and Fleet, D. J.} } @conference {Keuchel-et-al-01a, title = {Convex Relaxations for Binary Image Partitioning and Perceptual Grouping}, booktitle = {Mustererkennung 2001}, series = {Lect. Notes Comp. Science}, volume = {2191}, year = {2001}, month = {Sept. 12{\textendash}14}, pages = {353{\textendash}360}, publisher = {Springer}, organization = {Springer}, address = {Munich, Germany}, author = {Keuchel, J. and Schellewald, C. and Daniel Cremers and Schn{\"o}rr, C.}, editor = {Radig, B. and Florczyk, S.} } @incollection {wenig2000, title = {Cloud classification analyzing image sequences}, year = {2000}, pages = {652--653}, publisher = {Academic Press}, chapter = {A22}, author = {Mark Wenig and Carsten Leue and Ulrich Platt and Bernd J{\"a}hne and Horst Hau{\ss}ecker} } @article {marxen2000, title = {Comparison of Gaussian particle center estimators and the achievable measurement density for particle tracking velocimetry}, journal = {Exp. Fluids}, volume = {29}, year = {2000}, pages = {145-153}, abstract = {A series of numerical simulations was conducted to investigate the performance of two particle center estimation algorithms for Particle Tracking Velocimetry: a simple three-point Gaussian estimator and a least-square Gaussian. The smallest position error for images with reasonable noise levels was found to be approximately 0.03 pixels for both estimators using particles with diameters of 4 pixels. As both estimators performed equally well, use of the simple three-point Gaussian algorithm is recommended because it executes 100 times faster than the least-square algorithm. The maximum achievable measurement density and accuracy for the three-point Gaussian estimator were determined with a numerical simulation of an Oseen vortex. Uncertainty measures have been introduced to filter out unreliable displacement measurements. It was found that 4 to 5 velocity vectors could be obtained within a 32{\texttimes}32 pixel area with an average displacement error of 0.1 pixels. This doubles the spatial resolution of conventional cross-correlation based Particle Image Velocimetry at comparable accuracy.}, doi = {10.1007/s003489900085}, author = {Michael Marxen and Sullivan, P. E. and Loewen, M. R. and Bernd J{\"a}hne} } @conference {Wulf-et-al-00, title = {On the computational r{\^o}le of the primate retina}, booktitle = {Proc. 2nd ICSC Symposium on Neural Computation (NC 2000)}, year = {2000}, month = {May, 23{\textendash}26}, address = {Berlin, Germany}, author = {Wulf, M. and Stiehl, H.S. and Schn{\"o}rr, C.}, editor = {Bothe, H. and Rojas, R.} } @book {jaehne2000c, title = {Computer Vision and Applications: A Guide for Students and Practitioners}, year = {2000}, pages = {679}, publisher = {Academic Press}, organization = {Academic Press}, author = {Bernd J{\"a}hne and Horst Hau{\ss}ecker} } @incollection {jaehne1999a, title = {Continuous and digital signals}, volume = {2}, year = {1999}, pages = {9--34}, publisher = {Academic Press}, chapter = {2}, author = {Bernd J{\"a}hne and Horst Hau{\ss}ecker and Peter Gei{\ss}ler} } @conference {Heers-et-al-98a, title = {A class of parallel algorithms for nonlinear variational image segmentation}, booktitle = {Proc.
Noblesse Workshop on Non{\textendash}Linear Model Based Image Analysis (NMBIA{\textquoteright}98)}, year = {1998}, month = {July, 1{\textendash}3}, address = {Glasgow, Scotland}, author = {Heers, J. and Schn{\"o}rr, C. and Stiehl, H.S.} } @conference {hering1997, title = {A comprehensive study of algorithms for multidimensional flow field diagnostics}, booktitle = {Proc. Optical 3D Measurement Techniques IV, Zurich, Sept. 29 - Oct. 2, 1997}, year = {1997}, pages = {436--443}, publisher = {Wichmann}, organization = {Wichmann}, author = {Frank Hering and Horst Hau{\ss}ecker and Jochen Dieter and T. Netzsch and Bernd J{\"a}hne}, editor = {A. Gr{\"u}n and H. Kahmen} } @conference {waas1996, title = {Combined height/slope/curvature measurements of short ocean wind waves}, booktitle = {Proc.\ The Air-Sea Interface, Radio and Acoustic Sensing, Turbulence and Wave Dynamics, Marseille, 24--30. June 1993}, year = {1996}, pages = {383--388}, publisher = {RSMAS, University of Miami}, organization = {RSMAS, University of Miami}, doi = {10.5281/zenodo.14000}, author = {Stefan Waas and Bernd J{\"a}hne}, editor = {M. A. Donelan and W. H. Hui and W. J. Plant} } @conference {hering1996b, title = {Combined wave and flow field visualization for investigation of short-wave/long-wave interaction}, booktitle = {Proc.\ The Air-Sea Interface, Radio and Acoustic Sensing, Turbulence and Wave Dynamics, Marseille, 24--30. June 1993}, year = {1996}, pages = {133--138}, publisher = {RSMAS, University of Miami}, organization = {RSMAS, University of Miami}, doi = {10.5281/zenodo.14002}, author = {Frank Hering and Dietmar Wierzimok and W. K. Melville and Bernd J{\"a}hne}, editor = {M. A. Donelan and W. H. Hui and W. J. Plant} } @conference {Schnoerr-icaos-96, title = {Convex Variational Segmentation of Multi-Channel Images}, booktitle = {Proc. 12th Int. Conf. on Analysis and Optimization of Systems: Images, Wavelets and PDE{\textquoteright}s}, series = {Lect. Notes in Control and Information Sciences}, volume = {219}, year = {1996}, month = {June 26-28}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, address = {Paris}, author = {Schn{\"o}rr, C.} } @conference {schmundt1995a, title = {The circular wind wave facilities at the University of Heidelberg}, booktitle = {Air-Water Gas Transfer - Selected papers from the Third International Symposium on Air-Water Gas Transfer}, year = {1995}, pages = {505--516}, publisher = {AEON}, organization = {AEON}, doi = {10.5281/zenodo.10404}, author = {Dominik Schmundt and Thomas M{\"u}nsterer and Lauer, H. and Bernd J{\"a}hne}, editor = {E. C. Monahan} } @booklet {waas1994, title = {Combined height/slope/curvature measurements of short ocean wind waves}, year = {1994}, author = {Stefan Waas and Bernd J{\"a}hne} } @conference {jaehne1994b, title = {A comparative analytical study of low-level motion estimators in space-time images}, booktitle = {Proc. 16. DAGM-Symposium Mustererkennung}, year = {1994}, doi = {10.5281/zenodo.14887}, author = {Bernd J{\"a}hne} } @conference {jaehne1992a, title = {Calibration and accuracy of optical slope measurements for short wind waves}, booktitle = {Optics of the Air-Sea Interface: Theory and Measurements}, volume = {1749}, year = {1992}, pages = {222--233}, doi = {10.1117/12.138851}, author = {Bernd J{\"a}hne and H. Schultz}, editor = {L.
Estep} } @conference {waas1992a, title = {Combined slope-height measurements of short wind waves: first results from field and laboratory measurements}, booktitle = {Optics of the Air-Sea Interface: Theory and Measurements}, volume = {1749}, year = {1992}, pages = {295--306}, doi = {10.1117/12.138858}, author = {Stefan Waas and Bernd J{\"a}hne}, editor = {L. Estep} } @conference {halsema1992, title = {Comparisons of backscattering calculations with measurements made in a large wind/wave flume}, booktitle = {Proc. IGARSS{\textquoteright}92}, volume = {2}, year = {1992}, pages = {1451--1453}, doi = {10.1109/IGARSS.1992.578480}, author = {van Halsema, D. and C. J. Calkoen and W. A. Oost and P. Snoeij and J. Vogelzang and Bernd J{\"a}hne} } @article {Schnoerr-ijcv-92, title = {Computation of Discontinuous Optical Flow by Domain Decomposition and Shape Optimization}, journal = {ijcv}, volume = {8}, number = {2}, year = {1992}, pages = {153{\textendash}165}, author = {Schn{\"o}rr, C.} } @conference {jaehne1992, title = {A critical theoretical review of optical techniques for short ocean wave measurements}, booktitle = {Optics of the Air-Sea Interface: Theory and Measurements}, volume = {1749}, year = {1992}, pages = {204--215}, doi = {10.1117/12.138849}, author = {Bernd J{\"a}hne and Stefan Waas and J. Klinke}, editor = {L. Estep} } @conference {snoeij1991, title = {Comparison of microwave backscatter measurements and small-scale surface wave measurements made from the Dutch ocean research tower {\textquoteright}Noordwijk{\textquoteright}}, booktitle = {Proceedings IGARSS {\textquoteright}91}, volume = {3}, year = {1991}, pages = {1289--1292}, doi = {10.1109/IGARSS.1991.579315}, author = {P. Snoeij and van Halsema, D. and W. A. Oost and C. J. Calkoen and J. Vogelzang and Bernd J{\"a}hne} } @conference {Schnoerr-bmvc-90, title = {Computation of Discontinuous Optical Flow by Domain Decomposition and Shape Optimization}, booktitle = {Proc. British Machine Vision Conference}, year = {1990}, month = {sep}, pages = {109{\textendash}114}, address = {Oxford/UK}, author = {Schn{\"o}rr, C.} } @conference {halsema1989b, title = {Comparisons of X-band Radar Backscatter Measurements with Area-extended Wave Slope Measurements made in a large Wind/Wave Tank}, booktitle = {Proc. IGARSS{\textquoteright}89}, volume = {5}, year = {1989}, pages = {2997--3001}, doi = {10.1109/IGARSS.1989.575986}, author = {van Halsema, D. and C. J. Calkoen and W. A. Oost and P. Snoeij and Bernd J{\"a}hne} } @article {lange1982, title = {Comparison between an amplitude-measuring wire and a slope-measuring laser water wave gauge}, journal = {Rev. Sci. Instrum.}, volume = {53}, number = {5}, year = {1982}, pages = {651--655}, abstract = {Capillary waves produced in a laboratory wind wave tunnel have been measured using a wire resistance-type gauge (measuring wave amplitude) and a laser gauge (measuring wave slope). Comparison of power spectra of the gauges shows good agreement to 80 Hz, which is the upper frequency limit of the wire gauge. The upper frequency limit of the laser gauge depends upon laser beam diameter and is about 300 Hz.}, doi = {10.1063/1.1137036}, author = {Lange, P. A. and Bernd J{\"a}hne and Tschiersch, J. and Johann Ilmberger} }