@conference {Kondermann_2016_CVPR_Workshops,
	title = {The HCI Benchmark Suite: Stereo and Flow Ground Truth With Uncertainties for Urban Autonomous Driving},
	booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
	year = {2016},
	month = {June},
	abstract = {Recent advances in autonomous driving require more and more highly realistic reference data, even for difficult situations such as low light and bad weather. We present a new stereo and optical flow dataset to complement existing benchmarks. It was specifically designed to be representative of urban autonomous driving, including realistic, systematically varied radiometric and geometric challenges which were previously unavailable. The accuracy of the ground truth is evaluated based on Monte Carlo simulations yielding full, per-pixel distributions. Interquartile ranges are used as an uncertainty measure to create binary masks for arbitrary accuracy thresholds, and we show that we achieved uncertainties better than those reported for comparable outdoor benchmarks. Binary masks for all dynamically moving regions are supplied with estimated stereo and flow values. An initial public benchmark dataset of 55 manually selected sequences, between 19 and 100 frames long, is made available on a dedicated website featuring interactive tools for database search, visualization, comparison and benchmarking.},
	author = {Kondermann, Daniel and Nair, Rahul and Honauer, Katrin and Krispin, Karsten and Andrulis, Jonas and Brock, Alexander and G{\"u}ssefeld, Burkhard and Rahimimoghaddam, Mohsen and Hofmann, Sabine and Brenner, Claus and J{\"a}hne, Bernd}
}

@conference {Nair2015,
	title = {Reflection modeling for passive stereo},
	booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
	volume = {2015 Inter},
	year = {2015},
	pages = {2291{\textendash}2299},
	abstract = {Stereo reconstruction in the presence of reality faces many challenges that still need to be addressed. This paper considers reflections, which introduce incorrect matches due to the observation violating the diffuse-world assumption underlying the majority of stereo techniques. Unlike most existing work, which employs regularization or robust data terms to suppress such errors, we derive two least-squares models from first principles that generalize diffuse-world stereo and explicitly take reflections into account. These models are parametrized by depth, orientation and material properties, resulting in a total of up to 5 parameters per pixel that have to be estimated. Additionally, large non-local interactions between the viewed and the reflected surface have to be taken into account. These two properties make inference of the model appear prohibitive, but we present evidence that inference is actually possible using a variant of PatchMatch stereo.},
	isbn = {9781467383912},
	issn = {15505499},
	doi = {10.1109/ICCV.2015.264},
	author = {Nair, Rahul and Fitzgibbon, Andrew and Kondermann, Daniel and Rother, Carsten}
}

@inbook {Kondermann-etal-2015-ACCV,
	title = {Stereo Ground Truth with Error Bars},
	booktitle = {Computer Vision {\textendash} ACCV 2014: 12th Asian Conference on Computer Vision, Singapore, Singapore, November 1-5, 2014, Revised Selected Papers, Part V},
	year = {2015},
	pages = {595{\textendash}610},
	publisher = {Springer International Publishing},
	organization = {Springer International Publishing},
	address = {Cham},
	abstract = {Creating stereo ground truth based on real images is a measurement task.
Measurements are never perfectly accurate: the depth at each pixel follows an error distribution. A common way to estimate the quality of measurements is error bars. In this paper we describe a methodology to add error bars to images of previously scanned static scenes. The main challenge for stereo ground truth error estimates based on such data is the nonlinear matching of 2D images to 3D points. Our method uses 2D feature quality, 3D point and calibration accuracy, as well as covariance matrices of bundle adjustments. We sample the reference data error, which is the 3D depth distribution of each point projected into 3D image space. The disparity distribution at each pixel location is then estimated by projecting samples of the reference data error onto the 2D image plane. An analytical Gaussian error propagation is used to validate the results. As proof of concept, we created ground truth for an image sequence with 100 frames. Results show that disparity accuracies well below one pixel can be achieved, albeit with much larger errors at depth discontinuities, mainly caused by uncertain estimates of the camera location.},
	isbn = {978-3-319-16814-2},
	doi = {10.1007/978-3-319-16814-2_39},
	url = {http://dx.doi.org/10.1007/978-3-319-16814-2_39},
	author = {Kondermann, Daniel and Nair, Rahul and Meister, Stephan and Mischler, Wolfgang and G{\"u}ssefeld, Burkhard and Honauer, Katrin and Hofmann, Sabine and Brenner, Claus and J{\"a}hne, Bernd}
}

@conference {kondermann2014,
	title = {Stereo ground truth with error bars},
	booktitle = {Asian Conference on Computer Vision, ACCV 2014},
	year = {2014},
	author = {Kondermann, Daniel and Nair, Rahul and Meister, Stephan and Mischler, Wolfgang and G{\"u}ssefeld, Burkhard and Hofmann, Sabine and Brenner, Claus and J{\"a}hne, Bernd}
}

@conference {gottfried2014,
	title = {Time of flight motion compensation revisited},
	booktitle = {International Conference on Image Processing, ICIP 2014},
	year = {2014},
	author = {Gottfried, Jens-Malte and Nair, Rahul and Meister, Stephan and Garbe, Christoph S. and Kondermann, Daniel}
}

@incollection {Lenzen2013stategies,
	title = {Denoising Strategies for Time-of-Flight Data},
	volume = {8200},
	year = {2013},
	pages = {25-45},
	publisher = {Springer},
	author = {Lenzen, Frank and Kim, Kwang In and Sch{\"a}fer, Henrik and Nair, Rahul and Meister, Stephan and Becker, Florian and Garbe, Christoph S.},
	editor = {Grzegorzek, Marcin and Theobalt, Christian and Kolb, Andreas and Koch, Reinhard}
}

@incollection {Nair2013survey,
	title = {A Survey on Time-of-Flight Stereo Fusion},
	volume = {8200},
	year = {2013},
	pages = {105-127},
	publisher = {Springer},
	author = {Nair, Rahul and Ruhl, Kai and Lenzen, Frank and Meister, Stephan and Sch{\"a}fer, Henrik and Garbe, Christoph S. and Eisemann, Martin and Magnor, Marcus and Kondermann, Daniel},
	editor = {Grzegorzek, Marcin and Theobalt, Christian and Kolb, Andreas and Koch, Reinhard}
}
@incollection {nair2012,
	title = {Ground truth for evaluating time of flight imaging},
	volume = {8200},
	year = {2012},
	pages = {52--74},
	publisher = {Springer},
	doi = {10.1007/978-3-642-44964-2_4},
	author = {Nair, Rahul and Meister, Stephan and Lambers, Martin and Balda, Michael and Hofmann, Hannes and Kolb, Andreas and Kondermann, Daniel and J{\"a}hne, Bernd}
}

@conference {Nair2012highaccuracy,
	title = {High accuracy TOF and stereo sensor fusion at interactive rates},
	booktitle = {Computer Vision{\textendash}ECCV 2012. Workshops and Demonstrations},
	year = {2012},
	pages = {1{\textendash}11},
	publisher = {Springer Berlin Heidelberg},
	organization = {Springer Berlin Heidelberg},
	author = {Nair, Rahul and Lenzen, Frank and Meister, Stephan and Sch{\"a}fer, Henrik and Garbe, Christoph S. and Kondermann, Daniel}
}