@conference{Leistner2019,
  title     = {Learning to Think Outside the Box: Wide-Baseline Light Field Depth Estimation with EPI-Shift},
  booktitle = {Proceedings - 2019 International Conference on 3D Vision, 3DV 2019},
  year      = {2019},
  month     = {sep},
  pages     = {249{\textendash}257},
  abstract  = {We propose a method for depth estimation from light field data, based on a fully convolutional neural network architecture. Our goal is to design a pipeline which achieves highly accurate results for small- and wide-baseline light fields. Since light field training data is scarce, all learning-based approaches use a small receptive field and operate on small disparity ranges. In order to work with wide-baseline light fields, we introduce the idea of EPI-Shift: to virtually shift the light field stack, which enables us to retain a small receptive field, independent of the disparity range. In this way, our approach {\textquotedblleft}learns to think outside the box of the receptive field{\textquotedblright}. Our network performs joint classification of integer disparities and regression of disparity offsets. A U-Net component provides excellent long-range smoothing. EPI-Shift considerably outperforms the state-of-the-art learning-based approaches and is on par with hand-crafted methods. We demonstrate this on a publicly available, synthetic, small-baseline benchmark and on large-baseline real-world recordings.},
  keywords  = {computer vision, deep learning, depth estimation, light fields, stereo},
  isbn      = {9781728131313},
  doi       = {10.1109/3DV.2019.00036},
  url       = {http://arxiv.org/abs/1909.09059},
  author    = {Leistner, Titus and Schilling, Hendrik and Mackowiak, Radek and Gumhold, Stefan and Rother, Carsten}
}

@conference{Schilling2018,
  title     = {Trust your Model: Light Field Depth Estimation with Inline Occlusion Handling},
  booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  year      = {2018},
  pages     = {4530{\textendash}4538},
  abstract  = {We address the problem of depth estimation from light-field images. Our main contribution is a new way to handle occlusions which improves general accuracy and quality of object borders. In contrast to all prior work, we work with a model which directly incorporates both depth and occlusion, using a local optimization scheme based on the PatchMatch algorithm. The key benefit of this joint approach is that we utilize all available data and do not erroneously discard valuable information in pre-processing steps. We see the benefit of our approach not only at improved object boundaries, but also at smooth surface reconstruction, where we outperform even methods which focus on good surface regularization. We have evaluated our method on a public light-field dataset, where we achieve state-of-the-art results in nine out of twelve error metrics, with a close tie for the remaining three.},
  isbn      = {9781538664209},
  issn      = {10636919},
  doi       = {10.1109/CVPR.2018.00476},
  author    = {Schilling, Hendrik and Diebold, Maximilian and Rother, Carsten and J{\"a}hne, Bernd}
}