@conference{Ramos2017,
  title     = {Detecting unexpected obstacles for self-driving cars: Fusing deep learning and geometric modeling},
  booktitle = {IEEE Intelligent Vehicles Symposium, Proceedings},
  year      = {2017},
  month     = {dec},
  pages     = {1025{\textendash}1032},
  abstract  = {The detection of small road hazards, such as lost cargo, is a vital capability for self-driving cars. We tackle this challenging and rarely addressed problem with a vision system that leverages appearance, contextual as well as geometric cues. To utilize the appearance and contextual cues, we propose a new deep learning-based obstacle detection framework. Here a variant of a fully convolutional network is proposed to predict a pixel-wise semantic labeling of (i) free-space, (ii) on-road unexpected obstacles, and (iii) background. The geometric cues are exploited using a state-of-the-art detection approach that predicts obstacles from stereo input images via model-based statistical hypothesis tests. We present a principled Bayesian framework to fuse the semantic and stereo-based detection results. The mid-level Stixel representation is used to describe obstacles in a flexible, compact and robust manner. We evaluate our new obstacle detection system on the Lost and Found dataset, which includes very challenging scenes with obstacles of only 5 cm height. Overall, we report a major improvement over the state of the art, with a performance gain of 27.4\%. In particular, we achieve a detection rate of over 90\% for distances of up to 50 m. Our system operates at 22 Hz on our self-driving platform.},
  isbn      = {9781509048045},
  doi       = {10.1109/IVS.2017.7995849},
  url       = {http://arxiv.org/abs/1612.06573},
  author    = {Ramos, Sebastian and Gehrig, Stefan and Pinggera, Peter and Franke, Uwe and Rother, Carsten}
}

@conference{Pinggera2016,
  title     = {Lost and found: Detecting small road hazards for self-driving vehicles},
  booktitle = {IEEE International Conference on Intelligent Robots and Systems},
  volume    = {2016-November},
  year      = {2016},
  pages     = {1099{\textendash}1106},
  abstract  = {Detecting small obstacles on the road ahead is a critical part of the driving task which has to be mastered by fully autonomous cars. In this paper, we present a method based on stereo vision to reliably detect such obstacles from a moving vehicle. The proposed algorithm performs statistical hypothesis tests in disparity space directly on stereo image data, assessing free-space and obstacle hypotheses on independent local patches. This detection approach does not depend on a global road model and handles both static and moving obstacles. For evaluation, we employ a novel lost-cargo image sequence dataset comprising more than two thousand frames with pixel-wise annotations of obstacle and free-space, and provide a thorough comparison to several stereo-based baseline methods. The dataset will be made available to the community to foster further research on this important topic. The proposed approach outperforms all considered baselines in our evaluations on both pixel and object level and runs at frame rates of up to 20 Hz on 2 megapixel stereo imagery. Small obstacles down to a height of 5 cm can successfully be detected at 20 m distance at low false positive rates.},
  isbn      = {9781509037629},
  issn      = {21530866},
  doi       = {10.1109/IROS.2016.7759186},
  url       = {http://www.6d-vision.com/lostandfounddataset},
  author    = {Pinggera, Peter and Ramos, Sebastian and Gehrig, Stefan and Franke, Uwe and Rother, Carsten and Mester, Rudolf}
}