@conference{Richmond2016b,
  title     = {Mapping auto-context decision forests to deep convnets for semantic segmentation},
  booktitle = {British Machine Vision Conference 2016, BMVC 2016},
  volume    = {2016-September},
  year      = {2016},
  pages     = {144.1{\textendash}144.12},
  abstract  = {We consider the task of pixel-wise semantic segmentation given a small set of labeled training images. Two of the most popular techniques for addressing this task are Random Forests (RF) and Neural Networks (NN). In this work, we explore the relationship between two special forms of these techniques: stacked RFs (namely Auto-context) and deep Convolutional Neural Networks (ConvNet). Our main contribution is to show that Auto-context can be mapped to a deep ConvNet with a novel architecture, and thereby trained end-to-end. This mapping can be viewed as an intelligent initialization of a deep ConvNet, enabling training even in the face of very limited amounts of training data. We also demonstrate an approximate mapping back from the refined ConvNet to a second stacked RF, with improved performance over the original. We experimentally verify that these mappings outperform stacked RFs for two different applications in computer vision and biology: Kinect-based body part labeling from depth images, and somite segmentation in microscopy images of developing zebrafish.},
  doi       = {10.5244/C.30.144},
  author    = {Richmond, David L and Kainmueller, Dagmar and Yang, Michael Y and Myers, Eugene W and Rother, Carsten}
}