@misc{Ardizzone2020,
  title         = {Exact {Information Bottleneck} with {Invertible Neural Networks}: Getting the Best of Discriminative and Generative Modeling},
  author        = {Ardizzone, Lynton and Mackowiak, Radek and Rother, Carsten and K{\"o}the, Ullrich},
  year          = {2020},
  month         = jan,
  eprint        = {2001.06448},
  archiveprefix = {arXiv},
  url           = {http://arxiv.org/abs/2001.06448},
  abstract      = {Generative models are more informative about underlying phenomena than discriminative ones and offer superior uncertainty quantification and out-of-distribution robustness. However, these advantages often come at the expense of reduced classification accuracy. The Information Bottleneck objective (IB) formulates this trade-off in a clean information-theoretic way, but its practical application is hampered by a lack of accurate high-dimensional estimators of mutual information (MI), its main constituent. To overcome this limitation, we develop the theory and methodology of IB-INNs, which optimize the IB objective by means of Invertible Neural Networks (INNs), without the need for approximations of MI. Our experiments show that IB-INNs allow for a precise adjustment of the generative/discriminative trade-off: They learn accurate models of the class conditional likelihoods, generalize well to unseen data and reliably detect out-of-distribution examples, while at the same time exhibiting classification accuracy close to purely discriminative feed-forward networks.},
}