@proceedings {7044,
	title = {Behavior-Driven Synthesis of Human Dynamics},
	year = {2021},
	url = {https://arxiv.org/abs/2103.04677},
	author = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Bj{\"o}rn Ommer}
}
@proceedings {7070,
	title = {ImageBART: Bidirectional Context with Multinomial Diffusion for Autoregressive Image Synthesis},
	year = {2021},
	url = {https://arxiv.org/abs/2108.08827},
	author = {Patrick Esser and Robin Rombach and Andreas Blattmann and Bj{\"o}rn Ommer}
}
@conference {7068,
	title = {iPOKE: Poking a Still Image for Controlled Stochastic Video Synthesis},
	booktitle = {Proceedings of the International Conference on Computer Vision (ICCV)},
	year = {2021},
	url = {https://arxiv.org/abs/2107.02790},
	author = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Bj{\"o}rn Ommer}
}
@proceedings {7053,
	title = {Stochastic Image-to-Video Synthesis using cINNs},
	year = {2021},
	author = {Michael Dorkenwald and Timo Milbich and Andreas Blattmann and Robin Rombach and Konstantinos G. Derpanis and Bj{\"o}rn Ommer}
}
@proceedings {7063,
	title = {Understanding Object Dynamics for Interactive Image-to-Video Synthesis},
	year = {2021},
	abstract = {What would be the effect of locally poking a static scene? We present an approach that learns naturally-looking global articulations caused by a local manipulation at a pixel level. Training requires only videos of moving objects but no information of the underlying manipulation of the physical scene. Our generative model learns to infer natural object dynamics as a response to user interaction and learns about the interrelations between different object body regions. Given a static image of an object and a local poking of a pixel, the approach then predicts how the object would deform over time. In contrast to existing work on video prediction, we do not synthesize arbitrary realistic videos but enable local interactive control of the deformation. Our model is not restricted to particular object categories and can transfer dynamics onto novel unseen object instances. Extensive experiments on diverse objects demonstrate the effectiveness of our approach compared to common video prediction frameworks.},
	url = {https://arxiv.org/abs/2106.11303v1},
	author = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Bj{\"o}rn Ommer}
}