@misc{7063,
  title         = {Understanding Object Dynamics for Interactive Image-to-Video Synthesis},
  author        = {Blattmann, Andreas and Milbich, Timo and Dorkenwald, Michael and Ommer, Bj{\"o}rn},
  year          = {2021},
  eprint        = {2106.11303},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/2106.11303v1},
  abstract      = {What would be the effect of locally poking a static scene? We present an approach that learns naturally-looking global articulations caused by a local manipulation at a pixel level. Training requires only videos of moving objects but no information of the underlying manipulation of the physical scene. Our generative model learns to infer natural object dynamics as a response to user interaction and learns about the interrelations between different object body regions. Given a static image of an object and a local poking of a pixel, the approach then predicts how the object would deform over time. In contrast to existing work on video prediction, we do not synthesize arbitrary realistic videos but enable local interactive control of the deformation. Our model is not restricted to particular object categories and can transfer dynamics onto novel unseen object instances. Extensive experiments on diverse objects demonstrate the effectiveness of our approach compared to common video prediction frameworks.},
}