@proceedings{7041,
  title    = {Rethinking Style Transfer: From Pixels to Parameterized Brushstrokes},
  year     = {2021},
  abstract = {There have been many successful implementations of neural style transfer in recent years. In most of these works, the stylization process is confined to the pixel domain. However, we argue that this representation is unnatural because paintings usually consist of brushstrokes rather than pixels. We propose a method to stylize images by optimizing parameterized brushstrokes instead of pixels and further introduce a simple differentiable rendering mechanism. Our approach significantly improves visual quality and enables additional control over the stylization process, such as controlling the flow of brushstrokes through user input. We provide qualitative and quantitative evaluations that show the efficacy of the proposed parameterized representation.},
  url      = {https://compvis.github.io/brushstroke-parameterized-style-transfer/},
  author   = {Dmytro Kotovenko and Matthias Wright and Arthur Heimbrecht and Bj{\"o}rn Ommer}
}

@conference{6322,
  title     = {Content and Style Disentanglement for Artistic Style Transfer},
  booktitle = {Proceedings of the Intl. Conf. on Computer Vision (ICCV)},
  year      = {2019},
  author    = {Dmytro Kotovenko and Artsiom Sanakoyeu and Sabine Lang and Bj{\"o}rn Ommer}
}

@proceedings{6300,
  title  = {Using a Transformation Content Block For Image Style Transfer},
  year   = {2019},
  author = {Dmytro Kotovenko and A. Sanakoyeu and Sabine Lang and P. Ma and Bj{\"o}rn Ommer}
}

@conference{style_aware_content_loss_eccv18,
  title     = {A Style-Aware Content Loss for Real-time HD Style Transfer},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) (Oral)},
  year      = {2018},
  abstract  = {Recently, style transfer has received a lot of attention. While much of this research has aimed at speeding up processing, the approaches still lack a principled, art-historical foundation: a style is more than a single image or an artist, yet previous work is limited to a single instance of a style or shows no benefit from additional images. Moreover, previous work has relied on a direct comparison of art in the domain of RGB images or on CNNs pre-trained on ImageNet, which requires millions of labeled object bounding boxes and can introduce an extra bias, since it has been assembled without artistic consideration. To circumvent these issues, we propose a style-aware content loss, which is trained jointly with a deep encoder-decoder network for real-time, high-resolution stylization of images and videos. We propose a quantitative measure for evaluating the quality of a stylized image and also have art historians rank patches from our approach against those from previous work. These and our qualitative results, ranging from small image patches to megapixel stylized images and videos, show that our approach better captures the subtle way in which a style affects content.},
  keywords  = {deep learning, generative network, Style transfer},
  author    = {A. Sanakoyeu and Dmytro Kotovenko and Sabine Lang and Bj{\"o}rn Ommer}
}