@inproceedings{e18ad39d84b9417fa8d2c5c1fad2777f,
  author    = {Qin, Xiaoyi and Chen, Weifu and Shen, Qi and Jiang, Jianmin and Feng, Guocan},
  title     = {Image Inpainting: A Contextual Consistent and Deep Generative Adversarial Training Approach},
  booktitle = {Proceedings - 4th Asian Conference on Pattern Recognition, ACPR 2017},
  pages     = {594--598},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  address   = {United States},
  year      = {2018},
  month     = dec,
  day       = {13},
  doi       = {10.1109/ACPR.2017.120},
  language  = {English},
  keywords  = {Context encoder, Contextual consistency, Generative adversarial networks, Image inpainting},
  note      = {Publisher Copyright: {\textcopyright} 2017 IEEE.; 4th Asian Conference on Pattern Recognition, ACPR 2017 ; Conference date: 26-11-2017 Through 29-11-2017},
  abstract  = {Context encoder with loss function based on generative adversarial networks (GAN) have been shown superior in image inpainting. However, when using the adversarial loss alone, the texture of the original image and the recovered regions is occasionally inconsistent. In order to solve this problem, this paper introduces a new constraint called contextual consistent loss and proposes a novel algorithm which uses the contextual information combining with adversarial nets to generate texture seamless inpainting. In the proposed algorithm, contextual consistency is enhanced by enforcing the texture of a recovered part similar to those of some part of the existing image when generating the missing parts. Experimental results on Paris Street View Dataset show that the combination of context encoder and contextual information could recover more texture-consistent and more high-quality regions, which demonstrates the advantage of the proposed algorithm.},
}