@inproceedings{baef4a9725f9402cae3bcde35dcc4bc6,
title = "Invisible Encoded Backdoor attack on DNNs using Conditional GAN",
abstract = "Deep Learning (DL) models deliver superior performance and have achieved remarkable results for classification and vision tasks. However, recent research focuses on exploring these Deep Neural Networks (DNNs) weaknesses as these can be vulnerable due to transfer learning and outsourced training data. This paper investigates the feasibility of generating a stealthy invisible backdoor attack during the training phase of deep learning models. For developing the poison dataset, an interpolation technique is used to corrupt the sub-feature space of the conditional generative adversarial network. Then, the generated poison dataset is mixed with the clean dataset to corrupt the training images dataset. The experiment results show that by injecting a 3% poison dataset combined with the clean dataset, the DL models can effectively fool with a high degree of model accuracy.",
keywords = "Backdoor Attack, Conditional Generative Adversarial Network, Image Synthesis",
author = "Iram Arshad and Yuansong Qiao and Brian Lee and Yuhang Ye",
note = "Publisher Copyright: {\textcopyright} 2023 IEEE.; 2023 IEEE International Conference on Consumer Electronics, ICCE 2023 ; Conference date: 06-01-2023 Through 08-01-2023",
year = "2023",
doi = "10.1109/ICCE56470.2023.10043484",
language = "English",
series = "Digest of Technical Papers - IEEE International Conference on Consumer Electronics",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2023 IEEE International Conference on Consumer Electronics, ICCE 2023",
address = "United States",
}