@inproceedings{fa96dee9457146d3a7b22d42ab8c5bc2,
  title         = {{Pixdoor}: A Pixel-space Backdoor Attack on Deep Learning Models},
  abstract      = {Deep learning algorithms outperform the machine learning techniques in various fields and are widely deployed for recognition and classification tasks. However, recent research focuses on exploring these deep learning models' weaknesses as these can be vulnerable due to outsourced training data and transfer learning. This paper proposed a rudimentary, stealthy Pixel-space based Backdoor attack (Pixdoor) during the training phase of deep learning models. For generating the poisoned dataset, the bit-inversion technique is used for injecting errors in the pixel bits of training images. Then 3% of the poisoned dataset is mixed with the clean dataset to corrupt the complete training images dataset. The experimental results show that the minimal percent of data poisoning can effectively fool a deep learning model with a high degree of accuracy. Likewise, in experiments, we witness a marginal degradation of the model accuracy by 0.02%.},
  keywords      = {Backdoor attack, Causative attack, Pixel-space, Poisoned dataset, Training phase},
  author        = {Arshad, Iram and Asghar, Mamoona Naveed and Qiao, Yuansong and Lee, Brian and Ye, Yuhang},
  note          = {Publisher Copyright: {\textcopyright} 2021 European Signal Processing Conference. All rights reserved.; 29th European Signal Processing Conference, EUSIPCO 2021 ; Conference date: 23-08-2021 Through 27-08-2021},
  year          = {2021},
  doi           = {10.23919/EUSIPCO54536.2021.9616118},
  language      = {English},
  series        = {European Signal Processing Conference},
  publisher     = {European Signal Processing Conference, EUSIPCO},
  pages         = {681--685},
  booktitle     = {29th European Signal Processing Conference, EUSIPCO 2021 - Proceedings},
  address       = {Belgium},
  internal-note = {review: address is a country, not the publisher's city -- confirm and replace with the correct city},
}