@inproceedings{DBLP:conf/iccv/LiL17,
  author    = {Li, Xin and Li, Fuxin},
  title     = {Adversarial Examples Detection in Deep Networks with Convolutional Filter Statistics},
  booktitle = {{IEEE} International Conference on Computer Vision, {ICCV} 2017, Venice, Italy, October 22--29, 2017},
  pages     = {5775--5783},
  publisher = {{IEEE} Computer Society},
  year      = {2017},
  doi       = {10.1109/ICCV.2017.615},
  isbn      = {978-1-5386-1032-9},
  url       = {https://doi.org/10.1109/ICCV.2017.615}
}
One deciding property is that there is a strong regularization effect in adversarial examples along almost all the informative directions. Hence, the predictions on adversarial examples are lower than those on normal examples, contrary to what the confidence values may have indicated.