@inproceedings{20605846cf84466395cec07d86c1ba60,
  title     = {Bias Quantification for Protected Features in Pattern Classification Problems},
  author    = {{Koutsoviti Koumeri}, Lisa and N{\'a}poles, Gonzalo},
  editor    = {Tavares, Jo{\~a}o Manuel R. S. and Papa, Jo{\~a}o Paulo and {Gonz{\'a}lez Hidalgo}, Manuel},
  booktitle = {Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer International Publishing},
  year      = {2021},
  pages     = {351--360},
  doi       = {10.1007/978-3-030-93420-0_33},
  isbn      = {9783030934200},
  language  = {English},
  abstract  = {The need to measure and mitigate bias in machine learning data sets has gained wide recognition in the field of Artificial Intelligence (AI) during the past decade. The academic and business communities call for new general-purpose measures to quantify bias. In this paper, we propose a new measure that relies on the fuzzy-rough set theory. The intuition of our measure is that protected features should not change the fuzzy-rough set boundary regions significantly. The extent to which this happens can be understood as a proxy for bias quantification. Our measure can be categorized as an individual fairness measure since the fuzzy-rough regions are computed using instance-based information pieces. The main advantage of our measure is that it does not depend on any prediction model but on a distance function. At the same time, our measure offers an intuitive rationale for the bias concept. The results using a proof-of-concept show that our measure can capture the bias issues better than other state-of-the-art measures.},
  internal-note = {NOTE(review): key is an auto-export hash; kept to avoid breaking existing citations. LNCS volume number is missing -- look it up from the DOI and add a volume field.},
}