
Commit 87907bfb authored by BERNIER Fabien

[+] FixOutText class, WIP

parent d2ae8618
"""
Implements the main procedures to build fairer ensembles, e.g. feature drop-out, model training, ensemble building
"""
import os
from aif360.sklearn.metrics.metrics import statistical_parity_difference, \
equal_opportunity_difference, average_odds_difference, \
disparate_impact_ratio, difference, generalized_fpr, specificity_score
from aif360.sklearn.utils import check_groups
from imblearn.over_sampling import SMOTE
from sklearn.compose import ColumnTransformer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from lime_text_global import fairness_eval
import numpy as np
import pandas as pd
# workaround for the "duplicate OpenMP runtime" crash seen with some MKL builds
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class FixOutText:
def __init__(self, X, y, vocabulary, to_drop, algo, train_size=0.7, exp=fairness_eval, max_features=10, sampling_size=None, seed=None, threshold=None):
self.exp = exp
self.algo = algo
self.seed = seed
self.threshold = threshold
self.data = X
        le = LabelEncoder()
self.labels = le.fit_transform(y)
self.class_names = le.classes_
self.feature_names = vocabulary
self.train, self.test, self.labels_train, self.labels_test = train_test_split(self.data, self.labels, train_size=train_size, random_state=self.seed)
self.sensitive_f = to_drop
self.max_features = max_features
        if sampling_size is None:
            # by default, set the sampling size for SubmodularPick to 10% of the instances
            self.sampling_size = len(X) // 10
        else:
            self.sampling_size = sampling_size
        # text data carries no categorical features, but ensemble_out still
        # consults this attribute; an empty list is assumed to be the intended
        # value for the text setting (class is WIP)
        self.all_categorical_f = []
def is_fair(self):
        model, encoder = train_classifier(self.algo, self.train, self.test, self.labels_train, [], [], self.seed)
self.original_model = Model([model],[encoder],[[]])
accuracy, threshold = evaluation(self.original_model.prob(self.test), self.labels_test)
actual_sensitive, is_fair_flag, ans_data, explainer = self.exp(self.original_model, self.train, self.max_features, self.sensitive_f, self.feature_names, self.class_names, self.sampling_size, self.threshold)
return actual_sensitive, is_fair_flag, ans_data, accuracy, threshold
def ensemble_out(self, actual_sensitive):
"""
Implements ENSEMBLE_Out
Parameters
algo: class of a classification algorithm
to_drop: list of features that must be dropped
train: X
labels_train: y
all_categorical_features: list of indices of categorical features
"""
models, encoders, features_to_remove = [], [], []
for i in actual_sensitive:
remove_features = [i]
categorical_features = remove(remove_features, self.all_categorical_f)
model, encoder = train_classifier(self.algo, self.train, self.test, self.labels_train, remove_features, categorical_features, self.seed)
models.append(model)
encoders.append(encoder)
features_to_remove.append(remove_features)
        # one extra model trained with ALL sensitive features removed
        categorical_features_all = remove(actual_sensitive, self.all_categorical_f)
        model_all, encoder_all = train_classifier(self.algo, self.train, self.test, self.labels_train, actual_sensitive, categorical_features_all, self.seed)
        models.append(model_all)
        encoders.append(encoder_all)
        features_to_remove.append(actual_sensitive)
self.ensemble = Model(models,encoders,features_to_remove)
accuracy, threshold = evaluation(self.ensemble.prob(self.test), self.labels_test)
# accuracy = evaluation_fixed_threshold(self.ensemble.prob(self.test), self.labels_test, 0.5)
        _, is_fair_flag, ans_data, explainer = self.exp(self.ensemble, self.train, self.max_features, actual_sensitive, self.feature_names, self.class_names, self.sampling_size, self.threshold)
return is_fair_flag, ans_data, accuracy, threshold
# return False, None, accuracy
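# A minimal usage sketch (hypothetical data and classifier; `X`, `y`,
# `vocabulary` and the sensitive indices are assumptions about the caller's
# preprocessing, not part of this module):
#
#     from sklearn.ensemble import RandomForestClassifier
#     fixout = FixOutText(X, y, vocabulary, to_drop=[3, 7],
#                         algo=RandomForestClassifier, seed=42)
#     sensitive, fair, ans_data, acc, thr = fixout.is_fair()
#     if not fair:
#         fair, ans_data, acc, thr = fixout.ensemble_out(sensitive)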
class Model:
    """Ensemble model: stores a list of trained classifiers together with
    their respective encoders and the features each was trained without.
    """
    def __init__(self, models, encoders, features_to_remove):
        """
        Args:
            models: a list of trained classifiers
            encoders: a list of encoders (1st encoder for the 1st model)
            features_to_remove: a list of lists of feature indices to delete
                from the input before calling the corresponding model
        """
        self.models = models
        self.encoders = encoders
        self.features_to_remove = features_to_remove
    def prob(self, X):
        """
        Returns the ensemble's class probabilities: the average of the
        members' predict_proba outputs.
        """
        probs = []
        for model, encoder, to_remove in zip(self.models, self.encoders, self.features_to_remove):
            # drop the features this model was trained without, encode, predict
            comp = model.predict_proba(encoder.transform(np.delete(X, to_remove, axis=1))).astype(float)
            probs.append(comp)
        return sum(probs) / len(self.models)
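# What Model.prob computes, on two hypothetical predict_proba outputs: the
# per-class probabilities are simply averaged across the ensemble members.
#
#     p1 = np.array([[0.8, 0.2]])  # first model
#     p2 = np.array([[0.6, 0.4]])  # second model
#     (p1 + p2) / 2                # -> [[0.7, 0.3]], what prob() returns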
def load_data(source_name, categorical_features, feature_names=None, delimiter=' ', target_features=None):
"""
Loads data from a text file source
"""
    if feature_names is not None:
        data = pd.read_csv(source_name, names=feature_names, delimiter=delimiter)
    else:
        data = pd.read_csv(source_name, header=0, delimiter=delimiter)
    if target_features is not None:
data = data[target_features]
data.dropna(subset=target_features, inplace=True)
current_feature_names = data.columns.values.tolist()
current_feature_names = [f_name.replace(" ", "") for f_name in current_feature_names]
data = data.replace(np.nan, '', regex=True)
# data = data.replace('', 0, regex=True)
# data.iloc[categorical_features] = data.iloc[categorical_features].astype(str)
for i in get_numerical_features_indexes(len(current_feature_names), categorical_features):
data.iloc[:,i] = data.iloc[:,i].replace('', 0, regex=False)
data.iloc[:,i] = data.iloc[:,i].replace('?', 0, regex=False)
data = data.to_numpy()
labels = data[:,-1]
    le = LabelEncoder()
le.fit(labels)
labels = le.transform(labels)
class_names = le.classes_
data = data[:,:-1]
current_feature_names = current_feature_names[:-1]
categorical_names = {}
for feature in categorical_features:
le = LabelEncoder()
column = data[:, feature].astype(str)
le.fit(column)
data[:, feature] = le.transform(column)
categorical_names[feature] = le.classes_
data = data.astype(float)
return data, labels, class_names, current_feature_names, categorical_names
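# Hypothetical call (file name, delimiter and column indices are assumptions;
# the last column is always treated as the label):
#
#     data, labels, class_names, f_names, cat_names = load_data(
#         "adult.data", categorical_features=[1, 3], delimiter=',')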
def train_classifier(algo, train, test, train_labels, remove_features, categorical_features, seed):
    """Drops the given features, rebalances the training set with SMOTE,
    one-hot encodes the categorical columns, and fits a fresh classifier."""
    train = np.delete(train, remove_features, axis=1)
    test = np.delete(test, remove_features, axis=1)
    encoder = ColumnTransformer(
        [('one_hot_encoder', OneHotEncoder(categories='auto'), categorical_features)],
        remainder='passthrough'
    )
    # oversample the minority class before encoding
    sm = SMOTE(sampling_strategy='auto', random_state=seed)
    train_res, labels_train_res = sm.fit_resample(train, train_labels)
    # fit the encoder on resampled train + test so every category value is seen
    encoder.fit(np.concatenate([train_res, test]))
    encoded_train_res = encoder.transform(train_res)
    # model = algo(hidden_layer_sizes=(100,100,), random_state=seed)
    model = algo()
    model.fit(encoded_train_res, labels_train_res)
    return model, encoder
def get_numerical_features_indexes(num_features, categorical_features):
res = []
for i in range(num_features):
if i not in categorical_features:
res.append(i)
return res
def remove(target_list, categorical_features):
    """
    Returns the indices of the categorical features that must be kept:
    categorical features listed in target_list are dropped, and the indices
    of the remaining ones are shifted to account for the removals.
    """
    n_target_list = sorted(target_list)
    counter = 0
    new_indices = list(categorical_features)
for target in n_target_list:
t = target - counter
under = [v for v in new_indices if v < t]
above = [v-1 for v in new_indices if v > t]
new_indices = under + above
counter += 1
return new_indices
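# Worked example: dropping feature 1 removes it from the categorical list
# and shifts every categorical index above it down by one:
#
#     remove([1], [0, 1, 3])  # -> [0, 2]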
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
def evaluation(probs, true_labels):
    """Searches a grid of decision thresholds and returns the best accuracy
    together with the threshold that achieves it."""
    probs = probs[:, 1]  # probability of the positive class
    thresholds = np.arange(0, 1, 0.001)  # candidate thresholds
    scores = [accuracy_score(true_labels, to_labels(probs, t)) for t in thresholds]  # evaluate each threshold
    ix = np.argmax(scores)  # index of the best threshold
    # print('Threshold=%.3f, Accuracy=%.5f' % (thresholds[ix], scores[ix]))
    accuracy = accuracy_score(true_labels, to_labels(probs, thresholds[ix]))
return accuracy, thresholds[ix]
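# Toy example of the threshold search: with positive-class probabilities
# [0.2, 0.6, 0.9] and labels [0, 1, 1], every threshold in (0.2, 0.6] gives
# accuracy 1.0; np.argmax picks the first such point on the grid, so
# evaluation returns (1.0, ~0.201).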
def evaluation_fixed_threshold(probs, true_labels, threshold):
probs = probs[:, 1]
return accuracy_score(true_labels, to_labels(probs, threshold))
def fairness_metrics_eval(probs, true_labels, protected_attr, p_group, p_label):
_probs = probs[:, 1]
thresholds = np.arange(0, 1, 0.001)
scores = [accuracy_score(true_labels, to_labels(_probs, t)) for t in thresholds]
ix = np.argmax(scores)
y_pred = to_labels(_probs, thresholds[ix])
dp = statistical_parity_difference(true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label) # Demographic Parity
eq = equal_opportunity_difference(true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label) # Equal Opportunity
ea = difference(accuracy_score, true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group)
aod = average_odds_difference(true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label) # Equal Odds
# fpr_diff = -difference(specificity_score, true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label)
# gfpr_diff = difference(generalized_fpr, true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label)
fpr_diff = __difference_fpr(true_labels, y_pred, prot_attr=str(protected_attr),priv_group=p_group, pos_label=p_label)
di = disparate_impact_ratio(true_labels, y_pred, prot_attr=str(protected_attr), priv_group=p_group, pos_label=p_label) # Disparate Impact
return dp,eq,ea,aod,fpr_diff,di
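# All six values are computed at the accuracy-maximising threshold. For the
# difference-based metrics (dp, eq, ea, aod, fpr_diff) values near 0 indicate
# parity between groups; for the ratio-based di, values near 1 do.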
def __difference_fpr(y_true, y_pred, prot_attr=None, priv_group=1, pos_label=1):
    """Computes FPR(unprivileged) - FPR(privileged) from the confusion
    matrices of the two groups."""
    groups, _ = check_groups(y_true, prot_attr)
    idx = (groups == priv_group)
n_y_true = y_true['y_true'].to_numpy()
priv_true = n_y_true[idx]
unpriv_true = n_y_true[~idx]
priv_pred = y_pred[idx]
unpriv_pred = y_pred[~idx]
neg_label = 1 - pos_label
negative_priv = list(priv_true).count(neg_label)
negative_unpriv = list(unpriv_true).count(neg_label)
tn1, fp1, fn1, tp1 = confusion_matrix(priv_true, priv_pred, labels=[0,1]).ravel()
tn2, fp2, fn2, tp2 = confusion_matrix(unpriv_true, unpriv_pred, labels=[0,1]).ravel()
fpr_priv = fp1/negative_priv if negative_priv > 0 else 0
fpr_unpriv = fp2/negative_unpriv if negative_unpriv > 0 else 0
return fpr_unpriv - fpr_priv
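# Worked example (pos_label=1): a privileged group with true labels [0, 0, 1]
# and predictions [1, 0, 1] has FPR 1/2; an unprivileged group with the same
# true labels and predictions [1, 1, 1] has FPR 2/2, so the function
# returns 1.0 - 0.5 = 0.5.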
@@ -22,7 +22,7 @@ def features_contributions(predict_fn, train, class_names, sample_size, kernel_w
     return explainer, sp_obj
-def fairness_eval(model, train, max_features, sensitive_features, feature_names, class_names, categorical_features, categorical_names, sample_size, threshold=None):
+def fairness_eval(model, train, max_features, sensitive_features, feature_names, class_names, sample_size, threshold=None):
     explainer, sp_obj = features_contributions(model.prob, train, class_names, sample_size)
@@ -11,6 +11,7 @@ from sklearn.neural_network._multilayer_perceptron import MLPClassifier
 from sklearn.svm._classes import SVC
 import anchor_global
 import lime_text_global
+from core import FixOut
 import lime_global
 import shap_global
@@ -61,6 +62,8 @@ def exp_parser(algo_str):
     if algo == "lime":
         return lime_global.fairness_eval
+    elif algo == "lime_text":
+        return lime_text_global.fairness_eval
     elif algo == "anchors":
         return anchor_global.fairness_eval
     elif algo == "shap":