Commit 51cb5bdb authored by BERNIER Fabien's avatar BERNIER Fabien
Browse files

Added requirements.txt and implementation of fairness visualizations

parent 35ce914a
__pycache__
*/__pycache__
.idea
......@@ -3,6 +3,7 @@ import datetime
from core import FixOut, evaluation, Model, fairness_metrics_eval
from datasets_param import *
from runner import algo_parser, exp_parser
from fairmodels import ModelProb, FairnessObject
import numpy as np
import pandas as pd
......@@ -55,6 +56,34 @@ def one_experiment(source_name, sep, train_size, to_drop, all_categorical_featur
true_labels.set_index(sensitive_features_names, inplace=True)
print("\n\n****************\nFAIRNESS METRICS\n****************")
test_input = pd.DataFrame(fixout.test.astype(np.uint8), columns=fixout.feature_names)
original_predictions = ModelProb(fixout.original_model.prob(fixout.test)[:,1], threshold=0.5, name="Original")
fixout_predictions = ModelProb(fixout.ensemble.prob(fixout.test)[:,1], threshold=0.5, name="Ensemble")
fobject = FairnessObject(
model_probs=[original_predictions, fixout_predictions],
y=fixout.labels_test,
protected=test_input.sex,
privileged=0
)
fplt = fobject.plot()
fplt.show()
densityplt = fobject.plot_density()
densityplt.show()
hmplt = fobject.plot_fairness_heatmap()
hmplt.show()
radarplt = fobject.plot_fairness_radar()
radarplt.show()
print("fobject done")
for sen_feature in sensitive_features_names:
dp,eq,ea,aod,fpr_diff,di = fairness_metrics_eval(fixout.original_model.prob(fixout.test), true_labels, sen_feature, p_groups[str(sen_feature)], p_label)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment