Commit 506dc6f1 authored by HUIN Nicolas

feat: add figures for slides

parent 4d082810
Branches algotel
@@ -109,6 +109,7 @@ def generate_comparison(instances_available):
        index='instance').describe())
    fig.tight_layout()
    fig.savefig("comparison_algotel.png")
    fig.savefig("comparison_algotel.svg")
    tikzplotlib.save("comparison_algotel.tex", standalone=True)
@@ -200,6 +201,193 @@ def sr_comparison(path_table):
                      hue='algorithm', **{'density_norm': 'width', 'cut': 0}, ax=ax)

def generate_nb_topology(instances_available):
    """Split violins of nb_real_topologies and nb_virtual_topologies per algorithm."""
    print(list(instances_available))
    greedy_df = pd.DataFrame([utils.get_greedy_stats(instance, RESULT_FOLDER)
                              for instance in instances_available])
    assert len(greedy_df) > 0
    vigp_df = pd.DataFrame([utils.get_vigp_stats(instance, RESULT_FOLDER)
                            for instance in instances_available])
    assert len(vigp_df) > 0
    df = pd.concat([vigp_df, greedy_df])
    for topo_type in ['real', 'virtual']:
        df[f'average_demands_{topo_type}'] = df[f'nb_demands_{topo_type}'].apply(
            lambda demands_list: sum(demands_list) / len(demands_list)
            if isinstance(demands_list, list) and len(demands_list) > 0 else 0)
    stats_table = df.pivot_table(
        columns='algorithm',
        values=['nb_virtual_topologies', 'nb_real_topologies',
                'nb_total_topologies', 'execution_time', 'vigp_time',
                'average_demands_real', 'average_demands_virtual'],
        index='instance').query('instance != "toy"')
    fig, ax = plt.subplots(
        figsize=(1.618 * 3.125, 3.125),
    )
    PLOT_STYLE = {
        'fill': True,
        'density_norm': 'count',
        'cut': 0,
        'gap': .05,
        'inner_kws': dict(box_width=15, whis_width=2),
    }
    # Drawing number of topologies comparison
    topology_nb_df = stats_table[['nb_real_topologies', 'nb_virtual_topologies']].rename(
        {"nb_real_topologies": "Real", "nb_virtual_topologies": "Virtual"}, axis=1).stack(
        -1, future_stack=True).stack().reset_index(level=[1, 2])
    topology_nb_df.columns = ['Algorithm', 'Type', 'Topology number']
    print(topology_nb_df.pivot_table(
        columns=['Type', 'Algorithm'], values='Topology number',
        index='instance').describe())
    seaborn.violinplot(topology_nb_df, y='Topology number',
                       x='Type',
                       hue='Algorithm', split=True, ax=ax, **PLOT_STYLE)
    fig.tight_layout()
    fig.savefig("nb_topologies.svg")

def generate_avg_demands(instances_available):
    """Split violins of average_demands_real and average_demands_virtual per algorithm."""
    greedy_df = pd.DataFrame([utils.get_greedy_stats(instance, RESULT_FOLDER)
                              for instance in instances_available])
    assert len(greedy_df) > 0
    vigp_df = pd.DataFrame([utils.get_vigp_stats(instance, RESULT_FOLDER)
                            for instance in instances_available])
    assert len(vigp_df) > 0
    df = pd.concat([vigp_df, greedy_df])
    for topo_type in ['real', 'virtual']:
        df[f'average_demands_{topo_type}'] = df[f'nb_demands_{topo_type}'].apply(
            lambda demands_list: sum(demands_list) / len(demands_list)
            if isinstance(demands_list, list) and len(demands_list) > 0 else 0)
    stats_table = df.pivot_table(
        columns='algorithm',
        values=['nb_virtual_topologies', 'nb_real_topologies',
                'nb_total_topologies', 'execution_time', 'vigp_time',
                'average_demands_real', 'average_demands_virtual'],
        index='instance').query('instance != "toy"')
    fig, ax = plt.subplots(
        figsize=(1.618 * 3.125, 3.125),
    )
    PLOT_STYLE = {
        'fill': True,
        'density_norm': 'count',
        'cut': 0,
        'gap': .05,
        'inner_kws': dict(box_width=15, whis_width=2),
    }
    # Drawing average number of demands
    average_demands_df = stats_table[['average_demands_real', 'average_demands_virtual']].rename(
        {"average_demands_real": "Real", "average_demands_virtual": "Virtual"}, axis=1).stack(
        -1, future_stack=True).stack().reset_index(level=[1, 2])
    average_demands_df.columns = ['Algorithm', 'Type', 'Average demand']
    print(average_demands_df.pivot_table(
        columns=['Type', 'Algorithm'], values='Average demand',
        index='instance').describe())
    seaborn.violinplot(average_demands_df, y='Average demand',
                       x='Type',
                       hue='Algorithm', split=True, ax=ax, **PLOT_STYLE)
    fig.tight_layout()
    fig.savefig("avg_demands.svg")

def generate_time(instances_available):
    """Split violins of execution_time and vigp_time per algorithm, on a log scale."""
    greedy_df = pd.DataFrame([utils.get_greedy_stats(instance, RESULT_FOLDER)
                              for instance in instances_available])
    assert len(greedy_df) > 0
    vigp_df = pd.DataFrame([utils.get_vigp_stats(instance, RESULT_FOLDER)
                            for instance in instances_available])
    assert len(vigp_df) > 0
    df = pd.concat([vigp_df, greedy_df])
    for topo_type in ['real', 'virtual']:
        df[f'average_demands_{topo_type}'] = df[f'nb_demands_{topo_type}'].apply(
            lambda demands_list: sum(demands_list) / len(demands_list)
            if isinstance(demands_list, list) and len(demands_list) > 0 else 0)
    stats_table = df.pivot_table(
        columns='algorithm',
        values=['nb_virtual_topologies', 'nb_real_topologies',
                'nb_total_topologies', 'execution_time', 'vigp_time',
                'average_demands_real', 'average_demands_virtual'],
        index='instance').query('instance != "toy"')
    fig, ax = plt.subplots(
        figsize=(1.618 * 3.125, 3.125),
    )
    PLOT_STYLE = {
        'fill': True,
        'density_norm': 'count',
        'cut': 0,
        'gap': .05,
        'inner_kws': dict(box_width=15, whis_width=2),
    }
    # Drawing execution time
    time_df = stats_table[['execution_time', 'vigp_time']].rename(
        {"execution_time": "Real", "vigp_time": "Virtual"}, axis=1).stack(
        -1, future_stack=True).stack().reset_index(level=[1, 2])
    time_df.columns = ['Algorithm', 'Type', 'Execution time (s)']
    seaborn.violinplot(time_df, y='Execution time (s)',
                       x='Type',
                       hue='Algorithm', split=True, ax=ax, **PLOT_STYLE)
    ax.set_yscale('log')
    print(time_df.pivot_table(
        columns=['Type', 'Algorithm'], values='Execution time (s)',
        index='instance').describe())
    fig.tight_layout()
    fig.savefig("time.svg")

def generate_qos_robustness_algotel(instances_available):
    """Split violins of the delay and loss robustness ratios for MTR and vMTR paths."""
    all_results_df = pd.concat([
        utils.compare_paths(
            instance,
            utils.load_demand_graph(f'./json/{instance}_wdm.json'),
            {'vMTR': utils.get_vipged_path(instance, RESULT_FOLDER,
                                           'lambdaMin'),
             'MTR': utils.get_paths_from_greedy(instance,
                                                utils.load_demand_graph(
                                                    f"./json/{instance}_wdm.json"),
                                                RESULT_FOLDER, "greedy_200")})
        for instance in instances_available]).query("instance != 'toy'")
    qos_df = pd.concat(
        [pd.DataFrame(
            data=[(instance, demand_property['id'],
                   demand_property['delay'],
                   demand_property['loss'])
                  for _, _, demand_property in utils.load_demand_graph(
                      f'./json/{instance}_wdm.json').edges(data=True)],
            columns=['instance', 'demand_id', 'delay', 'loss'])
         for instance in instances_available]
    ).query("instance != 'toy'")
    path_table = all_results_df.pivot(index=['instance', 'demand_id'], values=[
        'delay', 'loss', 'topo_id'], columns=['algo']).dropna()
    qos_table = qos_df.pivot(index=['instance', 'demand_id'], values=[
        'delay', 'loss'], columns=[]).dropna()
    for algo, qos in itertools.product(['MTR', 'vMTR'], ['delay', 'loss']):
        path_table[(f'{qos}_ratio', algo)] = qos_robustness(
            path_table[qos][algo], qos_table[qos])
    ratio_df = path_table[['delay_ratio', 'loss_ratio']].rename(
        {"delay_ratio": "Delay", "loss_ratio": "Loss"}, axis=1).stack(
        1, future_stack=True).stack().reset_index(level=[2, 3])
    ratio_df.columns = ['Algorithm', 'Metric', 'Robustness']
    print(ratio_df)
    print(ratio_df.pivot_table(
        columns=['Metric', 'Algorithm'], values='Robustness',
        index=['instance', 'demand_id']))
    fig, ax = plt.subplots(nrows=1, ncols=1,
                           figsize=(1 * 1.618 * 3.125, 3.125),
                           )
    seaborn.violinplot(ratio_df, y='Robustness', gap=.1,
                       x='Metric',
                       hue='Algorithm', split=True, **VIOLINPLOT_STYLE)
    ax.set_xlabel("")
    ax.set_ylim(0, 1)
    fig.savefig("qos.svg")

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
@@ -208,6 +396,7 @@ if __name__ == "__main__":
    subparsers = parser.add_subparsers(help='Functions', dest="figure")
    comparison_parser = subparsers.add_parser('comparison_algotel')
    qos_parse = subparsers.add_parser('qos_robustness_algotel')
    slides_algotel = subparsers.add_parser('slides_algotel')
    args = parser.parse_args()
    instances_available = get_available_instances()
    print(f"Get results for {instances_available}")
@@ -215,3 +404,8 @@ if __name__ == "__main__":
        generate_comparison(instances_available)
    if args.figure == 'qos_robustness_algotel':
        generate_qos_robustness(instances_available)
    if args.figure == 'slides_algotel':
        generate_nb_topology(instances_available)
        generate_time(instances_available)
        generate_avg_demands(instances_available)
        generate_qos_robustness_algotel(instances_available)
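
# Usage sketch: the script's filename is not shown in this diff, so "figures.py" below
# is only a placeholder. Running `python figures.py slides_algotel` would invoke the
# four generators above and write nb_topologies.svg, time.svg, avg_demands.svg and qos.svg.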