Commit e4f06f83 authored by DARROUS Jad's avatar DARROUS Jad

Initial commit

parent 07da66f6
# hadoop-ec-traces
# Hadoop traces under Erasure Coding
This repository contains traces of Hadoop MapReduce jobs under replication and erasure coding. The Sort, Wordcount, and K-means applications are included. The traces were collected under different software configurations (overlapping and non-overlapping shuffle, disk persistence, failures) and hardware configurations (HDD, SSD, DRAM, 1 Gbps and 10 Gbps networks).
Plotting scripts are available in the `scripts` directory. They are written in Python 3.
Required Python libraries (a minimal usage example follows the list):
+ pyyaml
+ pandas
+ matplotlib
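For example, a job-level trace can be loaded and summarized directly with pandas (an illustrative sketch: the file name is hypothetical, but the column names match those used by the scripts in `scripts`):

```python
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical path: point it at one of the mapred.csv trace files in this repository.
df = pd.read_csv("mapred.csv")
df["execution_time"] = (df["finishedAt"] - df["launchedAt"]) / 1000  # ms -> s

# Mean execution time per job and storage policy (e.g. replication vs. erasure coding).
ax = (df.groupby(["job_name", "policy"])["execution_time"]
        .mean()
        .unstack()
        .plot.bar(rot=0))
ax.set_ylabel("Execution time (s)")
plt.tight_layout()
plt.savefig("execution_time.png", dpi=100)
```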
import os
import pandas as pd
import matplotlib.pyplot as plt
import statistics
# Note: 'display.height' was removed in pandas >= 0.23; drop this line on newer pandas versions.
pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
RED='\033[0;31m'
GREEN='\033[0;32m'
MAGENTA='\033[0;35m'
NC='\033[0m'
Colormap = ('Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', 'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r', 'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis', 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r', 'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'icefire', 'icefire_r', 'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r', 'mako', 'mako_r', 'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r', 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r', 'rainbow', 'rainbow_r', 'rocket', 'rocket_r', 'seismic', 'seismic_r', 'spring', 'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r', 'tab20', 'tab20_r', 'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r', 'viridis', 'viridis_r', 'vlag', 'vlag_r', 'winter', 'winter_r')
def get_rel_path(path):
import functools, os
if not os.path.exists(path):
os.makedirs(path)
return functools.partial(os.path.join, path)
def is_exists(path, fname):
import os
return os.path.exists(os.path.join(path, fname))
############################
# Load balance functions
############################
def cv(lst):
"""returns the coefficient of variation of lst:
as percentage, the lower the better"""
return statistics.stdev(lst) / statistics.mean(lst) * 100
def jain(lst):
"""Jain's fairness index of lst"""
return sum(lst)**2 / (len(lst) * sum([e**2 for e in lst]))
def percent_imbalance(lst):
"""from EC-Cache: Lower is better"""
avg = statistics.mean(lst)
return (max(lst) - avg) / (avg) * 100
def lb_func(x, func):
return func([int(v) for v in x.split('-')])
def lb_cv(x): return lb_func(x, cv)
def lb_jain(x): return lb_func(x, jain)
def lb_pi(x): return lb_func(x, percent_imbalance)
def lb_std(x): return lb_func(x, statistics.stdev)
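# Illustrative usage of the load-balance helpers above. The dash-delimited string
# mirrors the 'load_distribution' column parsed in read_exchanged_data_csv below;
# the per-node values here are made up:
#   lb_cv('100-120-90')    # coefficient of variation, in percent (lower is better)
#   lb_jain('100-120-90')  # Jain's fairness index in (0, 1]; 1.0 means perfectly balanced
#   lb_pi('100-120-90')    # percent imbalance of the maximum relative to the mean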
############################
# Read csv files
############################
ONE_MB = 1024**2
ONE_GB = 1024**3
def str_to_int_list(delim_str, delim=' '):
return [int(v) for v in delim_str.split(delim)]
def human(file_size_mb_str):
fs = int(file_size_mb_str)
if fs < 1000:
return "%dMB" % fs
return "%dGB" % (fs // 1024)
def bytes_to_human(b):
if b < 1000:
return '%d MB' % b
if b % 1024 == 0:
return '%d GB' % (b // 1024)
return '%.1f GB' % (b / 1024)
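# Note: despite their names, both helpers above take a size that is already
# expressed in megabytes and render it as a human-readable MB/GB string, e.g.
#   human('512')         -> '512MB'
#   bytes_to_human(2048) -> '2 GB'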
def get_pretty_policy(df):
mp = {'ec': 'EC', 'rep3': 'REP', 'rep1': 'ONE',
'ec-RS-6-3-1024k' : 'EC',
# 'ec-RS-6-3-1024k' : 'RS(06, 3)',
# 'ec-RS-6-3-1024k' : '01M',
'ec-RS-6-3-8192k' : '08M',
'ec-RS-6-3-32768k' : '32M',
'ec-RS-3-2-1024k' : 'RS(03, 2)',
'ec-RS-10-4-1024k': 'RS(10, 4)'}
# return df['policy'].apply(lambda x: mp[x] if x in mp else x)
return df['policy'].replace(mp)#.astype('category')
def get_by_ec_cellsize(df):
dfx = df[df.policy.str.startswith('ec-RS-6-3')].copy()
dfx['policy'] = df['policy'].replace({
'ec-RS-6-3-1024k' : '01 MB',
'ec-RS-6-3-8192k' : '08 MB',
'ec-RS-6-3-32768k': '32 MB'})
return dfx
def get_by_ec_schema(df):
dfx = df[df.policy.str.endswith('1024k')].copy()
dfx['policy'] = df['policy'].replace({
'ec-RS-3-2-1024k' : 'RS(03, 2)',
'ec-RS-6-3-1024k' : 'RS(06, 3)',
'ec-RS-10-4-1024k': 'RS(10, 4)'})
return dfx
def read_mapred_csv(path):
def myround(x):
if x < 10: return 10
if x < 100: return ((x+5)//10)*10
return (x//100)*100
if 'FOG' in path:
def myround(x):
if x < 20: return 15
if x < 37: return 30
return 45
df = pd.read_csv(path)
df['datasize'] = df['hdfs_bytes_read'] // ONE_GB
df['datasize'] = df['datasize'].apply(myround)
df['input_size'] = df['hdfs_bytes_read']
df['output_size'] = df['hdfs_bytes_written']
df['job_name'] = df['job_name'].apply(lambda x: 'word-count' if x.startswith('word') else x)
df['policy'] = df['policy'].replace({'ec-RS-6-3-1024k': 'ec'})
df['policy'] = get_pretty_policy(df)
for colname in ['file_bytes_read_map', 'file_bytes_read_reduce',
'file_bytes_written_map', 'file_bytes_written_reduce']:
df[colname] //= ONE_GB
df['map_startTime'] /= 1000
df['map_finishTime'] /= 1000
df['reduce_startTime'] /= 1000
df['reduce_finishTime'] /= 1000
df['submittedAt'] /= 1000
df['launchedAt'] /= 1000
df['finishedAt'] /= 1000
df['schedule_time'] = df['launchedAt'] - df['submittedAt']
df['execution_time'] = df['finishedAt'] - df['launchedAt']
if 'spark' in path or 'NOL' in path or 'FOG' in path:
df['p_map'] = df['map_finishTime'] - df['launchedAt']
df['p_red'] = df['finishedAt'] - df['map_finishTime']
else:
df['p_map'] = df['reduce_startTime'] - df['launchedAt']
df['p_overlap'] = (df['map_finishTime'] - df['reduce_startTime']
).apply(lambda x: x if x>0 else 0)
df['p_red'] = df['finishedAt'] - df['map_finishTime']
df['p_map_percent'] = 100 * (df['p_map'] / df['execution_time'])
df['p_red_percent'] = 100 * (df['p_red'] / df['execution_time'])
df['p_overlap_percent'] = 100 * (df['p_map']+df['p_red']-df['execution_time']) / df['execution_time']
# df['schedule_time'] /= 1000
# df['execution_time'] /= 1000
df['throughput'] = df['datasize'] / df['execution_time']
df['num_succeeded_maps'] = df['total_launched_maps'] - df['num_killed_maps']
#
# for tasktype in ('map', 'reduce'):
# colname = '%s_exec_time' % tasktype
# df[tasktype + '_mean'] = df[colname].apply(lambda x:
# statistics.mean(str_to_int_list(x)))
# df[tasktype + '_stdev'] = df[colname].apply(lambda x:
# statistics.stdev(str_to_int_list(x)))
return df
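# Illustrative usage (the path is an assumption; any per-experiment mapred.csv
# trace from this repository should work):
#   df = read_mapred_csv('hdd/mapred.csv')
#   df.groupby(['job_name', 'policy'])['execution_time'].mean()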
def read_attempts_csv(path):
def myround(x):
if x < 10: return x
if x < 100: return ((x+5)//10)*10
return (x//100)*100
if 'FOG' in path:
def myround(x):
if x < 20: return 15
if x < 37: return 30
return 45
df = pd.read_csv(path)
df['input_size'] = df['input_size'] // ONE_GB
df['input_size'] = df['input_size'].apply(myround)
df['type'] = df['type'].apply(lambda x: x.lower())
df['job_name'] = df['job_name'].apply(lambda x: 'word-count' if x.startswith('word') else x)
df['policy'] = df['policy'].replace({'ec-RS-6-3-1024k': 'ec'})
df['policy'] = get_pretty_policy(df)
for col in ('tStartTime', 'tFinishTime',
'startTime', 'shuffleFinished', 'sortFinished', 'finishTime'):
df[col] /= 1000
if 'hdfsBytesRead' in df.columns:
df['hdfsBytesRead'] //= ONE_MB
if 'hdfsBytesWritten' in df.columns:
df['hdfsBytesWritten'] //= ONE_MB
if 'cpuMilliSeconds' in df.columns:
df['cpuMilliSeconds'] /= 1000
df['task_runtime'] = df['tFinishTime'] - df['tStartTime']
df['attempt_runtime'] = df['finishTime'] - df['startTime']
df['red_func_runtime'] = df['finishTime'] - df['sortFinished']
return df
def read_data_load_csv(csv_path):
df = pd.read_csv(csv_path)
df['file_size'] //= ONE_MB
# df['data_load'] //= ONE_MB
df['total_logical'] = df['file_size'] * df['nb_files']
df['total_logical'] //= 1024
r_factor = {'ec': 1.5, 'rep3': 3}
df['total_physical'] = df['total_logical'] * df['policy'].apply(lambda x: r_factor[x])
df['policy'] = get_pretty_policy(df)
df["policy_filesize"] = df["file_size"].apply(lambda fs:
"Big files (" if fs > 1000 else "Small files ("
) + df["policy"].apply(lambda x: x+")")
return df
def read_blocks_csv(csv_path):
df = pd.read_csv(csv_path)
# df['file_size'] //= ONE_MB
return df
def read_hdfs_csv(csv_path):
df = pd.read_csv(csv_path)
df['file_size'] //= ONE_MB
df['file_size_h'] = df['file_size'].apply(bytes_to_human)
df['dataset_size'] = df['file_size'] * df['nb_clients']
df['policy'] = get_pretty_policy(df)
if 'get_time' in df.columns:
df['get_throughput'] = df['file_size'] / df['get_time']
if 'put_time' in df.columns:
df['put_throughput'] = df['file_size'] / df['put_time']
if 'op_time' in df.columns:
df['op_throughput'] = df['file_size'] / df['op_time']
if 'network_data_size' in df.columns:
df['network_data_size'] /= ONE_GB
return df
def read_exchanged_data_csv(csv_path):
df = pd.read_csv(csv_path)
if 'tx' in df.columns:
df['tx'] //= ONE_GB
df['policy'] = get_pretty_policy(df)
df['job_name'] = df['job_name'].apply(lambda x: 'word-count' if x.startswith('word') else x)
if 'load_distribution' in df.columns:
df['load_cv'] = df['load_distribution'].apply(lb_cv)
df['load_jain'] = df['load_distribution'].apply(lb_jain)
df['load_pi'] = df['load_distribution'].apply(lb_pi)
df['load_std'] = df['load_distribution'].apply(lb_std)
return df
def read_testdfsio_csv(csv_path):
df = pd.read_csv(csv_path)
df['total_gb'] = df['total_bytes'] / 1024
df['filesize'] = df['total_gb'] / df['nb_files']
return df
def read_testdfsio_ed_csv(csv_path):
df = pd.read_csv(csv_path)
df['tx'] //= ONE_GB
df['total_gb'] = df['filesize'] * df['nb_files']
return df
def read_network_monitoring_csv(csv_path):
df = pd.read_csv(csv_path)
if 'tx' in df.columns:
df['tx'] //= ONE_MB
if 'rx' in df.columns:
df['rx'] //= ONE_MB
return df
def read_failure_info_csv(csv_path):
df = pd.read_csv(csv_path)
df['policy'] = df['policy'].replace({'ec-RS-6-3-1024k': 'ec'})
df['policy'] = get_pretty_policy(df)
return df
def read_monitoring_csv(csv_path):
df = pd.read_csv(csv_path)
df = df[(df.timestep > 1)]
for column in ('read_bytes', 'write_bytes', 'bytes_sent', 'bytes_recv'):
df[column] //= ONE_MB
for column in filter(lambda x: x.startswith('mem_'), list(df)):
df[column] //= ONE_GB
df['policy'] = get_pretty_policy(df)
if 'cpu_utilization' not in list(df):
df['cpu_SUM'] = df['cpu_user'] + df['cpu_system'] + df['cpu_iowait']
# ecotype network prefix 172.16.193
# econome network prefix 172.16.192
# valid for big4020 infrastructure configuration
# when the clients are hosted by ecotype cluster
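    # The 'connections' field is expected to be '/'-separated entries of the form
    # 'remote_ip-remote_port:local_addr'; only remote IPs with the client (ecotype)
    # prefix are counted. Example value (made-up addresses, format as parsed below):
    #   '172.16.193.10-50010:172.16.192.3-41234/172.16.192.5-50010:172.16.192.3-41235'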
def get_conn_list(connections):
lst = []
if connections == 'None':
return lst
for conn in connections.split('/'):
raddr, laddr = conn.split(':')
ip, port = raddr.split('-')
if '172.16.193' in ip:
lst.append(ip)
return lst
def cnt_conn(connections):
return len(get_conn_list(connections))
def cnt_conn_uniq(connections):
return len(set(get_conn_list(connections)))
# if 'connections' in df.columns:
# df['nb_conn_clients'] = df['connections'].apply(cnt_conn)
# df['nb_conn_clients_unique'] = df['connections'].apply(cnt_conn_uniq)
return df
############################
# DF analysis
############################
def groupby_p1_p2(df, p1, p2, index, columns, values):
unstack_df = lambda dfx, prop: getattr(dfx, prop)().unstack()
dfg = df.groupby([index, columns])[values]
return unstack_df(dfg, p1), unstack_df(dfg, p2)
def groupby_mean_std(df, index, columns, values):
return groupby_p1_p2(df, 'mean', 'std', index, columns, values)
def groupby_sum_std(df, index, columns, values):
return groupby_p1_p2(df, 'sum', 'std', index, columns, values)
def groupby_min_max(df, index, columns, values):
return groupby_p1_p2(df, 'min', 'max', index, columns, values)
def groupby_q50_q75(df, index, columns, values):
dfg1 = df.groupby([index, columns])[values].quantile(0.5).unstack()
dfg2 = df.groupby([index, columns])[values].quantile(0.75).unstack()
return dfg1, dfg2
def pivot_df(df, index, columns, values):
return groupby_mean_std(df, index, columns, values)
# rip = lambda x: x().reset_index().pivot(
# index=index, columns=columns, values=values)
# dfg = df.groupby([index, columns])[values]
# errors = rip(dfg.std)
# mean = rip(dfg.mean)
# return mean, errors
def pivot_df_sum(df, index, columns, values):
return groupby_sum_std(df, index, columns, values)
# rip = lambda x: x().reset_index().pivot(
# index=index, columns=columns, values=values)
# dfg = df.groupby([index, columns])[values]
# errors = rip(dfg.std)
# mean = rip(dfg.sum)
# return mean, errors
def pivot_df_max(df, index, columns, values):
return groupby_min_max(df, index, columns, values)
# rip = lambda x: x().reset_index().pivot(
# index=index, columns=columns, values=values)
# dfg = df.groupby([index, columns])[values]
# min = rip(dfg.min)
# max = rip(dfg.max)
# return min, max
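# Illustrative usage of the pivot helpers above, with the column names produced
# by read_mapred_csv (mean per (datasize, policy) cell, std as error bars):
#   mean, err = pivot_df(df, index='datasize', columns='policy', values='execution_time')
#   fig = bar_plot(mean, errors=err, ylabel='Execution time (s)')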
import itertools, functools
def itr_df_props(df, props, filters=None):
if filters is None:
        filters = [None] * len(props)
def select_list(filter_func, lst):
return [c for c in lst if not filter_func(c)] \
if filter_func is not None else lst
def select_list_df(filter_func, df, prop):
return select_list(filter_func, sorted(getattr(df, prop).unique()))
select_lists = [select_list_df(filter, df, prop)
for prop, filter in zip(props, filters)]
for select_vals in itertools.product(*select_lists):
dd = functools.reduce(lambda f, p: f[(getattr(f, p[1]) == p[0])],
zip(select_vals, props), df)
yield (dd, *select_vals)
# nb_clients_filter = lambda cl: cl != 5
# nb_clients_filter = None
# datasize_filter = lambda ds: ds != 1024*5 and 0
# datasize_filter = None
# for dfd, _, _ in itr_df_props(df, ['nb_clients', 'datasize'],
# [nb_clients_filter, datasize_filter]):
# print(dfd.nb_clients.unique(), dfd.datasize.unique())
# return
############################
# Plotting/Figures
############################
def save_close(fig, output_image_path, **kwargs):
if fig is None: return
dpi = kwargs.get("dpi", 100)
format = kwargs.get("format", "png")
fontsize = kwargs.get("fontsize", 16)
ticks_fontsize = kwargs.get("ticks_fontsize", fontsize)
def format_ax(ax):
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(ticks_fontsize)
for item in [ax.title, ax.xaxis.label, ax.yaxis.label] + \
([] if ax.get_legend() is None else [ax.get_legend().get_title()]):
item.set_fontsize(fontsize)
# for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
# item.set_fontweight('bold')
for ax in fig.get_axes():
format_ax(ax)
fig.tight_layout()
# plt.tight_layout(pad=5.4, w_pad=0.5, h_pad=5.0)
# fig.subplots_adjust(bottom=0.4, top=0.45)
to_eps = 0
if format == 'eps-pdf':
to_eps = 1
format = 'pdf'
fig.savefig(output_image_path + '.' + format, format=format, dpi=dpi)
plt.close(fig)
if to_eps:
import subprocess, os
subprocess.call(['inkscape', output_image_path + '.pdf', '--export-eps', output_image_path + '.eps'])
os.remove(output_image_path + '.pdf')
def hatch_cdf_ec_rep(ax):
lines = ax.patches
# linestyle or ls: ['solid' , 'dashed', 'dashdot', 'dotted']
linestyle = ('solid', 'dashdot')
linewidth = (2.0, 1.2)
for line, ls, lw in zip(lines, linestyle, linewidth):
line.set_lw(lw)
line.set_ls(ls)
ax.set_ylabel('')
ax.legend(loc='upper left', fontsize='xx-large').set_title('')
def hatch_bars(df, ax):
bars = ax.patches
nb_bars, nb_groups = len(bars), len(df)
nb_columns = nb_bars // nb_groups
linewidth = (1.1, ) * nb_bars
patterns = ('/', '\\', '-', 'x', '+', '//','\\\\', 'o','O', )[:nb_columns]
hatches = [p for p in patterns for i in range(nb_groups)]
for bar, hatch, lw in zip(bars, hatches, linewidth):
bar.set_hatch(hatch)
bar.set_lw(lw)
# bar.set_color('white')
bar.set_edgecolor('black')
def bar_plot(pdf, errors=None, **kwargs):
title = kwargs.get("title", '')
ylabel = kwargs.get("ylabel", '')
xlabel = kwargs.get("xlabel", '')
rot = kwargs.get('rot', 0)
figsize = kwargs.get('figsize', None)
legend_loc = kwargs.get("legend_loc", 'upper left')
legend_title = kwargs.get("legend_title", '')
legend_fontsize = kwargs.get("legend_fontsize", 'xx-large')
xticklabels = kwargs.get("xticklabels", None)
ax = pdf.plot.bar(rot=rot, title=title, yerr=errors, figsize=figsize,
error_kw=dict(ecolor='k', capsize=3,
elinewidth=1.5, markeredgewidth=1))
hatch_bars(pdf, ax)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.legend(title=legend_title, loc=legend_loc, fontsize=legend_fontsize)
# ax.legend( # loc='lower right',
# fontsize=legend_fontsize, columnspacing=1, handletextpad=0.5,
# ncol=3, loc='upper center', bbox_to_anchor=(0.45, 1.3))
# ax.legend( # loc='lower right',
# fontsize=legend_fontsize, columnspacing=1, handletextpad=0,
# ncol=1, loc='right', bbox_to_anchor=(1.4, 0.5))
ax.grid(which='major', linestyle=':')
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
return ax.get_figure()
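# Illustrative end-to-end sketch combining the helpers above (the paths are
# assumptions; adapt them to the trace layout in this repository):
#   df = read_mapred_csv('hdd/mapred.csv')
#   mean, err = pivot_df(df, index='datasize', columns='policy', values='execution_time')
#   fig = bar_plot(mean, errors=err, ylabel='Execution time (s)', xlabel='Input size (GB)')
#   save_close(fig, get_rel_path('figures')('execution_time'), format='png', dpi=100)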
run_id,policy,file_size,client_id,operation,op_time,nb_clients
0,ec-RS-6-3-1024k,268435456,0,put,4.275,1
0,ec-RS-6-3-1024k,268435456,0,get,4.574,1
0,ec-RS-6-3-1024k,1073741824,0,put,6.73,1
0,ec-RS-6-3-1024k,1073741824,0,get,5.349,1
0,ec-RS-6-3-1024k,5368709120,0,put,20.829,1
0,ec-RS-6-3-1024k,5368709120,0,get,13.49,1
0,ec-RS-6-3-1024k,10737418240,0,put,37.434,1
0,ec-RS-6-3-1024k,10737418240,0,get,20.001,1
0,ec-RS-6-3-1024k,21474836480,0,put,69.551,1
0,ec-RS-6-3-1024k,21474836480,0,get,36.056,1
0,ec-RS-10-4-1024k,268435456,0,put,3.977,1
0,ec-RS-10-4-1024k,268435456,0,get,3.463,1
0,ec-RS-10-4-1024k,1073741824,0,put,6.355,1
0,ec-RS-10-4-1024k,1073741824,0,get,4.754,1
0,ec-RS-10-4-1024k,5368709120,0,put,19.123,1
0,ec-RS-10-4-1024k,5368709120,0,get,12.444,1
0,ec-RS-10-4-1024k,10737418240,0,put,35.5,1
0,ec-RS-10-4-1024k,10737418240,0,get,21.149,1
0,ec-RS-10-4-1024k,21474836480,0,put,73.073,1
0,ec-RS-10-4-1024k,21474836480,0,get,36.459,1
0,ec-RS-3-2-1024k,268435456,0,put,4.002,1
0,ec-RS-3-2-1024k,268435456,0,get,3.825,1
0,ec-RS-3-2-1024k,1073741824,0,put,6.665,1
0,ec-RS-3-2-1024k,1073741824,0,get,6.033,1
0,ec-RS-3-2-1024k,5368709120,0,put,22.237,1
0,ec-RS-3-2-1024k,5368709120,0,get,18.034,1
0,ec-RS-3-2-1024k,10737418240,0,put,39.845,1
0,ec-RS-3-2-1024k,10737418240,0,get,32.165,1
0,ec-RS-3-2-1024k,21474836480,0,put,76.481,1
0,ec-RS-3-2-1024k,21474836480,0,get,63.039,1
0,ec-RS-6-3-8192k,268435456,0,put,4.69,1
0,ec-RS-6-3-8192k,268435456,0,get,3.646,1
0,ec-RS-6-3-8192k,1073741824,0,put,7.54,1
0,ec-RS-6-3-8192k,1073741824,0,get,5.137,1
0,ec-RS-6-3-8192k,5368709120,0,put,22.51,1
0,ec-RS-6-3-8192k,5368709120,0,get,13.504,1
0,ec-RS-6-3-8192k,10737418240,0,put,39.115,1
0,ec-RS-6-3-8192k,10737418240,0,get,23.891,1
0,ec-RS-6-3-8192k,21474836480,0,put,77.86,1
0,ec-RS-6-3-8192k,21474836480,0,get,43.504,1
0,ec-RS-6-3-32768k,268435456,0,put,5.286,1
0,ec-RS-6-3-32768k,268435456,0,get,3.775,1
0,ec-RS-6-3-32768k,1073741824,0,put,8.174,1
0,ec-RS-6-3-32768k,1073741824,0,get,6.035,1
0,ec-RS-6-3-32768k,5368709120,0,put,22.462,1
0,ec-RS-6-3-32768k,5368709120,0,get,16.191,1