diff --git a/.gitignore b/.gitignore index b9c45e842b0ff3b65e1e30a28491c0eff94f5f8c..d64ef6427711312daa9709fa20d97b764b8ef827 100644 --- a/.gitignore +++ b/.gitignore @@ -6,11 +6,6 @@ __pycache__/ *.py[cod] *$py.class -#custom -log/* -log*-*/* -*.zip - # C extensions *.so diff --git a/GPRL/MCTS/MCTS.py b/GPRL/MCTS/MCTS.py index 2b7bb175ad06d0c072fc68575bd5866f8c69603f..04cbedf009aaf4df68523635e6a3d6136fc2646f 100644 --- a/GPRL/MCTS/MCTS.py +++ b/GPRL/MCTS/MCTS.py @@ -1,4 +1,4 @@ -from ..reprenstation.linearGP import Instruction +from ..genetic_programming.linearGP import Instruction import numpy as np import copy @@ -7,7 +7,7 @@ import random from collections import deque from itertools import product -class TreeNMCS(object): +class TreeNMCS(object):#Nested monte-carlo to optimise a typed tree of operation def __init__(self, pset, max_len, fitness, nb_playout=1): self.max_len = max_len self.pset = pset @@ -49,7 +49,6 @@ class TreeNMCS(object): def run(self, individual, leaves, level): best_individual = None best_fitness = np.NINF - #idx = 0 while len(leaves) != 0: ops = self.possible_primitives(individual, leaves) for op in ops: @@ -65,10 +64,9 @@ class TreeNMCS(object): best_individual = copy.deepcopy(cp_ind) best_fitness = fitness individual, leaves = self.play(individual, leaves, best_individual[len(individual)]) - #idx += 1 return individual -class LinearNCMS(object):#/!\ never been tested +class LinearNCMS(object):#/!\ never been tested used the linear gp representation too build expressions with NMCS def __init__(self, interpreter, regCalcSize, regSize, max_len, fitness, nb_playout=1): self.max_len = max_len self.interpreter = interpreter @@ -78,7 +76,7 @@ class LinearNCMS(object):#/!\ never been tested self.regCalcSize = regCalcSize self.regSize = regSize - def play(self, individual, instruction, R_eff): + def play(self, individual, instruction, R_eff):#add an instruction to the list if not self.interpreter.branch[instruction.opcode]: R_eff.remove(instruction.dst) R_eff.add(instruction.inpt1) @@ -88,7 +86,7 @@ class LinearNCMS(object):#/!\ never been tested individual.appendleft(instruction) return individual - def possible_primitives(self, individual, R_eff, opsOnly=False): + def possible_primitives(self, individual, R_eff, opsOnly=False):#possible primitives, opsOnly=False -> test also register values, opsOnly=True -> test only ops with random registers instructions = [] out = R_eff.intersection(set(range(self.regCalcsize))) if not bool(out): diff --git a/GPRL/UCB.py b/GPRL/UCB.py index 666f3429182e2588d1e1a305375d3e4a1933e65e..9aaaab13ef362e0681d53628a737212460fbd07a 100644 --- a/GPRL/UCB.py +++ b/GPRL/UCB.py @@ -9,7 +9,7 @@ from deap.base import Fitness from deap import tools import numpy as np -class UCBFitness(Fitness): +class UCBFitness(Fitness):#calculation of the fitness using UCB c=np.sqrt(2) sigma=1.0 @@ -33,7 +33,7 @@ class UCBFitness(Fitness): self.rewards = [] self.offset = 0 -class HeapWithKey(list): +class HeapWithKey(list):# For speeder arm selection use heap representations def __init__(self, initial=[], key=lambda x:x): super(HeapWithKey, self).__init__([(key(item), i, item) for i, item in enumerate(initial)]) self.key = key @@ -51,6 +51,7 @@ class HeapWithKey(list): def pop(self): return heapq.heappop(self)[2] +#Selection with double tournament to keep not only best individuals but also most pulled arms def selDoubleTournament(individuals, k, fitness_size, parsimony_size, fitness_first=True, fit_attr="fitness"): assert (0.0 < parsimony_size <= 2), "Parsimony 
tournament size has to be in the range [1, 2]." @@ -84,12 +85,16 @@ def selDoubleTournament(individuals, k, fitness_size, parsimony_size, fitness_fi return _fitTournament(individuals, k, tsize) -class ArmHof(tools.HallOfFame): +class ArmHof(tools.HallOfFame):#Hall of fame of most tested individual def __init__(self, maxsize): super().__init__(maxsize, similar=eq) def update(self, population): for ind in population: + for i in range(len(self)):#to avoid duplicate + if self.similar(self[i], ind): + self.remove(i) + break if len(self) == 0 and self.maxsize !=0: # Working on an empty hall of fame is problematic for the # "for else" @@ -101,13 +106,14 @@ class ArmHof(tools.HallOfFame): self.insert(ind) def insert(self, item): - item = deepcopy(item) - i = bisect_right(self.keys, len(item.fitness.rewards)) + _item = deepcopy(item) + _item.fitness.rewards = deepcopy(item.fitness.rewards) + i = bisect_right(self.keys, len(_item.fitness.rewards)) self.items.insert(len(self) - i, item) - self.keys.insert(i, len(item.fitness.rewards)) + self.keys.insert(i, len(_item.fitness.rewards)) -class UpdateFitnessHof(tools.HallOfFame): +class UpdateFitnessHof(tools.HallOfFame):#Fitness hof that take into account fitness change of individuals def __init__(self, maxsize, similar=eq, maxsize_arm=None): super().__init__(maxsize, similar=similar) self.maxsize_arm = maxsize_arm @@ -116,17 +122,18 @@ class UpdateFitnessHof(tools.HallOfFame): def update(self, population): for ind in population: - if len(self) == 0 and self.maxsize !=0: - # Working on an empty hall of fame is problematic for the - # "for else" - self.insert(population[0]) - continue idx = 0 - while idx < len(self): + while idx < len(self):#to avoid duplicate if self.similar(ind, self[idx]): self.remove(idx) break idx+=1 + if len(self) == 0 and self.maxsize !=0: + # Working on an empty hall of fame is problematic for the + # "for else" + self.insert(ind) + continue + if ind.fitness > self[-1].fitness or len(self) < self.maxsize: # The individual is unique and strictly better than # the worst @@ -136,34 +143,23 @@ class UpdateFitnessHof(tools.HallOfFame): if self.maxsize_arm is not None: self.arm_hof.update(population) -class UpdateFitnessParetoFront(tools.ParetoFront): - def __init__(self, similar, maxsize_arm=None): +class UpdateFitnessParetoFront(tools.ParetoFront):#Pareto front that take into account fitness change of individuals + def __init__(self, similar=eq, maxsize_arm=None): super().__init__(similar=similar) self.maxsize_arm = maxsize_arm if maxsize_arm: self.arm_hof = ArmHof(self.maxsize_arm) def update(self, population): - for ind in population: - is_dominated = False - dominates_one = False - has_twin = False - to_remove = [] - for i, hofer in enumerate(self): # hofer = hall of famer - if not dominates_one and hofer.fitness.dominates(ind.fitness): - is_dominated = True - break - elif ind.fitness.dominates(hofer.fitness): - dominates_one = True - to_remove.append(i) - elif ind.fitness == hofer.fitness and self.similar(ind, hofer):#à corriger! 
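For context, the UCB machinery above and the `eaMuPlusLambdaUCB` loop updated later in this diff are meant to be wired together roughly as in the sketch below. It is only indicative: the `creator.create` call mirrors the one in `experiments/ADF.py`, the scheduler and callback follow the signatures used by `budget_scheduler`/`iteration_callback` in `GPRL/algorithms.py` and `save_each_generation` in `GPRL/utils/utils.py`, and the toolbox, stats and the commented-out call are placeholders rather than code from this patch.

```python
# Illustrative sketch only (not part of the patch).
import numpy as np
from deap import creator, tools
from GPRL.UCB import UCBFitness
from GPRL.algorithms import eaMuPlusLambdaUCB

# UCB-aware fitness: c and sigma become class attributes (same pattern as experiments/ADF.py).
creator.create("FitnessMax", UCBFitness, weights=(1.0,), c=np.sqrt(2), sigma=1.0)

# The number of pulls of each arm is exposed through fitness.rewards,
# which is what the bandit statistics and ArmHof rely on.
stats_bandit = tools.Statistics(lambda ind: len(ind.fitness.rewards))

def budget_scheduler(gen, population, simulation_budget, parallel_update):
    # New signature: may update both the simulation budget and the parallel update size.
    if gen > 50:
        return 10, parallel_update
    return simulation_budget, parallel_update

def iteration_callback(gen, population, halloffame, logbook):
    # Called after generation 0 and after every later generation (see save_each_generation).
    pass

# pop, log = eaMuPlusLambdaUCB(pop, toolbox, simulation_budget=5, parallel_update=16,
#                              mu=100, lambda_=100, cxpb=0.1, mutpb=0.9, ngen=200,
#                              stats=stats_bandit, halloffame=hof,
#                              budget_scheduler=budget_scheduler,
#                              iteration_callback=iteration_callback)
```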
- has_twin = True - break + for ind in population:#remove duplicate to update there value + idx = 0 + while idx < len(self): + if self.similar(ind, self[idx]): + self.remove(idx) + idx-=1 + idx+=1 - for i in reversed(to_remove): # Remove the dominated hofer - self.remove(i) - if not is_dominated and not has_twin: - self.insert(ind) - + super().update(population) + if self.maxsize_arm is not None: - self.arm_hof.update() \ No newline at end of file + self.arm_hof.update(population) \ No newline at end of file diff --git a/GPRL/algorithms.py b/GPRL/algorithms.py index 79b2869db5f9b24732c9b672e97d41711cdb1155..2c7799e0a82c28db5c4bda4f61348d3b04532fbf 100644 --- a/GPRL/algorithms.py +++ b/GPRL/algorithms.py @@ -5,9 +5,9 @@ from timeit import default_timer as timer from .UCB import UCBFitness, HeapWithKey -#warning ucb on first objective ! +#stochastique objective must be placed first ! def eaMuPlusLambdaUCB(population, toolbox, simulation_budget, parallel_update, mu, lambda_, cxpb, mutpb, ngen, - select=False, stats=None, halloffame=None, verbose=__debug__, budget_scheduler=None): + select=False, stats=None, halloffame=None, verbose=__debug__, budget_scheduler=None, iteration_callback=None): assert all([isinstance(ind.fitness, UCBFitness) for ind in population]) logbook = tools.Logbook() @@ -43,11 +43,13 @@ def eaMuPlusLambdaUCB(population, toolbox, simulation_budget, parallel_update, m logbook.record(gen=0, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) + if iteration_callback is not None: + iteration_callback(0, population, halloffame, logbook) # Begin the generational process for gen in range(1, ngen + 1): if budget_scheduler is not None: - simulation_budget = budget_scheduler(ngen, population) + simulation_budget, parallel_update = budget_scheduler(ngen, population, simulation_budget, parallel_update) # Vary the population offspring = algorithms.varOr(population, toolbox, lambda_, cxpb, mutpb) @@ -90,6 +92,8 @@ def eaMuPlusLambdaUCB(population, toolbox, simulation_budget, parallel_update, m logbook.record(gen=gen, nevals=len(invalid_ind), **record) if verbose: print(logbook.stream) + if iteration_callback is not None: + iteration_callback(gen, population, halloffame, logbook) return population, logbook @@ -243,5 +247,4 @@ def qdLambda(init_batch, toolbox, container, batch_size, ngen, lambda_, cxpb = 0 if iteration_callback is not None: iteration_callback(i, batch, container, logbook) - return batch, logbook - + return container, logbook diff --git a/GPRL/containers/grid.py b/GPRL/containers/grid.py new file mode 100644 index 0000000000000000000000000000000000000000..8f5c13ec26aea68388077de129ef9d33e180f7b4 --- /dev/null +++ b/GPRL/containers/grid.py @@ -0,0 +1,20 @@ +from functools import reduce +import operator +import numpy as np +from qdpy.containers import Grid + +class FixGrid(Grid):# fix a bug in qdpy 0.1.2.1 with Deep grid (nb_items_per_bin) + def _init_grid(self) -> None: + """Initialise the grid to correspond to the shape `self.shape`.""" + self._solutions = {x: [] for x in self._index_grid_iterator()} + self._nb_items_per_bin = np.zeros(self._shape, dtype=bool) #{x: 0 for x in self._index_grid_iterator()} + self._fitness = {x: [] for x in self._index_grid_iterator()} + self._features = {x: [] for x in self._index_grid_iterator()} + self._quality = {x: None for x in self._index_grid_iterator()} + self._quality_array = np.full(self._shape + (len(self.fitness_domain),), np.nan) + self._bins_size = [(self.features_domain[i][1] - self.features_domain[i][0]) / 
float(self.shape[i]) for i in range(len(self.shape))] + self._filled_bins = 0 + self._nb_bins = reduce(operator.mul, self._shape) + self.recentness_per_bin = {x: [] for x in self._index_grid_iterator()} + self.history_recentness_per_bin = {x: [] for x in self._index_grid_iterator()} + self.activity_per_bin = np.zeros(self._shape, dtype=float) \ No newline at end of file diff --git a/GPRL/factory.py b/GPRL/factory.py index 515cb6ed4a0cbb7502fcbc181bd563ea618af485..4098c6cac465b28bc9d2382005d13035521e1929 100644 --- a/GPRL/factory.py +++ b/GPRL/factory.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod -class EvolveFactory(ABC): +class EvolveFactory(ABC):#Base class to produce toolbox in different script and keep multiprocessing support (evolve.py script) def __init__(self, conf): self.conf = conf diff --git a/GPRL/genetic_programming/linearGP.py b/GPRL/genetic_programming/linearGP.py index 2c831fb452c49db74b95a5791a979b2b8f52363c..5c19ea6561e78d8dfc9130555c6b90588cbdef2a 100644 --- a/GPRL/genetic_programming/linearGP.py +++ b/GPRL/genetic_programming/linearGP.py @@ -8,13 +8,17 @@ import numpy as np from collections import namedtuple -#/!\ warning authorized dst in effective mutINstr and mutInsert (range(regCalcSize)) - +""" +Implementation based on : +https://github.com/ChengyuanSha/SMILE/blob/master/linear_genetic_programming/_program.py +and +Book : Brameier, M. F., & Banzhaf, W. (2007). Linear genetic programming. Springer Science & Business Media. +""" NUM_OPS = 12 opcode_complexity = np.array([1, 1, 1, 2, 2, 4, 4, 4, 3, 4, 4, 4]) -class Interpreter(ABC): +class Interpreter(ABC):# Abstract class to define custom interpreter @abstractmethod def opcode(code, x1, x2): @@ -32,10 +36,10 @@ class BasicInterpreter: self.arity = np.array([2]*6 + [1]*3 + [2]*3) self.branch = np.zeros(self.num_ops, dtype=bool) self.branch[10:] = True - self.masked_branch = self.branch[mask].copy() + self.masked_branch = self.branch[mask].copy()# for random selection of operator self.ops = np.arange(0, self.num_ops)[mask] - def opcode(self, code, x1, x2): + def opcode(self, code, x1, x2):#code to operator translation c_undef = 1e7 if code==0: return np.add(x1, x2) @@ -45,7 +49,6 @@ class BasicInterpreter: return np.multiply(x1, x2) elif code==3: return np.float_power(np.abs(x1),x2, where=np.logical_and(np.abs(x2) <= 10, np.logical_and(np.abs(x1) > 0.001, np.abs(x1) < 1000)), out=np.full_like(x1, c_undef)) - #return np.where(np.logical_and(np.abs(x2) <= 10, np.abs(x1) != 0), np.float_power(np.abs(x1), x2), x1 + c_undef) elif code==4: return np.divide(x1, x2, where=np.abs(x2) > 0.001, out=np.full_like(x1, c_undef)) elif code==5: @@ -54,10 +57,9 @@ class BasicInterpreter: return np.sin(x1) elif code==7: return np.exp(x1, where=np.abs(x1)<32, out=np.full_like(x1, c_undef)) - #return np.where(np.abs(x1)<32, np.exp(x1), x1 + c_undef) elif code==8: return np.log(np.abs(x1), where=np.abs(x1) > 0.00001, out=np.full_like(x1, c_undef)) - #return np.where(np.abs(x1) > 0.001, np.log(x1), x1 + c_undef) + elif code==9: return np.where(x1 > x2, -x1, x1) elif code==10: @@ -120,7 +122,7 @@ class Program(list): def to_effective(self, outputIdxs, stopAt=0): effective = [] - idxs = [] + idxs = []#index of the effective instructions in self (for mutation pupose) R_eff = set(outputIdxs) k = len(self)-1 while k >= stopAt: @@ -224,7 +226,7 @@ class Program(list): def mutInstr(prog, pReg, pOp, pConst, pBranch, sigma=1.0, effective=None): if not prog: return prog - if effective:#revoir + if effective:#only make effective mutation eff, 
_, idxs =prog.to_effective(effective) idx = random.choice(idxs) if idxs else random.randint(0, len(prog)-1) else: @@ -285,7 +287,7 @@ class Program(list): register[program.regCalcSize:program.regCalcSize+inputs.shape[0]] = inputs.copy() output = Program._execute(program, register) return output[outputIdxs] - elif inputs.ndim == 2: + elif inputs.ndim == 2:# speed up the programm execution by using numpy array operator assert inputs.shape[1] == program.regInputSize ndim_register = np.zeros((register.shape[0], inputs.shape[0])) for k in range(inputs.shape[0]): @@ -311,7 +313,7 @@ class Program(list): else: mask = tmp - if ~mask.all(): + if ~mask.all():#if no input verify the condition skip subsequent instructions branch_flag = False while i < len(program) - 1 and program.interpreter.branch[program[i + 1].opcode]: # if next is still a branch i += 1 @@ -330,6 +332,16 @@ class Program(list): return register def graph(prog, outputIdxs, debug=False, terminals_name=None): + """ + Gives graph representation of the programm. Use pygraphviz to get the graph (see tutorial.ipynb) + arguments: + prog: the programm to draw as a graph + outputIdx: registers used as output + debug: Show instruction number in node of the graph (optional) + terminals_name: names of the terminials nodes to display (optionnal) + return: + nodes, edges, labels, branch_edges + """ prgm, _, _ = prog.to_effective(outputIdxs) start = prog.regCalcSize + prog.regInputSize + prog.regConstSize nodes = [] @@ -347,14 +359,8 @@ def graph(prog, outputIdxs, debug=False, terminals_name=None): labels[k+start] = str(k)+ "\n" + prog.interpreter.toString(instr.opcode) else: labels[k+start] = prog.interpreter.toString(instr.opcode) - - arity = prog.interpreter.arity[instr.opcode] - if instr.inpt1 in nodes_dst.keys(): - edges.append((nodes_dst[instr.inpt1], k+start)) - else: - terminal_nodes.add(instr.inpt1) - edges.append((instr.inpt1, k+start)) + arity = prog.interpreter.arity[instr.opcode] if arity==2: if instr.inpt2 in nodes_dst.keys(): edges.append((nodes_dst[instr.inpt2], k+start)) @@ -362,6 +368,12 @@ def graph(prog, outputIdxs, debug=False, terminals_name=None): terminal_nodes.add(instr.inpt2) edges.append((instr.inpt2, k+start)) + if instr.inpt1 in nodes_dst.keys(): + edges.append((nodes_dst[instr.inpt1], k+start)) + else: + terminal_nodes.add(instr.inpt1) + edges.append((instr.inpt1, k+start)) + if not prog.interpreter.branch[instr.opcode]: if branch_flag and instr.dst in nodes_dst.keys(): @@ -386,6 +398,8 @@ def graph(prog, outputIdxs, debug=False, terminals_name=None): labels[k] = "ARG" + str(k-prog.regCalcSize) else: labels[k] = 'Const' + str(k-prog.regCalcSize-prog.regInputSize) + if debug: + labels[k] += "\n"+str(k) return nodes, edges, labels, branch_edges @@ -412,16 +426,6 @@ def edit_distance(p1, p2): distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1]))) distances = distances_ return distances[-1] - -def sementic_introns(prog, block_size=None): - import copy - if not block_size: - block_size = len(prog) - for block in range(1, block_size): - for k in range(len(prog)-block): - prgm = copy.deepcopy(prog) - del prgm[k:k+block] - yield prgm def initProgam(pcls, regCalcSize, regInputSize, regConstSize, pConst, pBranch, min_, max_, rnd=None, ops=None): kwargs = dict() @@ -433,6 +437,18 @@ def initProgam(pcls, regCalcSize, regInputSize, regConstSize, pConst, pBranch, m return pcls(prgm, prgm.regCalcSize, prgm.regInputSize, prgm.regConstSize, prgm.regConst, prgm.interpreter) def semantic_introns(evaluate, 
programm, max_block_size=None, lower_bound=None, test=lambda x,y: x<=y): + """ + Algorithm 3.2 (elimination of semantic introns) from the book + arguments: + evaluate: a function that takes a program and returns its fitness + programm: the program to evaluate + max_block_size: maximum block size to delete; affects the for loop (optional) + lower_bound: minimum threshold value used to keep the program; if not provided, the fitness of the base program is used (optional) + test: predicate defining the condition under which a program is kept (optional, default: better fitness than the base program) + return: + Program + """ + if lower_bound is None: base_score = evaluate(programm) else: @@ -461,6 +477,7 @@ def semantic_introns(evaluate, programm, max_block_size=None, lower_bound=None, from deap import tools from operator import attrgetter from copy import deepcopy +#Diversity tournament -> book Chapter 9: CONTROL OF DIVERSITY AND VARIATION STEP SIZE def selDoubleTournament(individuals, k, fitness_size, diversity_size, fitness_first=True, fit_attr="fitness", effective=None): assert (1 <= diversity_size <= 2), "Parsimony tournament size has to be in the range [1, 2]." @@ -510,6 +527,20 @@ def selDoubleTournament(individuals, k, fitness_size, diversity_size, fitness_fi #CrossOver def cxLinear(ind1, ind2, l_min, l_max, l_smax, dc_max, ds_max): + """ + Linear crossover similar to the one defined in the book, chapter 5.7.1 Linear Crossover. + Individuals should be list-like. + arguments: + ind1: first individual to mate + ind2: second individual to mate + l_min: minimum length of an individual + l_max: maximum length of an individual + l_smax: maximum length of a segment + dc_max: maximum difference between segment start positions + ds_max: maximum difference between segment lengths + return: + mated ind1, ind2 + """ if len(ind1)>len(ind2): ind1, ind2 = ind2, ind1 if len(ind1)<2: return ind1, ind2 @@ -572,13 +603,4 @@ def mutSwap(prog, effective=None): i1 = random.randint(0, len(prog)-1) i2 = random.randint(0, len(prog)-1) prog[i1], prog[i2] = prog[i2], prog[i1] - return prog - -if __name__ == "__main__": - test = Program.randomProgram(8, 2, 11, 20, 0.3, 0.5) - eff, _, _ = test.to_effective([0]) - print(eff) - print(Program.mutInstr(eff, 0.5, 0.5, 0.0, 0.1)) - #Program.interpreter.opcode(4, np.array(10.0), np.array(11.0)) - register = eff.init_register() - #print(Program.execute(eff, np.array([[0.5, 1.0], [0.5, 1.0]]), register, np.array([0]))) \ No newline at end of file + return prog \ No newline at end of file diff --git a/GPRL/genetic_programming/team.py b/GPRL/genetic_programming/team.py index 48739930d4f0d5ec49874e3431304ef7c13ac31c..273049ff4e21f030851103e41d097fc94700d5a1 100644 --- a/GPRL/genetic_programming/team.py +++ b/GPRL/genetic_programming/team.py @@ -2,7 +2,12 @@ import random import numpy as np from sklearn.linear_model import LinearRegression -class MGGP(list): +""" +This module defines all the functions needed to evolve individuals in groups. +Each individual of the group is one output. 
+""" + +class MGGP(list):#Multi-gene genetic programming def __init__(self, content): list.__init__(self, content) self.linear = None @@ -15,24 +20,24 @@ class MGGP(list): def predict(self, func, x): return self.linear.predict(func(*x).T) -def mutate(team, unit_mut): +def mutate(team, unit_mut):#Randomly mute an individual of the team idx = random.randint(0, len(team)-1) team[idx] = unit_mut(team[idx]) return team, -def fixed_mate(team1, team2, unit_cx): +def fixed_mate(team1, team2, unit_cx):#Randomly mate two individuals that has the same position in the team assert len(team1)==len(team2) idx = random.randint(0, len(team1)-1) team1[idx], team2[idx] = unit_cx(team1[idx], team2[idx]) return team1, team2 -def cx_low_level(team1, team2, unit_cx): +def cx_low_level(team1, team2, unit_cx):#crossover between any individuals of the two groups idx1 = random.randint(0, len(team1)-1) idx2 = random.randint(0, len(team2)-1) team1[idx1], team2[idx2] = unit_cx(team1[idx1], team2[idx2]) return team1, team2 -def cx_hight_level(team1, team2, cxprb): +def cx_hight_level(team1, team2, cxprb):#exchange gene from each team add_team1 = [] k = 0 while k<len(team2): @@ -54,7 +59,7 @@ def cx_hight_level(team1, team2, cxprb): team2.append(team1[-1]) return team1, team2 -def mutation_del(team): +def mutation_del(team):#delete an individual in the team team.pop(random.randint(0, len(team)-1)) return team @@ -74,17 +79,9 @@ def team_size_constraint(team, size): mutation_del(team) return team -def team_compile(team, unit_compile): +def team_compile(team, unit_compile):#compile each individual in the the team funcs = list(map(unit_compile, team)) def func(*args): - #ret = np.zeros((len(funcs), len(args[0]))) - #for k, f in enumerate(funcs): - # res = f(*args) - # if isinstance(res, float) or res.size == 1: - # ret[k, : ] = res - # else: - # ret[k,:] = res[:] - #return ret return [f(*args) for f in funcs] return func diff --git a/GPRL/policy.py b/GPRL/policy.py deleted file mode 100644 index 8987864087acfca36a28f650debbf5d1d8fbf014..0000000000000000000000000000000000000000 --- a/GPRL/policy.py +++ /dev/null @@ -1,20 +0,0 @@ -import json - -class Policy(object): - def __init__(self, program) -> None: - super().__init__() - self.policy_type = type(program) - self.repr = str(program) - #args - - def __call__(self, inputs): - pass - - def __str__(self): - return self.repr - - def save_policy(self): - pass - - def load_policy(self): - pass \ No newline at end of file diff --git a/GPRL/utils/FEC.py b/GPRL/utils/FEC.py index 4a0fe66ebd561c9c12e7d816d0b71afba8bd7e7d..d2044e5c9fe53fcc597d6706d8e4910666cfac23 100644 --- a/GPRL/utils/FEC.py +++ b/GPRL/utils/FEC.py @@ -40,7 +40,7 @@ class FEC(object):#Functionnal Equuivalence Checking return fitness @staticmethod - def _cartesian_product_transpose(arrays): + def _cartesian_product_transpose(arrays):# Uniform grid as data util broadcastable = np.ix_(*arrays) broadcasted = np.broadcast_arrays(*broadcastable) rows, cols = reduce(np.multiply, broadcasted[0].shape), len(broadcasted) @@ -54,7 +54,7 @@ class FEC(object):#Functionnal Equuivalence Checking return out.reshape(cols, rows).T @staticmethod - def uniform_domain_dataset(num, *domains): + def uniform_domain_dataset(num, *domains):# Uniform grid as dataset (if there to many dimensions use pseudo-random point sampled from Low-discrepancy sequence like Sobol (scipy qmc)) vec = [] for (min_, max_) in domains: diff --git a/GPRL/utils/gp_utils.py b/GPRL/utils/gp_utils.py index 
317a57f5892f403a4d417ca85b913c491e9a20b9..66f9d2c5227294c6220687c26fa8b1b38a68d702 100644 --- a/GPRL/utils/gp_utils.py +++ b/GPRL/utils/gp_utils.py @@ -8,18 +8,18 @@ operator_complexity = { 'cos':4, 'sin':4, 'power':3, 'cube':3, 'square':3, 'sqrt':3, 'inv':2, 'log':3, 'exp':3 } -def similar(i1, i2): - return complexity(i1)==complexity(i2) - def complexity(individual): return sum(map(lambda x: operator_complexity.get(x.name, 1), individual)) def div(x1, x2): - with np.errstate(divide='ignore', invalid='ignore', over='ignore'): - return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1000.) + if isinstance(x1, float) or x1.ndim == 0: + out = np.full_like(x2, 10_000) + else: + out = np.full_like(x1, 10_000) + return np.divide(x1, x2, where=np.abs(x2) > 0.0001, out=out) def if_then_else(cond, true, false): - return true if cond else false + return np.where(cond, true, false) def classification(*args): return np.argmax(args) @@ -32,10 +32,10 @@ def intervales(x): return 0 def exp(x): - return np.exp(x, where=np.abs(x)<32, out=np.ones_like(x)) + return np.exp(x, where=np.abs(x)<32, out=np.full_like(x, 100_000)) def log(x): - return np.log(np.abs(x), where=np.abs(x) > 0.00001, out=np.ones_like(x)) + return np.log(np.abs(x), where=np.abs(x) > 0.00001, out=np.full_like(x, 10_000)) def power(x, n): with np.errstate(over='ignore', divide='ignore', invalid='ignore'): @@ -43,7 +43,7 @@ def power(x, n): return np.where(np.logical_and(np.abs(x) < 1e6, np.abs(x)>0.001), np.sign(x) * (np.abs(x)) ** (n), 0.0) return np.where(np.abs(x) < 1e6, np.sign(x) * (np.abs(x)) ** (n), 0.0) -def ephemeral_mut(individual, mode, mu=0, std=1): +def ephemeral_mut(individual, mode, mu=0, std=1):#mutation of constant ephemerals_idx = [index for index, node in enumerate(individual) if isinstance(node, gp.Ephemeral)] if len(ephemerals_idx) > 0: if mode == "one": @@ -54,27 +54,9 @@ def ephemeral_mut(individual, mode, mu=0, std=1): individual[i] = new return individual, -def mutate(individual, expr=None, pset=None, mode="one", mu=0, std=1): +def mutate(individual, expr=None, pset=None, mode="one", mu=0, std=1):#mutate that include constant and operation modifications #mut = gp.mutEphemeral(mut, mode) if random.random() < 0.3: return ephemeral_mut(individual, mode=mode, mu=mu, std=std) else: - return gp.mutUniform(individual, expr=expr, pset=pset) - -class MyHeap(object): - def __init__(self, initial=None, key=lambda x:x): - self.key = key - self.index = 0 - if initial: - self._data = [(key(item), i, item) for i, item in enumerate(initial)] - self.index = len(self._data) - heapq.heapify(self._data) - else: - self._data = [] - - def push(self, item): - heapq.heappush(self._data, (self.key(item), self.index, item)) - self.index += 1 - - def pop(self): - return heapq.heappop(self._data)[2] \ No newline at end of file + return gp.mutUniform(individual, expr=expr, pset=pset) \ No newline at end of file diff --git a/GPRL/utils/optim.py b/GPRL/utils/optim.py index 7dbdf641cd8aa8b1066ad021097c3ced45a9d776..2c3553585eb1273b4b25c6a43d0f4b7195c11fc9 100644 --- a/GPRL/utils/optim.py +++ b/GPRL/utils/optim.py @@ -5,7 +5,7 @@ from functools import partial def eval(epsi, F, w): return F(w+epsi) -def Open_AI_ES(map, F, weights, n, steps, alpha=0.001, sigma=0.1): +def Open_AI_ES(map, F, weights, n, steps, alpha=0.001, sigma=0.1):#open ai ES that use eval as objectives for _ in range(steps): epsi = np.random.normal(loc=0.0, scale=sigma, size=(n, weights.size)).reshape((n, *weights.shape)) evaluate = partial(eval, F=F, w=weights) diff --git 
a/GPRL/utils/policy.py b/GPRL/utils/policy.py deleted file mode 100644 index e448802ee16377dfc473340360ee5c6cd255cf5a..0000000000000000000000000000000000000000 --- a/GPRL/utils/policy.py +++ /dev/null @@ -1,32 +0,0 @@ -import json - -class Policy(object):# generic class that handle different policy representation (Tree or Linear) for serealization purpose - def __init__(self, programm) -> None: - super().__init__() - self.policy_type = str(type(programm)) - self.repr = str(programm) - - self.policy = programm - #env id - def __call__(self): - pass - - def save_policy(self, path): - data = {} - data['policy_type'] = self.policy_type - data['policy'] = self.repr - #kwargs from policy - with open(path, 'w') as outfile: - json.dump(data, outfile) - - def load_policy(self, path): - with open(path) as json_file: - data = json.load(json_file) - - self.policy_type = data['policy_type'] - self.repr = data['policy'] - - self.policy - - def __str__(self): - return self.repr \ No newline at end of file diff --git a/GPRL/utils/utils.py b/GPRL/utils/utils.py index e06baec7f7c6fd47a884ba983a3bdf6b476e7459..7d7136762dd648520f01ac3d7eba0c830c9ffafc 100644 --- a/GPRL/utils/utils.py +++ b/GPRL/utils/utils.py @@ -1,7 +1,9 @@ from functools import reduce from operator import add, itemgetter +import pickle import numpy as np import pandas as pd +import os def convert_logbook_to_dataframe(logbook): chapter_keys = logbook.chapters.keys() @@ -28,8 +30,18 @@ def convert_logbook_to_dataframe(logbook): df[k] = d return df -def basic_budget_scheduler(gen_threshold): - dico = {k:v for k,v in gen_threshold} - def scheduler(gen, pop): - return dico(gen) +def basic_budget_scheduler(gen_threshold):# list of tuples (generation, simulation_budget) + def scheduler(gen, population, simulation_budget, parallel_update): + for g, v in gen_threshold: + if gen>g: + return v, parallel_update + return simulation_budget, parallel_update return scheduler + +def save_each_generation(path, modulo=10): + def save(i, pop, hof, logbook): + data = {"pop":pop, "hof":hof} + if i%modulo == 0: + with open(os.path.join(path,"data-"+str(i)+".pkl"), "wb") as input_file: + pickle.dump(data, input_file, pickle.HIGHEST_PROTOCOL) + return save \ No newline at end of file diff --git a/README.md b/README.md index 7ee48df0740db37f6d558d6229e2436e4e0cf61b..d7d250e88808d32a960f8989f43be197873c3982 100644 --- a/README.md +++ b/README.md @@ -1 +1,75 @@ # eXplainable GP-based RL Policy +A Python implementation of symbolic policies for interpretable reinforcement learning using genetic programming. +## Setup +### Requirements +- numpy +- deap +- qdpy +- pygraphviz (for easier understanding of programs) + +### Installing dependencies +- clone this repo +- install with ` python -m pip install -r requirement.txt ` for base installation (no pygraphviz) +- install with ` python -m pip install -r requirement_with_pygrphivz.txt ` if you want to visualize programs easily + +## How to use +### Core functions +Core functions and representations are in the GPRL folder. +``` +. +|── GPRL +| |── containers # Fixes a bug in qdpy grid 0.1.2.1 (latest stable version) +| |── genetic_programming # Individual definitions of linear GP and Team for deap +| |── MCTS # Nested Monte-Carlo code +| |── utils # Various utils and callback functions to run experiments easily +| |── algorithms.py # deap-like algorithms using the toolbox +| |── factory.py # Abstract class to make better use of the toolbox between scripts +| |── UCB.py # Subclass of deap base Fitness to use UCB +└── ... 
+``` +By using DEAP and these functions, we can build our experiments. Examples of DEAP usage can be found at: +<https://github.com/DEAP/deap/tree/master/examples> + +### Experiments scripts +The code for each experiment is available as a separate script using DEAP. More details can be found in the `README.md` of the experiments folder. + +### Main evolve script +The `evolve.py` script uses `.yml` configuration files to launch experiments. It lets you run QD, Tree GP and Linear GP. +You can run an experiment with this command: +``` +python evolve.py --conf /path/to/conf.yml +``` +By default, the results are saved in the `results/` folder. + +### yaml configuration file +Here is a skeleton for the `conf.yml` file, showing how an experiment can be set up: +``` +algorithm: + name: # algorithm name from deap (algorithms.<name>) or algorithm name from GPRL (algo.<name>) + args: + # arguments of the chosen algorithm (lambda_, mu, ngen, ...) + +population: + init_size: # size of the population (int) + +selection: + name: # selection method for the evolutionary algorithm. ex: selTournament (from deap.tools.sel*) + args: + # arguments for the selection method. ex: tournsize: 5 + +individual: # Individual representation ("Tree" or "Linear") + +params: + env: # env id of the gym/bullet environment. ex: "MountainCarContinuous-v0" + function_set: # function set size ("small" or "extended") + c: # exploration constant for UCB (float) + n_episodes: # number of episodes per evaluation (int) + n_steps: # number of steps per episode (int) + gamma: # discount factor γ (float in [0,1]) + n_thread: # number of threads to use (int) + ... (many others depending on the individual representation (Tree or Linear); see conf/ for examples) +seed: # seed for the random number generators +``` + +## See the results +Once an experiment is finished, you can inspect the results as shown in `tutorial.ipynb`. This notebook shows how to load and run an individual from a saved population. 
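As a complement to the "See the results" section, the sketch below shows one way to inspect what `evolve.py` writes into its run directory (`log.csv`, `pop-final.pkl`, `hof-final.pkl`). It is indicative only: the directory name is a hypothetical example, the log columns depend on the statistics registered, and the DEAP `creator` classes built by the experiment factory must be defined before unpickling individuals.

```python
# Indicative sketch: inspect the artifacts saved by evolve.py (paths are hypothetical).
import os
import pickle
import pandas as pd

run_dir = "results/log-CartPole-v1-conf_gpUCB-1650000000.0"  # example run directory name

# Evolution statistics; column names depend on the registered deap statistics.
log = pd.read_csv(os.path.join(run_dir, "log.csv"))
print(log.tail())

# Final population and hall of fame are pickled as plain lists of individuals.
# The deap creator classes used by the experiment must exist before unpickling.
with open(os.path.join(run_dir, "hof-final.pkl"), "rb") as f:
    hof = pickle.load(f)
print(hof[0])  # printable form of the best individual (e.g. a tree expression)
```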
\ No newline at end of file diff --git a/conf/conf_gp.yml b/conf/conf_gp.yml index 4c0f1645deee085997821eb0f04edb732c6ee0ae..b5d71c0e743fc9ab711a05f7eeac5dbbb37279bb 100644 --- a/conf/conf_gp.yml +++ b/conf/conf_gp.yml @@ -1,9 +1,7 @@ algorithm: name: algorithms.eaSimple args: - #mu: 10 - #lambda: 10 - ngen: 5 + ngen: 100 cxpb: 0.1 mutpb: 0.9 diff --git a/conf/conf_gpUCB.yml b/conf/conf_gpUCB.yml new file mode 100644 index 0000000000000000000000000000000000000000..a414c29cb672e1c9130d833a79e7f0218da92739 --- /dev/null +++ b/conf/conf_gpUCB.yml @@ -0,0 +1,32 @@ +algorithm: + name: algo.eaMuPlusLambdaUCB + args: + mu: 100 + lambda_: 100 + simulation_budget: 5 + parallel_update: 16 + save_every: 10 + ngen: 200 + cxpb: 0.1 + mutpb: 0.9 + budget_scheduler: [[50, 10], [100, 20], [190, 50]] + +population: + init_size: 100 + +selection: + name: selNSGA2 + args: + +individual: Tree + +params: + env: "CartPole-v1" + function_set: small + c: 0.0 + n_episodes: 1 + n_steps: 500 + gamma: 1.0 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/conf/conf_lingp.yml b/conf/conf_lingp.yml index 7bab3a07a32303fea2a81254dc9c002a0f71e471..9b31e24296a4c697d9fc9b368507486fcfce0cde 100644 --- a/conf/conf_lingp.yml +++ b/conf/conf_lingp.yml @@ -1,11 +1,15 @@ algorithm: - name: algorithms.eaSimple + name: algo.eaMuPlusLambdaUCB args: - #mu: 10 - #lambda: 10 - ngen: 5 - cxpb: 0.1 - mutpb: 0.9 + mu: 100 + lambda_: 100 + simulation_budget: 5 + parallel_update: 16 + save_every: 10 + ngen: 200 + cxpb: 0.0 + mutpb: 1.0 + budget_scheduler: [[50, 10], [100, 20], [190, 100]] population: init_size: 100 @@ -22,9 +26,9 @@ params: function_set: small c: 0.0 n_episodes: 1 - n_steps: 100 + n_steps: 500 gamma: 1.0 - regCalcSize: 8 + regCalcSize: 4 regConstSize: 10 init_size_min: 2 init_size_max: 5 @@ -34,6 +38,6 @@ params: pDel: 0.6 pSwap: 0.1 pMut: 0.5 - n_thread: 1 + n_thread: 16 seed: 42 \ No newline at end of file diff --git a/conf/conf_qdgp-BipedalWalker.yml b/conf/conf_qdgp-BipedalWalker.yml new file mode 100644 index 0000000000000000000000000000000000000000..43d11de6020d2f1ddfe1722f4203c9c309e31622 --- /dev/null +++ b/conf/conf_qdgp-BipedalWalker.yml @@ -0,0 +1,37 @@ +algorithm: + name: algo.qdLambda + args: + batch_size: 100 + lambda_: 500 + ngen: 5000 + cxpb: 0.0 + mutpb: 1.0 + show_warnings: True + verbose: True + +population: + init_size: 1000 + args: + shape: [10, 10] + max_items_per_bin: 10 + features_domain: [[0, 100], [0., 20.0], [0., 2.5], [0., 2.5], [0., 2.5], [0., 2.5], [0., 1.0], [0., 1.0]] + fitness_domain: [[-200_000.0, 350.0],] + +selection: + name: selRandom + args: + +individual: Tree + +params: + env: BipedalWalker-v3 + function_set: extended + c: 0.0 + n_episodes: 3 + n_steps: 500 + gamma: 1.0 + features_kept: [False, False, True, False, True, False, False, False] + tree_max_depth: 10 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/conf/conf_qdgp-Hopper.yml b/conf/conf_qdgp-Hopper.yml new file mode 100644 index 0000000000000000000000000000000000000000..077c0acb759be2ca702ecccf649764d8a0caeb45 --- /dev/null +++ b/conf/conf_qdgp-Hopper.yml @@ -0,0 +1,38 @@ +algorithm: + name: algo.qdLambda + args: + batch_size: 100 + lambda_: 500 + ngen: 5000 + cxpb: 0.0 + mutpb: 1.0 + show_warnings: True + verbose: True + save_every: 10 + +population: + init_size: 1000 + args: + shape: [10, 10, 10] + max_items_per_bin: 5 + features_domain: [[0, 100], [0., 20.], [0.0, 1.2], [0.0, 1.2], [0.0, 1.2], [0.0, 1.0]] + fitness_domain: [[-200_000.0, 2000.0],] + +selection: + name: selRandom + args: 
+ +individual: Tree + +params: + env: HopperBulletEnv-v0 + function_set: extended + c: 0.0 + n_episodes: 3 + n_steps: 500 + gamma: 1.0 + features_kept: [False, False, False, True, True, True] + tree_max_depth: 10 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/conf/conf_qd-lingp-BipedalWalker.yml b/conf/conf_qdlingp-BipedalWalker.yml similarity index 80% rename from conf/conf_qd-lingp-BipedalWalker.yml rename to conf/conf_qdlingp-BipedalWalker.yml index 43e91af87b434fc76812a844425b7b212fbe4403..9281981a63e4d64785722a113418229565ff563f 100644 --- a/conf/conf_qd-lingp-BipedalWalker.yml +++ b/conf/conf_qdlingp-BipedalWalker.yml @@ -3,18 +3,19 @@ algorithm: args: batch_size: 100 lambda_: 500 - ngen: 5 + ngen: 5000 cxpb: 0.0 mutpb: 1.0 show_warnings: True verbose: True + save_every: 10 population: init_size: 1000 args: shape: [10, 10] max_items_per_bin: 10 - features_domain: [[0, 100], [0., 10.0], [0., 2.5], [0., 2.5], [0., 2.5], [0., 2.5], [0., 1.0], [0., 1.0]] + features_domain: [[0, 100], [0., 20.0], [0., 2.5], [0., 2.5], [0., 2.5], [0., 2.5], [0., 1.0], [0., 1.0]] fitness_domain: [[-200_000.0, 350.0],] selection: @@ -27,12 +28,12 @@ params: env: BipedalWalker-v3 function_set: extended c: 0.0 - n_episodes: 1 - n_steps: 100 + n_episodes: 3 + n_steps: 500 gamma: 1.0 features_kept: [False, False, True, False, True, False, False, False] - regCalcSize: 8 - regConstSize: 10 + regCalcSize: 16 + regConstSize: 20 init_size_min: 2 init_size_max: 5 pConst: 0.3 diff --git a/conf/conf_qdlingp-Hopper.yml b/conf/conf_qdlingp-Hopper.yml new file mode 100644 index 0000000000000000000000000000000000000000..b4e8c63a4fa48f4193400edffb68361c29b17345 --- /dev/null +++ b/conf/conf_qdlingp-Hopper.yml @@ -0,0 +1,47 @@ +algorithm: + name: algo.qdLambda + args: + batch_size: 100 + lambda_: 500 + ngen: 5000 + cxpb: 0.0 + mutpb: 1.0 + show_warnings: True + verbose: True + save_every: 10 + +population: + init_size: 1000 + args: + shape: [10, 10, 10] + max_items_per_bin: 5 + features_domain: [[0, 100], [0., 20.], [0.0, 1.2], [0.0, 1.2], [0.0, 1.2], [0.0, 1.0]] + fitness_domain: [[-200_000.0, 2000.0],] + +selection: + name: selRandom + args: + +individual: Linear + +params: + env: HopperBulletEnv-v0 + function_set: extended + c: 0.0 + n_episodes: 3 + n_steps: 500 + gamma: 1.0 + features_kept: [False, False, False, True, True, True] + regCalcSize: 16 + regConstSize: 20 + init_size_min: 2 + init_size_max: 5 + pConst: 0.3 + pBranch: 0.3 + pIns: 0.3 + pDel: 0.6 + pSwap: 0.1 + pMut: 0.5 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/evolve.py b/evolve.py index 65392d8ebb9331659f01f725628e5b6bfae7b5af..52c7efef36b97fb295fde5d99b0bea8aff75c213 100644 --- a/evolve.py +++ b/evolve.py @@ -1,3 +1,4 @@ +from GPRL.utils.utils import basic_budget_scheduler, save_each_generation import pandas as pd import numpy as np import random @@ -10,20 +11,22 @@ if "__main__" == __name__: from deap import algorithms from GPRL import algorithms as algo from GPRL.utils.utils import convert_logbook_to_dataframe - + from GPRL.UCB import UpdateFitnessHof, UpdateFitnessParetoFront + import os + import ntpath + from shutil import copyfile + import pickle + import time + parser = argparse.ArgumentParser(description='Main programm to launch experiments from yaml configuration file') parser.add_argument("--conf", required=True, help="configuration file path", type=str) - parser.add_argument("--path", help="directory for results", default="", type=str) - parser.add_argument("--name", help="name for saving results", 
default="", type=str) + parser.add_argument("--path", help="path to save the results", default="results", type=str) args = parser.parse_args() with open(args.conf) as f: conf = yaml.load(f, Loader=yaml.SafeLoader) - if args.name == "": - args.name = hash(conf.values()) - if conf["individual"] == "Tree": import experiments.gp as evoTool @@ -45,7 +48,7 @@ if "__main__" == __name__: random.seed(conf["seed"]) if "qd" in conf["algorithm"]["name"]: - from qdpy.containers import Grid + from GPRL.containers.grid import FixGrid as Grid conf["population"]["args"]["features_domain"] = np.array(conf["population"]["args"]["features_domain"])[conf["params"]["features_kept"]] conf["algorithm"]["args"]["container"] = Grid(**conf["population"]["args"]) @@ -58,16 +61,31 @@ if "__main__" == __name__: pop = evoTool.toolbox.population(n=conf["population"]["init_size"]) - hof = tools.HallOfFame(10) + #hof = tools.HallOfFame(10) + hof = UpdateFitnessHof(10, maxsize_arm=10) + #hof = UpdateFitnessParetoFront() + + dir = os.path.join(args.path, "log-"+ conf["params"]["env"] + "-"+ ntpath.basename(args.conf)[:-4] +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) + copyfile(args.conf, os.path.join(dir, "conf.yml")) + + if conf["algorithm"]["args"].get("budget_scheduler", None): + conf["algorithm"]["args"]["budget_scheduler"] = basic_budget_scheduler(conf["algorithm"]["args"]["budget_scheduler"]) + if conf["algorithm"]["args"].get("save_every", None): + conf["algorithm"]["args"]["iteration_callback"] = save_each_generation(dir, modulo=conf["algorithm"]["args"]["save_every"]) + del conf["algorithm"]["args"]["save_every"] algorithm = eval(conf["algorithm"]["name"])#/!\ not good from a security point of view but flexible pop, log = algorithm(pop, evoTool.toolbox, halloffame=hof, stats=mstats, **conf["algorithm"]["args"]) - name = "log_"+ str(args.name) + ".csv" - - convert_logbook_to_dataframe(log).to_csv(name, index=False) + with open(os.path.join(dir, "pop-final.pkl"), 'wb') as output: + pickle.dump(list(pop), output, pickle.HIGHEST_PROTOCOL) + with open(os.path.join(dir, "hof-final.pkl"), 'wb') as output: + pickle.dump(list(hof), output, pickle.HIGHEST_PROTOCOL) - print("Experiment is saved at : ", name) + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log.csv"), index=False) + print("Experiment is saved at : ", dir) factory.close() pool.close() diff --git a/experiments/ADF.py b/experiments/ADF.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b58592dceeb4849a601153803f35c8798e28ab --- /dev/null +++ b/experiments/ADF.py @@ -0,0 +1,229 @@ +from deap import gp, creator, base, tools +import numpy as np + +import random +import operator +import warnings +from functools import partial + +import gym +try: + import pybullet_envs +except ImportError: + warnings.warn("PyBullet environment not found") + +from GPRL.utils import gp_utils +from GPRL.utils.utils import convert_logbook_to_dataframe, save_each_generation + +def MC_fitness(individual, n_steps, num_episodes, gamma): + agent = toolbox.compile(individual) + + s = 0 + steps = 0 + for _ in range(num_episodes): + state = env.reset() + for k in range(n_steps): + #state, reward, done, _ = env.step(int(agent(*state))) + #state, reward, done, _ = env.step([agent(*state)]) + state, reward, done, _ = env.step(agent(*state)) + s+= gamma*reward + steps += 1 + if done: + break + return s, + +def compile(expr, psets, compile, outputs):# Here same pset for all output, redifining compilation for multi-ouptut support + adfdict = {} 
+ func = None + for pset, subexpr in reversed(list(zip(psets[1:], expr[outputs:]))): + pset.context.update(adfdict) + func = compile(subexpr, pset) + adfdict.update({pset.name: func}) + pset = psets[0] + pset.context.update(adfdict) + funcs = [ compile(outexpr, pset) for outexpr in expr[:outputs] ] + return lambda *args: [f(*args) for f in funcs] + + + +def main(conf): + def check_output_depth(ind):# Impose output trees very sort depth to ensure use of ADF + for tree in ind[:OUTPUT]: + if tree.height > 3: + return True + return False + + env = gym.make(conf['env']) + + INPUT = env.observation_space.shape[0] + OUTPUT = env.action_space.shape[0] + NUM_ADF = conf['num_ADF'] + + feature_ADF = [] + for k in range(NUM_ADF):#ADF function set definition (only 1 level) + adf = gp.PrimitiveSetTyped("ADF"+str(k), [float]*conf["num_args"], float) + + adf.addPrimitive(np.add, [float, float], float) + adf.addPrimitive(np.subtract, [float, float], float) + adf.addPrimitive(np.multiply, [float, float], float) + adf.addPrimitive(gp_utils.div, [float, float], float) + + adf.addPrimitive(np.sin, [float], float) + adf.addPrimitive(gp_utils.exp, [float], float) + adf.addPrimitive(gp_utils.log, [float], float) + + adf.addEphemeralConstant("const_"+str(k), lambda: np.random.uniform(-10.0, 10.0), float) + + feature_ADF.append(adf) + + pset = gp.PrimitiveSetTyped("MAIN", [float]*INPUT, float) + + pset.addPrimitive(operator.or_, [bool, bool], bool) + pset.addPrimitive(operator.and_, [bool, bool], bool) + pset.addPrimitive(operator.gt, [float, float], bool) + pset.addPrimitive(gp_utils.if_then_else, [bool, float, float], float) + pset.addTerminal(True, bool) + + pset.addPrimitive(np.add, [float, float], float) + pset.addPrimitive(np.subtract, [float, float], float) + pset.addPrimitive(np.multiply, [float, float], float) + + pset.addEphemeralConstant("const", lambda: np.random.uniform(-10.0, 10.0), float) + + for adf in feature_ADF: + pset.addADF(adf) + + psets = [pset] + feature_ADF + + from GPRL.UCB import UCBFitness + creator.create("FitnessMin", UCBFitness, weights=(1.0,), c=2, sigma=5.0) + creator.create("Tree", gp.PrimitiveTree) + + creator.create("Individual", list, fitness=creator.FitnessMin) + + toolbox = base.Toolbox() + gen = [] + for k, adf in enumerate(feature_ADF): + toolbox.register('adf_expr'+str(k), gp.genFull, pset=adf, min_=1, max_=5) + + toolbox.register('main_expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=3) + + for k in range(NUM_ADF): + toolbox.register('ADF'+str(k), tools.initIterate, creator.Tree, getattr(toolbox, 'adf_expr'+str(k))) + + toolbox.register('MAIN', tools.initIterate, creator.Tree, toolbox.main_expr) + + func_cycle = [toolbox.MAIN]*OUTPUT + [ getattr(toolbox, "ADF"+str(k)) for k in range(NUM_ADF) ]# Using adf as feature extraction + + toolbox.register('individual', tools.initCycle, creator.Individual, func_cycle) + toolbox.register('population', tools.initRepeat, list, toolbox.individual) + + toolbox.register('compile', compile, psets=psets, compile=gp.compile, outputs=OUTPUT) + toolbox.register("evaluate", MC_fitness, n_steps=conf["n_steps"], num_episodes=conf["n_episodes"], gamma=conf["gamma"]) + toolbox.register('select', tools.selTournament, tournsize=7) + + def mutate(individual, expr, mute, outputs):#Same pset for each outputs + for i, tree in enumerate(individual[:outputs]): + if random.random() < conf['mutpb']: + individual[i], = mute(individual=tree, expr=expr, pset=psets[0]) + for i, (tree, pset) in enumerate(zip(individual[outputs:], psets[1:])): + if 
random.random() < conf['mutpb']: + individual[i+outputs], = mute(individual=tree, expr=expr, pset=pset) + return individual, + + def mate(ind1, ind2, cx): + for i, (tree1, tree2) in enumerate(zip(ind1, ind2)): + if random.random() < conf['cxpb']: + ind1[i], ind2[i] = cx(tree1, tree2) + return ind1, ind2 + + toolbox.register('mate', mate, cx=gp.cxOnePoint) + toolbox.register('expr', gp.genFull, min_=1, max_=2) + toolbox.register('mutate', mutate, expr=toolbox.expr, mute=partial(gp_utils.mutate, mode='all'), outputs=OUTPUT) + + funcs = [] + constrain = partial(gp.staticLimit, max_value=0) + if True: + funcs.append(check_output_depth) # Maximum length + for func in funcs: + for variation in ["mate", "mutate"]: + toolbox.decorate(variation, constrain(func)) + return toolbox, creator, env + +def initializer(conf): + global creator, toolbox, env + + toolbox, creator, env = main(conf) + +if __name__ == "__main__": + import multiprocessing + import argparse + import os + import time + from deap import algorithms + from GPRL import algorithms as algo + import pickle + + parser = argparse.ArgumentParser() + parser.add_argument("--env", required=True, help="environment ID", type=str) + parser.add_argument("--n-episodes", help="Number of episodes", default=1, type=int) + parser.add_argument("--n-steps", help="Number of step per episode", default=2000, type=int) + parser.add_argument("--gamma", help="discount factor", default=1.0, type=float) + parser.add_argument("--algorithm", help="algorithm (mu+lambda), (mu, lambda) or UCB", default="UCB", type=str) + parser.add_argument("--cxpb", help="crossover probability", default=0.0, type=float) + parser.add_argument("--mutpb", help="mutation probability", default=1.0, type=float) + parser.add_argument("--lambda_", help="number of offspring", default=100, type=int) + parser.add_argument("--mu", help="number of parents", default=100, type=int) + parser.add_argument("--n-gen", help="number of generation", default=100, type=int) + parser.add_argument("--function-set", help="function set", default="small", type=str) + parser.add_argument("--simulation-budget", help="number of simulation allowed for UCB", default=1, type=int) + parser.add_argument("--c", help="constante d'exploration", default=1.0, type=float) + parser.add_argument("--n-thread", help="number of thread to use", default=1, type=int) + parser.add_argument("--num-ADF", help="number of ADF to use", default=5, type=int) + parser.add_argument("--num-args", help="number of argument for each ADF", default=4, type=int) + parser.add_argument("--save-every", help="save hof and population every n generation", default=10, type=int) + parser.add_argument("--path", help="path to save the results", default=os.path.join("experiments", "results", "ADF"), type=str) + + args = parser.parse_args() + conf = vars(args) + initializer(conf) + + stats_fit = tools.Statistics(lambda ind: ind.fitness.values[0]) + stats_size = tools.Statistics(len) + stats_bandit = tools.Statistics(lambda ind: len(ind.fitness.rewards)) + + mstats = tools.MultiStatistics(fitness=stats_fit, bandit=stats_bandit)#size=stats_size) + mstats.register("avg", lambda x: np.mean(x)) + mstats.register("std", lambda x: np.std(x)) + mstats.register("min", lambda x: np.min(x)) + mstats.register("max", lambda x: np.max(x)) + + pool = multiprocessing.Pool(args.n_thread, initializer=initializer, initargs=(conf,)) + toolbox.register("map", pool.map) + + pop = toolbox.population(n=args.mu) + hof = tools.ParetoFront() + + dir = os.path.join(args.path, "log-ADF-"+ 
args.env +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) + + if args.algorithm == "UCB": + algo = partial(algo.eaMuPlusLambdaUCB, simulation_budget=args.simulation_budget, parallel_update=args.n_thread, iteration_callback=save_each_generation(dir, modulo=args.save_every)) + elif args.algorithm == "(mu, lambda)": + algo = algorithms.eaMuCommaLambda + elif args.algorithm == "(mu + lambda)": + algo = algorithms.eaMuPlusLambda + + pop, log = algo(population=pop, toolbox=toolbox, cxpb=args.cxpb, mutpb=args.mutpb, mu=args.mu, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, halloffame=hof, verbose=True) + + with open(os.path.join(dir, "pop-final.pkl"), 'wb') as output: + pickle.dump(list(pop), output, pickle.HIGHEST_PROTOCOL) + with open(os.path.join(dir, "hof-final.pkl"), 'wb') as output: + pickle.dump(list(hof), output, pickle.HIGHEST_PROTOCOL) + + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log_qdgp.csv"), index=False) + print("Experiment is saved at : ", dir) + + pool.close() + env.close() diff --git a/experiments/README.md b/experiments/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23768833751ff123858a90ea3e64928fe1c9a8e4 --- /dev/null +++ b/experiments/README.md @@ -0,0 +1,13 @@ +# Experiments + +In this folder, each experiment is defined in a separate script. These script needs at least one argument giving the environment to use and have to be run at the root of the project :   +``` +python -m experiments.<script_name> +``` + +Details about script argument can be found using `-h` : + ``` + python -m GPRL.experiments.<script_name.py> -h + ``` + +By default, results are saved at : `experiments\results\<experiment_name>` \ No newline at end of file diff --git a/experiments/bench.py b/experiments/bench.py index 9838a54db17844a090f1824fb3e148d7de00931e..07591d1e672a21b72b44fc4127aef4bccc260d19 100644 --- a/experiments/bench.py +++ b/experiments/bench.py @@ -1,34 +1,26 @@ -from UCB import ArmHof, selDoubleTournament +from GPRL.UCB import UpdateFitnessHof, selDoubleTournament, UCBFitness from functools import partial import operator import random from deap import base, creator, tools, algorithms -import gp_utils -import team +from GPRL.utils import gp_utils +from GPRL.genetic_programming import team import numpy as np -from numba import njit +#from numba import njit# Used for oneMax problem only so deactivated to not add more dependencies -from algorithms import eaMuPlusLambdaUCB +from GPRL.algorithms import eaMuPlusLambdaUCB tools.selUCBDoubleTounament = selDoubleTournament -#benchmark bandit : -# - budget -# - best cumreward/stability -# - solution complexity -# - maybe constant effect sigma/c -# compare too fixed sample and maybe big size pop (implicite vs explicite) -# One max+noise (various dim) & maybe one openai env (an easy/fast one) - def evalOneMax(individual, DIMS, STD, n_eval=1): if STD == 0.0: n_eval=1 return numba_evalOneMax(np.array(individual, dtype=int), DIMS, STD, n_eval=n_eval), -@njit +#@njit def numba_evalOneMax(individual, DIMS, STD, n_eval): std = np.random.normal(0.0, STD, n_eval) return (np.sum(individual)/DIMS) + std.mean() @@ -36,7 +28,6 @@ def numba_evalOneMax(individual, DIMS, STD, n_eval): def oneMax(conf, STD, DIMS, N_EVAL): DIMS = 2**DIMS if "UCB" in conf["algorithm"]["name"]: - from UCB import UCBFitness creator.create("FitnessMax", UCBFitness, weights=(1.0,), sigma=STD) else: creator.create("FitnessMax", base.Fitness, weights=(1.0,)) @@ -98,7 +89,6 @@ def env(conf, UCB_SIGMA, NUM_EPISODE, 
tmp=0): # pset.addEphemeralConstant("const_"+str(k), lambda: np.random.uniform(-10.0, 10.0), float) if "UCB" in conf["algorithm"]["name"]: - from UCB import UCBFitness creator.create("FitnessMax", UCBFitness, weights=(1.0, -1.0), sigma=UCB_SIGMA) else: creator.create("FitnessMax", base.Fitness, weights=(1.0, -1.0)) @@ -147,13 +137,22 @@ if __name__ == "__main__": import multiprocessing import time import pandas as pd - import parser - from algorithms import eaMuPlusLambdaUCB - from UCB import UpdateFitnessHof, selDoubleTournament - + import os + import ntpath + import argparse import yaml + from GPRL.algorithms import eaMuPlusLambdaUCB + + parser = argparse.ArgumentParser(description='Main programm to launch experiments from yaml configuration file') + parser.add_argument("--conf", required=True, help="configuration file path", type=str) + parser.add_argument("--path", help="directory for results", default="", type=str) + + args = parser.parse_args() - with open("results/Bench/env/conf/mu+lambdaUCB-mountaincar.yml") as f: + if args.path == "": + args.path = os.path.join("experiments", "results", "bench", ntpath.basename(args.conf)[:-4]) + + with open(args.conf) as f: conf = yaml.load(f, Loader=yaml.FullLoader) algo = eval(conf["algorithm"]["name"]) @@ -231,8 +230,6 @@ if __name__ == "__main__": results["best_arm"].append(hof.arm_hof[0].fitness.values[0]) else: results["best_arm"].append(None) - pd.DataFrame(results).to_csv("results/mu+lambdaUCB-pop=100.csv", index=False) - elif pbm == "env": results = {"best_pop":[], "best_hof":[], "best_arm":[], "env":[], "n_step": [], @@ -310,8 +307,4 @@ if __name__ == "__main__": results["best_arm"].append(None) results["complexity_arm"].append(None) - pd.DataFrame(results).to_csv("results/UCBmu+lambda.csv", index=False) - - - #amoeba net 2**6 => 2**9, sigma 0.01 on accruracy (sum(bits)/dimensions) averaged on 100 runs - # design stats ! 
\ No newline at end of file + pd.DataFrame(results).to_csv(args.path, index=False) \ No newline at end of file diff --git a/experiments/gp.py b/experiments/gp.py index 2950b637c8f8d740636669ae45d08c257c51aa13..91966d6af80d464089133eb64920be34a6d1e068 100644 --- a/experiments/gp.py +++ b/experiments/gp.py @@ -16,7 +16,7 @@ from deap import base, creator, tools, gp from GPRL.genetic_programming import team from GPRL.utils import gp_utils -from GPRL.utils.utils import convert_logbook_to_dataframe +from GPRL.utils.utils import convert_logbook_to_dataframe, save_each_generation from GPRL.factory import EvolveFactory from GPRL.UCB import UCBFitness @@ -60,22 +60,25 @@ class Factory(EvolveFactory): if self.conf['function_set'] == "small": function_set = core_function + if_function elif self.conf["function_set"] == "extended": - function_set = core_function + exp_function + trig_function + function_set = core_function + exp_function + trig_function + if_function INPUT = ENV.observation_space.shape[0] if bool(ENV.action_space.shape): ret = float OUTPUT = ENV.action_space.shape[0] else: - ret = int - OUTPUT = 1 + OUTPUT = 1 + if ENV.action_space.n == 2: + ret = bool + else: + ret = int + function_set += classification_func pset = gp.PrimitiveSetTyped("MAIN", [float]*INPUT, ret) for primitive in function_set: pset.addPrimitive(*primitive) - pset.addTerminal(0.1, float) - + for k in range(INPUT//2): pset.addEphemeralConstant("const_"+str(k), lambda: np.random.uniform(-20.0, 20.0), float) pset.addTerminal(True, bool) @@ -138,6 +141,9 @@ if __name__== "__main__": import argparse from deap import algorithms from GPRL import algorithms as algo + import os + import pickle + import time parser = argparse.ArgumentParser() parser.add_argument("--env", required=True, help="environment ID", type=str) @@ -154,7 +160,8 @@ if __name__== "__main__": parser.add_argument("--simulation-budget", help="number of simulation allowed for UCB", default=1, type=int) parser.add_argument("--c", help="constante d'exploration", default=1.0, type=float) parser.add_argument("--n-thread", help="number of thread to use", default=1, type=int) - parser.add_argument("--path", help="path to save the results", default="", type=str) + parser.add_argument("--save-every", help="save hof and population every n generation", default=10, type=int) + parser.add_argument("--path", help="path to save the results", default=os.path.join("experiments", "results", "gp"), type=str) args = parser.parse_args() @@ -169,18 +176,29 @@ if __name__== "__main__": pop = toolbox.population(n=args.mu) hof = tools.ParetoFront() + dir = os.path.join(args.path, "log-gp-"+ args.env +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) + + + if args.algorithm == "UCB": - algo = partial(algo.eaMuPlusLambdaUCB, simulation_budget=args.simulation_budget, parallel_update=args.n_thread) + algo = partial(algo.eaMuPlusLambdaUCB, simulation_budget=args.simulation_budget, parallel_update=args.n_thread, iteration_callback=save_each_generation(dir, modulo=args.save_every)) elif args.algorithm == "(mu, lambda)": algo = algorithms.eaMuCommaLambda elif args.algorithm == "(mu + lambda)": algo = algorithms.eaMuPlusLambda - - pop, log = algo(population=pop, toolbox=toolbox, cxpb=args.cxpb, mutpb=args.mutpb, mu=args.mu, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, halloffame=hof, verbose=True) - name = str(hash(vars(args).values())) - convert_logbook_to_dataframe(log).to_csv(args.path+"log_gp"+ name + ".csv", index=False) - print("Experiment is saved at : ", 
args.path + "log_gp"+name) + pop, log = algo(population=pop, toolbox=toolbox, cxpb=args.cxpb, mutpb=args.mutpb, mu=args.mu, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, halloffame=hof, verbose=True) + + + with open(os.path.join(dir, "pop-final.pkl"), 'wb') as output: + pickle.dump(list(pop), output, pickle.HIGHEST_PROTOCOL) + with open(os.path.join(dir, "hof-final.pkl"), 'wb') as output: + pickle.dump(list(hof), output, pickle.HIGHEST_PROTOCOL) + + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log_gp.csv"), index=False) + print("Experiment is saved at : ", dir) pool.close() factory.close() \ No newline at end of file diff --git a/experiments/imitation_learning/NN/td3-AntBulletEnv-v0.zip b/experiments/imitation_learning/NN/td3-AntBulletEnv-v0.zip new file mode 100644 index 0000000000000000000000000000000000000000..95a113d0b8defd581a6c1f0924b4d0bd3f2219c8 Binary files /dev/null and b/experiments/imitation_learning/NN/td3-AntBulletEnv-v0.zip differ diff --git a/experiments/imitation_learning/Readme.md b/experiments/imitation_learning/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..2ca456193ca4f00b16ef0ff3aa3d529b145a7a0e --- /dev/null +++ b/experiments/imitation_learning/Readme.md @@ -0,0 +1,7 @@ +# Imitation learning +Experiments based on imitation learning, leading to no significant amelioration +This experimental code had to be run at the root of the project :   +``` +python -m experiments.imitation_learning.<script_name> +``` +It also requires additional dependencies to run the neural network. \ No newline at end of file diff --git a/experiments/imitation_learning/imitation_gp.py b/experiments/imitation_learning/imitation_gp.py new file mode 100644 index 0000000000000000000000000000000000000000..a02b0776da1449dc4110c8c884eaeffe830eb8a5 --- /dev/null +++ b/experiments/imitation_learning/imitation_gp.py @@ -0,0 +1,312 @@ +from deap import gp, creator, base, tools +import numpy as np + +import operator +from functools import partial + +import gym +import pybulletgym + +from stable_baselines3 import TD3 + +from GPRL.utils import gp_utils +from GPRL.genetic_programming import team +from experiments.imitation_learning.imitation_utils import RingReplayBuffer +from experiments.imitation_learning.time_feature import TimeFeatureWrapper + +from sklearn.metrics import mean_squared_error + +def check_env(): + return 'ENV' in globals() + +def select_data(a_learner, a_demo, delta, n=2): + add = (np.abs(a_learner-a_demo)**n).sum(axis=1)>delta + return add + +def MC_collect_label(agent_learner, agent_demo, env, n_steps): + if check_env(): + env = ENV + data = [] + + state = env.reset() + a_learner = [] + for k in range(n_steps): + a_learner.append(agent_learner(*state)) + a_demo, _ = agent_demo(state) + _state, reward, done, _ = env.step(a_learner[k]) + data.append((state, a_demo, _state, reward)) + state = _state + if done: + break + return a_learner, data + +def MC_collect_label_(agent_learner, agent_demo, env, n_steps): + if check_env(): + env = ENV + data = [] + + flag = False + state = env.reset() + a_learner = [] + for k in range(n_steps): + a_learner.append(agent_learner(*state)) + a_demo, _ = agent_demo(state) + if flag: + _state, reward, done, _ = env.step(a_demo) + data.append((state, a_demo, _state, reward)) + else: + _state, reward, done, _ = env.step(a_learner[k]) + if sum(np.abs(a_demo-a_learner[k]))>0.1: + flag = True + + state = _state + if done: + break + return a_learner, data + +def MC_collect_single(agent, env, n_steps): + if 
check_env() and env is None: + env = ENV + data = [] + state = env.reset() + for k in range(n_steps): + a_demo, _ = agent(state) + _state, reward, done, _ = env.step(a_demo) + data.append((state, a_demo, _state, reward)) + state = _state + if done: + break + return data + +def MC_fitness(individual, env, n_steps, num_episodes, gamma): + if check_env() and env is None: + env = ENV + agent = toolbox.compile(individual) + s = 0 + steps = 0 + for _ in range(num_episodes): + state = env.reset() + for k in range(n_steps): + state, reward, done, _ = env.step(agent(*state)) + s+= gamma*reward + steps += 1 + if done: + break + return s + +def mse_loss(individual, data, target): + func = toolbox.compile(individual) + y = func(*data) + for k in range(len(y)): + if isinstance(y[k], float) or y[k].ndim==0: + y[k] = np.full(target.shape[1], y[k]) + y = np.array(y) + if(~np.isfinite(y)).any(): + return -np.inf + mse = mean_squared_error(target, y) + return mse + + +def initializer(name, seed=None, wrapper=lambda x:x): + global ENV + ENV = wrapper(gym.make(name)) + if seed: + ENV.seed(seed) + #env.action_space.seed(seed) + #env.observation_space.seed(seed) + +ENV = TimeFeatureWrapper(gym.make("AntPyBulletEnv-v0")) + +INPUT = ENV.observation_space.shape[0] +OUTPUT = ENV.action_space.shape[0] + +core_function = [ (np.add, [float]*2, float), (np.subtract, [float]*2, float), (np.multiply, [float]*2, float), (gp_utils.div, [float]*2, float)] +trig_function = [(np.sin, [float], float)] +exp_function = [ (gp_utils.exp, [float], float), (gp_utils.log, [float], float)] +if_function = [ (gp_utils.if_then_else, [bool, float, float], float), (operator.gt, [float, float], bool), (operator.and_, [bool, bool], bool), (operator.or_, [bool, bool], bool) ] + +function_set = core_function + exp_function + if_function + trig_function + +pset = gp.PrimitiveSetTyped("MAIN", [float]*INPUT, float) +for primitive in function_set: + pset.addPrimitive(*primitive) + +for i in range(INPUT//2):# Force the use of more constante. 
+ pset.addEphemeralConstant("const_"+str(i), lambda: np.random.uniform(-20, 20), float) +pset.addTerminal(True, bool) + + +from GPRL.UCB import UCBFitness +creator.create("FitnessMin", UCBFitness, weights=(-1.0, 1.0)) +creator.create("Individual", list, fitness=creator.FitnessMin) + +toolbox = base.Toolbox() +toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=3, max_=8) +toolbox.register("team_grow", team.init_team, size=OUTPUT, unit_init=lambda: gp.PrimitiveTree(toolbox.expr())) +toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.team_grow) + +toolbox.register("population", tools.initRepeat, list, toolbox.individual) +toolbox.register("compile_gp", gp.compile, pset=pset) +toolbox.register("compile", team.team_compile, unit_compile=toolbox.compile_gp) + +def fitness(individual, data, target, env, n_steps, num_episodes, gamma): + if check_env() and env is None: + env = ENV + return mse_loss(individual, data, target), MC_fitness(individual, env, n_steps, num_episodes, gamma)#, team.team_complexity(individual, gp_utils.complexity) + +toolbox.register("evaluate_MC", MC_fitness, env=None, n_steps=300, num_episodes=1, gamma=0.99) +toolbox.register("evaluate", fitness, env=None, n_steps=300, num_episodes=1, gamma=0.99) + +#toolbox.register("select", tools.selTournament, tournsize=12) +toolbox.register("select", tools.selNSGA2) +#toolbox.register("select", tools.selDoubleTournament, fitness_size=12, parsimony_size=1.3, fitness_first=True) + +def cx(x1, x2): + tmp1, tmp2 = gp.cxOnePoint(x1, x2) + return gp.PrimitiveTree(tmp1), gp.PrimitiveTree(tmp2) +toolbox.register("mate", team.fixed_mate, unit_cx=cx) +toolbox.register("expr_mut", gp.genFull, min_=0, max_=2) +toolbox.register("mutate_gp", gp_utils.mutate, expr=toolbox.expr_mut, pset=pset, mode="all", mu=0, std=1) +toolbox.register("mutate", team.mutate, unit_mut=lambda x: gp.PrimitiveTree(toolbox.mutate_gp(x)[0])) + +toolbox.decorate("mate", gp.staticLimit(key=lambda x: team.height(x, operator.attrgetter("height")), max_value=17)) +toolbox.decorate("mutate", gp.staticLimit(key=lambda x: team.height(x, operator.attrgetter("height")), max_value=17)) + +if __name__== "__main__": + import multiprocessing + from deap import algorithms + + demonstrator = TD3.load('experiments/imitation_learning/NN/td3-AntBulletEnv-v0.zip') + demonstrator.action = partial(demonstrator.predict, deterministic=True) + + dataset = RingReplayBuffer(INPUT, OUTPUT, 20_000) + data = [] + for _ in range(10): + data.extend(MC_collect_single(demonstrator.action, ENV, 300)) + dataset.core_transition(data) + + pool = multiprocessing.Pool(14) + toolbox.register("map", pool.map) + + stats_fit = tools.Statistics(lambda ind: ind.fitness.values) + stats_size = tools.Statistics(len) + stats_bandit = tools.Statistics(lambda ind: len(ind.fitness.rewards)) + + mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size, bandit=stats_bandit) + mstats.register("avg", lambda x: np.mean(x, axis=0)) + mstats.register("std", lambda x: np.std(x, axis=0)) + mstats.register("min", lambda x: np.min(x, axis=0)) + mstats.register("max", lambda x: np.max(x, axis=0)) + + logbook = tools.Logbook() + logbook.header = ['gen', 'nevals'] + (mstats.fields if mstats else []) + + #hof = tools.HallOfFame(10) + hof = tools.ParetoFront() + pop = toolbox.population(n=1000) + + simulation_budget = 10 + paralelle_update = 12 + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'].T, target=data['a'].T), pop) + + for ind, fit in zip(pop, 
fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + tmp = 0 + max_n = 0 + max_ind = None + while(tmp < simulation_budget): + inds = toolbox.select(pop, paralelle_update) + fitnesses = toolbox.map(toolbox.evaluate_MC, inds) + for ind, fit in zip(inds, fitnesses): + ind.fitness.add_reward(fit) + ind.fitness.values = ind.fitness.values[0], ind.fitness.calc_fitness(simulation_budget)#, ind.fitness.values[2] + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + tmp+=1 + + pop = toolbox.select(pop, 2001) + hof.update(pop) + + record = mstats.compile(pop) if mstats is not None else {} + logbook.record(gen=0, nevals=len(pop), **record) + with open('experiments/imitation_learning/log/log_gp.txt', 'w') as f: + txt = logbook.stream + f.write(txt) + f.write('\n') + print(txt) + + for gen in range(0, 2001): + offspring = algorithms.varOr(pop, toolbox, 200, cxpb=0.0, mutpb=1.0) + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'].T, target=data['a'].T), offspring) + + for ind, fit in zip(offspring, fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + max_n = 0 + max_ind = None + for ind in pop: + ind.fitness.update_offset() + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + + popoff = pop+offspring + tmp = 0 + while(tmp < simulation_budget): + inds = toolbox.select(popoff, paralelle_update) + fitnesses = toolbox.map(toolbox.evaluate_MC, inds) + for ind, fit in zip(inds, fitnesses): + ind.fitness.add_reward(fit) + #ind.fitness.values = ind.fitness.calc_fitness(simulation_budget), + ind.fitness.values = ind.fitness.values[0], ind.fitness.calc_fitness(simulation_budget)#, ind.fitness.values[2] + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + tmp+=1 + + pop = toolbox.select(pop + offspring, 20) + + hof.update(pop) + + if gen%10 == 0 and gen!=0: + data = [] + for ind in tools.selBest(pop, 5): + #a_learner, inter_data = MC_collect_label(toolbox.compile(ind), demonstrator.action, ENV, 300) + a_learner, inter_data = MC_collect_label_(toolbox.compile(ind), demonstrator.action, ENV, 300) + a_learner, inter_data = np.array(a_learner), np.array(inter_data, dtype=dataset.data_type) + #inter_data = inter_data[select_data(a_learner, inter_data['a'], 1e-2)]#Only keep diverging actions + dataset.add_transition(inter_data) + + for ind in pop: + del ind.fitness.values + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'].T, target=data['a'].T), pop) + for ind, fit in zip(pop, fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + record = mstats.compile(pop) if mstats is not None else {} + logbook.record(gen=gen, nevals=len(pop), **record) + with open('experiments/imitation_learning/log/log_gp.txt', 'a') as f: + string = logbook.stream + f.write(string) + f.write('\n') + print(string) + + if gen%10==0: + import pickle + with open('experiments/imitation_learning/log/'+'hof-'+str(gen)+'.pkl', 'wb') as output: + pickle.dump(hof, output, pickle.HIGHEST_PROTOCOL) + with open('experiments/imitation_learning/log/'+'pop-'+str(gen)+'.pkl', 'wb') as output: + pickle.dump(pop, output, pickle.HIGHEST_PROTOCOL) + + pool.close() + ENV.close() + + diff --git 
a/experiments/imitation_learning/imitation_linGP.py b/experiments/imitation_learning/imitation_linGP.py new file mode 100644 index 0000000000000000000000000000000000000000..59fe3d9d42835fa2df7bf96b84354642ac6dc673 --- /dev/null +++ b/experiments/imitation_learning/imitation_linGP.py @@ -0,0 +1,290 @@ + +from functools import partial +from GPRL.genetic_programming import linearGP as linGP + +from deap import creator, tools, base + +import random + +import numpy as np +from sklearn.metrics import mean_squared_error + +import gym +import pybulletgym + +from experiments.imitation_learning.time_feature import TimeFeatureWrapper +from experiments.imitation_learning.imitation_utils import RingReplayBuffer + + +def initializer(name, seed=None, wrapper=lambda x:x): + global ENV + ENV = wrapper(gym.make(name)) + if seed: + ENV.seed(seed) + ENV.action_space.seed(seed) + ENV.observation_space.seed(seed) + +ENV = TimeFeatureWrapper(gym.make("AntPyBulletEnv-v0")) + +INPUT = ENV.observation_space.shape[0] +OUTPUT = ENV.action_space.shape[0] + + +from GPRL.UCB import UCBFitness +creator.create("FitnessMin", UCBFitness, weights=(-1.0, 1.0)) +creator.create("Individual", linGP.Program, fitness=creator.FitnessMin) + +toolbox = base.Toolbox() +toolbox.register("Program", linGP.initProgam, creator.Individual, regCalcSize=8, regInputSize=INPUT, regConstSize=32, pConst=0.3, pBranch=0.3, min_=16, max_=128) +toolbox.register("population", tools.initRepeat, list, toolbox.Program) + +def check_env(): + return 'ENV' in globals() + +def select_data(a_learner, a_demo, delta, n=2): + add = (np.abs(a_learner-a_demo)**n).sum(axis=1)>delta + return add + +def MC_collect_label(agent_learner, agent_demo, env, n_steps): + if check_env(): + env = ENV + data = [] + + env.seed(0) + env.action_space.seed(0) + env.observation_space.seed(0) + + state = env.reset() + a_learner = [] + for k in range(n_steps): + a_learner.append(agent_learner(state)) + a_demo, _ = agent_demo(state) + _state, reward, done, _ = env.step(a_learner[k]) + data.append((state, a_demo, _state, reward)) + state = _state + if done: + break + return a_learner, data + +def MC_collect_single(agent, env, n_steps): + if check_env() and env is None: + env = ENV + data = [] + state = env.reset() + for k in range(n_steps): + a_demo, _ = agent(state) + _state, reward, done, _ = env.step(a_demo) + data.append((state, a_demo, _state, reward)) + state = _state + if done: + break + return data + +def MC_fitness(individual, env, n_steps, num_episodes, gamma): + if check_env() and env is None: + env = ENV + eff, _, _ = individual.to_effective(list(range(OUTPUT))) + register = eff.init_register() + agent = lambda inputs: eff.execute(eff, inputs, register, list(range(OUTPUT))) + s = 0 + steps = 0 + for _ in range(num_episodes): + state = env.reset() + for k in range(n_steps): + state, reward, done, _ = env.step(agent(state)) + s+= gamma*reward + steps += 1 + if done: + break + return s + +def mse_loss(individual, data, target): + eff, _, _ = individual.to_effective(list(range(OUTPUT))) + register = eff.init_register() + func = lambda inputs: eff.execute(eff, inputs, register, list(range(OUTPUT))) + y = func(data) + if(~np.isfinite(y)).any(): + return -np.inf + mse = mean_squared_error(target, y) + return mse + +def fitness(individual, data, target, env, n_steps, num_episodes, gamma): + if check_env() and env is None: + env = ENV + env.seed(0) + env.action_space.seed(0) + env.observation_space.seed(0) + return mse_loss(individual, data, target), MC_fitness(individual, env, 
n_steps, num_episodes, gamma), + +toolbox.register("evaluate_MC", MC_fitness, env=None, n_steps=300, num_episodes=1, gamma=0.99) +toolbox.register("evaluate_mse", mse_loss) +toolbox.register("evaluate", fitness, env=None, n_steps=300, num_episodes=1, gamma=0.99) + +#toolbox.register("select", tools.selTournament, tournsize=12) +toolbox.register("select", tools.selNSGA2) +#toolbox.register("select", tools.selDoubleTournament, fitness_size=12, parsimony_size=1.2, fitness_first=True) + +def mutate(individual, pIns, pDel, pSwap, pMut, pConst, pBranch): + if random.random() < pIns: + linGP.mutInsert(individual, pConst, pBranch, effective=list(range(OUTPUT))) + if random.random() < pDel: + linGP.mutDelete(individual, effective=None) + if random.random() < pSwap: + _, _, idxs = individual.to_effective(list(range(OUTPUT))) + linGP.mutSwap(individual, effective=idxs) + if random.random() < pMut: + linGP.Program.mutInstr(individual, 0.3, 0.3, 0.4, pBranch, effective=list(range(OUTPUT))) + return individual, + +toolbox.register("mate", linGP.cxLinear, l_min=2, l_max=128, l_smax=8, dc_max=8, ds_max=10) +toolbox.register("mutate", mutate, pIns=0.45, pDel=0.55, pSwap=0.2, pMut=0.5, pConst=0.3, pBranch=0.3) + +def F(weights, individual, idxs, evaluate): + individual.regConst[idxs] = weights + return evaluate(individual) + +if __name__== "__main__": + import multiprocessing + from deap import algorithms + from stable_baselines3 import TD3 + + demonstrator = TD3.load('experiments/imitation_learning/NN/td3-AntBulletEnv-v0.zip') + demonstrator.action = partial(demonstrator.predict, deterministic=True) + + dataset = RingReplayBuffer(INPUT, OUTPUT, 20_000) + data = [] + for _ in range(10): + data.extend(MC_collect_single(demonstrator.action, ENV, 1000)) + dataset.core_transition(data) + + pool = multiprocessing.Pool(12, initializer=initializer, initargs=("AntPyBulletEnv-v0", 0, TimeFeatureWrapper)) + toolbox.register("map", pool.map) + + stats_fit = tools.Statistics(lambda ind: ind.fitness.values) + stats_eff = tools.Statistics(lambda ind: len(ind.to_effective(list(range(OUTPUT)))[0])) + stats_size = tools.Statistics(len) + stats_bandit = tools.Statistics(lambda ind: len(ind.fitness.rewards)) + + mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size, effective=stats_eff, bandit=stats_bandit) + mstats.register("avg", lambda x: np.mean(x, axis=0)) + mstats.register("std", lambda x: np.std(x, axis=0)) + mstats.register("min", lambda x: np.min(x, axis=0)) + mstats.register("max", lambda x: np.max(x, axis=0)) + + logbook = tools.Logbook() + logbook.header = ['gen', 'nevals'] + (mstats.fields if mstats else []) + + #hof = tools.HallOfFame(10) + hof = tools.ParetoFront() + pop = toolbox.population(n=1000) + + simulation_budget = 10 + paralelle_update = 12 + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'], target=data['a']), pop) + for ind, fit in zip(pop, fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + for ind, fit in zip(pop, fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + tmp = 0 + max_n = 0 + max_ind = None + while(tmp < simulation_budget): + inds = toolbox.select(pop, paralelle_update) + fitnesses = toolbox.map(toolbox.evaluate_MC, inds) + for ind, fit in zip(inds, fitnesses): + ind.fitness.add_reward(fit) + ind.fitness.values = ind.fitness.values[0], 
ind.fitness.calc_fitness(simulation_budget)#, ind.fitness.values[2] + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + tmp+=1 + + pop = toolbox.select(pop, 20) + pop.append(max_ind) + hof.update(pop) + + record = mstats.compile(pop) if mstats is not None else {} + logbook.record(gen=0, nevals=len(pop), **record) + with open('experiments/imitation_learning/log/log.txt', 'w') as f: + txt = logbook.stream + f.write(txt) + f.write('\n') + print(txt) + + for gen in range(0, 2001): + offspring = algorithms.varOr(pop, toolbox, 200, cxpb=0.1, mutpb=0.9) + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'], target=data['a']), offspring) + + for ind, fit in zip(offspring, fitnesses): + ind.fitness.reset() + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + max_n = 0 + max_ind = None + for ind in pop: + ind.fitness.update_offset() + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + + popoff = pop+offspring + tmp = 0 + while(tmp < simulation_budget): + inds = toolbox.select(popoff, paralelle_update) + fitnesses = toolbox.map(toolbox.evaluate_MC, inds) + for ind, fit in zip(inds, fitnesses): + ind.fitness.add_reward(fit) + ind.fitness.values = ind.fitness.values[0], ind.fitness.calc_fitness(simulation_budget)#, ind.fitness.values[2] + if (max_n < len(ind.fitness.rewards)): + max_n, max_ind = len(ind.fitness.rewards), ind + tmp+=1 + + pop = toolbox.select(popoff, 25) + + hof.update(pop) + + if gen%20 == 0 and gen!=0: + data = [] + for ind in pop: + eff, _, _ = ind.to_effective(list(range(OUTPUT))) + register = eff.init_register() + agent = lambda inputs: eff.execute(eff, inputs, register, list(range(OUTPUT))) + a_learner, inter_data = MC_collect_label(agent, demonstrator.action, ENV, 300) + a_learner, inter_data = np.array(a_learner), np.array(inter_data, dtype=dataset.data_type) + #inter_data = inter_data[select_data(a_learner, inter_data['a'], 1e-2)]#filter data and keep only diverging actions + dataset.add_transition(inter_data) + + for ind in pop: + del ind.fitness.values + + data = dataset.get_data() + fitnesses = toolbox.map(partial(toolbox.evaluate, data=data['s'], target=data['a']), pop) + + for ind, fit in zip(pop, fitnesses): + ind.fitness.add_reward(fit[1]) + ind.fitness.values = fit[0], ind.fitness.calc_fitness(simulation_budget)#, fit[2] + + record = mstats.compile(pop) if mstats is not None else {} + logbook.record(gen=gen, nevals=len(pop), **record) + with open('experiments/imitation_learning/log/log.txt', 'a') as f: + string = logbook.stream + f.write(string) + f.write('\n') + print(string) + + if gen%10==0: + import pickle + with open('experiments/imitation_learning/log/'+'hof-'+str(gen)+'.pkl', 'wb') as output: + pickle.dump(hof, output, pickle.HIGHEST_PROTOCOL) + with open('experiments/imitation_learning/log/'+'pop-'+str(gen)+'.pkl', 'wb') as output: + pickle.dump(pop, output, pickle.HIGHEST_PROTOCOL) + + pool.close() + ENV.close() \ No newline at end of file diff --git a/experiments/imitation_learning/imitation_utils.py b/experiments/imitation_learning/imitation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4990216b631587fd185f4adb22be669ebfbbf9 --- /dev/null +++ b/experiments/imitation_learning/imitation_utils.py @@ -0,0 +1,38 @@ +import numpy as np + +class RingReplayBuffer(object): + data_type = None + def __init__(self, state_size, action_size, size, 
offset=0): + self.data_type = [('s', 'f4', (state_size,)), ('a', 'f4', (action_size,)), ('next_s','f4', (state_size,)),('r','f4')] + self.data = np.zeros(size, dtype=self.data_type) + self.offset = offset + self.size = size - offset + self.idx = 0 + + def core_transition(self, data): + self.size += self.offset + self.offset = len(data) + for k, item in enumerate(data): + self.data[k%self.size] = np.array(item, dtype=self.data_type) + self.size -= self.offset + + def add_transition(self, data): + if isinstance(data, list): + for item in data: + self.data[self.offset + (self.idx%self.size)] = np.array(item, dtype=self.data_type) + self.idx += 1 + else: + idx = (self.idx%self.size) + if data.shape[0] < self.size - idx: + self.data[self.offset + idx:self.offset+ idx + data.shape[0]] = data + else: + print("yolo")#, data.shape[0], self.size -idx, ) + self.data[self.offset + idx:] = data[:self.size-idx] + self.data[self.offset: self.offset + data.shape[0] - self.size + idx] = data[self.size-idx:] + self.idx += data.shape[0] + + def get_data(self): + if self.offset + self.idx > self.size: + return self.data + else: + return self.data[:self.offset+self.idx] \ No newline at end of file diff --git a/experiments/imitation_learning/requirements.txt b/experiments/imitation_learning/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..71b748ce6c71df22e4961ec4f16a322ef7379030 --- /dev/null +++ b/experiments/imitation_learning/requirements.txt @@ -0,0 +1,50 @@ +# +# This file is autogenerated by pip-compile with python 3.8 +# To update, run: +# +# pip-compile +# +cloudpickle==1.6.0 + # via + # gym + # stable-baselines3 +cycler==0.10.0 + # via matplotlib +deap==1.3.1 + # via -r requirements.in +gym==0.19.0 + # via stable-baselines3 +kiwisolver==1.3.2 + # via matplotlib +matplotlib==3.4.3 + # via stable-baselines3 +numpy==1.21.2 + # via + # -r requirements.in + # deap + # gym + # matplotlib + # pandas + # stable-baselines3 +pandas==1.3.3 + # via stable-baselines3 +pillow==8.3.2 + # via matplotlib +pyparsing==2.4.7 + # via matplotlib +python-dateutil==2.8.2 + # via + # matplotlib + # pandas +pytz==2021.1 + # via pandas +six==1.16.0 + # via + # cycler + # python-dateutil +stable-baselines3==1.2.0 + # via -r requirements.in +torch==1.9.0 + # via stable-baselines3 +typing-extensions==3.10.0.2 + # via torch diff --git a/experiments/imitation_learning/time_feature.py b/experiments/imitation_learning/time_feature.py new file mode 100644 index 0000000000000000000000000000000000000000..3d04c469d935223e6a5f5e76f7850312472dfac2 --- /dev/null +++ b/experiments/imitation_learning/time_feature.py @@ -0,0 +1,87 @@ +from typing import Dict, Union + +import gym +import numpy as np +from stable_baselines3.common.type_aliases import GymObs, GymStepReturn + + +class TimeFeatureWrapper(gym.Wrapper): + """ + Add remaining, normalized time to observation space for fixed length episodes. + See https://arxiv.org/abs/1712.00378 and https://github.com/aravindr93/mjrl/issues/13. + .. note:: + Only ``gym.spaces.Box`` and ``gym.spaces.Dict`` (``gym.GoalEnv``) 1D observation spaces + are supported for now. + :param env: Gym env to wrap. + :param max_steps: Max number of steps of an episode + if it is not wrapped in a ``TimeLimit`` object. + :param test_mode: In test mode, the time feature is constant, + equal to zero. This allow to check that the agent did not overfit this feature, + learning a deterministic pre-defined sequence of actions. 
+ """ + + def __init__(self, env: gym.Env, max_steps: int = 1000, test_mode: bool = False): + assert isinstance( + env.observation_space, (gym.spaces.Box, gym.spaces.Dict) + ), "`TimeFeatureWrapper` only supports `gym.spaces.Box` and `gym.spaces.Dict` (`gym.GoalEnv`) observation spaces." + + # Add a time feature to the observation + if isinstance(env.observation_space, gym.spaces.Dict): + assert "observation" in env.observation_space.spaces, "No `observation` key in the observation space" + obs_space = env.observation_space.spaces["observation"] + assert isinstance( + obs_space, gym.spaces.Box + ), "`TimeFeatureWrapper` only supports `gym.spaces.Box` observation space." + obs_space = env.observation_space.spaces["observation"] + else: + obs_space = env.observation_space + + assert len(obs_space.shape) == 1, "Only 1D observation spaces are supported" + + low, high = obs_space.low, obs_space.high + low, high = np.concatenate((low, [0.0])), np.concatenate((high, [1.0])) + + if isinstance(env.observation_space, gym.spaces.Dict): + env.observation_space.spaces["observation"] = gym.spaces.Box(low=low, high=high, dtype=np.float32) + else: + env.observation_space = gym.spaces.Box(low=low, high=high, dtype=np.float32) + + super(TimeFeatureWrapper, self).__init__(env) + + # Try to infer the max number of steps per episode + try: + self._max_steps = env.spec.max_episode_steps + except AttributeError: + self._max_steps = None + + # Fallback to provided value + if self._max_steps is None: + self._max_steps = max_steps + + self._current_step = 0 + self._test_mode = test_mode + + def reset(self) -> GymObs: + self._current_step = 0 + return self._get_obs(self.env.reset()) + + def step(self, action: Union[int, np.ndarray]) -> GymStepReturn: + self._current_step += 1 + obs, reward, done, info = self.env.step(action) + return self._get_obs(obs), reward, done, info + + def _get_obs(self, obs: Union[np.ndarray, Dict[str, np.ndarray]]) -> Union[np.ndarray, Dict[str, np.ndarray]]: + """ + Concatenate the time feature to the current observation. 
+ :param obs: + :return: + """ + # Remaining time is more general + time_feature = 1 - (self._current_step / self._max_steps) + if self._test_mode: + time_feature = 1.0 + + if isinstance(obs, dict): + obs["observation"] = np.append(obs["observation"], time_feature) + return obs + return np.append(obs, time_feature) \ No newline at end of file diff --git a/experiments/linGP.py b/experiments/linGP.py index 6bc501a99c91d04d2cf5c8210457f5f8a0c3baac..7d70ef2d17ace3a6d149ae1bb772699a79226134 100644 --- a/experiments/linGP.py +++ b/experiments/linGP.py @@ -14,7 +14,7 @@ from GPRL.genetic_programming import linearGP as linGP from GPRL.UCB import UCBFitness from GPRL.factory import EvolveFactory -from GPRL.utils.utils import convert_logbook_to_dataframe +from GPRL.utils.utils import convert_logbook_to_dataframe, save_each_generation def MC_fitness(individual, n_steps, num_episodes, gamma): eff, _, _ = individual.to_effective(list(range(OUTPUT))) @@ -72,8 +72,8 @@ class Factory(EvolveFactory): toolbox, creator = self.make_toolbox() def make_toolbox(self): - creator.create("FitnessMin", UCBFitness, weights=(1.0, -1.0)) - creator.create("Individual", linGP.Program, fitness=creator.FitnessMin) + creator.create("FitnessMax", UCBFitness, weights=(1.0, -1.0)) + creator.create("Individual", linGP.Program, fitness=creator.FitnessMax) if self.conf['function_set']=="small": ops = np.array([True]*3 + [False, True] + [False]*5 + [True]*2) @@ -125,7 +125,9 @@ if __name__ == '__main__': import argparse from deap import algorithms import GPRL.algorithms as my_algorithms - import pandas as pd + import os + import pickle + import time parser = argparse.ArgumentParser() parser.add_argument("--env", required=True, help="environment ID", type=str) @@ -155,7 +157,8 @@ if __name__ == '__main__': parser.add_argument("--simulation-budget", help="number of simulation allowed for UCB", default=1, type=int) parser.add_argument("--c", help="constante d'exploration", default=1.0, type=float) parser.add_argument("--n-thread", help="number of thread to use", default=1, type=int) - parser.add_argument("--path", help="path to save the results", default="", type=str) + parser.add_argument("--save-every", help="save hof and population every n generation", default=10, type=int) + parser.add_argument("--path", help="path to save the results", default=os.path.join("experiments", "results", "linGP"), type=str) args = parser.parse_args() @@ -168,20 +171,28 @@ if __name__ == '__main__': toolbox.register("map", pool.map) pop = toolbox.population(n=args.mu) - hof = tools.ParetoFront() + hof = tools.HallOfFame(10) + + dir = os.path.join(args.path, "log-linGP-"+ args.env + "-" + os.path.basename(args.path) +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) if args.algorithm == "UCB": - algo = partial(my_algorithms.eaMuPlusLambdaUCB, simulation_budget=args.simulation_budget, parallel_update=args.n_thread) + algo = partial(my_algorithms.eaMuPlusLambdaUCB, simulation_budget=args.simulation_budget, parallel_update=args.n_thread, iteration_callback=save_each_generation(dir, modulo=args.save_every)) elif args.algorithm == "(mu, lambda)": algo = algorithms.eaMuCommaLambda elif args.algorithm == "(mu + lambda)": algo = algorithms.eaMuPlusLambda pop, log = algo(population=pop, toolbox=toolbox, cxpb=args.cxpb, mutpb=args.mutpb, mu=args.mu, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, halloffame=hof, verbose=True) - - name = str(hash(vars(args).values())) - convert_logbook_to_dataframe(log).to_csv(args.path+"log_lingp"+ name + 
".csv", index=False) - print("Experiment is saved at : ", args.path + "log_lingp-"+name) + + with open(os.path.join(dir, "pop-final.pkl"), 'wb') as output: + pickle.dump(list(pop), output, pickle.HIGHEST_PROTOCOL) + with open(os.path.join(dir, "hof-final.pkl"), 'wb') as output: + pickle.dump(list(hof), output, pickle.HIGHEST_PROTOCOL) + + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log_linGP.csv"), index=False) + print("Experiment is saved at : ", dir) pool.close() factory.close() \ No newline at end of file diff --git a/experiments/qdgp.py b/experiments/qdgp.py index f68d387800faca551f051f6485da2853eb11f9c4..c106dca354616e32bd1e11a39fcde8a80055f53b 100644 --- a/experiments/qdgp.py +++ b/experiments/qdgp.py @@ -3,12 +3,12 @@ import numpy as np import pandas as pd import gym from deap import gp -from qdpy.containers import Grid +from GPRL.containers.grid import FixGrid as Grid from GPRL.utils import gp_utils -from GPRL.utils.utils import convert_logbook_to_dataframe +from GPRL.utils.utils import convert_logbook_to_dataframe, save_each_generation from GPRL.genetic_programming import team -import gp as gp_script +import experiments.gp as gp_script def MC_fitness(individual, n_steps, num_episodes, gamma, features_kept): agent = gp_script.toolbox.compile(individual) @@ -22,9 +22,10 @@ def MC_fitness(individual, n_steps, num_episodes, gamma, features_kept): state, reward, done, _ = gp_script.ENV.step(agent(*state)) if gp_script.ENV.unwrapped.spec.id == "HopperBulletEnv-v0": features = [abs(state[-8]), abs(state[-5]), abs(state[-3]), state[-1]] + # thight, leg, knee , contact with the ground elif gp_script.ENV.unwrapped.spec.id == "BipedalWalker-v3": features = [abs(state[4]), abs(state[6]), abs(state[9]), abs(state[11]), state[8], state[13]] - + # hip0, knee0, hip1, , knee1 , contact0, contact1 with the ground s+= gamma*reward total_features = tuple(x + y for x, y in zip(total_features, features)) if done: @@ -59,11 +60,14 @@ if '__main__' == __name__: import multiprocessing import argparse from deap import algorithms - from .. 
import algorithms as algo + from GPRL import algorithms as algo + import os + import pickle + import time parser = argparse.ArgumentParser() parser.add_argument("--env", required=True, help="environment ID", type=str) - parser.add_argument("--n-episodes", help="Number of episodes", default=1, type=int) + parser.add_argument("--n-episodes", help="Number of episodes", default=3, type=int) parser.add_argument("--n-steps", help="Number of step per episode", default=500, type=int) parser.add_argument("--gamma", help="discount factor", default=1.0, type=float) @@ -73,10 +77,9 @@ if '__main__' == __name__: parser.add_argument("--lambda_", help="number of offspring", default=500, type=int) parser.add_argument("--n-gen", help="number of generation", default=100, type=int) parser.add_argument("--function-set", help="function set", default="small", type=str) - parser.add_argument("--simulation-budget", help="number of simulation allowed for UCB", default=1, type=int) - parser.add_argument("--c", help="constante d'exploration", default=1.0, type=float) parser.add_argument("--n-thread", help="number of thread to use", default=1, type=int) - parser.add_argument("--path", help="path to save the results", default="", type=str) + parser.add_argument("--save-every", help="save hof and population every n generation", default=10, type=int) + parser.add_argument("--path", help="path to save the results", default=os.path.join("experiments", "results", "qdgp"), type=str) args = parser.parse_args() @@ -101,8 +104,9 @@ if '__main__' == __name__: fitness_domain = ((-200_000.0, 1100.0),) max_items_per_bin = 5 else: - raise ValueError("Environment not supported ! Please use env-id : BipedalWalkerFeatures-v3 or HopperBulletEnvFeatures-v3") + raise ValueError("Environment not supported ! 
Please use env-id : BipedalWalker-v3 or HopperBulletEnv-v0") + args.c = 0.0 args.features_kept = features_kept conf = vars(args) factory = gp_script.Factory(conf) @@ -117,13 +121,18 @@ if '__main__' == __name__: pop = gp_script.toolbox.population(n=args.batch_size*10) grid = Grid(shape=nbBins, max_items_per_bin=max_items_per_bin, fitness_domain=fitness_domain, features_domain=features_domain[features_kept], storage_type=list) + + dir = os.path.join(args.path, "log-qdgp-"+ args.env +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) - pop, log = algo.qdLambda(pop, gp_script.toolbox, grid, args.batch_size, cxpb=args.cxpb, mutpb=args.mutpb, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, verbose=True) + pop, log = algo.qdLambda(pop, gp_script.toolbox, grid, args.batch_size, cxpb=args.cxpb, mutpb=args.mutpb, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, iteration_callback=save_each_generation(dir, modulo=args.save_every), verbose=True) - name = str(hash(vars(args).values())) - convert_logbook_to_dataframe(log).to_csv("log_gp-"+ name + ".csv", index=False) + with open(os.path.join(dir, "grid-final.pkl"), 'wb') as output: + pickle.dump(list(grid), output, pickle.HIGHEST_PROTOCOL) - print("Experiment is saved at : ", name) + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log_qdgp.csv"), index=False) + print("Experiment is saved at : ", dir) pool.close() factory.close() diff --git a/experiments/qdlinGP.py b/experiments/qdlinGP.py index 5a27b094ebea9c7caf62b3e5fff145eb8c874c22..216149ae6815a7c3d76854e4698c734c4ac9e445 100644 --- a/experiments/qdlinGP.py +++ b/experiments/qdlinGP.py @@ -1,8 +1,8 @@ import numpy as np -from qdpy.containers import Grid +from GPRL.containers.grid import FixGrid as Grid -from GPRL.utils.utils import convert_logbook_to_dataframe -from . 
import linGP as linGP_script +from GPRL.utils.utils import convert_logbook_to_dataframe, save_each_generation +import experiments.linGP as linGP_script from GPRL.genetic_programming import linearGP as linGP def MC_fitness(individual, n_steps, num_episodes, gamma, features_kept): @@ -27,8 +27,10 @@ def MC_fitness(individual, n_steps, num_episodes, gamma, features_kept): #Handcrafted Features if linGP_script.ENV.unwrapped.spec.id == "HopperBulletEnv-v0": features = [abs(state[-8]), abs(state[-5]), abs(state[-3]), state[-1]] + # thigh, leg, knee, contact with the ground elif linGP_script.ENV.unwrapped.spec.id == "BipedalWalker-v3": features = [abs(state[4]), abs(state[6]), abs(state[9]), abs(state[11]), state[8], state[13]] + # hip0, knee0, hip1, knee1, contact0, contact1 with the ground s+= gamma*reward total_features = tuple(x + y for x, y in zip(total_features, features)) if done: @@ -57,10 +59,13 @@ if '__main__' == __name__: import argparse from deap import algorithms from GPRL import algorithms as algo + import os + import pickle + import time parser = argparse.ArgumentParser() parser.add_argument("--env", required=True, help="environment ID", type=str) - parser.add_argument("--n-episodes", help="Number of episodes", default=1, type=int) + parser.add_argument("--n-episodes", help="Number of episodes", default=3, type=int) parser.add_argument("--n-steps", help="Number of step per episode", default=2000, type=int) parser.add_argument("--gamma", help="discount factor", default=1.0, type=float) @@ -76,17 +81,15 @@ if '__main__' == __name__: parser.add_argument("--pSwap", help="macro-mutation probability of swaping instruction", default=0.1, type=float) parser.add_argument("--pMut", help="micro-mutation probability of mutating an existing instruction", default=0.5, type=float) - parser.add_argument("--algorithm", help="algorithm (mu+lambda), (mu, lambda) or UCB", default="UCB", type=str) parser.add_argument("--cxpb", help="crossover probability", default=0.0, type=float) parser.add_argument("--mutpb", help="mutation probability", default=1.0, type=float) parser.add_argument("--lambda_", help="number of offspring", default=100, type=int) parser.add_argument("--batch-size", help="same thing as population size", default=500, type=int) parser.add_argument("--n-gen", help="number of generation", default=100, type=int) parser.add_argument("--function-set", help="function set", default="small", type=str) - parser.add_argument("--simulation-budget", help="number of simulation allowed for UCB", default=1, type=int) - parser.add_argument("--c", help="constante d'exploration", default=1.0, type=float) parser.add_argument("--n-thread", help="number of thread to use", default=1, type=int) - parser.add_argument("--path", help="path to save the results", default="", type=str) + parser.add_argument("--save-every", help="save hof and population every n generation", default=10, type=int) + parser.add_argument("--path", help="path to save the results", default=os.path.join("experiments", "results", "qdlinGP"), type=str) args = parser.parse_args() @@ -111,9 +114,10 @@ if '__main__' == __name__: fitness_domain = ((-200_000.0, 1100.0),) max_items_per_bin = 5 else: - raise ValueError("Environment not supported ! Please use env-id : BipedalWalker-v3 or HopperBulletEnv-v3") + raise ValueError("Environment not supported ! 
Please use env-id : BipedalWalker-v3 or HopperBulletEnv-v0") args.features_kept = features_kept + args.c = 0 conf = vars(args) factory = linGP_script.Factory(conf) factory.init_global_var() @@ -128,12 +132,17 @@ if '__main__' == __name__: grid = Grid(shape=nbBins, max_items_per_bin=max_items_per_bin, fitness_domain=fitness_domain, features_domain=features_domain[features_kept], storage_type=list) - pop, log = algo.qdLambda(pop, linGP_script.toolbox, grid, args.batch_size, cxpb=args.cxpb, mutpb=args.mutpb, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, verbose=True) + dir = os.path.join(args.path, "log-qdlinGP-"+ args.env +"-"+str(time.time())) + if not os.path.exists(dir): + os.mkdir(dir) - name = str(hash(vars(args).values())) - convert_logbook_to_dataframe(log).to_csv("log_lingp"+ name + ".csv", index=False) + pop, log = algo.qdLambda(pop, linGP_script.toolbox, grid, args.batch_size, cxpb=args.cxpb, mutpb=args.mutpb, lambda_=args.lambda_, ngen=args.n_gen, stats=mstats, iteration_callback=save_each_generation(dir, modulo=args.save_every), verbose=True) - print("Experiment is saved at : ", name) + with open(os.path.join(dir, "grid-final.pkl"), 'wb') as output: + pickle.dump(list(grid), output, pickle.HIGHEST_PROTOCOL) + + convert_logbook_to_dataframe(log).to_csv(os.path.join(dir, "log_qdlinGP.csv"), index=False) + print("Experiment is saved at : ", dir) pool.close() factory.close() diff --git a/experiments/bench/env/conf/easimple-mountaincar.yml b/experiments/results/bench/env/conf/easimple-mountaincar.yml similarity index 100% rename from experiments/bench/env/conf/easimple-mountaincar.yml rename to experiments/results/bench/env/conf/easimple-mountaincar.yml diff --git a/experiments/bench/env/conf/mu+lambda-mountaincar.yml b/experiments/results/bench/env/conf/mu+lambda-mountaincar.yml similarity index 100% rename from experiments/bench/env/conf/mu+lambda-mountaincar.yml rename to experiments/results/bench/env/conf/mu+lambda-mountaincar.yml diff --git a/experiments/bench/env/conf/mu+lambdaUCB-mountaincar.yml b/experiments/results/bench/env/conf/mu+lambdaUCB-mountaincar.yml similarity index 100% rename from experiments/bench/env/conf/mu+lambdaUCB-mountaincar.yml rename to experiments/results/bench/env/conf/mu+lambdaUCB-mountaincar.yml diff --git a/experiments/bench/env/data/UCBmu+lambda-mountaincar.csv b/experiments/results/bench/env/data/UCBmu+lambda-mountaincar.csv similarity index 100% rename from experiments/bench/env/data/UCBmu+lambda-mountaincar.csv rename to experiments/results/bench/env/data/UCBmu+lambda-mountaincar.csv diff --git a/experiments/bench/env/data/easimple-mountaincar.csv b/experiments/results/bench/env/data/easimple-mountaincar.csv similarity index 100% rename from experiments/bench/env/data/easimple-mountaincar.csv rename to experiments/results/bench/env/data/easimple-mountaincar.csv diff --git a/experiments/bench/env/data/mu+lambda-mountaincar.csv b/experiments/results/bench/env/data/mu+lambda-mountaincar.csv similarity index 100% rename from experiments/bench/env/data/mu+lambda-mountaincar.csv rename to experiments/results/bench/env/data/mu+lambda-mountaincar.csv diff --git a/experiments/bench/env/easimple.yml b/experiments/results/bench/env/easimple.yml similarity index 100% rename from experiments/bench/env/easimple.yml rename to experiments/results/bench/env/easimple.yml diff --git a/experiments/bench/oneMax/conf/easimple-pop=100-n_evals.yml b/experiments/results/bench/oneMax/conf/easimple-pop=100-n_evals.yml similarity index 100% rename from 
experiments/bench/oneMax/conf/easimple-pop=100-n_evals.yml rename to experiments/results/bench/oneMax/conf/easimple-pop=100-n_evals.yml diff --git a/experiments/bench/oneMax/conf/easimple-pop=100.yml b/experiments/results/bench/oneMax/conf/easimple-pop=100.yml similarity index 100% rename from experiments/bench/oneMax/conf/easimple-pop=100.yml rename to experiments/results/bench/oneMax/conf/easimple-pop=100.yml diff --git a/experiments/bench/oneMax/conf/easimple-pop=1000.yml b/experiments/results/bench/oneMax/conf/easimple-pop=1000.yml similarity index 100% rename from experiments/bench/oneMax/conf/easimple-pop=1000.yml rename to experiments/results/bench/oneMax/conf/easimple-pop=1000.yml diff --git a/experiments/bench/oneMax/conf/mu+lambda-pop=100.yml b/experiments/results/bench/oneMax/conf/mu+lambda-pop=100.yml similarity index 100% rename from experiments/bench/oneMax/conf/mu+lambda-pop=100.yml rename to experiments/results/bench/oneMax/conf/mu+lambda-pop=100.yml diff --git a/experiments/bench/oneMax/conf/mu+lambda-pop=1000.yml b/experiments/results/bench/oneMax/conf/mu+lambda-pop=1000.yml similarity index 100% rename from experiments/bench/oneMax/conf/mu+lambda-pop=1000.yml rename to experiments/results/bench/oneMax/conf/mu+lambda-pop=1000.yml diff --git a/experiments/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals-buget=0.yml b/experiments/results/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals-buget=0.yml similarity index 100% rename from experiments/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals-buget=0.yml rename to experiments/results/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals-buget=0.yml diff --git a/experiments/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals.yml b/experiments/results/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals.yml similarity index 100% rename from experiments/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals.yml rename to experiments/results/bench/oneMax/conf/mu+lambdaUCB-pop=100-n_evals.yml diff --git a/experiments/bench/oneMax/data/easimple-pop=100-n_evals.csv b/experiments/results/bench/oneMax/data/easimple-pop=100-n_evals.csv similarity index 100% rename from experiments/bench/oneMax/data/easimple-pop=100-n_evals.csv rename to experiments/results/bench/oneMax/data/easimple-pop=100-n_evals.csv diff --git a/experiments/bench/oneMax/data/easimple-pop=100.csv b/experiments/results/bench/oneMax/data/easimple-pop=100.csv similarity index 100% rename from experiments/bench/oneMax/data/easimple-pop=100.csv rename to experiments/results/bench/oneMax/data/easimple-pop=100.csv diff --git a/experiments/bench/oneMax/data/easimple-pop=1000.csv b/experiments/results/bench/oneMax/data/easimple-pop=1000.csv similarity index 100% rename from experiments/bench/oneMax/data/easimple-pop=1000.csv rename to experiments/results/bench/oneMax/data/easimple-pop=1000.csv diff --git a/experiments/bench/oneMax/data/mu+lambda-pop=100.csv b/experiments/results/bench/oneMax/data/mu+lambda-pop=100.csv similarity index 100% rename from experiments/bench/oneMax/data/mu+lambda-pop=100.csv rename to experiments/results/bench/oneMax/data/mu+lambda-pop=100.csv diff --git a/experiments/bench/oneMax/data/mu+lambda-pop=1000.csv b/experiments/results/bench/oneMax/data/mu+lambda-pop=1000.csv similarity index 100% rename from experiments/bench/oneMax/data/mu+lambda-pop=1000.csv rename to experiments/results/bench/oneMax/data/mu+lambda-pop=1000.csv diff --git a/experiments/bench/oneMax/data/mu+lambdaUCB-pop=100-budget=0.csv b/experiments/results/bench/oneMax/data/mu+lambdaUCB-pop=100-budget=0.csv 
similarity index 100% rename from experiments/bench/oneMax/data/mu+lambdaUCB-pop=100-budget=0.csv rename to experiments/results/bench/oneMax/data/mu+lambdaUCB-pop=100-budget=0.csv diff --git a/experiments/bench/oneMax/data/mu+lambdaUCB-pop=100-n_evals.csv b/experiments/results/bench/oneMax/data/mu+lambdaUCB-pop=100-n_evals.csv similarity index 100% rename from experiments/bench/oneMax/data/mu+lambdaUCB-pop=100-n_evals.csv rename to experiments/results/bench/oneMax/data/mu+lambdaUCB-pop=100-n_evals.csv diff --git a/img/Linear-GP.png b/img/Linear-GP.png new file mode 100644 index 0000000000000000000000000000000000000000..fb29db6d155c02963f5abdef6a51d9979cf24ff6 Binary files /dev/null and b/img/Linear-GP.png differ diff --git a/img/Tree-GP.png b/img/Tree-GP.png new file mode 100644 index 0000000000000000000000000000000000000000..41cdec0722fb4ce8e97bdba62b9ef7bff59de276 Binary files /dev/null and b/img/Tree-GP.png differ diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..752cf9c27c328673874c51e827e178ba26146c43 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,61 @@ +# +# This file is autogenerated by pip-compile with python 3.8 +# To update, run: +# +# pip-compile +# +cycler==0.10.0 + # via matplotlib +deap==1.3.1 + # via -r requirements.in +joblib==1.0.1 + # via scikit-learn +kiwisolver==1.3.2 + # via matplotlib +matplotlib==3.4.3 + # via qdpy +numpy==1.21.2 + # via + # -r requirements.in + # deap + # matplotlib + # pandas + # qdpy + # scikit-learn + # scipy +pandas==1.3.3 + # via + # -r requirements.in + # qdpy +pillow==8.3.2 + # via matplotlib +psutil==5.8.0 + # via qdpy +pyparsing==2.4.7 + # via matplotlib +python-dateutil==2.8.2 + # via + # matplotlib + # pandas +pytz==2021.1 + # via pandas +pyyaml==5.4.1 + # via qdpy +qdpy==0.1.2.1 + # via -r requirements.in +scikit-learn==0.24.2 + # via sklearn +scipy==1.7.1 + # via + # qdpy + # scikit-learn +six==1.16.0 + # via + # cycler + # python-dateutil +sklearn==0.0 + # via qdpy +threadpoolctl==2.2.0 + # via scikit-learn +typing-extensions==3.10.0.2 + # via qdpy diff --git a/requirements_with_pygraphviz.txt b/requirements_with_pygraphviz.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9fe93af48f5a6056fa643c447aad3b0abf784a4 --- /dev/null +++ b/requirements_with_pygraphviz.txt @@ -0,0 +1,63 @@ +# +# This file is autogenerated by pip-compile with python 3.8 +# To update, run: +# +# pip-compile +# +cycler==0.10.0 + # via matplotlib +deap==1.3.1 + # via -r requirements.in +joblib==1.0.1 + # via scikit-learn +kiwisolver==1.3.2 + # via matplotlib +matplotlib==3.4.3 + # via qdpy +numpy==1.21.2 + # via + # -r requirements.in + # deap + # matplotlib + # pandas + # qdpy + # scikit-learn + # scipy +pandas==1.3.3 + # via + # -r requirements.in + # qdpy +pillow==8.3.2 + # via matplotlib +psutil==5.8.0 + # via qdpy +pygraphviz==1.7 + # via -r requirements.in +pyparsing==2.4.7 + # via matplotlib +python-dateutil==2.8.2 + # via + # matplotlib + # pandas +pytz==2021.1 + # via pandas +pyyaml==5.4.1 + # via qdpy +qdpy==0.1.2.1 + # via -r requirements.in +scikit-learn==0.24.2 + # via sklearn +scipy==1.7.1 + # via + # qdpy + # scikit-learn +six==1.16.0 + # via + # cycler + # python-dateutil +sklearn==0.0 + # via qdpy +threadpoolctl==2.2.0 + # via scikit-learn +typing-extensions==3.10.0.2 + # via qdpy diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/conf.yml b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/conf.yml new file mode 100644 index 
0000000000000000000000000000000000000000..a414c29cb672e1c9130d833a79e7f0218da92739 --- /dev/null +++ b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/conf.yml @@ -0,0 +1,32 @@ +algorithm: + name: algo.eaMuPlusLambdaUCB + args: + mu: 100 + lambda_: 100 + simulation_budget: 5 + parallel_update: 16 + save_every: 10 + ngen: 200 + cxpb: 0.1 + mutpb: 0.9 + budget_scheduler: [[50, 10], [100, 20], [190, 50]] + +population: + init_size: 100 + +selection: + name: selNSGA2 + args: + +individual: Tree + +params: + env: "CartPole-v1" + function_set: small + c: 0.0 + n_episodes: 1 + n_steps: 500 + gamma: 1.0 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-0.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..77ee319c2c0cc37227241c2de08d6e3bc7c8ca96 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-0.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-10.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-10.pkl new file mode 100644 index 0000000000000000000000000000000000000000..0000c122f99c95fe3307248b5769b6b3137a16d1 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-10.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-100.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-100.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c734ab41c4230809e054cda0c0dadbbc75f7070d Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-100.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-110.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-110.pkl new file mode 100644 index 0000000000000000000000000000000000000000..139b7d0caf8bbbd2c81cb3ccac592ce7e2bbada7 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-110.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-120.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-120.pkl new file mode 100644 index 0000000000000000000000000000000000000000..69cf0d150428014d831227d03f1c4084efa6a816 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-120.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-130.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-130.pkl new file mode 100644 index 0000000000000000000000000000000000000000..282ed1c5f2212477278f774bc7537be9c4e692eb Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-130.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-140.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-140.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b221fa886febaeceda64db605561da871d11229a Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-140.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-150.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-150.pkl new file mode 100644 index 0000000000000000000000000000000000000000..95c783a2359470e64707b600b768c200bfdf2b5f Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-150.pkl differ 
diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-160.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-160.pkl new file mode 100644 index 0000000000000000000000000000000000000000..df04c11b0c4b06a45b738fe2193dd07462db7c32 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-160.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-170.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-170.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e5decf17df0ed36959dc3ef2332785d9b2f5a070 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-170.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-180.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-180.pkl new file mode 100644 index 0000000000000000000000000000000000000000..2a9e63a6738a27e5c0a971ba6059f768470321ac Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-180.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-190.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-190.pkl new file mode 100644 index 0000000000000000000000000000000000000000..12b67d4910440412cb994ba0443cc57fb5b88799 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-190.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-20.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-20.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d0cbe3127340baf0dc4bcfd2e6cba4519b9c0bd8 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-20.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-200.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-200.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7c59a30736721d9982db33b50a5c8f30b2caa4ee Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-200.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-30.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-30.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ad804d03a7346258dcb8dd3a19acc18459aeb457 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-30.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-40.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-40.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8c059c8e4b7a538cba0af1980a5d7f7935073c22 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-40.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-50.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-50.pkl new file mode 100644 index 0000000000000000000000000000000000000000..091cd8959766daa7322be32abaa238a65f731a44 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-50.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-60.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-60.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7bb85c2f6d8f954f67798e02d06b836687adfe72 Binary files /dev/null and 
b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-60.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-70.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-70.pkl new file mode 100644 index 0000000000000000000000000000000000000000..dabc83635473257826496ba8c758010a0887fc07 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-70.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-80.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-80.pkl new file mode 100644 index 0000000000000000000000000000000000000000..55667104c42e358cad47ce01aa01bc01d37d8b59 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-80.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-90.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-90.pkl new file mode 100644 index 0000000000000000000000000000000000000000..96b0d79adbc940efe7e16c4d3fa81f1751183c41 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/data-90.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/hof-final.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/hof-final.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6948453460b608becb3739a0288b9da6bedc3cf9 Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/hof-final.pkl differ diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/log.csv b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/log.csv new file mode 100644 index 0000000000000000000000000000000000000000..55d07f65ccfd4c39fbbe75f723f44b78040b83db --- /dev/null +++ b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/log.csv @@ -0,0 +1,202 @@ +fitness_avg,fitness_std,fitness_min,fitness_max,fitness_gen,fitness_nevals,size_avg,size_std,size_min,size_max,size_gen,size_nevals,complexity_avg,complexity_std,complexity_min,complexity_max,complexity_gen,complexity_nevals,gen,nevals +15.183333333333332,26.59621422516955,8.0,193.33333333333334,0.0,100.0,1.0,0.0,1.0,1.0,0.0,100.0,19.37,16.519476383953577,6.0,79.0,0.0,100.0,0,100 +20.519665404040406,37.22650884632466,8.0,204.5625,1.0,100.0,1.0,0.0,1.0,1.0,1.0,100.0,11.47,12.8128490196365,1.0,79.0,1.0,100.0,1,100 +23.81238872238872,41.8229987843982,9.0,209.30769230769232,2.0,100.0,1.0,0.0,1.0,1.0,2.0,100.0,8.36,6.818386906006434,1.0,41.0,2.0,100.0,2,100 +30.974440675811643,52.633071860464675,8.0,214.80555555555554,3.0,100.0,1.0,0.0,1.0,1.0,3.0,100.0,7.0,6.732013071882734,1.0,41.0,3.0,100.0,3,100 +40.3540464257575,63.85283613524745,8.0,227.8181818181818,4.0,100.0,1.0,0.0,1.0,1.0,4.0,100.0,6.06,6.881598651476269,1.0,33.0,4.0,100.0,4,100 +57.29402660046259,74.44843067406931,8.0,217.0,5.0,100.0,1.0,0.0,1.0,1.0,5.0,100.0,5.84,8.783757738007123,1.0,41.0,5.0,100.0,5,100 +37.44328623343385,74.82978966435171,9.0,471.72727272727275,6.0,100.0,1.0,0.0,1.0,1.0,6.0,100.0,3.34,7.74495965128289,1.0,52.0,6.0,100.0,6,100 +38.643260587722075,84.30094212278424,9.0,500.0,7.0,100.0,1.0,0.0,1.0,1.0,7.0,100.0,2.51,4.539812771469766,1.0,26.0,7.0,100.0,7,100 +31.193182179616716,77.57339541295902,10.0,478.6666666666667,8.0,100.0,1.0,0.0,1.0,1.0,8.0,100.0,1.96,3.4840206658399717,1.0,21.0,8.0,100.0,8,100 +29.12053507992139,76.07724290135602,10.0,485.5483870967742,9.0,100.0,1.0,0.0,1.0,1.0,9.0,100.0,1.82,3.229179462340239,1.0,21.0,9.0,100.0,9,100 
+29.113877339910285,77.25623821807936,10.0,500.0,10.0,100.0,1.0,0.0,1.0,1.0,10.0,100.0,1.82,3.229179462340239,1.0,21.0,10.0,100.0,10,100 +39.0155460541988,101.75822073250023,10.0,500.0,11.0,100.0,1.0,0.0,1.0,1.0,11.0,100.0,2.22,4.196617685708338,1.0,21.0,11.0,100.0,11,100 +48.982800339436906,120.55789974526617,10.0,500.0,12.0,100.0,1.0,0.0,1.0,1.0,12.0,100.0,2.64,5.024977611890426,1.0,23.0,12.0,100.0,12,100 +51.13684756318949,125.25243357504144,10.0,500.0,13.0,100.0,1.0,0.0,1.0,1.0,13.0,100.0,2.75,5.305421755148219,1.0,23.0,13.0,100.0,13,100 +61.22639646152431,140.56336364806052,10.0,500.0,14.0,100.0,1.0,0.0,1.0,1.0,14.0,100.0,3.15,5.881113840081656,1.0,23.0,14.0,100.0,14,100 +76.1871256684492,158.4459500961276,10.0,500.0,15.0,100.0,1.0,0.0,1.0,1.0,15.0,100.0,3.73,6.557217397646658,1.0,23.0,15.0,100.0,15,100 +41.70545454545454,110.71820019556611,10.0,500.0,16.0,100.0,1.0,0.0,1.0,1.0,16.0,100.0,2.18,4.219905212205601,1.0,21.0,16.0,100.0,16,100 +42.2682302346776,109.20695927916039,10.0,500.0,17.0,100.0,1.0,0.0,1.0,1.0,17.0,100.0,2.3,4.3531597719357835,1.0,21.0,17.0,100.0,17,100 +34.76694223339384,91.0425321111569,10.0,500.0,18.0,100.0,1.0,0.0,1.0,1.0,18.0,100.0,2.02,3.5888159607313384,1.0,19.0,18.0,100.0,18,100 +41.75481341173906,103.52050858185444,10.0,500.0,19.0,100.0,1.0,0.0,1.0,1.0,19.0,100.0,2.32,4.086269692519083,1.0,19.0,19.0,100.0,19,100 +44.849071681420845,112.17509924728908,10.0,500.0,20.0,100.0,1.0,0.0,1.0,1.0,20.0,100.0,2.34,4.152637715958376,1.0,19.0,20.0,100.0,20,100 +61.909584842734745,136.7474985047969,10.0,500.0,21.0,100.0,1.0,0.0,1.0,1.0,21.0,100.0,2.96,4.983813800695207,1.0,19.0,21.0,100.0,21,100 +60.81077503142019,136.86119115138828,10.0,500.0,22.0,100.0,1.0,0.0,1.0,1.0,22.0,100.0,2.84,4.884096641140509,1.0,19.0,22.0,100.0,22,100 +66.13854489596424,144.23272961005802,10.0,500.0,23.0,100.0,1.0,0.0,1.0,1.0,23.0,100.0,3.0,5.079370039680118,1.0,19.0,23.0,100.0,23,100 +75.97552760378882,155.88386780448133,10.0,500.0,24.0,100.0,1.0,0.0,1.0,1.0,24.0,100.0,3.32,5.434850503923728,1.0,19.0,24.0,100.0,24,100 +102.83789694176791,180.7668192114527,10.0,500.0,25.0,100.0,1.0,0.0,1.0,1.0,25.0,100.0,4.21,6.182709761908606,1.0,19.0,25.0,100.0,25,100 +117.55789694176791,192.1713736274484,10.0,500.0,26.0,100.0,1.0,0.0,1.0,1.0,26.0,100.0,4.69,6.526400232900217,1.0,19.0,26.0,100.0,26,100 +152.71545454545455,212.94624681208725,10.0,500.0,27.0,100.0,1.0,0.0,1.0,1.0,27.0,100.0,5.81,7.141001330345765,1.0,19.0,27.0,100.0,27,100 +194.41545454545457,226.93581718685016,10.0,500.0,28.0,100.0,1.0,0.0,1.0,1.0,28.0,100.0,7.2,7.57495874576225,1.0,19.0,28.0,100.0,28,100 +88.90545454545455,168.1437302915682,10.0,500.0,29.0,100.0,1.0,0.0,1.0,1.0,29.0,100.0,3.64,5.583045763738642,1.0,17.0,29.0,100.0,29,100 +36.98,96.98432656878119,10.0,500.0,30.0,100.0,1.0,0.0,1.0,1.0,30.0,100.0,1.86,3.17811264746862,1.0,17.0,30.0,100.0,30,100 +40.07681818181818,102.86293610314813,10.0,500.0,31.0,100.0,1.0,0.0,1.0,1.0,31.0,100.0,2.09,3.5639725026997615,1.0,17.0,31.0,100.0,31,100 +47.60067099567099,113.44508027206588,10.0,500.0,32.0,100.0,1.0,0.0,1.0,1.0,32.0,100.0,2.63,5.096381068954715,1.0,36.0,32.0,100.0,32,100 +62.49206744868035,137.1432732098304,10.0,500.0,33.0,100.0,1.0,0.0,1.0,1.0,33.0,100.0,2.94,4.851432778056397,1.0,19.0,33.0,100.0,33,100 +65.17206674534488,142.91149992727807,10.0,500.0,34.0,100.0,1.0,0.0,1.0,1.0,34.0,100.0,2.94,4.851432778056397,1.0,19.0,34.0,100.0,34,100 +89.68102642276422,170.63126939729025,10.0,500.0,35.0,100.0,1.0,0.0,1.0,1.0,35.0,100.0,3.74,5.708975389682461,1.0,19.0,35.0,100.0,35,100 
+118.39764979938762,191.00969655521547,10.0,500.0,36.0,100.0,1.0,0.0,1.0,1.0,36.0,100.0,4.82,6.573248816224745,1.0,24.0,36.0,100.0,36,100 +143.0576497993876,206.31426632690648,10.0,500.0,37.0,100.0,1.0,0.0,1.0,1.0,37.0,100.0,5.58,6.958706776406086,1.0,24.0,37.0,100.0,37,100 +193.2209642857143,225.84944970149374,10.0,500.0,38.0,100.0,1.0,0.0,1.0,1.0,38.0,100.0,7.07,7.369199413776235,1.0,19.0,38.0,100.0,38,100 +257.26763095238096,234.2510546180612,10.0,500.0,39.0,100.0,1.0,0.0,1.0,1.0,39.0,100.0,9.11,7.574820129877673,1.0,19.0,39.0,100.0,39,100 +196.10763095238093,225.7135521136671,10.0,500.0,40.0,100.0,1.0,0.0,1.0,1.0,40.0,100.0,7.05,7.189401922274201,1.0,17.0,40.0,100.0,40,100 +108.21763095238094,181.2654541730957,10.0,500.0,41.0,100.0,1.0,0.0,1.0,1.0,41.0,100.0,4.11,5.544177125597631,1.0,17.0,41.0,100.0,41,100 +140.77525,207.17786559786617,10.0,500.0,42.0,100.0,1.0,0.0,1.0,1.0,42.0,100.0,4.81,5.962709115829817,1.0,17.0,42.0,100.0,42,100 +170.17525,220.85000179689268,10.0,500.0,43.0,100.0,1.0,0.0,1.0,1.0,43.0,100.0,5.65,6.340938416354476,1.0,17.0,43.0,100.0,43,100 +217.285,234.79244296825226,10.0,500.0,44.0,100.0,1.0,0.0,1.0,1.0,44.0,100.0,6.92,6.685327217122586,1.0,17.0,44.0,100.0,44,100 +253.275,240.24580511426208,10.5,500.0,45.0,100.0,1.0,0.0,1.0,1.0,45.0,100.0,7.82,6.738516157137266,1.0,15.0,45.0,100.0,45,100 +253.275,240.24580511426208,10.5,500.0,46.0,100.0,1.0,0.0,1.0,1.0,46.0,100.0,7.8,6.720119046564578,1.0,15.0,46.0,100.0,46,100 +253.275,240.24580511426208,10.5,500.0,47.0,100.0,1.0,0.0,1.0,1.0,47.0,100.0,7.76,6.682993341310462,1.0,15.0,47.0,100.0,47,100 +243.495,239.95599070454568,10.5,500.0,48.0,100.0,1.0,0.0,1.0,1.0,48.0,100.0,7.42,6.607843823820295,1.0,15.0,48.0,100.0,48,100 +236.375,238.25207632883286,10.5,500.0,49.0,100.0,1.0,0.0,1.0,1.0,49.0,100.0,7.08,6.383854634936482,1.0,15.0,49.0,100.0,49,100 +231.485,237.80895120873814,10.5,500.0,50.0,100.0,1.0,0.0,1.0,1.0,50.0,100.0,6.82,6.2263633045301825,1.0,15.0,50.0,100.0,50,100 +226.6,237.25968894862856,10.5,500.0,51.0,100.0,1.0,0.0,1.0,1.0,51.0,100.0,6.44,5.903083939772498,1.0,15.0,51.0,100.0,51,100 +304.605,236.36716454490877,10.5,500.0,52.0,100.0,1.0,0.0,1.0,1.0,52.0,100.0,8.27,5.798025525987274,1.0,15.0,52.0,100.0,52,100 +312.74,236.65804951448408,11.0,500.0,53.0,100.0,1.0,0.0,1.0,1.0,53.0,100.0,8.37,5.8114628106871695,1.0,13.0,53.0,100.0,53,100 +302.96,238.8116379073683,11.0,500.0,54.0,100.0,1.0,0.0,1.0,1.0,54.0,100.0,8.13,5.8628576649958,1.0,13.0,54.0,100.0,54,100 +298.07,239.7316105564721,11.0,500.0,55.0,100.0,1.0,0.0,1.0,1.0,55.0,100.0,8.01,5.884717495343341,1.0,13.0,55.0,100.0,55,100 +298.07,239.7316105564721,11.0,500.0,56.0,100.0,1.0,0.0,1.0,1.0,56.0,100.0,8.01,5.884717495343341,1.0,13.0,56.0,100.0,56,100 +298.07,239.7316105564721,11.0,500.0,57.0,100.0,1.0,0.0,1.0,1.0,57.0,100.0,8.01,5.884717495343341,1.0,13.0,57.0,100.0,57,100 +273.62,242.80674537582354,11.0,500.0,58.0,100.0,1.0,0.0,1.0,1.0,58.0,100.0,7.41,5.956668531990008,1.0,13.0,58.0,100.0,58,100 +268.73,243.12222666798695,11.0,500.0,59.0,100.0,1.0,0.0,1.0,1.0,59.0,100.0,7.29,5.963715284954506,1.0,13.0,59.0,100.0,59,100 +258.95,243.4574860216872,11.0,500.0,60.0,100.0,1.0,0.0,1.0,1.0,60.0,100.0,7.05,5.970552738231195,1.0,13.0,60.0,100.0,60,100 +254.06,243.47767125549726,11.0,500.0,61.0,100.0,1.0,0.0,1.0,1.0,61.0,100.0,6.93,5.970351748431578,1.0,13.0,61.0,100.0,61,100 +254.06,243.47767125549726,11.0,500.0,62.0,100.0,1.0,0.0,1.0,1.0,62.0,100.0,6.93,5.970351748431578,1.0,13.0,62.0,100.0,62,100 
+249.17,243.39963249766836,11.0,500.0,63.0,100.0,1.0,0.0,1.0,1.0,63.0,100.0,6.81,5.967738265038104,1.0,13.0,63.0,100.0,63,100 +239.39,242.94838525909162,11.0,500.0,64.0,100.0,1.0,0.0,1.0,1.0,64.0,100.0,6.57,5.955258180801232,1.0,13.0,64.0,100.0,64,100 +234.5,242.57462769218054,11.0,500.0,65.0,100.0,1.0,0.0,1.0,1.0,65.0,100.0,6.45,5.945376354781924,1.0,13.0,65.0,100.0,65,100 +149.3,217.96391903248573,10.0,500.0,66.0,100.0,1.0,0.0,1.0,1.0,66.0,100.0,4.39,5.28563146653264,1.0,13.0,66.0,100.0,66,100 +33.59,94.46344213503973,10.0,500.0,67.0,100.0,1.0,0.0,1.0,1.0,67.0,100.0,1.58,2.241338885577101,1.0,13.0,67.0,100.0,67,100 +35.71227272727273,97.87755106393256,10.0,500.0,68.0,100.0,1.0,0.0,1.0,1.0,68.0,100.0,1.83,2.884631692261596,1.0,18.0,68.0,100.0,68,100 +45.29785714285714,117.2624525728079,10.0,500.0,69.0,100.0,1.0,0.0,1.0,1.0,69.0,100.0,2.07,3.277971933986013,1.0,18.0,69.0,100.0,69,100 +51.609809384164215,125.98250410865477,10.0,500.0,70.0,100.0,1.0,0.0,1.0,1.0,70.0,100.0,2.3,3.5846896657869842,1.0,18.0,70.0,100.0,70,100 +61.31415843139289,140.4831163683442,10.0,500.0,71.0,100.0,1.0,0.0,1.0,1.0,71.0,100.0,2.54,3.8792267270681666,1.0,18.0,71.0,100.0,71,100 +71.1025660888011,153.0694341231839,10.0,500.0,72.0,100.0,1.0,0.0,1.0,1.0,72.0,100.0,2.73,4.04439117791541,1.0,18.0,72.0,100.0,72,100 +91.25386738351256,174.59798380921504,10.0,500.0,73.0,100.0,1.0,0.0,1.0,1.0,73.0,100.0,3.1,4.4732538492690095,1.0,18.0,73.0,100.0,73,100 +114.78322222222221,195.039538558781,10.0,500.0,74.0,100.0,1.0,0.0,1.0,1.0,74.0,100.0,3.57,4.846142796080197,1.0,18.0,74.0,100.0,74,100 +158.65488888888888,219.75779746735657,10.0,500.0,75.0,100.0,1.0,0.0,1.0,1.0,75.0,100.0,4.57,5.378206020598318,1.0,18.0,75.0,100.0,75,100 +173.095,226.577985415618,10.5,500.0,76.0,100.0,1.0,0.0,1.0,1.0,76.0,100.0,4.65,5.183386923624359,1.0,13.0,76.0,100.0,76,100 +163.785,222.97835382610572,10.5,500.0,77.0,100.0,1.0,0.0,1.0,1.0,77.0,100.0,4.12,4.701659281572836,1.0,13.0,77.0,100.0,77,100 +154.005,218.70014283260087,10.5,500.0,78.0,100.0,1.0,0.0,1.0,1.0,78.0,100.0,3.53,3.98862131569293,1.0,13.0,78.0,100.0,78,100 +139.325,211.28004372159717,10.0,500.0,79.0,100.0,1.0,0.0,1.0,1.0,79.0,100.0,2.99,3.2480609600190697,1.0,13.0,79.0,100.0,79,100 +173.61,226.72227041029737,10.5,500.0,80.0,100.0,1.0,0.0,1.0,1.0,80.0,100.0,3.45,3.3626626354720752,1.0,10.0,80.0,100.0,80,100 +183.9,232.60307392637785,11.0,500.0,81.0,100.0,1.0,0.0,1.0,1.0,81.0,100.0,3.5,3.3391615714128,1.0,8.0,81.0,100.0,81,100 +179.01,231.0411865880194,11.0,500.0,82.0,100.0,1.0,0.0,1.0,1.0,82.0,100.0,3.43,3.317393555187566,1.0,8.0,82.0,100.0,82,100 +169.53,227.61197046728452,11.0,500.0,83.0,100.0,1.0,0.0,1.0,1.0,83.0,100.0,3.29,3.2689294883799493,1.0,8.0,83.0,100.0,83,100 +150.06,219.25819574191522,11.0,500.0,84.0,100.0,1.0,0.0,1.0,1.0,84.0,100.0,3.01,3.151174384257399,1.0,8.0,84.0,100.0,84,100 +135.39,211.5782075262006,11.0,500.0,85.0,100.0,1.0,0.0,1.0,1.0,85.0,100.0,2.8,3.0430248109405875,1.0,8.0,85.0,100.0,85,100 +135.39,211.5782075262006,11.0,500.0,86.0,100.0,1.0,0.0,1.0,1.0,86.0,100.0,2.8,3.0430248109405875,1.0,8.0,86.0,100.0,86,100 +120.72,202.5490103653928,11.0,500.0,87.0,100.0,1.0,0.0,1.0,1.0,87.0,100.0,2.59,2.915801776527341,1.0,8.0,87.0,100.0,87,100 +115.83,199.20853671466992,11.0,500.0,88.0,100.0,1.0,0.0,1.0,1.0,88.0,100.0,2.52,2.868727941091661,1.0,8.0,88.0,100.0,88,100 +101.16,188.07156722907376,11.0,500.0,89.0,100.0,1.0,0.0,1.0,1.0,89.0,100.0,2.31,2.7118075152930743,1.0,8.0,89.0,100.0,89,100 
+91.38,179.60060022171416,11.0,500.0,90.0,100.0,1.0,0.0,1.0,1.0,90.0,100.0,2.17,2.592508437787619,1.0,8.0,90.0,100.0,90,100 +91.38,179.60060022171416,11.0,500.0,91.0,100.0,1.0,0.0,1.0,1.0,91.0,100.0,2.17,2.592508437787619,1.0,8.0,91.0,100.0,91,100 +76.71,165.00268452361615,11.0,500.0,92.0,100.0,1.0,0.0,1.0,1.0,92.0,100.0,1.96,2.3871321706181248,1.0,8.0,92.0,100.0,92,100 +66.93,153.73062512069612,11.0,500.0,93.0,100.0,1.0,0.0,1.0,1.0,93.0,100.0,1.82,2.2288113423975573,1.0,8.0,93.0,100.0,93,100 +57.15,140.8865767204243,11.0,500.0,94.0,100.0,1.0,0.0,1.0,1.0,94.0,100.0,1.68,2.048804529475665,1.0,8.0,94.0,100.0,94,100 +67.36545454545454,153.06262342880802,10.5,500.0,95.0,100.0,1.0,0.0,1.0,1.0,95.0,100.0,1.89,2.3276382880507875,1.0,10.0,95.0,100.0,95,100 +77.59261904761905,164.66873084218776,10.5,500.0,96.0,100.0,1.0,0.0,1.0,1.0,96.0,100.0,2.05,2.515452245621054,1.0,10.0,96.0,100.0,96,100 +85.53333333333335,174.50776487022003,11.0,500.0,97.0,100.0,1.0,0.0,1.0,1.0,97.0,100.0,2.1,2.5278449319529073,1.0,8.0,97.0,100.0,97,100 +129.54333333333335,208.51030166397052,11.0,500.0,98.0,100.0,1.0,0.0,1.0,1.0,98.0,100.0,2.73,3.0028486475345373,1.0,8.0,98.0,100.0,98,100 +175.8,230.05951403930248,11.0,500.0,99.0,100.0,1.0,0.0,1.0,1.0,99.0,100.0,3.36,3.293994535514593,1.0,8.0,99.0,100.0,99,100 +210.03,239.19019440604168,11.0,500.0,100.0,100.0,1.0,0.0,1.0,1.0,100.0,100.0,3.85,3.424543765233553,1.0,8.0,100.0,100.0,100,100 +278.49,242.3859111004598,11.0,500.0,101.0,100.0,1.0,0.0,1.0,1.0,101.0,100.0,4.83,3.4700288183241357,1.0,8.0,101.0,100.0,101,100 +382.61,208.82700471921729,11.0,500.0,102.0,100.0,1.0,0.0,1.0,1.0,102.0,100.0,6.3,2.98496231131986,1.0,8.0,102.0,100.0,102,100 +490.19,68.4563649341681,11.0,500.0,103.0,100.0,1.0,0.0,1.0,1.0,103.0,100.0,7.84,0.9971960689854324,1.0,8.0,103.0,100.0,103,100 +490.19,68.4563649341681,11.0,500.0,104.0,100.0,1.0,0.0,1.0,1.0,104.0,100.0,7.84,0.9971960689854324,1.0,8.0,104.0,100.0,104,100 +490.19,68.4563649341681,11.0,500.0,105.0,100.0,1.0,0.0,1.0,1.0,105.0,100.0,7.84,0.9971960689854324,1.0,8.0,105.0,100.0,105,100 +490.19,68.4563649341681,11.0,500.0,106.0,100.0,1.0,0.0,1.0,1.0,106.0,100.0,7.84,0.9971960689854324,1.0,8.0,106.0,100.0,106,100 +490.19,68.4563649341681,11.0,500.0,107.0,100.0,1.0,0.0,1.0,1.0,107.0,100.0,7.84,0.9971960689854324,1.0,8.0,107.0,100.0,107,100 +490.19,68.4563649341681,11.0,500.0,108.0,100.0,1.0,0.0,1.0,1.0,108.0,100.0,7.84,0.9971960689854324,1.0,8.0,108.0,100.0,108,100 +490.19,68.4563649341681,11.0,500.0,109.0,100.0,1.0,0.0,1.0,1.0,109.0,100.0,7.84,0.9971960689854324,1.0,8.0,109.0,100.0,109,100 +490.19,68.4563649341681,11.0,500.0,110.0,100.0,1.0,0.0,1.0,1.0,110.0,100.0,7.84,0.9971960689854324,1.0,8.0,110.0,100.0,110,100 +490.19,68.4563649341681,11.0,500.0,111.0,100.0,1.0,0.0,1.0,1.0,111.0,100.0,7.84,0.9971960689854324,1.0,8.0,111.0,100.0,111,100 +490.19,68.4563649341681,11.0,500.0,112.0,100.0,1.0,0.0,1.0,1.0,112.0,100.0,7.84,0.9971960689854324,1.0,8.0,112.0,100.0,112,100 +490.19,68.4563649341681,11.0,500.0,113.0,100.0,1.0,0.0,1.0,1.0,113.0,100.0,7.84,0.9971960689854324,1.0,8.0,113.0,100.0,113,100 +490.19,68.4563649341681,11.0,500.0,114.0,100.0,1.0,0.0,1.0,1.0,114.0,100.0,7.84,0.9971960689854324,1.0,8.0,114.0,100.0,114,100 +490.19,68.4563649341681,11.0,500.0,115.0,100.0,1.0,0.0,1.0,1.0,115.0,100.0,7.84,0.9971960689854324,1.0,8.0,115.0,100.0,115,100 +490.19,68.4563649341681,11.0,500.0,116.0,100.0,1.0,0.0,1.0,1.0,116.0,100.0,7.84,0.9971960689854324,1.0,8.0,116.0,100.0,116,100 
+490.19,68.4563649341681,11.0,500.0,117.0,100.0,1.0,0.0,1.0,1.0,117.0,100.0,7.84,0.9971960689854324,1.0,8.0,117.0,100.0,117,100 +490.19,68.4563649341681,11.0,500.0,118.0,100.0,1.0,0.0,1.0,1.0,118.0,100.0,7.84,0.9971960689854324,1.0,8.0,118.0,100.0,118,100 +490.19,68.4563649341681,11.0,500.0,119.0,100.0,1.0,0.0,1.0,1.0,119.0,100.0,7.84,0.9971960689854324,1.0,8.0,119.0,100.0,119,100 +490.19,68.4563649341681,11.0,500.0,120.0,100.0,1.0,0.0,1.0,1.0,120.0,100.0,7.84,0.9971960689854324,1.0,8.0,120.0,100.0,120,100 +490.19,68.4563649341681,11.0,500.0,121.0,100.0,1.0,0.0,1.0,1.0,121.0,100.0,7.84,0.9971960689854324,1.0,8.0,121.0,100.0,121,100 +490.19,68.4563649341681,11.0,500.0,122.0,100.0,1.0,0.0,1.0,1.0,122.0,100.0,7.84,0.9971960689854324,1.0,8.0,122.0,100.0,122,100 +490.19,68.4563649341681,11.0,500.0,123.0,100.0,1.0,0.0,1.0,1.0,123.0,100.0,7.84,0.9971960689854324,1.0,8.0,123.0,100.0,123,100 +490.19,68.4563649341681,11.0,500.0,124.0,100.0,1.0,0.0,1.0,1.0,124.0,100.0,7.84,0.9971960689854324,1.0,8.0,124.0,100.0,124,100 +490.19,68.4563649341681,11.0,500.0,125.0,100.0,1.0,0.0,1.0,1.0,125.0,100.0,7.84,0.9971960689854324,1.0,8.0,125.0,100.0,125,100 +490.19,68.4563649341681,11.0,500.0,126.0,100.0,1.0,0.0,1.0,1.0,126.0,100.0,7.84,0.9971960689854324,1.0,8.0,126.0,100.0,126,100 +490.19,68.4563649341681,11.0,500.0,127.0,100.0,1.0,0.0,1.0,1.0,127.0,100.0,7.84,0.9971960689854324,1.0,8.0,127.0,100.0,127,100 +490.19,68.4563649341681,11.0,500.0,128.0,100.0,1.0,0.0,1.0,1.0,128.0,100.0,7.84,0.9971960689854324,1.0,8.0,128.0,100.0,128,100 +490.19,68.4563649341681,11.0,500.0,129.0,100.0,1.0,0.0,1.0,1.0,129.0,100.0,7.84,0.9971960689854324,1.0,8.0,129.0,100.0,129,100 +490.19,68.4563649341681,11.0,500.0,130.0,100.0,1.0,0.0,1.0,1.0,130.0,100.0,7.84,0.9971960689854324,1.0,8.0,130.0,100.0,130,100 +490.19,68.4563649341681,11.0,500.0,131.0,100.0,1.0,0.0,1.0,1.0,131.0,100.0,7.84,0.9971960689854324,1.0,8.0,131.0,100.0,131,100 +490.19,68.4563649341681,11.0,500.0,132.0,100.0,1.0,0.0,1.0,1.0,132.0,100.0,7.84,0.9971960689854324,1.0,8.0,132.0,100.0,132,100 +490.19,68.4563649341681,11.0,500.0,133.0,100.0,1.0,0.0,1.0,1.0,133.0,100.0,7.84,0.9971960689854324,1.0,8.0,133.0,100.0,133,100 +490.19,68.4563649341681,11.0,500.0,134.0,100.0,1.0,0.0,1.0,1.0,134.0,100.0,7.84,0.9971960689854324,1.0,8.0,134.0,100.0,134,100 +490.19,68.4563649341681,11.0,500.0,135.0,100.0,1.0,0.0,1.0,1.0,135.0,100.0,7.84,0.9971960689854324,1.0,8.0,135.0,100.0,135,100 +490.19,68.4563649341681,11.0,500.0,136.0,100.0,1.0,0.0,1.0,1.0,136.0,100.0,7.84,0.9971960689854324,1.0,8.0,136.0,100.0,136,100 +490.19,68.4563649341681,11.0,500.0,137.0,100.0,1.0,0.0,1.0,1.0,137.0,100.0,7.84,0.9971960689854324,1.0,8.0,137.0,100.0,137,100 +490.19,68.4563649341681,11.0,500.0,138.0,100.0,1.0,0.0,1.0,1.0,138.0,100.0,7.84,0.9971960689854324,1.0,8.0,138.0,100.0,138,100 +490.19,68.4563649341681,11.0,500.0,139.0,100.0,1.0,0.0,1.0,1.0,139.0,100.0,7.84,0.9971960689854324,1.0,8.0,139.0,100.0,139,100 +490.19,68.4563649341681,11.0,500.0,140.0,100.0,1.0,0.0,1.0,1.0,140.0,100.0,7.84,0.9971960689854324,1.0,8.0,140.0,100.0,140,100 +490.19,68.4563649341681,11.0,500.0,141.0,100.0,1.0,0.0,1.0,1.0,141.0,100.0,7.84,0.9971960689854324,1.0,8.0,141.0,100.0,141,100 +490.19,68.4563649341681,11.0,500.0,142.0,100.0,1.0,0.0,1.0,1.0,142.0,100.0,7.84,0.9971960689854324,1.0,8.0,142.0,100.0,142,100 +490.19,68.4563649341681,11.0,500.0,143.0,100.0,1.0,0.0,1.0,1.0,143.0,100.0,7.84,0.9971960689854324,1.0,8.0,143.0,100.0,143,100 
+490.19,68.4563649341681,11.0,500.0,144.0,100.0,1.0,0.0,1.0,1.0,144.0,100.0,7.84,0.9971960689854324,1.0,8.0,144.0,100.0,144,100 +490.19,68.4563649341681,11.0,500.0,145.0,100.0,1.0,0.0,1.0,1.0,145.0,100.0,7.84,0.9971960689854324,1.0,8.0,145.0,100.0,145,100 +490.19,68.4563649341681,11.0,500.0,146.0,100.0,1.0,0.0,1.0,1.0,146.0,100.0,7.84,0.9971960689854324,1.0,8.0,146.0,100.0,146,100 +490.19,68.4563649341681,11.0,500.0,147.0,100.0,1.0,0.0,1.0,1.0,147.0,100.0,7.84,0.9971960689854324,1.0,8.0,147.0,100.0,147,100 +490.19,68.4563649341681,11.0,500.0,148.0,100.0,1.0,0.0,1.0,1.0,148.0,100.0,7.84,0.9971960689854324,1.0,8.0,148.0,100.0,148,100 +490.19,68.4563649341681,11.0,500.0,149.0,100.0,1.0,0.0,1.0,1.0,149.0,100.0,7.84,0.9971960689854324,1.0,8.0,149.0,100.0,149,100 +490.19,68.4563649341681,11.0,500.0,150.0,100.0,1.0,0.0,1.0,1.0,150.0,100.0,7.84,0.9971960689854324,1.0,8.0,150.0,100.0,150,100 +490.19,68.4563649341681,11.0,500.0,151.0,100.0,1.0,0.0,1.0,1.0,151.0,100.0,7.84,0.9971960689854324,1.0,8.0,151.0,100.0,151,100 +490.19,68.4563649341681,11.0,500.0,152.0,100.0,1.0,0.0,1.0,1.0,152.0,100.0,7.84,0.9971960689854324,1.0,8.0,152.0,100.0,152,100 +490.19,68.4563649341681,11.0,500.0,153.0,100.0,1.0,0.0,1.0,1.0,153.0,100.0,7.84,0.9971960689854324,1.0,8.0,153.0,100.0,153,100 +490.19,68.4563649341681,11.0,500.0,154.0,100.0,1.0,0.0,1.0,1.0,154.0,100.0,7.84,0.9971960689854324,1.0,8.0,154.0,100.0,154,100 +490.19,68.4563649341681,11.0,500.0,155.0,100.0,1.0,0.0,1.0,1.0,155.0,100.0,7.84,0.9971960689854324,1.0,8.0,155.0,100.0,155,100 +490.19,68.4563649341681,11.0,500.0,156.0,100.0,1.0,0.0,1.0,1.0,156.0,100.0,7.84,0.9971960689854324,1.0,8.0,156.0,100.0,156,100 +490.19,68.4563649341681,11.0,500.0,157.0,100.0,1.0,0.0,1.0,1.0,157.0,100.0,7.84,0.9971960689854324,1.0,8.0,157.0,100.0,157,100 +490.19,68.4563649341681,11.0,500.0,158.0,100.0,1.0,0.0,1.0,1.0,158.0,100.0,7.84,0.9971960689854324,1.0,8.0,158.0,100.0,158,100 +490.19,68.4563649341681,11.0,500.0,159.0,100.0,1.0,0.0,1.0,1.0,159.0,100.0,7.84,0.9971960689854324,1.0,8.0,159.0,100.0,159,100 +490.19,68.4563649341681,11.0,500.0,160.0,100.0,1.0,0.0,1.0,1.0,160.0,100.0,7.84,0.9971960689854324,1.0,8.0,160.0,100.0,160,100 +490.19,68.4563649341681,11.0,500.0,161.0,100.0,1.0,0.0,1.0,1.0,161.0,100.0,7.84,0.9971960689854324,1.0,8.0,161.0,100.0,161,100 +490.19,68.4563649341681,11.0,500.0,162.0,100.0,1.0,0.0,1.0,1.0,162.0,100.0,7.84,0.9971960689854324,1.0,8.0,162.0,100.0,162,100 +490.19,68.4563649341681,11.0,500.0,163.0,100.0,1.0,0.0,1.0,1.0,163.0,100.0,7.84,0.9971960689854324,1.0,8.0,163.0,100.0,163,100 +490.19,68.4563649341681,11.0,500.0,164.0,100.0,1.0,0.0,1.0,1.0,164.0,100.0,7.84,0.9971960689854324,1.0,8.0,164.0,100.0,164,100 +490.19,68.4563649341681,11.0,500.0,165.0,100.0,1.0,0.0,1.0,1.0,165.0,100.0,7.84,0.9971960689854324,1.0,8.0,165.0,100.0,165,100 +490.19,68.4563649341681,11.0,500.0,166.0,100.0,1.0,0.0,1.0,1.0,166.0,100.0,7.84,0.9971960689854324,1.0,8.0,166.0,100.0,166,100 +490.19,68.4563649341681,11.0,500.0,167.0,100.0,1.0,0.0,1.0,1.0,167.0,100.0,7.84,0.9971960689854324,1.0,8.0,167.0,100.0,167,100 +490.19,68.4563649341681,11.0,500.0,168.0,100.0,1.0,0.0,1.0,1.0,168.0,100.0,7.84,0.9971960689854324,1.0,8.0,168.0,100.0,168,100 +490.19,68.4563649341681,11.0,500.0,169.0,100.0,1.0,0.0,1.0,1.0,169.0,100.0,7.84,0.9971960689854324,1.0,8.0,169.0,100.0,169,100 +490.19,68.4563649341681,11.0,500.0,170.0,100.0,1.0,0.0,1.0,1.0,170.0,100.0,7.84,0.9971960689854324,1.0,8.0,170.0,100.0,170,100 
+490.19,68.4563649341681,11.0,500.0,171.0,100.0,1.0,0.0,1.0,1.0,171.0,100.0,7.84,0.9971960689854324,1.0,8.0,171.0,100.0,171,100 +490.19,68.4563649341681,11.0,500.0,172.0,100.0,1.0,0.0,1.0,1.0,172.0,100.0,7.84,0.9971960689854324,1.0,8.0,172.0,100.0,172,100 +490.19,68.4563649341681,11.0,500.0,173.0,100.0,1.0,0.0,1.0,1.0,173.0,100.0,7.84,0.9971960689854324,1.0,8.0,173.0,100.0,173,100 +490.19,68.4563649341681,11.0,500.0,174.0,100.0,1.0,0.0,1.0,1.0,174.0,100.0,7.84,0.9971960689854324,1.0,8.0,174.0,100.0,174,100 +490.19,68.4563649341681,11.0,500.0,175.0,100.0,1.0,0.0,1.0,1.0,175.0,100.0,7.84,0.9971960689854324,1.0,8.0,175.0,100.0,175,100 +490.19,68.4563649341681,11.0,500.0,176.0,100.0,1.0,0.0,1.0,1.0,176.0,100.0,7.84,0.9971960689854324,1.0,8.0,176.0,100.0,176,100 +490.19,68.4563649341681,11.0,500.0,177.0,100.0,1.0,0.0,1.0,1.0,177.0,100.0,7.84,0.9971960689854324,1.0,8.0,177.0,100.0,177,100 +490.19,68.4563649341681,11.0,500.0,178.0,100.0,1.0,0.0,1.0,1.0,178.0,100.0,7.84,0.9971960689854324,1.0,8.0,178.0,100.0,178,100 +490.19,68.4563649341681,11.0,500.0,179.0,100.0,1.0,0.0,1.0,1.0,179.0,100.0,7.84,0.9971960689854324,1.0,8.0,179.0,100.0,179,100 +490.19,68.4563649341681,11.0,500.0,180.0,100.0,1.0,0.0,1.0,1.0,180.0,100.0,7.84,0.9971960689854324,1.0,8.0,180.0,100.0,180,100 +490.19,68.4563649341681,11.0,500.0,181.0,100.0,1.0,0.0,1.0,1.0,181.0,100.0,7.84,0.9971960689854324,1.0,8.0,181.0,100.0,181,100 +490.19,68.4563649341681,11.0,500.0,182.0,100.0,1.0,0.0,1.0,1.0,182.0,100.0,7.84,0.9971960689854324,1.0,8.0,182.0,100.0,182,100 +490.19,68.4563649341681,11.0,500.0,183.0,100.0,1.0,0.0,1.0,1.0,183.0,100.0,7.84,0.9971960689854324,1.0,8.0,183.0,100.0,183,100 +490.19,68.4563649341681,11.0,500.0,184.0,100.0,1.0,0.0,1.0,1.0,184.0,100.0,7.84,0.9971960689854324,1.0,8.0,184.0,100.0,184,100 +490.19,68.4563649341681,11.0,500.0,185.0,100.0,1.0,0.0,1.0,1.0,185.0,100.0,7.84,0.9971960689854324,1.0,8.0,185.0,100.0,185,100 +490.19,68.4563649341681,11.0,500.0,186.0,100.0,1.0,0.0,1.0,1.0,186.0,100.0,7.84,0.9971960689854324,1.0,8.0,186.0,100.0,186,100 +490.19,68.4563649341681,11.0,500.0,187.0,100.0,1.0,0.0,1.0,1.0,187.0,100.0,7.84,0.9971960689854324,1.0,8.0,187.0,100.0,187,100 +490.19,68.4563649341681,11.0,500.0,188.0,100.0,1.0,0.0,1.0,1.0,188.0,100.0,7.84,0.9971960689854324,1.0,8.0,188.0,100.0,188,100 +490.19,68.4563649341681,11.0,500.0,189.0,100.0,1.0,0.0,1.0,1.0,189.0,100.0,7.84,0.9971960689854324,1.0,8.0,189.0,100.0,189,100 +490.19,68.4563649341681,11.0,500.0,190.0,100.0,1.0,0.0,1.0,1.0,190.0,100.0,7.84,0.9971960689854324,1.0,8.0,190.0,100.0,190,100 +490.19,68.4563649341681,11.0,500.0,191.0,100.0,1.0,0.0,1.0,1.0,191.0,100.0,7.84,0.9971960689854324,1.0,8.0,191.0,100.0,191,100 +490.19,68.4563649341681,11.0,500.0,192.0,100.0,1.0,0.0,1.0,1.0,192.0,100.0,7.84,0.9971960689854324,1.0,8.0,192.0,100.0,192,100 +490.19,68.4563649341681,11.0,500.0,193.0,100.0,1.0,0.0,1.0,1.0,193.0,100.0,7.84,0.9971960689854324,1.0,8.0,193.0,100.0,193,100 +490.19,68.4563649341681,11.0,500.0,194.0,100.0,1.0,0.0,1.0,1.0,194.0,100.0,7.84,0.9971960689854324,1.0,8.0,194.0,100.0,194,100 +490.19,68.4563649341681,11.0,500.0,195.0,100.0,1.0,0.0,1.0,1.0,195.0,100.0,7.84,0.9971960689854324,1.0,8.0,195.0,100.0,195,100 +490.19,68.4563649341681,11.0,500.0,196.0,100.0,1.0,0.0,1.0,1.0,196.0,100.0,7.84,0.9971960689854324,1.0,8.0,196.0,100.0,196,100 +490.19,68.4563649341681,11.0,500.0,197.0,100.0,1.0,0.0,1.0,1.0,197.0,100.0,7.84,0.9971960689854324,1.0,8.0,197.0,100.0,197,100 
+490.19,68.4563649341681,11.0,500.0,198.0,100.0,1.0,0.0,1.0,1.0,198.0,100.0,7.84,0.9971960689854324,1.0,8.0,198.0,100.0,198,100 +490.19,68.4563649341681,11.0,500.0,199.0,100.0,1.0,0.0,1.0,1.0,199.0,100.0,7.84,0.9971960689854324,1.0,8.0,199.0,100.0,199,100 +490.19,68.4563649341681,11.0,500.0,200.0,100.0,1.0,0.0,1.0,1.0,200.0,100.0,7.84,0.9971960689854324,1.0,8.0,200.0,100.0,200,100 diff --git a/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/pop-final.pkl b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/pop-final.pkl new file mode 100644 index 0000000000000000000000000000000000000000..00428cdffd5ac086b3206b81f4c95d15d8f17c7d Binary files /dev/null and b/results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/pop-final.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/conf.yml b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/conf.yml new file mode 100644 index 0000000000000000000000000000000000000000..9b31e24296a4c697d9fc9b368507486fcfce0cde --- /dev/null +++ b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/conf.yml @@ -0,0 +1,43 @@ +algorithm: + name: algo.eaMuPlusLambdaUCB + args: + mu: 100 + lambda_: 100 + simulation_budget: 5 + parallel_update: 16 + save_every: 10 + ngen: 200 + cxpb: 0.0 + mutpb: 1.0 + budget_scheduler: [[50, 10], [100, 20], [190, 100]] + +population: + init_size: 100 + +selection: + name: selTournament + args: + tournsize: 5 + +individual: Linear + +params: + env: "MountainCarContinuous-v0" + function_set: small + c: 0.0 + n_episodes: 1 + n_steps: 500 + gamma: 1.0 + regCalcSize: 4 + regConstSize: 10 + init_size_min: 2 + init_size_max: 5 + pConst: 0.3 + pBranch: 0.3 + pIns: 0.3 + pDel: 0.6 + pSwap: 0.1 + pMut: 0.5 + n_thread: 16 + +seed: 42 \ No newline at end of file diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-0.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6c2ce8d73199d67705af8cf5fe519558919df0d3 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-0.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-10.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-10.pkl new file mode 100644 index 0000000000000000000000000000000000000000..55393f65a10b47b442f7852168c1cdf2da1f9e53 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-10.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-100.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-100.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6d68ee83376a0fc1d0298e1c03e2104c4ef7ff93 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-100.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-110.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-110.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c28f0fd41c06bbbc818f29cc0c7a32ceeb4d4082 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-110.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-120.pkl 
b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-120.pkl new file mode 100644 index 0000000000000000000000000000000000000000..5e7ba133bd83ff1a09543cc0e2155601dd42aa96 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-120.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-130.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-130.pkl new file mode 100644 index 0000000000000000000000000000000000000000..608b52fe5a367ebfe974cce3ecc1b6ded96a1279 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-130.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-140.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-140.pkl new file mode 100644 index 0000000000000000000000000000000000000000..981fc6acd7fe78503c0158179e3cefe1109b610a Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-140.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-150.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-150.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ca8ceb825f1c66f877ddef1ea10db2903dafe9f1 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-150.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-160.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-160.pkl new file mode 100644 index 0000000000000000000000000000000000000000..66a15912d49230ddae14d84f7759a9abc6e7bd88 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-160.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-170.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-170.pkl new file mode 100644 index 0000000000000000000000000000000000000000..a360cf31fd73325deb8f7c71e13e9c99b44051a2 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-170.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-180.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-180.pkl new file mode 100644 index 0000000000000000000000000000000000000000..5b9f515415ccffd036216869eab259880b9b05a5 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-180.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-190.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-190.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b1e4a0058e6bc9cfc06c1e3f633495920c09f97f Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-190.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-20.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-20.pkl new file mode 100644 index 0000000000000000000000000000000000000000..54b1e3f48546a1ee0e7fbc8ae006de3ded0ca82d Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-20.pkl differ diff --git 
a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-200.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-200.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c0044d67c0a861a8f40aa8f345cc85ca3e68f3b7 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-200.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-30.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-30.pkl new file mode 100644 index 0000000000000000000000000000000000000000..94125280a13540a370875e31c109306466188415 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-30.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-40.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-40.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e5df101c45339e72e0c1310df84979f3c21789bf Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-40.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-50.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-50.pkl new file mode 100644 index 0000000000000000000000000000000000000000..4b39f572d9e8108680a90e19b42af175afc4d859 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-50.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-60.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-60.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8199cbd63b236dcffb765cb47fc6de368e0c2b3c Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-60.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-70.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-70.pkl new file mode 100644 index 0000000000000000000000000000000000000000..705c0574c5fbf1146a8e86b3a79e5fe4d407a952 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-70.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-80.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-80.pkl new file mode 100644 index 0000000000000000000000000000000000000000..612d6f8e44d503606a89f2fa7de89ade8c431040 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-80.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-90.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-90.pkl new file mode 100644 index 0000000000000000000000000000000000000000..40b96ca2c9e71fe4a77bb419c8055e50579b90c9 Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/data-90.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/hof-final.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/hof-final.pkl new file mode 100644 index 0000000000000000000000000000000000000000..86af0471e25df4b9b52ffe47062b5b42b47ea93b Binary files /dev/null and 
b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/hof-final.pkl differ diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/log.csv b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/log.csv new file mode 100644 index 0000000000000000000000000000000000000000..efec4dc57ebc0477e263e01b576b85bcf884fff7 --- /dev/null +++ b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/log.csv @@ -0,0 +1,202 @@ +fitness_avg,fitness_std,fitness_min,fitness_max,fitness_gen,fitness_nevals,complexity_avg,complexity_std,complexity_min,complexity_max,complexity_gen,complexity_nevals,size_avg,size_std,size_min,size_max,size_gen,size_nevals,effective_avg,effective_std,effective_min,effective_max,effective_gen,effective_nevals,gen,nevals +-496200000058445.25,1487805619006248.8,-4999999999999998.0,0.9457356846615036,0.0,100.0,1.68,2.7381745744199733,0.0,12.0,0.0,100.0,3.37,1.083097410208334,2.0,5.0,0.0,100.0,0.87,1.1459057552870566,0.0,5.0,0.0,100.0,0,100 +-500000082226.7605,4974938000145.846,-50000008187521.24,0.9490626224139714,1.0,100.0,1.79,2.187669993394799,0.0,12.0,1.0,100.0,3.14,1.2167168939404105,1.0,6.0,1.0,100.0,1.17,0.8491760712596651,0.0,4.0,1.0,100.0,1,100 +0.77743386973954,2.331282719441645,-22.4022682620019,1.0949646735850365,2.0,100.0,2.02,2.0638798414636446,1.0,12.0,2.0,100.0,2.71,1.402105559506844,1.0,6.0,2.0,100.0,1.4,0.7615773105863908,1.0,5.0,2.0,100.0,2,100 +1.2161732913720327,0.07575999296254775,1.0940124206114785,1.264356298094903,3.0,100.0,2.12,1.930181338631166,1.0,7.0,3.0,100.0,2.33,1.4075155416548693,1.0,7.0,3.0,100.0,1.39,0.6617401302626281,1.0,3.0,3.0,100.0,3,100 +3.3478762077648465,11.848444384374949,1.262012074418551,70.72098289846303,4.0,100.0,1.96,1.6365818036383029,1.0,6.0,4.0,100.0,2.14,1.303993865016243,1.0,6.0,4.0,100.0,1.31,0.4836320915737498,1.0,3.0,4.0,100.0,4,100 +4.819492557262065,14.82104768191537,1.2643179140263376,69.42016252233911,5.0,100.0,1.33,0.9061456836513652,1.0,5.0,5.0,100.0,2.07,1.4018202452525788,1.0,6.0,5.0,100.0,1.16,0.36660605559646725,1.0,2.0,5.0,100.0,5,100 +10.495116410028928,24.27602480701836,1.264356298094903,81.10046324320845,6.0,100.0,1.29,0.8636550237218563,1.0,7.0,6.0,100.0,2.51,1.757811138888362,1.0,6.0,6.0,100.0,1.19,0.4624932431938871,1.0,4.0,6.0,100.0,6,100 +27.2039311097472,35.05805078402687,1.5483080124396675,89.85653748762932,7.0,100.0,1.78,1.2536347155371852,1.0,7.0,7.0,100.0,3.48,1.9923855048659633,1.0,6.0,7.0,100.0,1.5,0.5916079783099616,1.0,4.0,7.0,100.0,7,100 +56.20280435500203,31.733662714148977,2.18990622495778,89.94104851205283,8.0,100.0,1.78,0.4377213725647858,1.0,3.0,8.0,100.0,4.89,1.4205280708243677,1.0,6.0,8.0,100.0,1.77,0.4208325082500163,1.0,2.0,8.0,100.0,8,100 +75.33038548149172,18.884853283188356,2.189929347170073,98.92155484275827,9.0,100.0,2.04,0.3720215047547655,1.0,3.0,9.0,100.0,5.25,1.0897247358851685,1.0,6.0,9.0,100.0,2.04,0.3720215047547655,1.0,3.0,9.0,100.0,9,100 +84.59220573048188,10.522730719753755,50.448203032469756,99.0833369237176,10.0,100.0,2.14,0.34698703145794946,2.0,3.0,10.0,100.0,4.99,0.9433451118228154,3.0,6.0,10.0,100.0,2.14,0.34698703145794946,2.0,3.0,10.0,100.0,10,100 +92.02114657549005,8.629221017410805,66.94614381822593,99.21404338634169,11.0,100.0,2.12,0.32496153618543844,2.0,3.0,11.0,100.0,4.66,1.0021975853094038,3.0,6.0,11.0,100.0,2.12,0.32496153618543844,2.0,3.0,11.0,100.0,11,100 
+93.77029837581149,16.74097264346835,2.189929347170073,99.16635095910924,12.0,100.0,2.13,0.5413871073455666,1.0,6.0,12.0,100.0,4.18,0.9937806599043875,3.0,6.0,12.0,100.0,2.1,0.3872983346207417,1.0,3.0,12.0,100.0,12,100 +95.14771004358673,11.562580034732747,2.189929347170073,98.82849814603155,13.0,100.0,2.14,0.510294032886923,1.0,6.0,13.0,100.0,4.15,0.8645808232895291,2.0,6.0,13.0,100.0,2.11,0.3433656942677879,1.0,3.0,13.0,100.0,13,100 +97.36637513662674,4.168458913646786,74.10030869006549,98.73055899331219,14.0,100.0,2.1,0.4582575694955841,2.0,6.0,14.0,100.0,3.78,0.756042326857432,3.0,6.0,14.0,100.0,2.07,0.2551470164434615,2.0,3.0,14.0,100.0,14,100 +98.25825510425081,2.073334281995328,79.11509192326099,99.07822086882764,15.0,100.0,2.05,0.40926763859362253,2.0,6.0,15.0,100.0,3.5,0.6855654600401044,2.0,6.0,15.0,100.0,2.02,0.13999999999999999,2.0,3.0,15.0,100.0,15,100 +96.72522117798857,10.347126948464089,2.189929347170073,98.75573334871737,16.0,100.0,2.07,0.5701754116059371,1.0,6.0,16.0,100.0,3.43,0.552358579185659,2.0,5.0,16.0,100.0,2.01,0.17291616465790585,1.0,3.0,16.0,100.0,16,100 +96.27609173817592,13.735863573778119,0.9956977240680063,98.89067122098751,17.0,100.0,2.13,0.7022107945624305,1.0,6.0,17.0,100.0,3.18,0.5173006862551024,2.0,5.0,17.0,100.0,2.04,0.24166091947189144,1.0,3.0,17.0,100.0,17,100 +97.94161591058477,2.029360366526146,91.5894595845951,98.85160447536562,18.0,100.0,2.11,0.5812916651733447,2.0,6.0,18.0,100.0,3.33,0.6641535966928133,2.0,5.0,18.0,100.0,2.05,0.2179449471770337,2.0,3.0,18.0,100.0,18,100 +98.24064017183035,1.9520287663718436,84.60686765833032,98.95948416423438,19.0,100.0,2.28,0.9064215354899728,2.0,6.0,19.0,100.0,3.46,0.6696267617113282,2.0,5.0,19.0,100.0,2.12,0.32496153618543844,2.0,3.0,19.0,100.0,19,100 +97.91479865810912,3.0806396531906337,79.27791882421525,98.85139513206518,20.0,100.0,2.34,0.9718024490605075,2.0,7.0,20.0,100.0,3.41,0.8257723657279892,2.0,5.0,20.0,100.0,2.18,0.43312815655415426,2.0,4.0,20.0,100.0,20,100 +98.37995611606027,1.797753093419521,82.51268441510257,98.92834902455098,21.0,100.0,2.12,0.604648658313239,2.0,7.0,21.0,100.0,3.39,0.881986394452885,2.0,5.0,21.0,100.0,2.06,0.27640549922170504,2.0,4.0,21.0,100.0,21,100 +98.33540435154848,3.2075198630625787,66.43964531152879,99.01731903037482,22.0,100.0,2.07,0.5339475629684997,2.0,7.0,22.0,100.0,3.68,0.9682974749528163,2.0,6.0,22.0,100.0,2.04,0.27999999999999997,2.0,4.0,22.0,100.0,22,100 +97.46506009550454,9.87775634412245,2.189929347170073,98.86346760287998,23.0,100.0,2.51,1.5066187308008618,1.0,9.0,23.0,100.0,3.64,1.0150862032359615,1.0,6.0,23.0,100.0,2.23,0.6760917097554148,1.0,5.0,23.0,100.0,23,100 +97.56758122452374,9.62291099662122,2.189929347170073,98.82269080209748,24.0,100.0,2.37,1.3315780112332887,1.0,8.0,24.0,100.0,3.55,1.0037429949942367,1.0,7.0,24.0,100.0,2.16,0.5953150426454885,1.0,5.0,24.0,100.0,24,100 +98.23489733221297,2.7100285653658696,79.23027862091116,98.77338770931573,25.0,100.0,2.36,1.2126005112979297,2.0,7.0,25.0,100.0,3.3,1.004987562112089,2.0,6.0,25.0,100.0,2.18,0.5546169849544819,2.0,4.0,25.0,100.0,25,100 +95.51103923161372,14.484645497230856,2.189929347170073,98.66994298390655,26.0,100.0,2.72,1.6618062462272787,1.0,7.0,26.0,100.0,3.45,1.0037429949942367,1.0,6.0,26.0,100.0,2.36,0.7939773296511682,1.0,4.0,26.0,100.0,26,100 +96.49307489496347,9.8150813061741,2.189929347170073,98.63183358493868,27.0,100.0,2.42,1.3504073459515837,1.0,11.0,27.0,100.0,3.46,1.033634364753804,2.0,6.0,27.0,100.0,2.24,0.6651315659326357,1.0,5.0,27.0,100.0,27,100 
+95.68596190532158,14.325843252883335,2.189929347170073,98.68466915543615,28.0,100.0,2.23,1.056929515152264,1.0,11.0,28.0,100.0,3.21,1.1427598172844546,2.0,6.0,28.0,100.0,2.13,0.5413871073455666,1.0,5.0,28.0,100.0,28,100 +97.36384327504702,3.200287495836238,87.43906595685081,98.84791229668139,29.0,100.0,2.39,1.5677691156544704,2.0,11.0,29.0,100.0,3.47,1.0904586191139947,2.0,6.0,29.0,100.0,2.21,0.637102817447859,2.0,5.0,29.0,100.0,29,100 +98.41424573798349,1.1105415173794346,87.43906595685081,98.89229662402771,30.0,100.0,2.49,1.2609123680890757,2.0,7.0,30.0,100.0,3.58,1.1931470990619724,2.0,6.0,30.0,100.0,2.24,0.5499090833947009,2.0,4.0,30.0,100.0,30,100 +97.20565083470925,9.838404956100964,2.189929347170073,98.72458247991801,31.0,100.0,2.01,0.2233830790368868,1.0,4.0,31.0,100.0,3.05,1.1346805717910216,2.0,6.0,31.0,100.0,2.0,0.1414213562373095,1.0,3.0,31.0,100.0,31,100 +97.90661646578532,2.0766696437579357,87.90995864859961,98.56963899997928,32.0,100.0,2.02,0.13999999999999999,2.0,3.0,32.0,100.0,2.72,0.8494704232638122,2.0,5.0,32.0,100.0,2.02,0.13999999999999999,2.0,3.0,32.0,100.0,32,100 +97.0820375975914,9.633455627056179,2.189929347170073,98.61153351689804,33.0,100.0,2.02,0.19899748742132398,1.0,3.0,33.0,100.0,2.52,0.7934733769950949,1.0,5.0,33.0,100.0,2.02,0.19899748742132398,1.0,3.0,33.0,100.0,33,100 +96.09661672744598,10.76400981959693,2.189929347170073,98.61063417943703,34.0,100.0,2.05,0.43301270189221935,1.0,6.0,34.0,100.0,2.67,0.938669270829721,1.0,5.0,34.0,100.0,2.02,0.19899748742132398,1.0,3.0,34.0,100.0,34,100 +98.21693761056201,0.6420108178515032,95.32144192656686,98.79355763437655,35.0,100.0,2.03,0.17058722109231983,2.0,3.0,35.0,100.0,3.11,1.0285426583277915,2.0,5.0,35.0,100.0,2.03,0.17058722109231983,2.0,3.0,35.0,100.0,35,100 +98.31130618539983,0.8904047924106429,92.63909321727594,98.90642583520076,36.0,100.0,2.04,0.19595917942265426,2.0,3.0,36.0,100.0,3.62,1.1115754585272202,2.0,5.0,36.0,100.0,2.04,0.19595917942265426,2.0,3.0,36.0,100.0,36,100 +97.55093152977759,9.585406042074776,2.189929347170073,98.7907141112416,37.0,100.0,2.01,0.17291616465790585,1.0,3.0,37.0,100.0,3.58,1.2262136844775466,2.0,5.0,37.0,100.0,2.01,0.17291616465790585,1.0,3.0,37.0,100.0,37,100 +97.62858052440072,3.9516297851352205,79.53478567938612,98.92510290292843,38.0,100.0,2.12,0.84,2.0,8.0,38.0,100.0,3.36,1.4249210504445502,2.0,6.0,38.0,100.0,2.04,0.27999999999999997,2.0,4.0,38.0,100.0,38,100 +97.89159744212822,2.5694786401534238,79.53478567938612,98.81697412086547,39.0,100.0,2.1,0.7000000000000002,2.0,7.0,39.0,100.0,3.36,1.4319217855734998,2.0,6.0,39.0,100.0,2.04,0.27999999999999997,2.0,4.0,39.0,100.0,39,100 +97.89910186846426,1.7476307899924048,90.32350854887633,98.88858051156086,40.0,100.0,2.1,0.7000000000000003,2.0,7.0,40.0,100.0,3.81,1.3978197308666094,2.0,6.0,40.0,100.0,2.04,0.27999999999999997,2.0,4.0,40.0,100.0,40,100 +96.85696983146292,10.716676090486297,2.189929347170073,98.83307695653791,41.0,100.0,2.0,0.1414213562373095,1.0,3.0,41.0,100.0,3.59,1.4703400967123217,2.0,6.0,41.0,100.0,2.0,0.1414213562373095,1.0,3.0,41.0,100.0,41,100 +96.91288386403183,10.178087943428364,2.189929347170073,98.804056838509,42.0,100.0,2.04,0.31368774282716244,2.0,5.0,42.0,100.0,2.93,1.3057947771376632,2.0,6.0,42.0,100.0,2.01,0.09949874371066199,2.0,3.0,42.0,100.0,42,100 +98.27690371546312,3.175926624637319,66.69613461327246,99.01203225222635,43.0,100.0,2.37,1.1717934971657762,2.0,8.0,43.0,100.0,3.02,1.3113351974228404,2.0,6.0,43.0,100.0,2.12,0.35440090293338705,2.0,4.0,43.0,100.0,43,100 
+98.15506313999131,3.1044767895382686,73.97407130282485,98.87678436499161,44.0,100.0,2.33,1.0300970828033638,2.0,6.0,44.0,100.0,3.09,1.4006784070585223,2.0,6.0,44.0,100.0,2.12,0.32496153618543844,2.0,3.0,44.0,100.0,44,100 +96.81753122549009,10.715926004608013,2.189929347170073,98.60549981944541,45.0,100.0,2.27,0.7463913182774836,2.0,6.0,45.0,100.0,3.66,1.524598307751914,2.0,6.0,45.0,100.0,2.18,0.38418745424597095,2.0,3.0,45.0,100.0,45,100 +98.18581412107207,1.5159230456164499,90.30707097635039,98.81035642220897,46.0,100.0,2.23,0.5450688029964658,2.0,6.0,46.0,100.0,3.17,1.5624019969265273,2.0,7.0,46.0,100.0,2.2,0.4,2.0,3.0,46.0,100.0,46,100 +98.52268371944601,0.04647336737850954,98.32251013010539,98.69636536643449,47.0,100.0,2.53,1.3073255141700557,2.0,12.0,47.0,100.0,3.29,1.6204628968291745,2.0,7.0,47.0,100.0,2.35,0.6062177826491072,2.0,6.0,47.0,100.0,47,100 +98.02316084984203,2.015303946740828,87.46651908173173,98.69493286011547,48.0,100.0,2.9,1.997498435543818,2.0,12.0,48.0,100.0,3.6,1.8110770276274832,2.0,7.0,48.0,100.0,2.51,0.8543418519538885,2.0,6.0,48.0,100.0,48,100 +97.31065036042686,9.645525871012946,2.189929347170073,98.60612281381425,49.0,100.0,2.85,2.0414455662593602,2.0,12.0,49.0,100.0,2.86,1.5100993344810134,2.0,7.0,49.0,100.0,2.39,0.8590110593001701,2.0,6.0,49.0,100.0,49,100 +94.23810049018526,7.934229342835094,77.63362668435498,98.64225233739576,50.0,100.0,3.67,2.5022190151943136,2.0,12.0,50.0,100.0,3.66,1.7788760496448313,2.0,7.0,50.0,100.0,2.86,1.1226753760548949,2.0,6.0,50.0,100.0,50,100 +96.46383987312767,5.673212176787577,74.62804314337463,98.57822456864949,51.0,100.0,4.06,2.5132449144482516,1.0,12.0,51.0,100.0,4.3,1.7578395831246945,1.0,7.0,51.0,100.0,3.16,1.1637869220780925,1.0,6.0,51.0,100.0,51,100 +98.2978904359466,2.403020642809364,74.41529885030158,98.86919734328778,52.0,100.0,3.61,2.3447601156621545,2.0,12.0,52.0,100.0,4.08,1.7302023003105735,2.0,7.0,52.0,100.0,2.97,1.1086478250553689,2.0,6.0,52.0,100.0,52,100 +98.52904404725132,0.04104960678114337,98.41342861824647,98.63734388284249,53.0,100.0,3.51,2.5079673044120807,2.0,12.0,53.0,100.0,4.06,1.9890701345100932,2.0,8.0,53.0,100.0,2.78,1.0254754994635416,2.0,6.0,53.0,100.0,53,100 +97.51881986796894,9.67742199058858,1.2332686853507586,98.58643359230847,54.0,100.0,3.34,2.593144808914458,2.0,13.0,54.0,100.0,3.76,1.8927229062913569,2.0,9.0,54.0,100.0,2.67,1.1229870880824941,2.0,7.0,54.0,100.0,54,100 +97.96085668958989,2.04547185549522,88.73612315397483,98.62055226011009,55.0,100.0,2.81,2.230224203975914,2.0,13.0,55.0,100.0,3.25,1.5580436450882884,2.0,9.0,55.0,100.0,2.38,0.9141115905621151,2.0,6.0,55.0,100.0,55,100 +97.2796827218972,3.91234840855023,73.88645655071984,98.66712779611737,56.0,100.0,3.3,2.98496231131986,2.0,14.0,56.0,100.0,3.57,1.5313719339206922,2.0,9.0,56.0,100.0,2.54,1.1438531374263043,2.0,7.0,56.0,100.0,56,100 +98.17797792641457,1.6669129703318342,85.83895532902628,98.69497468133186,57.0,100.0,3.86,3.6959978354972014,2.0,18.0,57.0,100.0,3.87,1.7358283325260018,2.0,10.0,57.0,100.0,2.82,1.2278436382536664,2.0,7.0,57.0,100.0,57,100 +98.30260063022415,1.4432699535628943,85.83895532902628,98.71187765827429,58.0,100.0,3.94,3.245982131805411,2.0,18.0,58.0,100.0,3.82,1.499199786552813,2.0,10.0,58.0,100.0,2.91,1.149739100839838,2.0,6.0,58.0,100.0,58,100 +98.39264858561477,1.0413693080386253,90.93408946518963,98.95688483134309,59.0,100.0,3.48,2.7874002224294956,2.0,18.0,59.0,100.0,3.74,1.4115239990875112,2.0,10.0,59.0,100.0,2.75,1.0428326807307104,2.0,6.0,59.0,100.0,59,100 
+98.41988817164298,1.3852750462971304,84.65115474852786,98.68000963498733,60.0,100.0,3.23,2.473277178158566,2.0,18.0,60.0,100.0,3.51,1.3819913169047047,2.0,10.0,60.0,100.0,2.59,0.9601562372864115,2.0,6.0,60.0,100.0,60,100 +97.9367791893768,2.9343096849717623,78.93627075143287,98.86031128172796,61.0,100.0,3.74,2.6480936539329574,2.0,18.0,61.0,100.0,3.96,1.4207040508142434,2.0,10.0,61.0,100.0,2.83,1.0775435026020992,2.0,6.0,61.0,100.0,61,100 +96.43478782362835,13.526944607483358,2.189929347170073,98.79672777607621,62.0,100.0,3.5,2.896549671592048,1.0,18.0,62.0,100.0,3.91,1.3348782716038192,1.0,7.0,62.0,100.0,2.6,0.9899494936611665,1.0,6.0,62.0,100.0,62,100 +98.37798774142377,1.9337843059086453,79.15651832862589,98.92576079878181,63.0,100.0,3.38,2.7668754941269045,2.0,18.0,63.0,100.0,4.11,1.2320308437697491,2.0,6.0,63.0,100.0,2.48,0.8657944328765347,2.0,6.0,63.0,100.0,63,100 +97.25128294984192,9.94900590639582,2.189929347170073,99.07482705472285,64.0,100.0,3.37,2.8236678274896287,1.0,18.0,64.0,100.0,4.03,1.3073255141700555,2.0,6.0,64.0,100.0,2.43,0.8631917515824626,1.0,6.0,64.0,100.0,64,100 +98.64950848987318,0.09850824050559527,98.51408523512417,98.92511360131796,65.0,100.0,3.21,2.7651220587887257,1.0,18.0,65.0,100.0,4.1,1.2124355652982142,2.0,6.0,65.0,100.0,2.37,0.8678133439859057,1.0,6.0,65.0,100.0,65,100 +97.3158098989643,9.748207686335586,2.1899293435080693,98.71195542241867,66.0,100.0,3.25,3.4011027623404737,2.0,18.0,66.0,100.0,4.64,1.2289833196589774,2.0,6.0,66.0,100.0,2.38,0.9463614531456784,2.0,6.0,66.0,100.0,66,100 +97.76213915377632,5.289737436808154,49.56758515547655,98.75811396197265,67.0,100.0,3.43,4.2219782093232086,2.0,18.0,67.0,100.0,5.09,1.2007914056987583,2.0,6.0,67.0,100.0,2.4,1.1224972160321824,2.0,6.0,67.0,100.0,67,100 +97.66957170007973,9.596332597566342,2.188077711626875,98.73775629542234,68.0,100.0,2.99,3.4102639194056517,2.0,18.0,68.0,100.0,5.37,1.1194194924156002,2.0,7.0,68.0,100.0,2.26,0.8674099376880576,2.0,6.0,68.0,100.0,68,100 +96.70367606534482,13.502236119484142,2.189929347170073,98.93370922310747,69.0,100.0,2.4,2.370653918225939,1.0,18.0,69.0,100.0,5.39,1.1304423912787416,2.0,7.0,69.0,100.0,2.09,0.6015812497078014,1.0,6.0,69.0,100.0,69,100 +98.45152510598591,1.9086402472467023,79.4843826601645,98.93979821249935,70.0,100.0,2.16,1.5919798993705918,2.0,18.0,70.0,100.0,5.4,0.9695359714832658,2.0,7.0,70.0,100.0,2.04,0.39799497484264795,2.0,6.0,70.0,100.0,70,100 +98.34067917088284,3.2018981946151635,66.49205953669895,98.86122204861503,71.0,100.0,2.4,2.297825058615211,2.0,18.0,71.0,100.0,5.1,0.9643650760992956,2.0,7.0,71.0,100.0,2.1,0.5744562646538027,2.0,6.0,71.0,100.0,71,100 +98.05694112077013,4.928978020583851,50.02637578562106,98.85970953064033,72.0,100.0,2.92,3.242468195680568,2.0,18.0,72.0,100.0,4.71,1.1251222155837115,2.0,7.0,72.0,100.0,2.23,0.810617048920142,2.0,6.0,72.0,100.0,72,100 +98.26369280549223,3.289816546136029,66.37970636120939,98.83661851227667,73.0,100.0,3.92,4.914631217090454,2.0,18.0,73.0,100.0,4.94,1.1473447607410774,2.0,7.0,73.0,100.0,2.48,1.2286578042726135,2.0,6.0,73.0,100.0,73,100 +98.43992842149902,2.058833743402987,78.93961815293437,98.9737617122312,74.0,100.0,4.08,5.158837078257076,1.0,18.0,74.0,100.0,4.85,1.2359207094308278,2.0,7.0,74.0,100.0,2.51,1.3076314465475354,1.0,6.0,74.0,100.0,74,100 +98.65225370353308,0.6296121331963848,92.40445191575495,98.78802329493418,75.0,100.0,3.3,4.129164564412516,1.0,18.0,75.0,100.0,4.86,0.9057593499379403,2.0,6.0,75.0,100.0,2.31,1.0458967444255671,1.0,6.0,75.0,100.0,75,100 
+98.30323527724295,1.6819678971368832,84.61930313155311,98.73869010684285,76.0,100.0,3.03,3.47406102421935,1.0,18.0,76.0,100.0,4.7,1.0816653826391966,2.0,6.0,76.0,100.0,2.25,0.8760707733967615,1.0,6.0,76.0,100.0,76,100 +98.7286919916604,0.05958300913810108,98.39967007633486,98.94792709609601,77.0,100.0,3.4,3.893584466786357,2.0,18.0,77.0,100.0,4.49,1.299961537892564,2.0,6.0,77.0,100.0,2.35,0.9733961166965892,2.0,6.0,77.0,100.0,77,100 +97.41899694160061,9.757418938416851,2.189929347170073,98.86727469154805,78.0,100.0,4.04,4.588943233468899,1.0,18.0,78.0,100.0,4.73,1.0569295151522642,2.0,6.0,78.0,100.0,2.51,1.1618519699169942,1.0,6.0,78.0,100.0,78,100 +97.51557542207003,3.2389752333482873,79.53514867539454,98.71904167678676,79.0,100.0,4.85,5.227571137727348,2.0,18.0,79.0,100.0,5.09,0.9807650075323855,2.0,6.0,79.0,100.0,2.72,1.312097557348538,2.0,6.0,79.0,100.0,79,100 +98.14147974166312,4.866532482713688,50.29618918451126,99.08456113274892,80.0,100.0,5.3,5.812916651733446,2.0,18.0,80.0,100.0,5.51,0.7415524256584964,3.0,7.0,80.0,100.0,2.83,1.4632498077908638,2.0,6.0,80.0,100.0,80,100 +98.66511370154936,0.7535313731487092,91.22787547937445,99.07556369922563,81.0,100.0,4.86,5.442462677869274,2.0,18.0,81.0,100.0,5.59,0.788606365685695,3.0,7.0,81.0,100.0,2.72,1.3717142559585798,2.0,6.0,81.0,100.0,81,100 +98.31289026954892,2.3298840612380878,78.91190642322844,98.82588382655638,82.0,100.0,5.62,5.4950523200420935,2.0,18.0,82.0,100.0,5.78,0.5582114294781145,4.0,7.0,82.0,100.0,2.93,1.4018202452525788,2.0,6.0,82.0,100.0,82,100 +98.12660732901556,2.7414341660246415,79.56897046608108,98.94374564885163,83.0,100.0,6.95,6.158530668917709,2.0,18.0,83.0,100.0,5.71,0.6212084996198941,4.0,7.0,83.0,100.0,3.27,1.5802215034608282,2.0,6.0,83.0,100.0,83,100 +97.31764878636757,9.700628245728629,2.189929347170073,98.81467314223443,84.0,100.0,7.63,5.9340626892543025,2.0,18.0,84.0,100.0,5.48,0.7678541528180988,4.0,7.0,84.0,100.0,3.43,1.5116547224812948,2.0,6.0,84.0,100.0,84,100 +97.44587973502612,3.814558047542658,73.97403125529247,98.75931054809229,85.0,100.0,7.17,5.734204391194998,2.0,18.0,85.0,100.0,5.32,0.8704022058795577,3.0,7.0,85.0,100.0,3.32,1.4688771221582833,2.0,6.0,85.0,100.0,85,100 +98.37222607103382,1.2087118841421773,90.92310177088586,98.67693362536421,86.0,100.0,6.57,5.463066904221474,2.0,15.0,86.0,100.0,5.2,1.0583005244258363,3.0,7.0,86.0,100.0,3.21,1.4716996976285617,2.0,6.0,86.0,100.0,86,100 +98.68707726058206,0.08867872240841478,98.4760149390029,98.95848647718533,87.0,100.0,6.03,5.408243707526501,2.0,15.0,87.0,100.0,5.07,1.2268251709188231,3.0,7.0,87.0,100.0,3.09,1.4972975656161336,2.0,6.0,87.0,100.0,87,100 +98.72191160349239,0.07781625974166427,98.60646204353189,98.99007546823495,88.0,100.0,6.67,5.528209475047052,2.0,15.0,88.0,100.0,5.57,1.18536914081648,3.0,7.0,88.0,100.0,3.4,1.6733200530681511,2.0,6.0,88.0,100.0,88,100 +98.70987276886957,0.04823385326589534,98.48254830031328,98.83480043752391,89.0,100.0,7.08,5.576163555707454,2.0,15.0,89.0,100.0,5.64,1.1791522378386943,3.0,7.0,89.0,100.0,3.6,1.760681686165901,2.0,6.0,89.0,100.0,89,100 +98.70347892418329,0.056758492139928146,98.35611873860039,98.86721898522171,90.0,100.0,7.58,5.365034948628014,2.0,15.0,90.0,100.0,5.72,1.0684568311354465,3.0,7.0,90.0,100.0,3.77,1.6784218778364397,2.0,6.0,90.0,100.0,90,100 +98.19856778437186,3.749831278965676,66.1948876464702,98.87458240401776,91.0,100.0,6.61,4.640894310367346,2.0,15.0,91.0,100.0,5.68,0.8231646250902672,3.0,7.0,91.0,100.0,3.52,1.513142425550219,2.0,6.0,91.0,100.0,91,100 
+98.61047620420797,1.2076765936777745,86.62018681192369,98.91294859591687,92.0,100.0,6.52,4.622726468221973,2.0,15.0,92.0,100.0,5.54,1.0041912168506553,3.0,7.0,92.0,100.0,3.58,1.6442627527253666,2.0,6.0,92.0,100.0,92,100 +98.74009388212248,0.05156773649734376,98.51373801173094,98.88535654856194,93.0,100.0,6.44,4.831811254591802,2.0,16.0,93.0,100.0,5.72,1.1142710621747296,3.0,8.0,93.0,100.0,3.56,1.6811900546934009,2.0,7.0,93.0,100.0,93,100 +98.75707377032165,0.051754138603060384,98.58853899975641,98.91859132541516,94.0,100.0,6.11,4.392937513782776,2.0,15.0,94.0,100.0,5.54,1.2682271089990151,3.0,7.0,94.0,100.0,3.47,1.5129771974487918,2.0,6.0,94.0,100.0,94,100 +98.77548048903776,0.05469565396125966,98.60115498655357,98.94558985676355,95.0,100.0,6.58,4.322453007263353,2.0,12.0,95.0,100.0,5.69,1.083466658462548,3.0,7.0,95.0,100.0,3.64,1.4527215837867902,2.0,6.0,95.0,100.0,95,100 +98.84960566362595,0.08033501934779731,98.65855721225728,99.04082577681271,96.0,100.0,5.44,4.003298639871875,2.0,12.0,96.0,100.0,5.39,1.156676272774712,3.0,8.0,96.0,100.0,3.31,1.3243488966280752,2.0,6.0,96.0,100.0,96,100 +98.86717256371693,0.07340593303588798,98.52973247283316,99.02445754979132,97.0,100.0,5.31,3.579650820960056,2.0,12.0,97.0,100.0,5.43,1.2510395677195827,4.0,8.0,97.0,100.0,3.27,1.2558264211267418,2.0,6.0,97.0,100.0,97,100 +98.4356187712355,2.753263321011918,79.16734627327544,98.90628036653357,98.0,100.0,7.07,3.8138038753978947,2.0,13.0,98.0,100.0,5.95,1.2678722333105967,4.0,8.0,98.0,100.0,3.88,1.351147660324363,2.0,6.0,98.0,100.0,98,100 +98.70573276977329,1.0944415932871296,87.83010102740859,98.87111475381516,99.0,100.0,8.0,4.13763217311544,1.0,16.0,99.0,100.0,6.22,1.2615863030328125,3.0,9.0,99.0,100.0,4.3,1.5132745950421556,1.0,7.0,99.0,100.0,99,100 +98.10495673516544,5.405988054443105,50.27955592630635,98.92453481675045,100.0,100.0,9.15,3.7212229172679234,2.0,20.0,100.0,100.0,6.51,1.004937809021036,4.0,9.0,100.0,100.0,4.7,1.2922847983320085,2.0,8.0,100.0,100.0,100,100 +98.3605272468194,4.870991122790425,49.89519493363872,98.95937441335157,101.0,100.0,10.04,2.716321041408765,2.0,15.0,101.0,100.0,6.43,0.7906326580656784,4.0,9.0,101.0,100.0,4.93,0.9407975340103736,2.0,6.0,101.0,100.0,101,100 +98.6704390185327,1.7868152569178362,80.89771648701426,98.90756898415538,102.0,100.0,9.75,2.7726341266023544,2.0,15.0,102.0,100.0,6.36,0.6248199740725323,4.0,8.0,102.0,100.0,4.83,0.9386692708297208,2.0,6.0,102.0,100.0,102,100 +98.36681602585524,4.8442255920100425,50.172003604857274,99.05877092077722,103.0,100.0,8.57,3.5979299604077895,2.0,16.0,103.0,100.0,5.91,1.087152243248387,3.0,8.0,103.0,100.0,4.35,1.2678722333105965,2.0,7.0,103.0,100.0,103,100 +98.47799313857145,2.5463156567131233,74.75304176727106,98.84912701423018,104.0,100.0,10.22,2.133447913589643,2.0,16.0,104.0,100.0,5.93,0.5874521257089808,4.0,8.0,104.0,100.0,4.8,0.7348469228349535,2.0,7.0,104.0,100.0,104,100 +98.64715770562772,0.2614632910592656,98.10364573037873,98.91905016486952,105.0,100.0,9.08,2.8202127579315714,2.0,16.0,105.0,100.0,5.57,0.7778817390837761,4.0,7.0,105.0,100.0,4.46,1.071634265969505,2.0,7.0,105.0,100.0,105,100 +98.72397526924878,0.13335347251667856,98.10364573037873,98.89999694503737,106.0,100.0,8.81,3.0156093911513144,2.0,16.0,106.0,100.0,5.53,0.7543871685016919,4.0,7.0,106.0,100.0,4.44,1.0892199043352082,2.0,7.0,106.0,100.0,106,100 
+98.79661230779884,0.03735545682918595,98.65686522200764,98.89293975070038,107.0,100.0,7.92,2.5364542179980303,2.0,16.0,107.0,100.0,5.31,0.9348261870529729,3.0,7.0,107.0,100.0,4.11,0.9684523736353793,2.0,7.0,107.0,100.0,107,100 +98.64683746766349,1.238711750975379,86.37360206749715,98.97304876149613,108.0,100.0,7.91,2.6423285185608543,2.0,12.0,108.0,100.0,5.58,1.1240996397117118,3.0,7.0,108.0,100.0,4.1,0.9643650760992956,2.0,6.0,108.0,100.0,108,100 +98.70447373822131,0.6248558101730224,92.58832586622886,98.82767164708983,109.0,100.0,8.63,3.022101917540175,2.0,12.0,109.0,100.0,5.37,1.0065286881157438,3.0,7.0,109.0,100.0,4.31,1.0648474069086142,2.0,6.0,109.0,100.0,109,100 +96.9240125079565,10.123881540394265,2.1893381872022273,98.9049999369696,110.0,100.0,7.26,2.9685686786732757,2.0,12.0,110.0,100.0,4.93,1.0700934538627922,3.0,7.0,110.0,100.0,3.84,1.0556514576317317,2.0,6.0,110.0,100.0,110,100 +98.51214093239105,2.169540128809396,82.47262458144753,98.92128695588728,111.0,100.0,6.45,2.7654113618049667,2.0,12.0,111.0,100.0,4.34,1.041345283755585,3.0,7.0,111.0,100.0,3.66,1.041345283755585,2.0,6.0,111.0,100.0,111,100 +97.27960519019935,4.225708339796168,79.2468857745331,99.06447555212893,112.0,100.0,5.35,3.179229466395906,2.0,12.0,112.0,100.0,4.33,1.2494398745037714,3.0,7.0,112.0,100.0,3.28,1.217209924376235,2.0,6.0,112.0,100.0,112,100 +97.6630312207359,3.285319291994652,74.45203189935515,98.78811726137158,113.0,100.0,6.14,3.4957688710782926,2.0,12.0,113.0,100.0,4.72,1.2655433615645102,2.0,7.0,113.0,100.0,3.53,1.3375724279454928,2.0,6.0,113.0,100.0,113,100 +98.73332701455935,0.4264978589875532,94.52428234041714,98.96457283145143,114.0,100.0,6.67,3.171923706522589,2.0,12.0,114.0,100.0,5.04,1.0287856919689347,2.0,7.0,114.0,100.0,3.72,1.208966500776593,2.0,6.0,114.0,100.0,114,100 +98.80194328697635,0.04471913774447178,98.67537293969485,98.96152480732526,115.0,100.0,6.73,3.3907373829301495,2.0,16.0,115.0,100.0,5.07,1.2021231218140676,2.0,7.0,115.0,100.0,3.58,1.274205634895718,2.0,7.0,115.0,100.0,115,100 +98.41210423169709,2.202760002200288,82.4012020002491,99.1702459818971,116.0,100.0,6.38,3.3757369565770374,2.0,16.0,116.0,100.0,5.18,1.219672087079146,2.0,7.0,116.0,100.0,3.5,1.2288205727444508,2.0,7.0,116.0,100.0,116,100 +97.47700176025847,9.929796275651949,2.1892195179071905,98.98576162895039,117.0,100.0,5.05,3.226065715387707,2.0,11.0,117.0,100.0,5.34,1.2978443666326098,2.0,7.0,117.0,100.0,2.98,1.122319027727856,2.0,5.0,117.0,100.0,117,100 +96.93384362498533,4.549672775066007,66.84927877956596,98.82861897233107,118.0,100.0,5.48,3.0082553083141064,2.0,12.0,118.0,100.0,5.17,1.1318568814121333,2.0,7.0,118.0,100.0,3.14,1.095627673984187,2.0,6.0,118.0,100.0,118,100 +98.75176485474921,0.38751937276353493,95.07674887867796,99.00524865963371,119.0,100.0,6.04,3.2308512810093877,2.0,11.0,119.0,100.0,4.84,1.2387090053761618,2.0,7.0,119.0,100.0,3.4,1.131370849898476,2.0,5.0,119.0,100.0,119,100 +98.46914802826653,1.9374134070437807,86.52072700236035,98.87288074385151,120.0,100.0,6.87,2.985481535699057,2.0,11.0,120.0,100.0,4.9,1.004987562112089,3.0,7.0,120.0,100.0,3.72,1.0684568311354465,2.0,5.0,120.0,100.0,120,100 +98.55712291275614,2.4565638824938594,74.11875773641694,98.94794881176044,121.0,100.0,7.08,3.2792682110495317,2.0,11.0,121.0,100.0,5.16,1.026839812239475,3.0,8.0,121.0,100.0,3.72,1.149608629055993,2.0,5.0,121.0,100.0,121,100 
+98.79241962941828,0.06915570793120122,98.50097199494569,99.03259668101124,122.0,100.0,7.53,3.753544991071773,2.0,11.0,122.0,100.0,5.13,0.996544028129214,3.0,6.0,122.0,100.0,3.9,1.2688577540449522,2.0,5.0,122.0,100.0,122,100 +98.70778725742808,1.3792446470299489,84.99826211980783,99.00308342518004,123.0,100.0,6.19,3.8747774129619366,2.0,15.0,123.0,100.0,4.84,1.09288608738514,3.0,7.0,123.0,100.0,3.55,1.3369741957120937,2.0,6.0,123.0,100.0,123,100 +97.28857961437546,10.212792828284142,2.189929347170073,98.96527236661674,124.0,100.0,6.18,3.4651984070179873,2.0,15.0,124.0,100.0,4.96,0.8475848040166835,3.0,6.0,124.0,100.0,3.55,1.2278029157808674,2.0,6.0,124.0,100.0,124,100 +98.83581608927344,0.06995292958633285,98.49566924662518,99.01912650676509,125.0,100.0,5.66,3.490615991483452,2.0,15.0,125.0,100.0,5.14,0.8487638069569177,3.0,7.0,125.0,100.0,3.36,1.2690153663372246,2.0,6.0,125.0,100.0,125,100 +98.3636219456235,2.65679421151801,82.18786227250087,98.97254383143218,126.0,100.0,5.87,2.961941930558396,2.0,15.0,126.0,100.0,4.91,0.7628237017817419,4.0,7.0,126.0,100.0,3.4,1.0488088481701514,2.0,6.0,126.0,100.0,126,100 +98.85050250941813,0.03904870592890018,98.71480887306322,99.00872922982826,127.0,100.0,6.06,2.9793287834678472,2.0,15.0,127.0,100.0,4.83,0.7753063910480811,3.0,6.0,127.0,100.0,3.33,1.0005498488331306,2.0,6.0,127.0,100.0,127,100 +98.46452305924123,2.0074968690638006,85.29559137704588,98.84403103299113,128.0,100.0,5.93,2.6730319863406047,2.0,15.0,128.0,100.0,4.6,0.8717797887081347,3.0,6.0,128.0,100.0,3.35,0.9205976319760985,2.0,6.0,128.0,100.0,128,100 +97.83972401508912,2.662935587060671,87.99872174496483,98.92713626176965,129.0,100.0,5.38,2.7596376573746055,2.0,15.0,129.0,100.0,4.61,1.0187737727287645,3.0,6.0,129.0,100.0,3.28,1.0107423014794623,2.0,6.0,129.0,100.0,129,100 +98.38637608237202,3.2447296039309363,66.11433691284631,98.92499102246113,130.0,100.0,5.29,2.8435716977069525,2.0,15.0,130.0,100.0,4.51,1.117989266495882,3.0,6.0,130.0,100.0,3.28,1.049571341072154,2.0,6.0,130.0,100.0,130,100 +98.22763658058959,1.9744247771232808,87.09785085636612,98.84070360256744,131.0,100.0,4.38,2.824110479425336,2.0,15.0,131.0,100.0,4.53,1.108647825055369,3.0,6.0,131.0,100.0,2.88,0.9927738916792685,2.0,6.0,131.0,100.0,131,100 +97.0046544377802,9.835211704752016,2.143017089161419,98.73428235896401,132.0,100.0,4.26,2.381680079271773,2.0,15.0,132.0,100.0,4.27,1.037834283496166,3.0,6.0,132.0,100.0,2.97,0.9322553298319082,2.0,6.0,132.0,100.0,132,100 +97.63447152973933,9.634909040201723,2.189929347170073,98.75913646500148,133.0,100.0,3.67,2.121579600203584,1.0,11.0,133.0,100.0,4.3,1.1789826122551594,2.0,6.0,133.0,100.0,2.68,0.8231646250902672,1.0,5.0,133.0,100.0,133,100 +98.46960907468616,1.1641571849980907,91.63683866911332,98.78848995516448,134.0,100.0,4.49,2.0952088201418015,2.0,12.0,134.0,100.0,4.19,1.1286717857729942,2.0,7.0,134.0,100.0,3.02,0.8715503427800371,2.0,6.0,134.0,100.0,134,100 +98.59516274664765,0.7132769043621209,91.63683866911332,98.76165791361166,135.0,100.0,4.49,2.2113118278524175,2.0,12.0,135.0,100.0,3.98,1.0581115253129039,2.0,7.0,135.0,100.0,2.9,0.842614977317636,2.0,6.0,135.0,100.0,135,100 +98.58269673425443,0.9650512663342963,89.05149020463394,98.9516230240942,136.0,100.0,4.56,2.2948638303829707,2.0,12.0,136.0,100.0,3.76,0.9911609354691094,2.0,7.0,136.0,100.0,2.85,0.82915619758885,2.0,6.0,136.0,100.0,136,100 
+97.1275181322386,10.0184021634653,2.189929347170073,98.81066491698063,137.0,100.0,5.11,3.0063100305856683,1.0,12.0,137.0,100.0,4.14,1.303993865016243,2.0,7.0,137.0,100.0,3.26,1.269803134347998,1.0,6.0,137.0,100.0,137,100 +98.684232125961,0.08806825429180853,98.11865074757982,98.91475513010933,138.0,100.0,3.87,2.0574498778828127,2.0,12.0,138.0,100.0,3.91,0.9909086738948246,2.0,7.0,138.0,100.0,2.82,0.8529947244854448,2.0,6.0,138.0,100.0,138,100 +98.70663131749563,0.03890037332910148,98.46377908400363,98.7946693554378,139.0,100.0,3.25,1.7399712641305316,2.0,7.0,139.0,100.0,3.64,0.7002856560004638,2.0,5.0,139.0,100.0,2.46,0.5730619512757761,2.0,4.0,139.0,100.0,139,100 +98.70642170461713,0.0508121739320459,98.47060191227085,98.84775593326867,140.0,100.0,3.04,1.6548111674750083,2.0,6.0,140.0,100.0,3.29,0.840178552451799,2.0,5.0,140.0,100.0,2.37,0.5413871073455666,2.0,4.0,140.0,100.0,140,100 +98.50966486689684,1.5205278874113328,86.49534356800832,99.01220784593966,141.0,100.0,2.76,1.4907716122867378,2.0,6.0,141.0,100.0,3.16,0.7838367176906169,2.0,5.0,141.0,100.0,2.29,0.534696175411794,2.0,4.0,141.0,100.0,141,100 +97.31334980161274,4.03233216000336,74.16988480669308,98.70013619826103,142.0,100.0,2.87,1.5915715503865984,2.0,6.0,142.0,100.0,3.22,0.6415605972938176,2.0,5.0,142.0,100.0,2.3,0.5385164807134504,2.0,4.0,142.0,100.0,142,100 +93.2208621696593,14.823004719427578,2.1899293194853153,98.88439063575731,143.0,100.0,2.95,1.6147755261955143,1.0,6.0,143.0,100.0,3.16,0.7838367176906169,1.0,5.0,143.0,100.0,2.36,0.5919459434779496,1.0,4.0,143.0,100.0,143,100 +97.09213910674909,6.09850125868009,59.488049180555116,98.84570368655052,144.0,100.0,2.79,1.4233411397131748,2.0,7.0,144.0,100.0,3.15,0.7262919523166975,2.0,5.0,144.0,100.0,2.39,0.6147357155721472,2.0,4.0,144.0,100.0,144,100 +97.30852973780455,5.791860857722988,59.488049180555116,98.79458990149108,145.0,100.0,2.54,1.0992724866929038,2.0,6.0,145.0,100.0,3.15,0.7262919523166975,2.0,5.0,145.0,100.0,2.34,0.5517245689653488,2.0,4.0,145.0,100.0,145,100 +97.88533742153889,3.5565980370412857,74.76586275753174,98.85330575596319,146.0,100.0,2.47,0.9322553298319083,2.0,6.0,146.0,100.0,3.22,0.6095900261651268,2.0,5.0,146.0,100.0,2.33,0.5109794516416486,2.0,4.0,146.0,100.0,146,100 +97.94833196824248,2.9510500019394796,81.1175583573235,98.70781961951035,147.0,100.0,2.34,0.8392854103342915,2.0,6.0,147.0,100.0,3.2,0.5099019513592784,2.0,5.0,147.0,100.0,2.24,0.47159304490206383,2.0,4.0,147.0,100.0,147,100 +97.07018601732001,3.3097932785167767,78.74413318197752,98.66734249842327,148.0,100.0,2.51,0.8543418519538885,2.0,6.0,148.0,100.0,3.38,0.6128621378417825,2.0,5.0,148.0,100.0,2.42,0.5509990925582365,2.0,4.0,148.0,100.0,148,100 +97.90218829137365,2.949552518186236,77.12120634264495,98.66627901265666,149.0,100.0,2.6,1.16619037896906,2.0,7.0,149.0,100.0,3.36,0.6711184694225006,2.0,5.0,149.0,100.0,2.42,0.6352952069707436,2.0,4.0,149.0,100.0,149,100 +97.31424308784362,9.720171030593656,2.189929347170073,98.74143979157746,150.0,100.0,3.09,1.685793581670069,2.0,7.0,150.0,100.0,3.53,0.9214662229295222,2.0,6.0,150.0,100.0,2.62,0.8096912991998865,2.0,4.0,150.0,100.0,150,100 +97.41088218422037,9.869878773449033,2.189928470994812,98.71976389886619,151.0,100.0,3.37,1.836600119786558,2.0,7.0,151.0,100.0,3.55,0.9836157786453003,2.0,5.0,151.0,100.0,2.75,0.852936105461599,2.0,4.0,151.0,100.0,151,100 
+98.62003671730223,0.21979633653514125,96.50288874730307,98.93449072121409,152.0,100.0,3.1,2.0420577856662137,2.0,10.0,152.0,100.0,3.4,1.0198039027185568,2.0,6.0,152.0,100.0,2.61,0.8705745229444749,2.0,5.0,152.0,100.0,152,100 +98.1551064308571,2.2506135588289964,82.53640895687408,98.8559403164824,153.0,100.0,2.38,0.845931439302264,2.0,6.0,153.0,100.0,3.38,0.6128621378417825,2.0,5.0,153.0,100.0,2.3,0.5385164807134504,2.0,4.0,153.0,100.0,153,100 +97.45877943510214,5.297331321362515,50.5188922356302,98.76774702661808,154.0,100.0,2.28,0.8376156636548769,2.0,6.0,154.0,100.0,3.21,0.667757440991862,2.0,5.0,154.0,100.0,2.2,0.5099019513592784,2.0,4.0,154.0,100.0,154,100 +98.60662507483295,0.2662628259953397,96.50288874730307,98.81222195681471,155.0,100.0,2.14,0.6931089380465382,2.0,6.0,155.0,100.0,2.94,0.5624944444170092,2.0,5.0,155.0,100.0,2.08,0.3655133376499413,2.0,4.0,155.0,100.0,155,100 +94.85914821146461,5.435997748348356,78.93161527436872,98.70894653378751,156.0,100.0,2.61,1.3848826665100549,2.0,6.0,156.0,100.0,3.17,0.9061456836513652,2.0,5.0,156.0,100.0,2.33,0.7078841713161836,2.0,4.0,156.0,100.0,156,100 +97.45807697082286,5.400539548579277,49.75227342536781,98.75892340799227,157.0,100.0,3.19,1.8038569788095726,2.0,6.0,157.0,100.0,3.55,1.2031209415515967,2.0,6.0,157.0,100.0,2.61,0.9043782394551519,2.0,4.0,157.0,100.0,157,100 +98.47164124409463,1.0821185609994803,90.92283460250249,98.87964408655941,158.0,100.0,2.84,1.7362027531368567,2.0,11.0,158.0,100.0,3.25,1.2278029157808674,2.0,7.0,158.0,100.0,2.43,0.8631917515824626,2.0,6.0,158.0,100.0,158,100 +97.46085118712976,9.636194763098175,2.1898253760395083,98.81453158531735,159.0,100.0,2.66,1.6805951326836577,1.0,8.0,159.0,100.0,3.15,1.2196310917650468,2.0,6.0,159.0,100.0,2.29,0.697065276713738,1.0,4.0,159.0,100.0,159,100 +97.82075280686459,4.661814279129708,66.08540364962339,98.6741117129746,160.0,100.0,2.38,1.1728597529116598,2.0,8.0,160.0,100.0,2.93,1.1937755232873557,2.0,6.0,160.0,100.0,2.21,0.5530822723609934,2.0,4.0,160.0,100.0,160,100 +97.60735623622253,9.590169880542609,2.189929347170073,98.72135701329137,161.0,100.0,2.51,1.3819913169047047,1.0,8.0,161.0,100.0,2.83,1.1406577050105786,2.0,6.0,161.0,100.0,2.24,0.6499230723708769,1.0,4.0,161.0,100.0,161,100 +98.41556182945429,0.772462623012993,94.65054238357462,98.76888344083005,162.0,100.0,3.0,1.865475810617763,2.0,10.0,162.0,100.0,3.12,1.1685888926393233,2.0,6.0,162.0,100.0,2.44,0.8039900496896714,2.0,5.0,162.0,100.0,162,100 +97.86257736219828,2.5961085251077782,84.3396429635384,98.72696303591272,163.0,100.0,2.53,1.5391880976670789,2.0,10.0,163.0,100.0,3.35,0.9205976319760985,2.0,5.0,163.0,100.0,2.24,0.6343500610861483,2.0,5.0,163.0,100.0,163,100 +97.69069953822391,2.5790401051133793,89.22405946993196,98.68180548273696,164.0,100.0,3.32,2.391150350772615,2.0,11.0,164.0,100.0,3.05,0.9205976319760985,2.0,5.0,164.0,100.0,2.49,0.8543418519538886,2.0,5.0,164.0,100.0,164,100 +97.42634281318195,9.619920368631444,2.1897925021006626,98.6318552763151,165.0,100.0,3.3,2.4351591323771844,2.0,11.0,165.0,100.0,2.84,0.9457272334029512,2.0,5.0,165.0,100.0,2.46,0.876584280032445,2.0,5.0,165.0,100.0,165,100 +98.05859056230636,1.8157728736045733,91.18210660624301,98.76333770700623,166.0,100.0,3.56,2.5546819762937223,2.0,10.0,166.0,100.0,3.01,0.8184741901856161,2.0,5.0,166.0,100.0,2.53,0.9214662229295222,2.0,5.0,166.0,100.0,166,100 
+98.40483219720404,1.6131643144965522,82.36999159127689,98.7837501336221,167.0,100.0,3.94,2.83485449362044,2.0,14.0,167.0,100.0,3.04,0.8822698000045109,2.0,6.0,167.0,100.0,2.63,0.9864583113340372,2.0,6.0,167.0,100.0,167,100 +97.95299724664015,5.034502798255812,50.56398171910048,99.01772020522009,168.0,100.0,3.5,3.0773365106858237,2.0,14.0,168.0,100.0,2.91,1.0207350292803712,2.0,6.0,168.0,100.0,2.48,1.0533755265810956,2.0,6.0,168.0,100.0,168,100 +98.0713901430404,1.9451246670975109,88.63736860352296,98.6326290798677,169.0,100.0,3.02,2.4737825288412076,2.0,12.0,169.0,100.0,2.93,0.8860586888011426,2.0,6.0,169.0,100.0,2.34,0.9079647570252934,2.0,6.0,169.0,100.0,169,100 +98.08285027244041,3.644062504622839,65.48732038143234,98.78397724097982,170.0,100.0,2.24,1.1056219968868202,2.0,10.0,170.0,100.0,2.69,0.6114736298484179,2.0,5.0,170.0,100.0,2.07,0.3536947836765477,2.0,5.0,170.0,100.0,170,100 +98.113123986427,3.178552123365375,71.79282218398694,98.78519987026185,171.0,100.0,2.16,0.7838367176906169,2.0,6.0,171.0,100.0,2.75,0.45552167895721496,2.0,4.0,171.0,100.0,2.04,0.19595917942265423,2.0,3.0,171.0,100.0,171,100 +92.72231771341362,10.94212500684909,2.189929347170073,98.5731778798424,172.0,100.0,2.1,0.5916079783099617,1.0,6.0,172.0,100.0,2.71,0.534696175411794,2.0,4.0,172.0,100.0,2.04,0.24166091947189144,1.0,3.0,172.0,100.0,172,100 +92.95354876973893,11.210147480833447,2.189929347170073,98.72806640325234,173.0,100.0,2.09,0.4710626285325551,1.0,6.0,173.0,100.0,2.76,0.6019966777316964,2.0,4.0,173.0,100.0,2.06,0.2764054992217051,1.0,3.0,173.0,100.0,173,100 +93.93041309194452,10.437939880264603,2.189929347170073,98.52393464570542,174.0,100.0,2.17,0.548725796732758,1.0,6.0,174.0,100.0,2.85,0.6062177826491071,2.0,4.0,174.0,100.0,2.13,0.36482872693909396,1.0,3.0,174.0,100.0,174,100 +95.85115919724812,9.609822369959534,2.189929347170073,98.58979857413603,175.0,100.0,2.38,0.6896375859826667,1.0,6.0,175.0,100.0,3.03,0.7135124385741288,1.0,4.0,175.0,100.0,2.31,0.5038849074937648,1.0,4.0,175.0,100.0,175,100 +97.78363565130536,1.6865290549170553,84.38909943697061,98.62027387395212,176.0,100.0,2.47,0.7543871685016919,2.0,6.0,176.0,100.0,3.09,0.8496469855181034,2.0,5.0,176.0,100.0,2.39,0.5271622141238881,2.0,4.0,176.0,100.0,176,100 +98.09305500878256,1.1063319949817187,89.6699042932936,98.61186922918954,177.0,100.0,2.5,0.9848857801796105,2.0,6.0,177.0,100.0,2.79,0.8977193325310534,2.0,5.0,177.0,100.0,2.35,0.5894913061275799,2.0,4.0,177.0,100.0,177,100 +97.46409990732002,5.774817033726637,50.311551681646826,98.72114405249387,178.0,100.0,2.21,0.7654410493303845,2.0,6.0,178.0,100.0,2.53,0.6701492371106604,2.0,5.0,178.0,100.0,2.12,0.40693979898751603,2.0,4.0,178.0,100.0,178,100 +98.40949616895824,0.1368799477450012,97.64011513704322,98.71888453180173,179.0,100.0,2.04,0.19595917942265423,2.0,3.0,179.0,100.0,2.44,0.5535341001239219,2.0,4.0,179.0,100.0,2.04,0.19595917942265423,2.0,3.0,179.0,100.0,179,100 +98.07373545895788,1.9829738881375079,81.70397408752302,98.86612187690412,180.0,100.0,2.03,0.17058722109231983,2.0,3.0,180.0,100.0,2.56,0.5535341001239219,2.0,4.0,180.0,100.0,2.03,0.17058722109231983,2.0,3.0,180.0,100.0,180,100 +97.60905635929144,2.3293495765203254,86.5552105773445,98.45633701986637,181.0,100.0,2.05,0.2598076211353316,2.0,4.0,181.0,100.0,2.57,0.5701754116059373,2.0,4.0,181.0,100.0,2.05,0.2598076211353316,2.0,4.0,181.0,100.0,181,100 
+97.99667714658881,1.5622042095683308,89.10186047883279,98.85200631272693,182.0,100.0,2.04,0.27999999999999997,2.0,4.0,182.0,100.0,2.35,0.51720402163943,2.0,4.0,182.0,100.0,2.04,0.27999999999999997,2.0,4.0,182.0,100.0,182,100 +98.17169744102804,1.1349722899568622,91.51559641290244,98.67944608719937,183.0,100.0,2.03,0.2984962311319859,2.0,5.0,183.0,100.0,2.31,0.5233545643251809,2.0,4.0,183.0,100.0,2.02,0.19899748742132398,2.0,4.0,183.0,100.0,183,100 +98.02957586106177,2.5743064234855475,73.8488237535782,98.727819483163,184.0,100.0,2.01,0.09949874371066199,2.0,3.0,184.0,100.0,2.34,0.494368283772331,2.0,4.0,184.0,100.0,2.01,0.09949874371066199,2.0,3.0,184.0,100.0,184,100 +94.58214592905661,9.834179674603591,2.189929347170073,98.58043704119532,185.0,100.0,2.11,0.6307931515164064,1.0,6.0,185.0,100.0,2.24,0.4715930449020639,1.0,4.0,185.0,100.0,2.03,0.22158519806160334,1.0,3.0,185.0,100.0,185,100 +94.07832312688552,10.245162874660783,2.189929347170073,98.58667446650186,186.0,100.0,2.05,0.32787192621510003,1.0,4.0,186.0,100.0,2.2,0.4898979485566356,1.0,4.0,186.0,100.0,2.03,0.22158519806160337,1.0,3.0,186.0,100.0,186,100 +95.96762238896886,10.212531421303034,2.189929347170073,98.85254901708377,187.0,100.0,2.24,0.6499230723708769,2.0,6.0,187.0,100.0,2.59,0.7495998932764065,2.0,5.0,187.0,100.0,2.15,0.35707142142714254,2.0,3.0,187.0,100.0,187,100 +97.81066369086179,3.284016061631362,66.60692377840255,98.65549037054436,188.0,100.0,2.1,0.3872983346207417,2.0,4.0,188.0,100.0,2.6,0.7483314773547883,2.0,5.0,188.0,100.0,2.07,0.2551470164434615,2.0,3.0,188.0,100.0,188,100 +98.32223690796992,0.07196653506687552,98.13371662804724,98.66440613258548,189.0,100.0,2.08,0.30594117081556704,2.0,4.0,189.0,100.0,2.54,0.573061951275776,2.0,4.0,189.0,100.0,2.07,0.2551470164434615,2.0,3.0,189.0,100.0,189,100 +98.3740500825539,0.1037536994716691,97.87885372895647,98.75984983042044,190.0,100.0,2.02,0.24413111231467402,1.0,3.0,190.0,100.0,2.37,0.5413871073455665,2.0,4.0,190.0,100.0,2.02,0.24413111231467402,1.0,3.0,190.0,100.0,190,100 +97.27838555039234,9.585729590303055,2.189929347170073,98.74957915498716,191.0,100.0,2.02,0.19899748742132398,1.0,3.0,191.0,100.0,2.32,0.5455272678794342,1.0,4.0,191.0,100.0,2.02,0.19899748742132398,1.0,3.0,191.0,100.0,191,100 +98.22204279091278,1.0579993799396912,90.84093797442226,99.09554950726972,192.0,100.0,2.06,0.23748684174075835,2.0,3.0,192.0,100.0,2.29,0.534696175411794,2.0,4.0,192.0,100.0,2.06,0.23748684174075835,2.0,3.0,192.0,100.0,192,100 +95.8965574806131,13.816267495773898,2.189929347170073,98.65095557288986,193.0,100.0,2.05,0.2598076211353316,1.0,3.0,193.0,100.0,2.24,0.5851495535331117,1.0,4.0,193.0,100.0,2.05,0.2598076211353316,1.0,3.0,193.0,100.0,193,100 +94.42397939357191,6.669538678795538,59.07506680454567,98.400210468904,194.0,100.0,2.09,0.28618176042508375,2.0,3.0,194.0,100.0,2.39,0.6147357155721472,2.0,4.0,194.0,100.0,2.09,0.28618176042508375,2.0,3.0,194.0,100.0,194,100 +95.67065019542615,10.648975060478483,2.189929347170073,98.70258188077422,195.0,100.0,2.1,0.36055512754639896,1.0,4.0,195.0,100.0,2.51,0.7415524256584964,1.0,4.0,195.0,100.0,2.09,0.3192177939902474,1.0,3.0,195.0,100.0,195,100 +95.51328991955705,13.46535220353762,2.189929347170073,98.54650784912519,196.0,100.0,2.16,0.463033476111609,1.0,4.0,196.0,100.0,2.67,0.8252878285786117,1.0,5.0,196.0,100.0,2.14,0.4004996878900157,1.0,3.0,196.0,100.0,196,100 
+96.29240974391305,13.457879461190847,2.189929347170073,98.66104062122591,197.0,100.0,2.06,0.3411744421846396,1.0,4.0,197.0,100.0,2.99,0.6999285677838846,2.0,5.0,197.0,100.0,2.05,0.2958039891549808,1.0,3.0,197.0,100.0,197,100
+96.13143009769547,13.6450604998044,2.189929347170073,98.54991644891179,198.0,100.0,2.25,1.061838029079765,1.0,8.0,198.0,100.0,3.26,0.7825599018605541,2.0,5.0,198.0,100.0,2.16,0.5953150426454887,1.0,5.0,198.0,100.0,198,100
+97.40138251753346,9.569561931645177,2.189929347170073,98.58179255197406,199.0,100.0,2.18,0.6063002556489647,1.0,7.0,199.0,100.0,3.27,0.7463913182774836,2.0,4.0,199.0,100.0,2.15,0.40926763859362253,1.0,4.0,199.0,100.0,199,100
+96.4080737438848,10.67508576926515,2.189929347170073,98.76292232089415,200.0,100.0,2.15,0.5172040216394301,1.0,6.0,200.0,100.0,3.35,0.7921489758877429,2.0,4.0,200.0,100.0,2.12,0.35440090293338705,1.0,3.0,200.0,100.0,200,100
diff --git a/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/pop-final.pkl b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/pop-final.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..2a2db28132fdc999aba0ad70cdc413a71ebfea27
Binary files /dev/null and b/results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/pop-final.pkl differ
diff --git a/tutorial.ipynb b/tutorial.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..dd75a716e02ab851305ba768001af3cbe586efbf
--- /dev/null
+++ b/tutorial.ipynb
@@ -0,0 +1,310 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Tree GP"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "source": [
+    "import pickle\r\n",
+    "import yaml\r\n",
+    "from experiments import gp as gp_script\r\n",
+    "\r\n",
+    "if \"conf_gp\" not in globals():#check whether everything is already loaded, to avoid loading it twice\r\n",
+    "    with open(\"results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/conf.yml\") as f:#load the conf associated with the experiment (for env input, output and pset initialization)\r\n",
+    "        conf_gp = yaml.load(f, Loader=yaml.SafeLoader)\r\n",
+    "\r\n",
+    "    gp_script.Factory(conf_gp[\"params\"]).init_global_var()\r\n",
+    "\r\n",
+    "with open(\"results/log-CartPole-v1-conf_gpUCB-1631700679.1661417/hof-final.pkl\", \"rb\") as input_file:\r\n",
+    "    hof = pickle.load(input_file)\r\n",
+    "print(len(hof))\r\n",
+    "best = hof[0]\r\n",
+    "\r\n",
+    "for k, tree in enumerate(best):\r\n",
+    "    print(\"OUTPUT:\", str(k), tree)\r\n",
+    "print(best.fitness.values, len(best.fitness.rewards))"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "10\n",
+      "OUTPUT: 0 gt(ARG3, multiply(ARG2, -2.6824846543167147))\n",
+      "(500.0, 8.0) 0\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### See agent behavior"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "source": [
+    "gp_script.ENV = gp_script.gym.make(conf_gp[\"params\"][\"env\"])\r\n",
+    "\r\n",
+    "if gp_script.ENV.action_space.shape:\r\n",
+    "    agent = gp_script.toolbox.compile(best)\r\n",
+    "else:\r\n",
+    "    func = gp_script.toolbox.compile(best)\r\n",
+    "    agent = lambda *s: int(func(*s)[0])\r\n",
+    "s = 0\r\n",
+    "steps = 0\r\n",
+    "\r\n",
+    "gp_script.ENV.render()\r\n",
+    "state = gp_script.ENV.reset()\r\n",
+    "for k in range(2000):\r\n",
+    "    state, reward, done, _ = gp_script.ENV.step(agent(*state))\r\n",
+    "    gp_script.ENV.render()\r\n",
+    "    s += reward\r\n",
+    "    steps += 1\r\n",
+    "    if done:\r\n",
+    "        break\r\n",
+    "\r\n",
+    "print(\"End! cumulative rewards:\", s, \" Done?\", done, \" nb_steps:\", k)\r\n",
+    "gp_script.ENV.close()"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "End! cumulative rewards: 500.0 Done? True nb_steps: 499\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Tree GP Graph"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "source": [
+    "import numpy as np\r\n",
+    "from deap import gp\r\n",
+    "import pygraphviz as pgv\r\n",
+    "\r\n",
+    "offset = 0\r\n",
+    "n = []\r\n",
+    "e = []\r\n",
+    "l = {}\r\n",
+    "for tree in best:#multi-output support\r\n",
+    "    expr = tree\r\n",
+    "    nodes, edges, labels = gp.graph(expr)\r\n",
+    "    n += list(np.array(nodes)+offset)\r\n",
+    "    e += map(tuple, list(np.array(edges)+offset))\r\n",
+    "    for key in list(labels.keys()):\r\n",
+    "        l[key+offset] = labels[key]\r\n",
+    "    offset += np.max(nodes)+1\r\n",
+    "nodes = n\r\n",
+    "edges = e\r\n",
+    "labels = l\r\n",
+    "\r\n",
+    "g = pgv.AGraph()\r\n",
+    "g.add_nodes_from(nodes)\r\n",
+    "g.add_edges_from(edges)\r\n",
+    "g.layout(prog=\"dot\")\r\n",
+    "\r\n",
+    "for i in nodes:\r\n",
+    "    n = g.get_node(i)\r\n",
+    "    n.attr[\"label\"] = labels[i]\r\n",
+    "\r\n",
+    "g.draw(\"img/Tree-GP.png\", prog=\"dot\")"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    ""
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Linear GP"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "source": [
+    "import pickle\r\n",
+    "import yaml\r\n",
+    "from experiments import linGP as linGP_script\r\n",
+    "\r\n",
+    "if \"conf_lingp\" not in globals():#check whether everything is already loaded, to avoid loading it twice\r\n",
+    "    with open(\"results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/conf.yml\") as f:#load the conf associated with the experiment (for env input, output and pset initialization)\r\n",
+    "        conf_lingp = yaml.load(f, Loader=yaml.SafeLoader)\r\n",
+    "\r\n",
+    "    linGP_script.Factory(conf_lingp[\"params\"]).init_global_var()# gives a Warning from the creator of gp_script\r\n",
+    "\r\n",
+    "with open(\"results/log-MountainCarContinuous-v0-conf_lingp-1631701228.1234944/hof-final.pkl\", \"rb\") as input_file:\r\n",
+    "    hof = pickle.load(input_file)\r\n",
+    "print(len(hof))\r\n",
+    "best = hof[-1]\r\n",
+    "print(best.to_effective(list(range(linGP_script.OUTPUT)))[0], best.fitness.values, len(best.fitness.rewards))"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "10\n",
+      " op dst inpt1 inpt2\n",
+      "0 + 0 1 14\n",
+      "1 * 0 5 0\n",
+      " (98.78012634838458, 2.0) 0\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### See agent behavior"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "source": [
+    "linGP_script.ENV = linGP_script.gym.make(conf_lingp[\"params\"][\"env\"])\r\n",
+    "\r\n",
+    "### Define the agent according to the environment\r\n",
+    "eff, _, _ = best.to_effective(list(range(linGP_script.OUTPUT)))\r\n",
+    "if linGP_script.ENV.action_space.shape:\r\n",
+    "    def agent(inputs):\r\n",
+    "        register = eff.init_register()\r\n",
+    "        return eff.execute(eff, inputs, register, list(range(linGP_script.OUTPUT)))\r\n",
+    "else:\r\n",
+    "    if linGP_script.OUTPUT == 1:\r\n",
+    "        def agent(inputs):\r\n",
+    "            register = eff.init_register()\r\n",
+    "            return int(eff.execute(eff, inputs, register, list(range(linGP_script.OUTPUT)))>0)\r\n",
+    "    else:\r\n",
+    "        def agent(inputs):\r\n",
+    "            register = eff.init_register()\r\n",
+    "            return np.argmax(eff.execute(eff, inputs, register, list(range(linGP_script.OUTPUT))))\r\n",
+    "s = 0\r\n",
+    "steps = 0\r\n",
+    "\r\n",
+    "linGP_script.ENV.render()\r\n",
+    "\r\n",
+    "state = linGP_script.ENV.reset()\r\n",
+    "for k in range(2000):\r\n",
+    "    state, reward, done, _ = linGP_script.ENV.step(agent(state))\r\n",
+    "    linGP_script.ENV.render()\r\n",
+    "    s += reward\r\n",
+    "    steps += 1\r\n",
+    "    if done:\r\n",
+    "        break\r\n",
+    "print(\"End! cumulative rewards:\", s, \" Done?\", done, \" nb_steps:\", k)\r\n",
+    "linGP_script.ENV.close()"
+   ],
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "End! cumulative rewards: 98.38424791624433 Done? True nb_steps: 750\n"
+     ]
+    }
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Linear GP plot as graph"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "source": [
+    "from GPRL.genetic_programming.linearGP import graph\r\n",
+    "import pygraphviz as pgv\r\n",
+    "\r\n",
+    "#### Give a default name to terminals\r\n",
+    "register = best.init_register()\r\n",
+    "terminals_name = [str(round(register[k],2)) for k in range(len(register))]\r\n",
+    "args = [\"ARG\"+str(k) for k in range(linGP_script.INPUT)]\r\n",
+    "terminals_name[best.regCalcSize:best.regCalcSize+best.regInputSize] = args\r\n",
+    "\r\n",
+    "### Graph attributes\r\n",
+    "nodes, edges, labels, branch_edges = graph(best, list(range(linGP_script.OUTPUT)), debug=False, terminals_name=terminals_name)\r\n",
+    "\r\n",
+    "g = pgv.AGraph(directed=True)\r\n",
+    "g.add_nodes_from(nodes)\r\n",
+    "g.add_edges_from(edges)\r\n",
+    "g.add_edges_from(branch_edges, style=\"dashed\")\r\n",
+    "g.layout(prog=\"dot\")\r\n",
+    "\r\n",
+    "for i in nodes:\r\n",
+    "    n = g.get_node(i)\r\n",
+    "    n.attr[\"label\"] = labels[i]\r\n",
+    "\r\n",
+    "g.draw(\"img/Linear-GP.png\", prog=\"dot\")"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    ""
+   ],
+   "metadata": {}
+  }
+ ],
+ "metadata": {
+  "orig_nbformat": 4,
+  "language_info": {
+   "name": "python",
+   "version": "3.8.5",
+   "mimetype": "text/x-python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "pygments_lexer": "ipython3",
+   "nbconvert_exporter": "python",
+   "file_extension": ".py"
+  },
+  "kernelspec": {
+   "name": "python3",
+   "display_name": "Python 3.8.5 64-bit ('ML': conda)"
+  },
+  "interpreter": {
+   "hash": "561162cfaa0eace6a18a123654cd15a5f7ece9700d6ab1f62f4b2474ddafece3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
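Note on reading the Linear GP printout in the notebook above: the effective program returned by `best.to_effective(...)` is a small register program where, following the `Instruction` fields `dst`, `inpt1`, `inpt2` used throughout the library, each row is read as `r[dst] = r[inpt1] op r[inpt2]` over the register file produced by `init_register()` (calculation registers first, then input registers, then constants, as the terminal-naming cell suggests). The two printed rows therefore compute `r[0] = r[1] + r[14]` followed by `r[0] = r[5] * r[0]`, with register 0 serving as the single output for MountainCarContinuous. The sketch below hand-evaluates those two instructions; the register-file size and the initial values are illustrative assumptions, not values recovered from the saved run.

```python
# Hand-evaluation sketch of the effective program printed above:
#   row 0:  + 0 1 14   ->  r[0] = r[1] + r[14]
#   row 1:  * 0 5 0    ->  r[0] = r[5] * r[0]
# The register-file size and initial values below are hypothetical; in the
# notebook they come from eff.init_register() and the environment observation.

def run_effective_program(registers):
    r = list(registers)     # work on a copy of the register file
    r[0] = r[1] + r[14]     # instruction 0: op '+', dst 0, inputs 1 and 14
    r[0] = r[5] * r[0]      # instruction 1: op '*', dst 0, inputs 5 and 0
    return r[0]             # register 0 is the output register here

example_registers = [0.0] * 17   # hypothetical register-file size
example_registers[1] = 0.5       # hypothetical calculation-register value
example_registers[5] = -0.02     # hypothetical value of register 5
example_registers[14] = 1.3      # hypothetical constant register
print(run_effective_program(example_registers))  # prints roughly -0.036
```

Walking through the printout this way is often the quickest sanity check of a learnt controller before rendering it in the environment, since the effective program is usually only a handful of instructions long.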