hal-cgp issue
from random import randint
from time import perf_counter

import cgp


def display_msg(msg: str, no_skip: bool):
    # Small logging helper: only prints when no_skip is True.
    if no_skip:
        print(msg, flush=True)


# Shared state read and updated by callback() below.
current_best_func = None

opponents = [
    ("myself", lambda o, c: None),
]

start_time = 0
best_score = -float("inf")


def callback(population):
    # Remember the best individual seen so far and report improvements.
    global best_score, current_best_func
    if population.champion.fitness > best_score:
        display_msg("-----ooooOoooo-----", True)
        display_msg(
            "t={}s: new best found with score={}: {}".format(
                perf_counter() - start_time,
                population.champion.fitness,
                population.champion.to_sympy(),
            ),
            True,
        )
        current_best_func = population.champion.to_func
        best_score = population.champion.fitness
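
# Note (added, not in the original gist): `population.champion.to_func` stores the
# bound method itself; presumably current_best_func() is meant to be called later
# to obtain the compiled callable for the best expression found so far.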


def objective(individual, ntrials=5):
    global opponents
    w = 0
    l = 0
    for k in range(ntrials):
        for name, o in opponents:
            display_msg(
                "{} playing against {} ".format(individual.to_sympy(), name), True
            )
            # Placeholder match result: two random rewards stand in for a real game.
            r = [[{"reward": randint(600, 1200)}, {"reward": randint(600, 1200)}]]
            display_msg(
                "-> me against {}: {} to {}".format(
                    name, r[-1][0]["reward"], r[-1][1]["reward"]
                ),
                False,
            )
            if r[-1][0]["reward"] > r[-1][1]["reward"]:
                w += r[-1][0]["reward"]
            elif r[-1][0]["reward"] < r[-1][1]["reward"]:
                l += r[-1][1]["reward"]
    fitness = 2 + (w - l) / (1000 * len(opponents) * ntrials)
    display_msg("fitness for {}: {:.2f} ".format(individual.to_sympy(), fitness), True)
    # fitness always lies within [0, 4], so it is assigned directly
    individual.fitness = fitness
    return individual
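
# Worked example of the fitness formula (illustrative numbers, not from the gist):
# with len(opponents) == 1 and ntrials == 5, winning every game 1200 to 600 gives
# w == 6000 and l == 0, so fitness == 2 + 6000 / (1000 * 1 * 5) == 3.2; losing every
# game symmetrically gives fitness == 0.8, so values stay well inside [0, 4].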


class Exp(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.exp(<scale> * x_0)"
    _def_numpy_output = "np.exp(<scale> * x_0)"
    _def_torch_output = "torch.exp(<scale> * x_0)"
    _def_sympy_output = "exp(<scale> * x_0)"


class Log(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.log(<scale> * x_0) if <scale> * x_0 > 0 else -float('inf')"
    _def_numpy_output = "np.log(<scale> * x_0)"
    _def_torch_output = "torch.log(<scale> * x_0)"
    _def_sympy_output = "log(<scale> * x_0)"


class Cos(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.cos(<scale> * x_0)"
    _def_numpy_output = "np.cos(<scale> * x_0)"
    _def_torch_output = "torch.cos(<scale> * x_0)"
    _def_sympy_output = "cos(<scale> * x_0)"


class Sin(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.sin(<scale> * x_0)"
    _def_numpy_output = "np.sin(<scale> * x_0)"
    _def_torch_output = "torch.sin(<scale> * x_0)"
    _def_sympy_output = "sin(<scale> * x_0)"


class Tan(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.tan(<scale> * x_0)"
    _def_numpy_output = "np.tan(<scale> * x_0)"
    _def_torch_output = "torch.tan(<scale> * x_0)"
    _def_sympy_output = "tan(<scale> * x_0)"


class Acos(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.acos(<scale> * x_0)"
    _def_numpy_output = "np.arccos(<scale> * x_0)"
    _def_torch_output = "torch.acos(<scale> * x_0)"
    _def_sympy_output = "acos(<scale> * x_0)"


class Asin(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.asin(<scale> * x_0)"
    _def_numpy_output = "np.arcsin(<scale> * x_0)"
    _def_torch_output = "torch.asin(<scale> * x_0)"
    _def_sympy_output = "asin(<scale> * x_0)"


class Atan(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.atan(<scale> * x_0)"
    _def_numpy_output = "np.arctan(<scale> * x_0)"
    _def_torch_output = "torch.atan(<scale> * x_0)"
    _def_sympy_output = "atan(<scale> * x_0)"


class Tanh(cgp.OperatorNode):
    _arity = 1
    _initial_values = {"<scale>": lambda: 1.0}
    _def_output = "math.tanh(<scale> * x_0)"
    _def_numpy_output = "np.tanh(<scale> * x_0)"
    _def_torch_output = "torch.tanh(<scale> * x_0)"
    _def_sympy_output = "tanh(<scale> * x_0)"

population_params = {"n_parents": 10, "seed": 8188211, "mutation_rate": 0.04}

genome_params = {
    "n_inputs": 5,
    "n_outputs": 1,
    "n_columns": 12,
    "n_rows": 1,
    "levels_back": 5,
    "primitives": (
        cgp.Add,
        cgp.Sub,
        cgp.Mul,
        cgp.Div,
        cgp.Pow,
        cgp.IfElse,
        cgp.ConstantFloat,
        Exp,
        Log,
        # Cos,
        # Sin,
        Tanh,
    ),  # , Tan, Acos, Asin, Atan),
}

ea_params = {"n_offsprings": 4, "tournament_size": 1, "n_processes": 4}
evolve_params = {"max_generations": 1000, "min_fitness": 2.0}
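# min_fitness = 2.0: under the formula in objective(), fitness >= 2 means total
# wins at least match total losses; presumably evolution stops early once the
# champion reaches that threshold (hedged reading, not stated in the gist).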

pop = cgp.Population(**population_params, genome_params=genome_params)
ea = cgp.ea.MuPlusLambda(**ea_params)


if __name__ == "__main__":
    cgp.evolve(
        pop, objective, ea, **evolve_params, print_progress=True, callback=callback
    )
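    # Added usage sketch (not part of the original gist): after cgp.evolve()
    # returns, the result can be inspected via the same accessors the callback uses.
    print("final champion:", pop.champion.to_sympy())
    print("final fitness:", pop.champion.fitness)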