Benchmark on OpenML's CC18 using TPOT and auto-sklearn
import openml
from arbok.bench import Benchmark

# We create a benchmark setup where we specify the PBS headers, the Python
# interpreter we want to use, the directory where we store the jobs (.sh files),
# and the config file we create below.
bench = Benchmark(
    headers="#PBS -lnodes=1:cpu3\n#PBS -lwalltime=15:00:00",
    python_interpreter="/home/jhoof/python/python36/bin/python3",  # Path to interpreter
    root="/home/jhoof/benchmark-long/",
    jobs_dir="jobs",
    config_file="config.json",
    log_file="log.json"
)

# Config file
config_file = bench.create_config_file(

    # Wrapper parameters
    wrapper={"refit": True, "verbose": False, "retry_on_error": True},

    # TPOT parameters
    tpot={
        "max_time_mins": 60,  # Max total time in minutes
    },

    # Auto-sklearn parameters
    autosklearn={
        "time_left_for_this_task": 3600,  # Max total time in seconds
    }
)

# Next, we load the tasks we want to benchmark on from OpenML.
# In this case, we load the list of task IDs from study 99 (the CC18 suite).
tasks = openml.study.get_study(99).tasks

# Next, we create jobs for both TPOT and auto-sklearn.
bench.create_jobs(tasks, classifiers=["autosklearn", "tpot"])

# And finally, we submit the jobs using qsub.
bench.submit_jobs()
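
As a quick sanity check before generating jobs, you can inspect the task list that study 99 returns. This is a minimal sketch, not part of the benchmark script above; it assumes only the standard openml-python calls get_study, get_task, and get_dataset.

import openml

# Load the CC18 task IDs from study 99, as in the benchmark script above.
tasks = openml.study.get_study(99).tasks
print("Loaded %d task IDs from OpenML study 99 (CC18)" % len(tasks))

# Fetch the first task to confirm connectivity and see which dataset it targets.
task = openml.tasks.get_task(tasks[0])
print(task.task_id, task.get_dataset().name)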