Last active
May 21, 2018 12:14
-
-
Save Yatoom/db20ba0bbb8fc00599cd7e083904e45a to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import openml

from arbok.bench import Benchmark

# Benchmark setup: we specify the PBS job headers, the Python interpreter the
# jobs should run with, the root directory where the job files (.sh) are
# stored, and the names of the config and log files created next to them.
bench = Benchmark(
    headers="#PBS -lnodes=1:cpu3\n#PBS -lwalltime=15:00:00",
    python_interpreter="/home/jhoof/python/python36/bin/python3",  # Path to interpreter
    root="/home/jhoof/benchmark-test/",
    jobs_dir="jobs",
    config_file="config.json",
    log_file="log.json"
)

# Write the config file that the jobs will read.
config_file = bench.create_config_file(
    # Wrapper parameters
    wrapper={"refit": True, "verbose": False, "retry_on_error": False},

    # TPOT parameters
    tpot={
        "max_time_mins": 60,  # Max total time in minutes
    },

    # Autosklearn parameters
    autosklearn={
        "time_left_for_this_task": 30,  # Max total time in seconds
    }
)

# Task IDs to benchmark on.
# NOTE(review): the original comment said these are loaded from OpenML study 99
# (hence the `openml` import above), but only task 31 is hard-coded here —
# confirm whether the study lookup was intentionally dropped.
tasks = [31]

# Create the job scripts for the selected classifiers.
bench.create_jobs(tasks, classifiers=["autosklearn"])

# And finally, submit the jobs using qsub.
# bench.submit_jobs()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment