Skip to content

Instantly share code, notes, and snippets.

View WillKoehrsen's full-sized avatar
🌆
building

Will Koehrsen WillKoehrsen

🌆
building
View GitHub Profile
# Define the hyperopt search space for LightGBM hyperparameter tuning.
# NOTE(review): this excerpt was truncated mid-literal by the scrape; the
# closing brace below was restored so the dict parses. The full source
# presumably contains additional keys -- confirm against the original gist.
space = {
    'class_weight': hp.choice('class_weight', [None, 'balanced']),
    # Conditional sub-space: 'gbdt' and 'dart' each carry their own
    # subsample parameter, while 'goss' takes none.
    'boosting_type': hp.choice('boosting_type',
                               [{'boosting_type': 'gbdt',
                                 'subsample': hp.uniform('gdbt_subsample', 0.5, 1)},
                                {'boosting_type': 'dart',
                                 'subsample': hp.uniform('dart_subsample', 0.5, 1)},
                                {'boosting_type': 'goss'}]),
}
# Define the hyperopt search space for LightGBM hyperparameter tuning.
# NOTE(review): the excerpt was cut off mid-literal; the closing brace below
# was restored so the dict parses. Further keys may exist in the full source.
space = {
    'class_weight': hp.choice('class_weight', [None, 'balanced']),
    # Conditional sub-space: subsample only applies to 'gbdt' and 'dart'.
    'boosting_type': hp.choice('boosting_type',
                               [{'boosting_type': 'gbdt', 'subsample': hp.uniform('gdbt_subsample', 0.5, 1)},
                                {'boosting_type': 'dart', 'subsample': hp.uniform('dart_subsample', 0.5, 1)},
                                {'boosting_type': 'goss'}]),
    # quniform draws from a quantized uniform (step = last argument).
    'num_leaves': hp.quniform('num_leaves', 30, 150, 1),
    # loguniform searches the learning rate on a log scale over [0.01, 0.2].
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.2)),
    'subsample_for_bin': hp.quniform('subsample_for_bin', 20000, 300000, 20000),
    'min_child_samples': hp.quniform('min_child_samples', 20, 500, 5),
}
# Define the hyperopt search space (duplicate of the snippet above in this
# scrape). NOTE(review): truncated mid-literal; the closing brace below was
# restored so the dict parses. Further keys may exist in the full source.
space = {
    'class_weight': hp.choice('class_weight', [None, 'balanced']),
    # Conditional sub-space: subsample only applies to 'gbdt' and 'dart'.
    'boosting_type': hp.choice('boosting_type',
                               [{'boosting_type': 'gbdt', 'subsample': hp.uniform('gdbt_subsample', 0.5, 1)},
                                {'boosting_type': 'dart', 'subsample': hp.uniform('dart_subsample', 0.5, 1)},
                                {'boosting_type': 'goss'}]),
    'num_leaves': hp.quniform('num_leaves', 30, 150, 1),
    # Log-scale search over the learning rate in [0.01, 0.2].
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.2)),
    'subsample_for_bin': hp.quniform('subsample_for_bin', 20000, 300000, 20000),
    'min_child_samples': hp.quniform('min_child_samples', 20, 500, 5),
}
import lightgbm as lgb
from hyperopt import STATUS_OK

# Number of cross-validation folds the objective uses by default.
N_FOLDS = 10

# Create the dataset.
# NOTE(review): train_features / train_labels are defined elsewhere, outside
# this excerpt.
train_set = lgb.Dataset(train_features, train_labels)


def objective(params, n_folds=N_FOLDS):
    """Objective function for Gradient Boosting Machine Hyperparameter Tuning.

    NOTE(review): only the signature and docstring survive in this excerpt --
    the body was truncated by the scrape (and the docstring had lost its
    indentation, which made the snippet a SyntaxError; fixed here).
    Presumably the full version cross-validates a LightGBM model built from
    ``params`` over ``n_folds`` folds and returns a dict containing 'loss'
    and STATUS_OK -- confirm against the complete source.
    """
from hyperopt import fmin

# Minimize the objective over the search space with the TPE algorithm,
# recording every evaluation in tpe_trials; fmin returns the best point
# found within the 2000-evaluation budget.
tpe_best = fmin(
    fn=objective,
    space=space,
    algo=tpe_algo,
    trials=tpe_trials,
    max_evals=2000,
)
print(tpe_best)
import numpy as np
from hyperopt import hp, tpe, fmin

# Single-call Bayesian optimization of a degree-6 polynomial: x is sampled
# from a normal prior and TPE searches for the minimizer over 2000 trials.
best = fmin(
    fn=lambda x: np.polyval([1, -2, -28, 28, 12, -26, 100], x),
    space=hp.normal('x', 4.9, 0.5),
    algo=tpe.suggest,
    max_evals=2000,
)
# Collect the optimization history into a tidy table: one row per trial,
# with the loss, the iteration index, and the sampled value of x.
trial_columns = {
    'loss': [trial['loss'] for trial in tpe_trials.results],
    'iteration': tpe_trials.idxs_vals[0]['x'],
    'x': tpe_trials.idxs_vals[1]['x'],
}
tpe_results = pd.DataFrame(trial_columns)
tpe_results.head()
from hyperopt import tpe
# Select the Tree-structured Parzen Estimator as the search algorithm:
# tpe.suggest is the proposal function that fmin calls for each new trial.
tpe_algo = tpe.suggest
from hyperopt import hp
# Create the domain space: a single scalar x drawn uniformly from [-4, 6].
space = hp.uniform('x', -4, 6)
import numpy as np


def objective(x):
    """Return the value of a fixed degree-6 polynomial at x, scaled by 0.05."""
    coefficients = [1, -2, -28, 28, 12, -26, 100]
    # np.polyval evaluates the polynomial (highest-degree coefficient first);
    # the 0.05 factor simply rescales the surface being minimized.
    return np.polyval(coefficients, x) * 0.05