This document explains how to test a migrated schema of Optuna's RDBStorage. The test consists of the following two parts:
- Test for values
- Test for schema

Each section describes the test instructions for each database.
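As a rough illustration of the value part, the migrated database can be opened with `optuna.load_study` and the recorded trials inspected. This is a minimal sketch rather than the official procedure; the SQLite URL and the study name are placeholder assumptions.

import optuna

# Placeholder storage URL and study name -- substitute the database under test.
study = optuna.load_study(study_name="schema-migration-test", storage="sqlite:///migrated.db")

# Value test: every trial recorded before the migration should still be readable,
# with its state, parameters, and objective value intact.
for trial in study.trials:
    print(trial.number, trial.state, trial.params, trial.value)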
""" | |
Optuna example that optimizes multi-layer perceptrons using PyTorch distributed. | |
In this example, we optimize the validation accuracy of hand-written digit recognition using | |
PyTorch distributed data parallel and MNIST. We optimize the neural network architecture as well | |
as the optimizer configuration. As it is too time consuming to use the whole MNIST dataset, we | |
here use a small subset of it. | |
You can execute this example with mpirun command as follows: | |
$ mpirun -n 2 python pytorch_distributed_simple.py |
import torch
import torch.distributed as dist
import torch.multiprocessing as mp


def example(rank, world_size):
    # Join the process group; the "gloo" backend works on CPU-only machines.
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    if rank == 0:
        z = torch.tensor([True], dtype=torch.bool)
    else:
        z = torch.tensor([False], dtype=torch.bool)
    # Assumed completion: broadcast the flag from rank 0 so all ranks agree on its value.
    dist.broadcast(z, src=0)
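The docstring above launches the script with mpirun, but the `torch.multiprocessing` import suggests it can also be spawned locally. The following is a minimal sketch under that assumption; the master address/port values and the world size are placeholders.

import os

if __name__ == "__main__":
    # Rendezvous settings needed by init_process_group when no external launcher is used.
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"
    world_size = 2
    # Start one process per rank; each process calls example(rank, world_size).
    mp.spawn(example, args=(world_size,), nprocs=world_size, join=True)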
import numpy as np


def _dominates_both_feasible(extended: np.ndarray, const_extended: np.ndarray) -> np.ndarray:
    # Pairwise dominance check for constrained multi-objective minimization.
    # Assuming extended[i, j, :] holds trial i's objective values and
    # const_extended[i, j, :] its constraint values, result[i, j] is True when
    # both trials are feasible (all constraint values <= 0) and trial i dominates
    # trial j: no objective of i is worse and at least one is strictly better.
    return np.logical_and(
        np.logical_and(
            np.all(const_extended <= 0, axis=2),
            np.all(np.swapaxes(const_extended, 0, 1) <= 0, axis=2),
        ),
        np.logical_and(
            np.all(extended <= np.swapaxes(extended, 0, 1), axis=2),
            np.any(extended < np.swapaxes(extended, 0, 1), axis=2),
        ),
    )
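A minimal usage sketch showing how such pairwise arrays could be built with NumPy broadcasting; the layout (entry [i, j] holding trial i's values) and the sample numbers are assumptions introduced for illustration.

# Hypothetical objective values (to be minimized) and one constraint value per trial.
loss_values = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 3.0]])
constraints = np.array([[-1.0], [-0.5], [0.2]])  # the third trial is infeasible

n = loss_values.shape[0]
# extended[i, j, :] == loss_values[i, :]; const_extended[i, j, :] == constraints[i, :].
extended = np.tile(loss_values[:, np.newaxis, :], (1, n, 1))
const_extended = np.tile(constraints[:, np.newaxis, :], (1, n, 1))

# dominates[i, j] is True when feasible trial i dominates feasible trial j.
dominates = _dominates_both_feasible(extended, const_extended)
print(dominates)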
import sklearn.datasets
from sklearn.model_selection import train_test_split

import optuna.integration.lightgbm as lgb

if __name__ == "__main__":
    # Note: load_boston was removed in scikit-learn 1.2, so this snippet needs an older release.
    X, y = sklearn.datasets.load_boston(return_X_y=True)
    train_x, val_x, train_y, val_y = train_test_split(X, y, test_size=0.25)
    dtrain = lgb.Dataset(train_x, label=train_y)
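The regression snippet stops right after building the training Dataset. A possible continuation is sketched below; the validation Dataset, the parameter values, and the use of `lgb.train` (the stepwise tuner entry point of `optuna.integration.lightgbm`) are illustrative rather than part of the original fragment.

    dval = lgb.Dataset(val_x, label=val_y)
    params = {"objective": "regression", "metric": "rmse", "verbosity": -1}
    # Tune key LightGBM hyperparameters stepwise against the validation set.
    model = lgb.train(params, dtrain, valid_sets=[dval])
    print("Best params:", model.params)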
import numpy as np
import sklearn.datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

import optuna.integration.lightgbm as lgb

if __name__ == "__main__":
    data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
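This classification snippet likewise ends right after loading the data. The continuation below mirrors the regression sketch above; every name and parameter past this point is an assumption added for illustration.

    train_x, val_x, train_y, val_y = train_test_split(data, target, test_size=0.25)
    dtrain = lgb.Dataset(train_x, label=train_y)
    dval = lgb.Dataset(val_x, label=val_y)
    params = {"objective": "binary", "metric": "binary_logloss", "verbosity": -1}
    # Stepwise tuning, then evaluation of the tuned booster on the held-out split.
    model = lgb.train(params, dtrain, valid_sets=[dval])
    preds = model.predict(val_x)
    pred_labels = np.rint(preds)
    print("Accuracy:", accuracy_score(val_y, pred_labels))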
import logging
import os

import optuna

# Route SQLAlchemy engine logs to stderr so that every SQL statement issued by
# Optuna's RDBStorage is printed.
logging.basicConfig()
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
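With that logger enabled, any study backed by an RDB storage will echo the SQL it executes. A minimal sketch, assuming a local SQLite file as the storage URL:

study = optuna.create_study(
    storage="sqlite:///example.db",  # placeholder URL
    study_name="logging-demo",  # placeholder name
    load_if_exists=True,
)
# Each ask/tell round issues INSERT/SELECT statements that now show up in the log.
trial = study.ask()
study.tell(trial, 0.0)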
from multiprocessing import Pool
import os
import sys

import optuna


# A simple quadratic landscape: minimized at x = 3 for any fixed y.
def f(x, y):
    return (x - 3) ** 2 + y
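A sketch of how f could be optimized from several worker processes at once. The objective wrapper, the study name, the SQLite URL, and the per-worker trial counts are assumptions added for illustration; the workers share one study through RDB storage so that their trials land in the same place.

def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    y = trial.suggest_float("y", -10, 10)
    return f(x, y)


def run_worker(n_trials):
    # Each worker process re-loads the shared study and contributes its own trials.
    study = optuna.load_study(study_name="pool-demo", storage="sqlite:///pool.db")
    study.optimize(objective, n_trials=n_trials)


if __name__ == "__main__":
    # SQLite keeps the sketch self-contained; a server-based RDB is a better fit
    # for truly concurrent workers.
    optuna.create_study(study_name="pool-demo", storage="sqlite:///pool.db", load_if_exists=True)
    with Pool(processes=2) as pool:
        pool.map(run_worker, [10, 10])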
import numpy as np

import optuna
from optuna import distributions
from optuna.samplers import TPESampler
from optuna.study import StudyDirection
from optuna.trial import TrialState


def default_gamma(x):
    # type: (int) -> int
    # Assumed body, matching Optuna's default gamma: treat roughly the best 10%
    # of finished trials as the "good" group, capped at 25.
    return min(int(np.ceil(0.1 * x)), 25)
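`TPESampler` accepts a callable `gamma` argument, so a splitting rule like the one above can be passed straight in. A brief usage sketch; the quadratic objective and the trial budget are illustrative assumptions.

def quadratic(trial):
    x = trial.suggest_float("x", -10, 10)
    return x ** 2


sampler = TPESampler(gamma=default_gamma, seed=42)
study = optuna.create_study(direction=StudyDirection.MINIMIZE, sampler=sampler)
study.optimize(quadratic, n_trials=20)
print(study.best_trial.params)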