Skip to content

Instantly share code, notes, and snippets.

View himkt's full-sized avatar
:octocat:

himkt himkt

:octocat:
View GitHub Profile
$ ssh -p 64537 3.81.79.237
circleci@e4cd4ad52982:~$ cd project/
.circleci/ .github/ .pytest_cache/ CODE_OF_CONDUCT.md Dockerfile MANIFEST.in dist/ examples/ optuna/ result/ setup.py swig-3.0.12.tar.gz venv/
.git/ .gitignore .readthedocs.yml CONTRIBUTING.md LICENSE README.md docs/ lightning_logs/ optuna.egg-info/ setup.cfg swig-3.0.12/ tests/
circleci@e4cd4ad52982:~$ cd project/
circleci@e4cd4ad52982:~/project$ . venv/bin/activate
(venv) circleci@e4cd4ad52982:~/project$ python -m pytest tests/integration_tests/allennlp_tests/test_allennlp.py
====================================================================================================================================================================================================== test session starts =================================================================
____________________________ test_dump_best_config _____________________________
def test_dump_best_config() -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
BATCH_SIZE = 100
service = HashtagService.new
search_options = HashtagService::SEARCH_OPTIONS.merge(per_page: 20)
PopularQuery.order(uu: :desc).limit(100).each_slice(BATCH_SIZE).with_index(1) do |popular_queries, index|
puts "##{index} processing (#{Time.new})"
hashtags_list = popular_queries
.map { |popular_query| Tsukurepo2Hashtag.search_by_keyword(popular_query.canonical_keyword, search_options).search }
.then { |searches| Search::Utils.multi_search(searches) }
____________________________ test_dump_best_config _____________________________
def test_dump_best_config() -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
// Jsonnet training config fragment: fixed run settings plus tunable
// hyperparameters read from external variables (std.extVar), which the
// caller must supply on the command line or via the embedding API.
// NOTE(review): presumably these extVars are injected by Optuna's
// AllenNLPExecutor during hyperparameter search — confirm against the
// Python snippet that constructs the executor.
local batch_size = 64;
local cuda_device = 0;
local num_epochs = 15;
local seed = 42;
// Integer-valued search dimension; parseInt fails if the extVar is missing.
local embedding_dim = std.parseInt(std.extVar('embedding_dim'));
// Float-valued dimensions use parseJson since parseInt only handles integers.
local dropout = std.parseJson(std.extVar('dropout'));
local lr = std.parseJson(std.extVar('lr'));
local max_filter_size = std.parseInt(std.extVar('max_filter_size'));
// Jsonnet training config fragment with all hyperparameters hard-coded as
// literals — the extVar-free counterpart of the searchable config.
// NOTE(review): these look like values dumped from a finished search
// (e.g. a best-config export) — confirm with the generating tool.
local batch_size = 64;
local cuda_device = 0;
local num_epochs = 15;
local seed = 42;
local embedding_dim = 128;
local dropout = 0.2;
local lr = 0.1;
local max_filter_size = 4;
local num_filters = 128;
Creating virtualenv test in /Users/makoto-hiramatsu/Desktop/test/.venv
Using virtualenv: /Users/makoto-hiramatsu/Desktop/test/.venv
PyPI: 77 packages found for allennlp *
Using version ^0.9.0 for allennlp
Updating dependencies
Resolving dependencies...
1: fact: test is 0.1.0
1: derived: test
1: fact: test depends on allennlp (^0.9.0)
{
"data_loader": {
"batch_size": 64,
"shuffle": true
},
"dataset_reader": {
"lazy": false,
"token_indexers": {
"tokens": {
"lowercase_tokens": true,
study = optuna.create_study(
storage="sqlite:///result/trial.db", # save results in DB
sampler=optuna.samplers.TPESampler(seed=24),
study_name="optuna_allennlp",
direction="maximize",
)
timeout = 60 * 60 * 10 # timeout (sec): 60*60*10 sec => 10 hours
study.optimize(
objective,
def objective(trial: optuna.Trial) -> float:
trial.suggest_int("embedding_dim", 32, 256)
trial.suggest_int("max_filter_size", 2, 6)
trial.suggest_int("num_filters", 32, 256)
trial.suggest_int("output_dim", 32, 256)
trial.suggest_float("dropout", 0.0, 0.8)
trial.suggest_float("lr", 5e-3, 5e-1, log=True)
executor = optuna.integration.allennlp.AllenNLPExecutor(
trial=trial, # trial object