Skip to content

Instantly share code, notes, and snippets.

$ git ± [ ● ][master]
add -- add file contents to index
am -- apply patches from a mailbox
apply -- apply patch to files and/or to index
archimport -- import an Arch repository into git
archive -- create archive of files from named tree
bisect -- find, by binary search, change that introduced a bug
blame -- show what revision and author last modified each line of a file
branch -- list, create, or delete branches
bundle -- move objects and refs by archive
# include <cmath>
# include <iostream>
# include <queue>
# include <set>
# include <stack>
# include <string>
# include <vector>
using namespace std;
"""
One of the design principles of AllenNLP is the use of a modular,
declarative language (JSON) for defining experiments and models.
This is implemented by giving each AllenNLP class a method
.. code-block:: python
@classmethod
def from_params(cls, params: Params, **extras) -> 'ClassName':
...
{
"dataset_reader":{
"type": "sst_tokens",
"use_subtrees": true,
"granularity": "5-class"
},
"validation_dataset_reader":{
"type": "sst_tokens",
"use_subtrees": false,
# Create an Optuna study that maximizes the objective (e.g. validation
# accuracy) and launch the hyperparameter search.
study = optuna.create_study(direction="maximize")
study.optimize(
objective,
n_jobs=1, # number of processes in parallel execution
n_trials=30, # number of trials to train a model
)
# Write the best trial's hyperparameters back into the jsonnet config.
# NOTE(review): this call is truncated in the excerpt — the closing
# parenthesis (and possibly further arguments) of dump_best_config are
# missing.  Restore from the original example before running.
optuna.integration.allennlp.dump_best_config(
"./config/imdb_optuna.jsonnet",
"best.imdb1.json",
import sklearn
import sklearn.datasets
import sklearn.ensemble
import sklearn.model_selection
import sklearn.svm
import optuna
# Define an objective function to be minimized.
def objective(trial: optuna.Trial) -> float:
    """Suggest hyperparameters for a single Optuna trial.

    The pasted excerpt contained two duplicate ``def objective`` headers
    (a copy-paste artifact) and no indentation; this version keeps the
    typed signature and merges them into one well-formed function.

    Parameters
    ----------
    trial : optuna.Trial
        Trial object used to sample hyperparameter values; each
        ``suggest_*`` call records the sampled value under its name.

    Returns
    -------
    float
        The validation metric to optimize.  NOTE(review): the excerpt is
        truncated — the code that trains the model and computes the metric
        (e.g. ``executor.run()`` in the Optuna/AllenNLP example) is missing,
        so as written the function implicitly returns ``None``.  Confirm
        against the original article before use.
    """
    # Model-architecture hyperparameters.
    trial.suggest_int("embedding_dim", 32, 256)
    trial.suggest_int("max_filter_size", 2, 6)
    trial.suggest_int("num_filters", 32, 256)
    trial.suggest_int("output_dim", 32, 256)
    # Regularization and optimizer hyperparameters; lr is sampled on a
    # log scale so the search covers orders of magnitude evenly.
    trial.suggest_float("dropout", 0.0, 0.8)
    trial.suggest_float("lr", 5e-3, 5e-1, log=True)
# NOTE(review): this region fuses two truncated snippets from the original
# article — an AllenNLPExecutor construction (cut off after ``trial=trial``)
# and a separate study-creation/optimization script.  As pasted it is NOT
# valid Python: the executor call's remaining arguments and closing
# parenthesis are missing, and ``study = ...`` appears inside its argument
# list.  Restore both snippets from the original Optuna/AllenNLP example
# before running.
executor = optuna.integration.allennlp.AllenNLPExecutor(
trial=trial, # trial object
study = optuna.create_study(
storage="sqlite:///result/trial.db", # save results in DB
sampler=optuna.samplers.TPESampler(seed=24),
study_name="optuna_allennlp",
direction="maximize",
)
timeout = 60 * 60 * 10 # timeout (sec): 60*60*10 sec => 10 hours
study.optimize(
objective,
{
"data_loader": {
"batch_size": 64,
"shuffle": true
},
"dataset_reader": {
"lazy": false,
"token_indexers": {
"tokens": {
"lowercase_tokens": true,
Creating virtualenv test in /Users/makoto-hiramatsu/Desktop/test/.venv
Using virtualenv: /Users/makoto-hiramatsu/Desktop/test/.venv
PyPI: 77 packages found for allennlp *
Using version ^0.9.0 for allennlp
Updating dependencies
Resolving dependencies...
1: fact: test is 0.1.0
1: derived: test
1: fact: test depends on allennlp (^0.9.0)