icoxfog417 / allennlp_tutorial_tuner.py (last active June 10, 2019 03:51)
from sagemaker.tuner import ContinuousParameter, CategoricalParameter

hyperparameter_ranges = {
    "lr": ContinuousParameter(0.01, 0.1),
    "embedding-dim": CategoricalParameter([6, 12]),
    "hidden-dim": CategoricalParameter([6, 12])
}
objective_metric_name = "validation loss"
objective_type = "Minimize"
metric_definitions = [
    {"Name": objective_metric_name,
     # The regex below is an assumption (the gist preview truncates here);
     # it must match the metric line printed in the training log.
     "Regex": "validation loss: ([0-9\\.]+)"}
]
icoxfog417 / allennlp_tutorial_estimator2.py (created April 8, 2019 06:17)
estimator = PyTorch(entry_point="tuning.py",
source_dir="../../allennlp-sagemaker-tuning",
dependencies=[from_root("example"), from_root(".venv")],
role=role,
framework_version="1.0.0",
train_instance_count=1,
train_instance_type="ml.p2.8xlarge",
hyperparameters={
"train-file-name": os.path.basename(s3_paths[0]),
"validation": os.path.basename(s3_paths[1]),
icoxfog417 / allennlp_tutorial_estimator.py (created April 8, 2019 06:13)
estimator = PyTorch(entry_point="tuning.py",
role=role,
framework_version="1.0.0",
train_instance_count=1,
train_instance_type="ml.p2.8xlarge",
hyperparameters={
"train-file-name": os.path.basename(s3_paths[0]),
"validation": os.path.basename(s3_paths[1]),
"epochs": 10
})
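Launching the training job is then a single fit call. A minimal sketch, assuming both files were uploaded under one S3 prefix and the training script reads them from a single "training" channel:

estimator.fit({"training": os.path.dirname(s3_paths[0])})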
icoxfog417 / allennlp_tutorial_train.py (created April 8, 2019 04:44)
import torch.optim as optim
from allennlp.data.iterators import BucketIterator
from allennlp.training.trainer import Trainer

# word_embeddings, lstm, vocab, and the datasets are built earlier in the tutorial.
model = LstmTagger(word_embeddings, lstm, vocab)
optimizer = optim.SGD(model.parameters(), lr=0.1)
iterator = BucketIterator(batch_size=2, sorting_keys=[("sentence", "num_tokens")])
iterator.index_with(vocab)
trainer = Trainer(model=model,
                  optimizer=optimizer,
                  iterator=iterator,
                  train_dataset=train_dataset,
                  validation_dataset=validation_dataset,
                  # remaining arguments are truncated in the gist preview;
                  # num_epochs below is an assumption
                  num_epochs=1000)
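With the trainer configured, training runs with a single call:

metrics = trainer.train()  # returns a dict of final training/validation metrics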
icoxfog417 / allennlp_tutorial.py (created April 8, 2019 01:32)
import torch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.text_field_embedders import TextFieldEmbedder

class LstmTagger(Model):
    def __init__(self,
                 word_embeddings: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 vocab: Vocabulary) -> None:
        super().__init__(vocab)
        self.word_embeddings = word_embeddings
        self.encoder = encoder
        # out_features completion follows the official AllenNLP tutorial.
        self.hidden2tag = torch.nn.Linear(in_features=encoder.get_output_dim(),
                                          out_features=vocab.get_vocab_size("labels"))
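The gist preview cuts off here; in the official AllenNLP tutorial this class also defines a forward method along these lines:

from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits

    def forward(self, sentence, labels=None):
        mask = get_text_field_mask(sentence)
        embeddings = self.word_embeddings(sentence)
        encoder_out = self.encoder(embeddings, mask)
        tag_logits = self.hidden2tag(encoder_out)
        output = {"tag_logits": tag_logits}
        if labels is not None:
            output["loss"] = sequence_cross_entropy_with_logits(tag_logits, labels, mask)
        return output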
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults

class EDINETGetDocumentSensor(BaseSensorOperator):
    @apply_defaults
    def __init__(self, document_type="xbrl", *args, **kwargs):
        self.document_type = document_type
        self._next_document_index = -1
        super().__init__(*args, **kwargs)

    def poke(self, context):
        # Pull the document id list published by the upstream task.
        # The upstream task id is an assumption; the gist preview truncates here.
        document_ids = context["task_instance"].xcom_pull(
            task_ids="get_documents")
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults

class EDINETGetDocumentsOperator(BaseOperator):
    @apply_defaults
    def __init__(self, filter_func=None, *args, **kwargs):
        self.filter_func = filter_func
        super().__init__(*args, **kwargs)

    def execute(self, context):
        self.log.info("Retrieve list of documents from EDINET @ {}.".format(
            self.start_date.strftime("%Y/%m/%d")))
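A sketch of how the operator and sensor could be wired into a DAG; the DAG id, task ids, schedule, and start date are assumptions:

from datetime import datetime
from airflow import DAG

with DAG("edinet_pipeline", start_date=datetime(2019, 4, 1),
         schedule_interval="@daily") as dag:
    get_documents = EDINETGetDocumentsOperator(task_id="get_documents")
    get_document = EDINETGetDocumentSensor(task_id="get_document",
                                           poke_interval=60)
    get_documents >> get_document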
def update(self, states, actions, rewards, values):
    # Values (or advantages) are calculated outside of the update process.
    advantages = rewards - values
    action_probs = self.actor(states)
    selected_action_probs = action_probs[self.to_one_hot(actions)]
    neg_logs = -tf.math.log(selected_action_probs)
    policy_loss = tf.reduce_mean(neg_logs * advantages)
def update(self, states, actions, rewards):
    values = self.critic(states)
    advantages = rewards - tf.stop_gradient(values)  # prevent gradients from flowing into the critic
    action_probs = self.actor(states)
    selected_action_probs = action_probs[self.to_one_hot(actions)]
    neg_logs = -tf.math.log(selected_action_probs)
    policy_loss = tf.reduce_mean(neg_logs * advantages)
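The snippet above only computes the policy loss; the critic needs its own regression update toward the observed rewards. A minimal sketch in TensorFlow 2, assuming self.critic is a Keras model and self.critic_optimizer is defined:

import tensorflow as tf

def update_critic(self, states, rewards):
    with tf.GradientTape() as tape:
        values = self.critic(states)
        # Mean squared error between observed rewards and predicted values.
        value_loss = tf.reduce_mean(tf.square(rewards - values))
    grads = tape.gradient(value_loss, self.critic.trainable_variables)
    self.critic_optimizer.apply_gradients(zip(grads, self.critic.trainable_variables))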