{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End-To-End Memory Networksやってみた\n",
"\n",
"[End-To-End Memory Networks](https://arxiv.org/abs/1503.08895)をTensorFlowで実装してみた。"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Data"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"! [ ! -d tasks_1-20_v1-2 ] && curl -O http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz && tar zxvf tasks_1-20_v1-2.tar.gz"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1 Mary moved to the bathroom.\r\n",
"2 John went to the hallway.\r\n",
"3 Where is Mary? \tbathroom\t1\r\n",
"4 Daniel went back to the hallway.\r\n",
"5 Sandra moved to the garden.\r\n",
"6 Where is Daniel? \thallway\t4\r\n",
"7 John moved to the office.\r\n",
"8 Sandra journeyed to the bathroom.\r\n",
"9 Where is Daniel? \thallway\t4\r\n",
"10 Mary moved to the hallway.\r\n",
"11 Daniel travelled to the office.\r\n",
"12 Where is Daniel? \toffice\t11\r\n",
"13 John went back to the garden.\r\n",
"14 John moved to the bedroom.\r\n",
"15 Where is Sandra? \tbathroom\t8\r\n",
"1 Sandra travelled to the office.\r\n",
"2 Sandra went to the bathroom.\r\n",
"3 Where is Sandra? \tbathroom\t2\r\n",
"4 Mary went to the bedroom.\r\n",
"5 Daniel moved to the hallway.\r\n"
]
}
],
"source": [
"! head -n 20 tasks_1-20_v1-2/en/qa1_single-supporting-fact_train.txt"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import numpy as np\n",
"import os\n",
"import tensorflow as tf\n",
"\n",
"from collections import namedtuple\n",
"from glob import glob\n",
"\n",
"np.random.seed(1234)\n",
"\n",
"class Story(namedtuple(\"Story\", [\"sentences\", \"question\", \"answer\"])):\n",
" \"\"\"Storyデータ.\n",
" \n",
" Args:\n",
" sentences: `[max_story_size, max_sentence_size]`\n",
" question: `[max_story_size]`\n",
" answer: scolar\n",
" \"\"\"\n",
" pass\n",
"\n",
"def get_vectorizer(data, max_sentence_size):\n",
" vectorizer = tf.contrib.learn.preprocessing.VocabularyProcessor(\n",
" max_sentence_size)\n",
" vectorizer.fit(sum([s + [q] + [a] for s, q, a in data], []))\n",
" return vectorizer\n",
"\n",
"def tokenize_data(vectorizer, data, max_sentence_size, max_story_size):\n",
" def zeros(s):\n",
" return (max_story_size - len(s)) * [[0] * max_sentence_size]\n",
" return [Story(\n",
" sentences=np.array(\n",
" # reverse sentences,reflecting their relative distance from the question\n",
" list(reversed(list(vectorizer.transform(s))))[:max_story_size] + zeros(s)),\n",
" question=list(vectorizer.transform([q]))[0],\n",
" answer=list(vectorizer.transform([a]))[0][0]) for s, q, a in data]\n",
"\n",
"def get_max_sentence_size(data):\n",
" return max(len(s.split()) for sentences, _, _ in data for s in sentences)\n",
"\n",
"def get_max_story_size(data):\n",
" return max(len(sentences) for sentences, _, _ in data)\n",
"\n",
"def load_task(task_id, data_path, story_size_threshold=50):\n",
" test_file_path = glob(os.path.join(data_path, task_id) + \"_*_test.txt\")\n",
" assert len(test_file_path) == 1\n",
" test_file_path = test_file_path[0]\n",
"\n",
" train_file_path = glob(os.path.join(data_path, task_id) + \"_*_train.txt\")\n",
" assert len(train_file_path) == 1\n",
" train_file_path = train_file_path[0]\n",
"\n",
" test_data = list(load_file(test_file_path))\n",
" train_data = list(load_file(train_file_path))\n",
"\n",
" max_story_size = min(\n",
" story_size_threshold, get_max_story_size(test_data + train_data))\n",
" max_sentence_size = get_max_sentence_size(test_data + train_data)\n",
" vectorizer = get_vectorizer(test_data + train_data,\n",
" max_sentence_size=max_sentence_size)\n",
"\n",
" tokenized_test_data = tokenize_data(vectorizer, test_data,\n",
" max_sentence_size=max_sentence_size,\n",
" max_story_size=max_story_size)\n",
" tokenized_train_data = tokenize_data(vectorizer, train_data,\n",
" max_sentence_size=max_sentence_size,\n",
" max_story_size=max_story_size)\n",
"\n",
" return (tokenized_test_data, tokenized_train_data, vectorizer,\n",
" max_story_size, max_sentence_size)\n",
"\n",
"def load_file(file_path):\n",
" with open(file_path) as f:\n",
" yield from parse_stories(f)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import re\n",
"\n",
"def find_sentence_id(line):\n",
" matched = re.search(r\"(^[0-9]+) \", line)\n",
" assert matched is not None\n",
" return int(matched.group(1))\n",
"\n",
"def find_sentence(line):\n",
" matched = re.search(r\"^[0-9]+ (.+)\", line)\n",
" assert matched is not None\n",
" return matched.group(1).rstrip(\".\").lower() # Remove period\n",
"\n",
"def find_question_answer(line):\n",
" matched = re.match(r\"^[0-9]+ ([^\\t]+) ?\\t([^\\t]+)\\t([0-9])\", line)\n",
" if matched is not None:\n",
" question, answer, supporting_fact_id = matched.groups()\n",
" assert question is not None\n",
" assert answer is not None\n",
" assert supporting_fact_id is not None\n",
" return (question.rstrip(\"?\").lower(), # Remove `?`\n",
" answer.lower(), supporting_fact_id)\n",
" else:\n",
" return None, None, None\n",
"\n",
"def parse_stories(file):\n",
" story_id = 0\n",
" sentences = []\n",
"\n",
" for line in file:\n",
" sentence_id = find_sentence_id(line)\n",
" \n",
" if sentence_id == 1: # Head of story\n",
" story_id += 1\n",
" sentences = []\n",
" \n",
" question, answer, _ = find_question_answer(line)\n",
"\n",
" if question is not None:\n",
" assert sentence_id != 1\n",
" yield sentences[:], question, answer\n",
" else:\n",
" sentences.append(find_sentence(line))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"#train data: 1000\n",
"#test data: 1000\n",
"#vocab 20\n"
]
}
],
"source": [
"test_data, train_data, vectorizer, max_story_size, max_sentence_size =\\\n",
" load_task(\"qa1\", \"tasks_1-20_v1-2/en\")\n",
"\n",
"print(\"#train data:\", len(train_data))\n",
"print(\"#test data:\", len(test_data))\n",
"print(\"#vocab\", len(vectorizer.vocabulary_))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Story 1\n",
"Sentences:\n",
"\t mary moved to the bathroom <UNK>\n",
"\t john went to the hallway <UNK>\n",
"Question:\n",
"\t where is mary <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bathroom\n",
"\n",
"Story 2\n",
"Sentences:\n",
"\t mary moved to the bathroom <UNK>\n",
"\t john went to the hallway <UNK>\n",
"\t daniel went back to the hallway\n",
"\t sandra moved to the garden <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"\n",
"Story 3\n",
"Sentences:\n",
"\t mary moved to the bathroom <UNK>\n",
"\t john went to the hallway <UNK>\n",
"\t daniel went back to the hallway\n",
"\t sandra moved to the garden <UNK>\n",
"\t john moved to the office <UNK>\n",
"\t sandra journeyed to the bathroom <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"\n"
]
}
],
"source": [
"import numpy as np\n",
"\n",
"# sanity check\n",
"for idx, (sentences, question, answer) in enumerate(train_data[:3], start=1):\n",
" print(\"Story {}\".format(idx))\n",
" print(\"Sentences:\")\n",
" lst = [np.squeeze(x) for x in np.vsplit(sentences, sentences.shape[0])]\n",
" for s in reversed(list(vectorizer.reverse(lst))):\n",
" if re.match(r\"^<UNK>\", s):\n",
" continue\n",
" print(\"\\t\", s)\n",
" print(\"Question:\\n\\t\", list(vectorizer.reverse([question]))[0])\n",
" print(\"Answer:\\n\\t\", list(vectorizer.reverse([np.array([answer])]))[0])\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Story(sentences=array([[ 1, 12, 3, 4, 5, 0],\n",
" [ 6, 14, 3, 4, 8, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0],\n",
" [ 0, 0, 0, 0, 0, 0]]), question=array([ 9, 10, 6, 0, 0, 0]), answer=8)\n",
"Shape of sentences: (10, 6)\n",
"Shape of question: (6,)\n",
"Answers: {5, 8, 15, 17, 18, 19}\n"
]
}
],
"source": [
"print(train_data[0])\n",
"print(\"Shape of sentences:\", train_data[0].sentences.shape)\n",
"print(\"Shape of question:\", train_data[0].question.shape)\n",
"print(\"Answers:\", set([x.answer for x in train_data + test_data]))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"\n",
"MiniBatchedStory = Story\n",
"\n",
"def mini_batches(data, mini_batch_size):\n",
" def mini_batched_data(d):\n",
" return MiniBatchedStory(\n",
" sentences=np.array([s for s, _, _ in d]),\n",
" question=np.array([q for _, q, _ in d]),\n",
" answer=np.array([a for _, _, a in d]),)\n",
"\n",
" assert len(data) > mini_batch_size\n",
"\n",
" np.random.shuffle(data)\n",
" num_mini_batches = len(data) // mini_batch_size\n",
"\n",
" for idx in range(0, num_mini_batches):\n",
" yield mini_batched_data(\n",
" data[idx * mini_batch_size:(idx + 1) * mini_batch_size])\n",
"\n",
" remain = mini_batched_data(data[(idx + 1) * mini_batch_size:])\n",
" \n",
" if len(remain) > 0:\n",
" yield remain"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Model"
]
},
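{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the per-hop update from the paper, which `inference` below roughly follows (BoW sentence encoding, temporal encoding $T_A, T_C$, adjacent weight tying $B = A^1$, $A^{k+1} = C^k$, $W^\\top = C^K$):\n",
"\n",
"$$m_i = \\sum_j A x_{ij} + T_A(i), \\qquad c_i = \\sum_j C x_{ij} + T_C(i), \\qquad u = \\sum_j B q_j$$\n",
"\n",
"$$p_i = \\mathrm{Softmax}(u^\\top m_i), \\qquad o = \\sum_i p_i c_i, \\qquad u^{k+1} = u^k + o^k, \\qquad \\hat{a} = \\mathrm{Softmax}(W(o^K + u^K))$$"
]
},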
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def bow(params, ids, dim, rank):\n",
" mask = tf.tile(tf.expand_dims(\n",
" tf.to_float(tf.sign(ids)), axis=rank), [1] * rank + [dim])\n",
" emb = tf.reduce_sum(\n",
" tf.nn.embedding_lookup(params, ids) * mask, axis=rank - 1)\n",
" return emb\n",
"\n",
"def inference(hparams, sentences, question, encode=bow):\n",
" \"\"\"inference\n",
" \n",
" Args:\n",
" hparams\n",
" sentences: `[batch_size, story_size, sentence_size]`\n",
" question: `[batch_size, sentence_size]`\n",
" encode: encoder for sentence representation\n",
" Returns:\n",
" logits: `[batch_size, vocab_size]`\n",
" \"\"\"\n",
" dim = hparams.dim\n",
" vocab_size = hparams.vocab_size\n",
" max_story_size = hparams.max_story_size\n",
"\n",
" def one_hop(A, C, T_a, T_c, u):\n",
" # memory, [batch_size, story_size, dim]\n",
" m = encode(A, sentences, dim=dim, rank=3)\n",
" m = m + T_a\n",
" # p, [batch_size, story_size]\n",
" p = tf.nn.softmax(tf.squeeze(\n",
" tf.matmul(tf.expand_dims(u, axis=1), m, transpose_b=True)))\n",
" p = tf.reshape(p, [-1, max_story_size])\n",
" # output, [batch_size, story_size, dim]\n",
" c = encode(C, sentences, dim=dim, rank=3)\n",
" c = c + T_c\n",
" # response, [batch_size, dim]\n",
" o = tf.squeeze(tf.matmul(tf.expand_dims(p, axis=1), c))\n",
" o = tf.reshape(o, [-1, dim])\n",
" return o\n",
"\n",
" # embedding matrix for memory\n",
" embedding_A = tf.get_variable(\"embedding_A_1\", [vocab_size, dim], tf.float32)\n",
" # embedding matrix for internal state, adjecent weight tying: B = A^1\n",
" embedding_B = embedding_A\n",
" # matrix for temporal information of memory\n",
" temporal_T_a = tf.get_variable(\"temporal_T_A_1\", [max_story_size, dim], tf.float32)\n",
" \n",
" # internal state, [batch_size, dim]\n",
" u = encode(embedding_B, question, dim=dim, rank=2)\n",
" u = tf.reshape(u, [-1, dim])\n",
"\n",
" for hop in range(1, hparams.K + 1):\n",
" # embedding matrix for output\n",
" embedding_C = tf.get_variable(\n",
" \"embedding_C_{}\".format(hop), [vocab_size, dim], tf.float32)\n",
" # matrix for temporal information of output\n",
" temporal_T_c = tf.get_variable(\n",
" \"Temporal_T_c_{}\".format(hop), [max_story_size, dim], tf.float32)\n",
" o = one_hop(embedding_A, embedding_C, temporal_T_a, temporal_T_c, u)\n",
" u = o + u\n",
" # adjecent weight tying: A^{k+1} = C^k\n",
" embedding_A = embedding_C\n",
" temporal_T_a = temporal_T_c\n",
"\n",
" # Prediction, adjecent weight ting; W = C^k\n",
" W = embedding_C\n",
" # a^ , [batch_size, vocab_size]\n",
" a_ = tf.transpose(tf.matmul(W, u, transpose_b=True))\n",
" a_ = tf.reshape(a_, [-1, vocab_size])\n",
" \n",
" return a_\n",
"\n",
"def loss(hparams, logits, labels):\n",
" \"\"\"loss\n",
" \n",
" Args:\n",
" hparams\n",
" logits: `[batch_size, vocab_size]`\n",
" labels: `[batch_size,]`\n",
" Returns:\n",
" loss: scalor\n",
" \"\"\" \n",
" vocab_size = hparams.vocab_size\n",
" cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n",
" logits=logits,\n",
" labels=tf.one_hot(labels, vocab_size))\n",
" return tf.reduce_mean(cross_entropy)"
]
},
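{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch with toy values to illustrate the `tf.sign` mask inside `bow`: padding ids (0) contribute nothing to the bag-of-words sum."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Toy check of bow(): vocab_size=5, dim=2, one sentence with two padding ids.\n",
"toy_params = tf.constant(np.arange(5 * 2, dtype=np.float32).reshape(5, 2))\n",
"toy_ids = tf.constant([[1, 2, 0, 0]])\n",
"with tf.Session() as toy_sess:\n",
"    # Expected: row 1 + row 2 = [2., 3.] + [4., 5.] = [6., 8.]\n",
"    print(toy_sess.run(bow(toy_params, toy_ids, dim=2, rank=2)))"
]
},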
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def train(hparams, loss):\n",
" optimizer = tf.train.GradientDescentOptimizer(\n",
" hparams.learning_rate)\n",
" grad_and_vars = optimizer.compute_gradients(loss)\n",
" grad_and_vars = [(tf.clip_by_norm(g, hparams.max_grad_norm), v)\n",
" for g, v in grad_and_vars]\n",
" train_op = optimizer.apply_gradients(grad_and_vars)\n",
" return train_op\n",
"\n",
"def inputs():\n",
" return (tf.placeholder(tf.int32, [None, None, None], \"sentences\"),\n",
" tf.placeholder(tf.int32, [None, None], \"question\"),\n",
" tf.placeholder(tf.int32, [None], \"answer\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Experiments"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"import itertools\n",
"\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"def train_and_evaluate_task(task_id, hparams, do_predict=False):\n",
" tf.reset_default_graph()\n",
" tf.set_random_seed(4321)\n",
"\n",
" test_data, train_data, vectorizer,\\\n",
" max_story_size, max_sentence_size =\\\n",
" load_task(task_id, \"tasks_1-20_v1-2/en\", story_size_threshold=50)\n",
" train_data, valid_data = train_test_split(\n",
" train_data, test_size=0.1, random_state=1234)\n",
"\n",
" hparams.max_story_size = max_story_size\n",
" hparams.vocab_size = len(vectorizer.vocabulary_)\n",
"\n",
" sentences, question, answer = inputs()\n",
"\n",
" with tf.variable_scope(\n",
" \"model\", initializer=tf.random_normal_initializer(0, 0.1)) as scope:\n",
" logits = inference(hparams, sentences, question)\n",
" _loss = loss(hparams, logits, answer)\n",
" train_op = train(hparams, _loss)\n",
"\n",
" valid_mini_batches = itertools.cycle(\n",
" mini_batches(valid_data, hparams.batch_size))\n",
"\n",
" init = tf.global_variables_initializer()\n",
"\n",
" sess = tf.InteractiveSession()\n",
" sess.run(init)\n",
"\n",
" step = 0\n",
" \n",
" for epoch in range(1, hparams.num_epochs + 1):\n",
" print(\"Epoch: \", epoch)\n",
" for mini_batch in mini_batches(train_data, hparams.batch_size):\n",
" train_loss, _ = sess.run([_loss, train_op], feed_dict={\n",
" sentences: mini_batch.sentences,\n",
" question: mini_batch.question,\n",
" answer: mini_batch.answer})\n",
" if (step + 1) % hparams.frequency_of_print_loss == 0:\n",
" valid_mini_batch = next(valid_mini_batches)\n",
" valid_loss = sess.run(_loss, feed_dict={\n",
" sentences: valid_mini_batch.sentences,\n",
" question: valid_mini_batch.question,\n",
" answer: valid_mini_batch.answer})\n",
" print(\"\\tStep: \", step % hparams.batch_size)\n",
" print(\"\\tTrain loss: \", train_loss)\n",
" print(\"\\tValidation loss: \", valid_loss)\n",
" step += 1\n",
"\n",
" def evaluate(data):\n",
" correct_predictions = []\n",
" for step, datum in enumerate(data, start=1):\n",
" a_ = sess.run(logits, feed_dict={\n",
" sentences: datum.sentences,\n",
" question: datum.question})\n",
" correct_predictions += np.equal(\n",
" datum.answer, np.argmax(a_, axis=1)).tolist()\n",
" return np.mean(correct_predictions)\n",
" \n",
" def predict():\n",
" valid_mini_batches = itertools.cycle(mini_batches(valid_data, 1))\n",
"\n",
" for i in range(20):\n",
" data = next(valid_mini_batches)\n",
" predict = sess.run(logits, feed_dict={\n",
" sentences: data.sentences,\n",
" question: data.question,\n",
" answer: data.answer})\n",
" print(\"Sentences:\")\n",
" lst = [np.squeeze(x) for x in np.vsplit(data.sentences[0], data.sentences[0].shape[0])]\n",
" for s in reversed(list(vectorizer.reverse(lst))):\n",
" if re.match(r\"^<UNK>\", s):\n",
" continue\n",
" print(\"\\t\", s)\n",
" print(\"Question:\\n\\t\", list(vectorizer.reverse([data.question[0]]))[0])\n",
" print(\"Answer:\\n\\t\", list(vectorizer.reverse([np.array([data.answer[0]])]))[0])\n",
" print(\"Predicted:\\n\\t\", list(vectorizer.reverse([np.array([np.argmax(predict[0])])]))[0])\n",
" print()\n",
"\n",
" if do_predict:\n",
" print(\"\\nPrediction:\\n\")\n",
" predict()\n",
" \n",
" return (evaluate(mini_batches(train_data, hparams.batch_size)),\n",
" evaluate(mini_batches(valid_data, hparams.batch_size)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Task 1"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 1\n",
"\tStep: 19\n",
"\tTrain loss: 2.96812\n",
"\tValidation loss: 2.93507\n",
"Epoch: 2\n",
"\tStep: 7\n",
"\tTrain loss: 2.87854\n",
"\tValidation loss: 2.85748\n",
"Epoch: 3\n",
"\tStep: 27\n",
"\tTrain loss: 2.82057\n",
"\tValidation loss: 2.81465\n",
"\tStep: 15\n",
"\tTrain loss: 2.77049\n",
"\tValidation loss: 2.74937\n",
"Epoch: 4\n",
"\tStep: 3\n",
"\tTrain loss: 2.66787\n",
"\tValidation loss: 2.69177\n",
"Epoch: 5\n",
"\tStep: 23\n",
"\tTrain loss: 2.57968\n",
"\tValidation loss: 2.55292\n",
"\tStep: 11\n",
"\tTrain loss: 2.4941\n",
"\tValidation loss: 2.44024\n",
"Epoch: 6\n",
"\tStep: 31\n",
"\tTrain loss: 2.33851\n",
"\tValidation loss: 2.36035\n",
"Epoch: 7\n",
"\tStep: 19\n",
"\tTrain loss: 2.2418\n",
"\tValidation loss: 2.25946\n",
"\tStep: 7\n",
"\tTrain loss: 2.1468\n",
"\tValidation loss: 2.1337\n",
"Epoch: 8\n",
"\tStep: 27\n",
"\tTrain loss: 2.02164\n",
"\tValidation loss: 1.99905\n",
"Epoch: 9\n",
"\tStep: 15\n",
"\tTrain loss: 1.9927\n",
"\tValidation loss: 2.02795\n",
"\tStep: 3\n",
"\tTrain loss: 1.91842\n",
"\tValidation loss: 1.92998\n",
"Epoch: 10\n",
"\tStep: 23\n",
"\tTrain loss: 1.87628\n",
"\tValidation loss: 1.9022\n",
"Epoch: 11\n",
"\tStep: 11\n",
"\tTrain loss: 1.83927\n",
"\tValidation loss: 1.81987\n",
"Epoch: 12\n",
"\tStep: 31\n",
"\tTrain loss: 1.80945\n",
"\tValidation loss: 1.89576\n",
"\tStep: 19\n",
"\tTrain loss: 1.79169\n",
"\tValidation loss: 1.79634\n",
"Epoch: 13\n",
"\tStep: 7\n",
"\tTrain loss: 1.78947\n",
"\tValidation loss: 1.8171\n",
"Epoch: 14\n",
"\tStep: 27\n",
"\tTrain loss: 1.70586\n",
"\tValidation loss: 1.74335\n",
"\tStep: 15\n",
"\tTrain loss: 1.73177\n",
"\tValidation loss: 1.82551\n",
"Epoch: 15\n",
"\tStep: 3\n",
"\tTrain loss: 1.74266\n",
"\tValidation loss: 1.7447\n",
"Epoch: 16\n",
"\tStep: 23\n",
"\tTrain loss: 1.71719\n",
"\tValidation loss: 1.75405\n",
"\tStep: 11\n",
"\tTrain loss: 1.66789\n",
"\tValidation loss: 1.696\n",
"Epoch: 17\n",
"\tStep: 31\n",
"\tTrain loss: 1.64804\n",
"\tValidation loss: 1.76841\n",
"Epoch: 18\n",
"\tStep: 19\n",
"\tTrain loss: 1.66418\n",
"\tValidation loss: 1.70519\n",
"\tStep: 7\n",
"\tTrain loss: 1.65871\n",
"\tValidation loss: 1.7062\n",
"Epoch: 19\n",
"\tStep: 27\n",
"\tTrain loss: 1.67197\n",
"\tValidation loss: 1.64129\n",
"Epoch: 20\n",
"\tStep: 15\n",
"\tTrain loss: 1.62802\n",
"\tValidation loss: 1.71114\n",
"\tStep: 3\n",
"\tTrain loss: 1.58409\n",
"\tValidation loss: 1.67574\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.64565\n",
"\tValidation loss: 1.64516\n",
"Epoch: 22\n",
"\tStep: 11\n",
"\tTrain loss: 1.59746\n",
"\tValidation loss: 1.57817\n",
"Epoch: 23\n",
"\tStep: 31\n",
"\tTrain loss: 1.57458\n",
"\tValidation loss: 1.76592\n",
"\tStep: 19\n",
"\tTrain loss: 1.37352\n",
"\tValidation loss: 1.612\n",
"Epoch: 24\n",
"\tStep: 7\n",
"\tTrain loss: 1.58072\n",
"\tValidation loss: 1.5858\n",
"Epoch: 25\n",
"\tStep: 27\n",
"\tTrain loss: 1.50994\n",
"\tValidation loss: 1.51001\n",
"\tStep: 15\n",
"\tTrain loss: 1.44424\n",
"\tValidation loss: 1.78646\n",
"Epoch: 26\n",
"\tStep: 3\n",
"\tTrain loss: 1.55097\n",
"\tValidation loss: 1.5935\n",
"Epoch: 27\n",
"\tStep: 23\n",
"\tTrain loss: 1.51576\n",
"\tValidation loss: 1.54786\n",
"\tStep: 11\n",
"\tTrain loss: 1.5699\n",
"\tValidation loss: 1.45227\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.37242\n",
"\tValidation loss: 1.77187\n",
"Epoch: 29\n",
"\tStep: 19\n",
"\tTrain loss: 1.53852\n",
"\tValidation loss: 1.56634\n",
"\tStep: 7\n",
"\tTrain loss: 1.38963\n",
"\tValidation loss: 1.50106\n",
"Epoch: 30\n",
"\tStep: 27\n",
"\tTrain loss: 1.33782\n",
"\tValidation loss: 1.40122\n",
"Epoch: 31\n",
"\tStep: 15\n",
"\tTrain loss: 1.61058\n",
"\tValidation loss: 1.86396\n",
"Epoch: 32\n",
"\tStep: 3\n",
"\tTrain loss: 1.31375\n",
"\tValidation loss: 1.51058\n",
"\tStep: 23\n",
"\tTrain loss: 1.53394\n",
"\tValidation loss: 1.48892\n",
"Epoch: 33\n",
"\tStep: 11\n",
"\tTrain loss: 1.26643\n",
"\tValidation loss: 1.34357\n",
"Epoch: 34\n",
"\tStep: 31\n",
"\tTrain loss: 1.3991\n",
"\tValidation loss: 1.85145\n",
"\tStep: 19\n",
"\tTrain loss: 1.46673\n",
"\tValidation loss: 1.51953\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.39502\n",
"\tValidation loss: 1.43972\n",
"Epoch: 36\n",
"\tStep: 27\n",
"\tTrain loss: 1.27928\n",
"\tValidation loss: 1.28535\n",
"\tStep: 15\n",
"\tTrain loss: 1.3721\n",
"\tValidation loss: 1.87132\n",
"Epoch: 37\n",
"\tStep: 3\n",
"\tTrain loss: 1.15105\n",
"\tValidation loss: 1.48812\n",
"Epoch: 38\n",
"\tStep: 23\n",
"\tTrain loss: 1.28274\n",
"\tValidation loss: 1.42713\n",
"\tStep: 11\n",
"\tTrain loss: 1.35292\n",
"\tValidation loss: 1.33023\n",
"Epoch: 39\n",
"\tStep: 31\n",
"\tTrain loss: 1.27308\n",
"\tValidation loss: 1.87945\n",
"Epoch: 40\n",
"\tStep: 19\n",
"\tTrain loss: 1.27273\n",
"\tValidation loss: 1.49049\n",
"\tStep: 7\n",
"\tTrain loss: 1.20915\n",
"\tValidation loss: 1.3991\n",
"Epoch: 41\n",
"\tStep: 27\n",
"\tTrain loss: 1.29843\n",
"\tValidation loss: 1.27894\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.16111\n",
"\tValidation loss: 1.89412\n",
"Epoch: 43\n",
"\tStep: 3\n",
"\tTrain loss: 1.2531\n",
"\tValidation loss: 1.43689\n",
"\tStep: 23\n",
"\tTrain loss: 1.20863\n",
"\tValidation loss: 1.3907\n",
"Epoch: 44\n",
"\tStep: 11\n",
"\tTrain loss: 1.27087\n",
"\tValidation loss: 1.27778\n",
"Epoch: 45\n",
"\tStep: 31\n",
"\tTrain loss: 1.14525\n",
"\tValidation loss: 1.86529\n",
"\tStep: 19\n",
"\tTrain loss: 1.16957\n",
"\tValidation loss: 1.40183\n",
"Epoch: 46\n",
"\tStep: 7\n",
"\tTrain loss: 1.20336\n",
"\tValidation loss: 1.37053\n",
"Epoch: 47\n",
"\tStep: 27\n",
"\tTrain loss: 1.11867\n",
"\tValidation loss: 1.26667\n",
"\tStep: 15\n",
"\tTrain loss: 1.17489\n",
"\tValidation loss: 1.88182\n",
"Epoch: 48\n",
"\tStep: 3\n",
"\tTrain loss: 0.924264\n",
"\tValidation loss: 1.3463\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.15743\n",
"\tValidation loss: 1.3743\n",
"\tStep: 11\n",
"\tTrain loss: 0.9277\n",
"\tValidation loss: 1.21701\n",
"Epoch: 50\n",
"\tStep: 31\n",
"\tTrain loss: 1.23364\n",
"\tValidation loss: 1.86515\n",
"Epoch: 51\n",
"\tStep: 19\n",
"\tTrain loss: 1.39673\n",
"\tValidation loss: 1.29697\n",
"Epoch: 52\n",
"\tStep: 7\n",
"\tTrain loss: 1.11399\n",
"\tValidation loss: 1.31502\n",
"\tStep: 27\n",
"\tTrain loss: 0.963557\n",
"\tValidation loss: 1.21402\n",
"Epoch: 53\n",
"\tStep: 15\n",
"\tTrain loss: 1.01184\n",
"\tValidation loss: 1.8502\n",
"Epoch: 54\n",
"\tStep: 3\n",
"\tTrain loss: 1.0836\n",
"\tValidation loss: 1.23838\n",
"\tStep: 23\n",
"\tTrain loss: 0.914242\n",
"\tValidation loss: 1.22848\n",
"Epoch: 55\n",
"\tStep: 11\n",
"\tTrain loss: 1.00203\n",
"\tValidation loss: 1.10236\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.918177\n",
"\tValidation loss: 1.85644\n",
"\tStep: 19\n",
"\tTrain loss: 0.903965\n",
"\tValidation loss: 1.14396\n",
"Epoch: 57\n",
"\tStep: 7\n",
"\tTrain loss: 1.13174\n",
"\tValidation loss: 1.1593\n",
"Epoch: 58\n",
"\tStep: 27\n",
"\tTrain loss: 1.21992\n",
"\tValidation loss: 1.00619\n",
"\tStep: 15\n",
"\tTrain loss: 1.07627\n",
"\tValidation loss: 1.83878\n",
"Epoch: 59\n",
"\tStep: 3\n",
"\tTrain loss: 0.853292\n",
"\tValidation loss: 1.061\n",
"Epoch: 60\n",
"\tStep: 23\n",
"\tTrain loss: 0.960672\n",
"\tValidation loss: 1.03252\n",
"\tStep: 11\n",
"\tTrain loss: 1.03171\n",
"\tValidation loss: 0.891628\n",
"Epoch: 61\n",
"\tStep: 31\n",
"\tTrain loss: 1.05054\n",
"\tValidation loss: 1.70316\n",
"Epoch: 62\n",
"\tStep: 19\n",
"\tTrain loss: 0.958406\n",
"\tValidation loss: 0.964959\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.860754\n",
"\tValidation loss: 0.948872\n",
"\tStep: 27\n",
"\tTrain loss: 0.847246\n",
"\tValidation loss: 0.756816\n",
"Epoch: 64\n",
"\tStep: 15\n",
"\tTrain loss: 0.564195\n",
"\tValidation loss: 1.54249\n",
"Epoch: 65\n",
"\tStep: 3\n",
"\tTrain loss: 0.806124\n",
"\tValidation loss: 0.843716\n",
"\tStep: 23\n",
"\tTrain loss: 0.57552\n",
"\tValidation loss: 0.936893\n",
"Epoch: 66\n",
"\tStep: 11\n",
"\tTrain loss: 0.924345\n",
"\tValidation loss: 0.667232\n",
"Epoch: 67\n",
"\tStep: 31\n",
"\tTrain loss: 0.708243\n",
"\tValidation loss: 1.25644\n",
"\tStep: 19\n",
"\tTrain loss: 0.675012\n",
"\tValidation loss: 0.73713\n",
"Epoch: 68\n",
"\tStep: 7\n",
"\tTrain loss: 0.689302\n",
"\tValidation loss: 0.855582\n",
"Epoch: 69\n",
"\tStep: 27\n",
"\tTrain loss: 0.5217\n",
"\tValidation loss: 0.621005\n",
"\tStep: 15\n",
"\tTrain loss: 0.719874\n",
"\tValidation loss: 1.14378\n",
"Epoch: 70\n",
"\tStep: 3\n",
"\tTrain loss: 0.569638\n",
"\tValidation loss: 0.633101\n",
"Epoch: 71\n",
"\tStep: 23\n",
"\tTrain loss: 0.660872\n",
"\tValidation loss: 0.754816\n",
"Epoch: 72\n",
"\tStep: 11\n",
"\tTrain loss: 0.480787\n",
"\tValidation loss: 0.477384\n",
"\tStep: 31\n",
"\tTrain loss: 0.380027\n",
"\tValidation loss: 0.821858\n",
"Epoch: 73\n",
"\tStep: 19\n",
"\tTrain loss: 0.530742\n",
"\tValidation loss: 0.529187\n",
"Epoch: 74\n",
"\tStep: 7\n",
"\tTrain loss: 0.370717\n",
"\tValidation loss: 0.68136\n",
"\tStep: 27\n",
"\tTrain loss: 0.26433\n",
"\tValidation loss: 0.379774\n",
"Epoch: 75\n",
"\tStep: 15\n",
"\tTrain loss: 0.299553\n",
"\tValidation loss: 0.516853\n",
"Epoch: 76\n",
"\tStep: 3\n",
"\tTrain loss: 0.429947\n",
"\tValidation loss: 0.427354\n",
"\tStep: 23\n",
"\tTrain loss: 0.429237\n",
"\tValidation loss: 0.557328\n",
"Epoch: 77\n",
"\tStep: 11\n",
"\tTrain loss: 0.300164\n",
"\tValidation loss: 0.259169\n",
"Epoch: 78\n",
"\tStep: 31\n",
"\tTrain loss: 0.177143\n",
"\tValidation loss: 0.397318\n",
"\tStep: 19\n",
"\tTrain loss: 0.321967\n",
"\tValidation loss: 0.333427\n",
"Epoch: 79\n",
"\tStep: 7\n",
"\tTrain loss: 0.462344\n",
"\tValidation loss: 0.466505\n",
"Epoch: 80\n",
"\tStep: 27\n",
"\tTrain loss: 0.183119\n",
"\tValidation loss: 0.192235\n",
"\tStep: 15\n",
"\tTrain loss: 0.331639\n",
"\tValidation loss: 0.257957\n",
"Epoch: 81\n",
"\tStep: 3\n",
"\tTrain loss: 0.173452\n",
"\tValidation loss: 0.249304\n",
"Epoch: 82\n",
"\tStep: 23\n",
"\tTrain loss: 0.129526\n",
"\tValidation loss: 0.365981\n",
"Epoch: 83\n",
"\tStep: 11\n",
"\tTrain loss: 0.163764\n",
"\tValidation loss: 0.12696\n",
"\tStep: 31\n",
"\tTrain loss: 0.144191\n",
"\tValidation loss: 0.209653\n",
"Epoch: 84\n",
"\tStep: 19\n",
"\tTrain loss: 0.14675\n",
"\tValidation loss: 0.179617\n",
"Epoch: 85\n",
"\tStep: 7\n",
"\tTrain loss: 0.239101\n",
"\tValidation loss: 0.266617\n",
"\tStep: 27\n",
"\tTrain loss: 0.181032\n",
"\tValidation loss: 0.0777291\n",
"Epoch: 86\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\tStep: 15\n",
"\tTrain loss: 0.158481\n",
"\tValidation loss: 0.148183\n",
"Epoch: 87\n",
"\tStep: 3\n",
"\tTrain loss: 0.130033\n",
"\tValidation loss: 0.138752\n",
"\tStep: 23\n",
"\tTrain loss: 0.0687625\n",
"\tValidation loss: 0.2415\n",
"Epoch: 88\n",
"\tStep: 11\n",
"\tTrain loss: 0.118796\n",
"\tValidation loss: 0.0523991\n",
"Epoch: 89\n",
"\tStep: 31\n",
"\tTrain loss: 0.142234\n",
"\tValidation loss: 0.104241\n",
"\tStep: 19\n",
"\tTrain loss: 0.167531\n",
"\tValidation loss: 0.122136\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.0598314\n",
"\tValidation loss: 0.200647\n",
"Epoch: 91\n",
"\tStep: 27\n",
"\tTrain loss: 0.0666134\n",
"\tValidation loss: 0.0401783\n",
"Epoch: 92\n",
"\tStep: 15\n",
"\tTrain loss: 0.0738868\n",
"\tValidation loss: 0.0904169\n",
"\tStep: 3\n",
"\tTrain loss: 0.0517351\n",
"\tValidation loss: 0.105055\n",
"Epoch: 93\n",
"\tStep: 23\n",
"\tTrain loss: 0.0739264\n",
"\tValidation loss: 0.181873\n",
"Epoch: 94\n",
"\tStep: 11\n",
"\tTrain loss: 0.0330149\n",
"\tValidation loss: 0.0326287\n",
"\tStep: 31\n",
"\tTrain loss: 0.0585638\n",
"\tValidation loss: 0.0753817\n",
"Epoch: 95\n",
"\tStep: 19\n",
"\tTrain loss: 0.10059\n",
"\tValidation loss: 0.0856605\n",
"Epoch: 96\n",
"\tStep: 7\n",
"\tTrain loss: 0.0396673\n",
"\tValidation loss: 0.168075\n",
"\tStep: 27\n",
"\tTrain loss: 0.029895\n",
"\tValidation loss: 0.0244707\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.0322218\n",
"\tValidation loss: 0.0681593\n",
"Epoch: 98\n",
"\tStep: 3\n",
"\tTrain loss: 0.0793147\n",
"\tValidation loss: 0.0784841\n",
"\tStep: 23\n",
"\tTrain loss: 0.0433926\n",
"\tValidation loss: 0.149478\n",
"Epoch: 99\n",
"\tStep: 11\n",
"\tTrain loss: 0.033868\n",
"\tValidation loss: 0.0217299\n",
"Epoch: 100\n",
"\tStep: 31\n",
"\tTrain loss: 0.0558287\n",
"\tValidation loss: 0.0469228\n",
"\tStep: 19\n",
"\tTrain loss: 0.061124\n",
"\tValidation loss: 0.0716867\n",
"\n",
"Prediction:\n",
"\n",
"Sentences:\n",
"\t mary went to the bathroom <UNK>\n",
"\t john went to the kitchen <UNK>\n",
"\t daniel moved to the office <UNK>\n",
"\t daniel moved to the bathroom <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bathroom\n",
"Predicted:\n",
"\t bathroom\n",
"\n",
"Sentences:\n",
"\t sandra went to the garden <UNK>\n",
"\t daniel journeyed to the office <UNK>\n",
"\t sandra travelled to the bathroom <UNK>\n",
"\t sandra travelled to the kitchen <UNK>\n",
"\t john travelled to the bedroom <UNK>\n",
"\t sandra went to the hallway <UNK>\n",
"\t john went back to the office\n",
"\t mary travelled to the hallway <UNK>\n",
"Question:\n",
"\t where is mary <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t mary went back to the kitchen\n",
"\t john went back to the office\n",
"\t daniel journeyed to the bedroom <UNK>\n",
"\t mary travelled to the bathroom <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bedroom\n",
"Predicted:\n",
"\t bedroom\n",
"\n",
"Sentences:\n",
"\t mary travelled to the bedroom <UNK>\n",
"\t daniel moved to the hallway <UNK>\n",
"Question:\n",
"\t where is mary <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bedroom\n",
"Predicted:\n",
"\t bedroom\n",
"\n",
"Sentences:\n",
"\t mary travelled to the garden <UNK>\n",
"\t daniel went to the office <UNK>\n",
"\t daniel travelled to the kitchen <UNK>\n",
"\t sandra journeyed to the kitchen <UNK>\n",
"\t mary went back to the hallway\n",
"\t daniel went to the bedroom <UNK>\n",
"\t mary moved to the bathroom <UNK>\n",
"\t sandra journeyed to the bathroom <UNK>\n",
"\t john journeyed to the garden <UNK>\n",
"\t sandra went to the garden <UNK>\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t garden\n",
"\n",
"Sentences:\n",
"\t daniel moved to the garden <UNK>\n",
"\t sandra moved to the hallway <UNK>\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t mary went back to the bedroom\n",
"\t mary travelled to the garden <UNK>\n",
"\t john journeyed to the kitchen <UNK>\n",
"\t sandra went back to the office\n",
"\t mary journeyed to the bedroom <UNK>\n",
"\t sandra travelled to the bathroom <UNK>\n",
"\t daniel journeyed to the garden <UNK>\n",
"\t mary went back to the hallway\n",
"Question:\n",
"\t where is mary <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t john journeyed to the bedroom <UNK>\n",
"\t mary went to the office <UNK>\n",
"\t john moved to the office <UNK>\n",
"\t mary went to the hallway <UNK>\n",
"\t sandra travelled to the office <UNK>\n",
"\t john travelled to the bedroom <UNK>\n",
"\t daniel travelled to the kitchen <UNK>\n",
"\t mary went to the kitchen <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t kitchen\n",
"Predicted:\n",
"\t kitchen\n",
"\n",
"Sentences:\n",
"\t sandra journeyed to the bathroom <UNK>\n",
"\t mary travelled to the hallway <UNK>\n",
"\t sandra moved to the kitchen <UNK>\n",
"\t mary journeyed to the garden <UNK>\n",
"\t john travelled to the office <UNK>\n",
"\t john went to the hallway <UNK>\n",
"\t daniel travelled to the bedroom <UNK>\n",
"\t john went back to the office\n",
"Question:\n",
"\t where is mary <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t garden\n",
"\n",
"Sentences:\n",
"\t john went to the kitchen <UNK>\n",
"\t john journeyed to the office <UNK>\n",
"\t daniel travelled to the bedroom <UNK>\n",
"\t john journeyed to the kitchen <UNK>\n",
"\t john journeyed to the office <UNK>\n",
"\t mary went back to the bathroom\n",
"\t john journeyed to the garden <UNK>\n",
"\t sandra went to the bathroom <UNK>\n",
"\t sandra journeyed to the office <UNK>\n",
"\t john went back to the bathroom\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t office\n",
"Predicted:\n",
"\t office\n",
"\n",
"Sentences:\n",
"\t john travelled to the garden <UNK>\n",
"\t daniel journeyed to the bedroom <UNK>\n",
"Question:\n",
"\t where is john <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t garden\n",
"\n",
"Sentences:\n",
"\t mary moved to the bedroom <UNK>\n",
"\t sandra travelled to the garden <UNK>\n",
"\t sandra journeyed to the kitchen <UNK>\n",
"\t sandra moved to the garden <UNK>\n",
"\t sandra travelled to the kitchen <UNK>\n",
"\t daniel went to the garden <UNK>\n",
"\t john went to the hallway <UNK>\n",
"\t mary travelled to the hallway <UNK>\n",
"Question:\n",
"\t where is john <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t sandra moved to the hallway <UNK>\n",
"\t john went back to the hallway\n",
"\t daniel moved to the garden <UNK>\n",
"\t daniel went to the bathroom <UNK>\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t hallway\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t john went to the office <UNK>\n",
"\t mary journeyed to the garden <UNK>\n",
"\t john went to the garden <UNK>\n",
"\t john journeyed to the hallway <UNK>\n",
"\t sandra went to the hallway <UNK>\n",
"\t john went to the bedroom <UNK>\n",
"\t sandra moved to the bedroom <UNK>\n",
"\t mary travelled to the hallway <UNK>\n",
"\t john travelled to the garden <UNK>\n",
"\t daniel went back to the bedroom\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bedroom\n",
"Predicted:\n",
"\t bedroom\n",
"\n",
"Sentences:\n",
"\t john went back to the hallway\n",
"\t daniel journeyed to the bedroom <UNK>\n",
"\t sandra went back to the bedroom\n",
"\t sandra moved to the bathroom <UNK>\n",
"\t john went to the office <UNK>\n",
"\t john travelled to the garden <UNK>\n",
"Question:\n",
"\t where is john <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t garden\n",
"\n",
"Sentences:\n",
"\t john went back to the hallway\n",
"\t sandra journeyed to the kitchen <UNK>\n",
"\t daniel journeyed to the office <UNK>\n",
"\t john went to the kitchen <UNK>\n",
"\t daniel journeyed to the garden <UNK>\n",
"\t john travelled to the bedroom <UNK>\n",
"\t daniel journeyed to the office <UNK>\n",
"\t daniel went to the bedroom <UNK>\n",
"\t sandra travelled to the hallway <UNK>\n",
"\t sandra journeyed to the bedroom <UNK>\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bedroom\n",
"Predicted:\n",
"\t bedroom\n",
"\n",
"Sentences:\n",
"\t sandra travelled to the bathroom <UNK>\n",
"\t sandra moved to the kitchen <UNK>\n",
"\t sandra journeyed to the bathroom <UNK>\n",
"\t sandra journeyed to the bedroom <UNK>\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t bedroom\n",
"Predicted:\n",
"\t bedroom\n",
"\n",
"Sentences:\n",
"\t mary moved to the bathroom <UNK>\n",
"\t sandra went to the hallway <UNK>\n",
"\t mary went back to the kitchen\n",
"\t daniel went back to the kitchen\n",
"\t john went back to the bathroom\n",
"\t daniel journeyed to the office <UNK>\n",
"\t daniel journeyed to the bathroom <UNK>\n",
"\t daniel went back to the garden\n",
"Question:\n",
"\t where is daniel <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t garden\n",
"\n",
"Sentences:\n",
"\t mary went back to the hallway\n",
"\t sandra journeyed to the garden <UNK>\n",
"\t sandra travelled to the bathroom <UNK>\n",
"\t john travelled to the hallway <UNK>\n",
"\t john travelled to the garden <UNK>\n",
"\t mary journeyed to the bedroom <UNK>\n",
"\t sandra journeyed to the office <UNK>\n",
"\t daniel journeyed to the hallway <UNK>\n",
"Question:\n",
"\t where is john <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t garden\n",
"Predicted:\n",
"\t hallway\n",
"\n",
"Sentences:\n",
"\t sandra moved to the bedroom <UNK>\n",
"\t john travelled to the office <UNK>\n",
"\t daniel travelled to the hallway <UNK>\n",
"\t mary went to the hallway <UNK>\n",
"\t mary travelled to the office <UNK>\n",
"\t sandra moved to the kitchen <UNK>\n",
"\t john went back to the hallway\n",
"\t daniel moved to the garden <UNK>\n",
"Question:\n",
"\t where is sandra <UNK> <UNK> <UNK>\n",
"Answer:\n",
"\t kitchen\n",
"Predicted:\n",
"\t kitchen\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy:\n",
"\tTrain set: 0.997777777778\n",
"\tValidation set: 0.99\n"
]
}
],
"source": [
"hparams = tf.contrib.training.HParams(\n",
" num_epochs=100,\n",
" batch_size=32,\n",
" learning_rate=0.01,\n",
" K=3,\n",
" dim=20,\n",
" max_grad_norm=40.0,\n",
" frequency_of_print_loss=20)\n",
"\n",
"train_acc, valid_acc = train_and_evaluate_task(\"qa1\", hparams, do_predict=True)\n",
"\n",
"print(\"Accuracy:\")\n",
"print(\"\\tTrain set:\", train_acc)\n",
"print(\"\\tValidation set:\", valid_acc)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### All tasks"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Run task 1\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.09745\n",
"\tValidation loss: 2.12545\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.75791\n",
"\tValidation loss: 1.79225\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.59937\n",
"\tValidation loss: 1.57655\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.41682\n",
"\tValidation loss: 1.78423\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.37144\n",
"\tValidation loss: 1.36076\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.21635\n",
"\tValidation loss: 1.53782\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.07945\n",
"\tValidation loss: 1.20168\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.31766\n",
"\tValidation loss: 2.28588\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.818008\n",
"\tValidation loss: 0.753366\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.737409\n",
"\tValidation loss: 0.826736\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.460335\n",
"\tValidation loss: 0.387317\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.192214\n",
"\tValidation loss: 0.341945\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.0860833\n",
"\tValidation loss: 0.102556\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.0438907\n",
"\tValidation loss: 0.0963219\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 2\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.3495\n",
"\tValidation loss: 2.30812\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.78481\n",
"\tValidation loss: 1.80927\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.81111\n",
"\tValidation loss: 1.74961\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.74176\n",
"\tValidation loss: 1.81599\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.76032\n",
"\tValidation loss: 1.73414\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.70809\n",
"\tValidation loss: 1.71211\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.65094\n",
"\tValidation loss: 1.70456\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.49194\n",
"\tValidation loss: 1.87351\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.66357\n",
"\tValidation loss: 1.61599\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.55717\n",
"\tValidation loss: 1.64459\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.50871\n",
"\tValidation loss: 1.65319\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.57756\n",
"\tValidation loss: 1.96735\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 1.43108\n",
"\tValidation loss: 1.52769\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 1.53321\n",
"\tValidation loss: 1.58856\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 3\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.92206\n",
"\tValidation loss: 1.98312\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.79454\n",
"\tValidation loss: 1.82708\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.76797\n",
"\tValidation loss: 1.79812\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.83004\n",
"\tValidation loss: 1.78264\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.79124\n",
"\tValidation loss: 1.79182\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.73042\n",
"\tValidation loss: 1.76931\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.77441\n",
"\tValidation loss: 1.7658\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.75626\n",
"\tValidation loss: 1.78524\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.71753\n",
"\tValidation loss: 1.81564\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.81942\n",
"\tValidation loss: 1.77268\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.65342\n",
"\tValidation loss: 1.77415\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.77645\n",
"\tValidation loss: 1.79659\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 1.73795\n",
"\tValidation loss: 1.81495\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 1.6262\n",
"\tValidation loss: 1.74289\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 4\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.78426\n",
"\tValidation loss: 1.77209\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.61588\n",
"\tValidation loss: 1.63196\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.46773\n",
"\tValidation loss: 1.54088\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.43549\n",
"\tValidation loss: 1.36773\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.26368\n",
"\tValidation loss: 1.2959\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.26631\n",
"\tValidation loss: 1.20291\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.00726\n",
"\tValidation loss: 1.25319\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.865727\n",
"\tValidation loss: 1.31095\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.21535\n",
"\tValidation loss: 1.08631\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.761224\n",
"\tValidation loss: 0.949992\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.13567\n",
"\tValidation loss: 1.06629\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.72156\n",
"\tValidation loss: 0.948872\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.657984\n",
"\tValidation loss: 0.880444\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.754788\n",
"\tValidation loss: 0.787512\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 5\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.29036\n",
"\tValidation loss: 2.24511\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.42597\n",
"\tValidation loss: 1.44302\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.02837\n",
"\tValidation loss: 1.20217\n",
"Epoch: 22\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.912609\n",
"\tValidation loss: 0.576921\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.763801\n",
"\tValidation loss: 0.628885\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.602095\n",
"\tValidation loss: 0.825071\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.553963\n",
"\tValidation loss: 0.563324\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.460653\n",
"\tValidation loss: 0.21192\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.464378\n",
"\tValidation loss: 0.317727\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.307548\n",
"\tValidation loss: 0.588223\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.221775\n",
"\tValidation loss: 0.361621\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.313985\n",
"\tValidation loss: 0.140192\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.51238\n",
"\tValidation loss: 0.253537\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.256771\n",
"\tValidation loss: 0.517277\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 6\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 0.735899\n",
"\tValidation loss: 0.743474\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.711384\n",
"\tValidation loss: 0.70331\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.688483\n",
"\tValidation loss: 0.717613\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.721462\n",
"\tValidation loss: 0.66692\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.701683\n",
"\tValidation loss: 0.675967\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.677627\n",
"\tValidation loss: 0.695411\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.68502\n",
"\tValidation loss: 0.69769\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.684875\n",
"\tValidation loss: 0.785643\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.685205\n",
"\tValidation loss: 0.711194\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.702132\n",
"\tValidation loss: 0.692586\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.670091\n",
"\tValidation loss: 0.699896\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.688951\n",
"\tValidation loss: 0.75874\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.726582\n",
"\tValidation loss: 0.659828\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.685139\n",
"\tValidation loss: 0.696006\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 7\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 0.945657\n",
"\tValidation loss: 0.920799\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.861676\n",
"\tValidation loss: 0.991134\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.01812\n",
"\tValidation loss: 1.09993\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.829908\n",
"\tValidation loss: 0.778051\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.14204\n",
"\tValidation loss: 0.785192\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.775537\n",
"\tValidation loss: 0.873105\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.747437\n",
"\tValidation loss: 1.02669\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.906569\n",
"\tValidation loss: 0.71628\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.701658\n",
"\tValidation loss: 0.717117\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.77082\n",
"\tValidation loss: 0.745128\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.698296\n",
"\tValidation loss: 0.914132\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.692145\n",
"\tValidation loss: 0.635045\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.532061\n",
"\tValidation loss: 0.614327\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.651422\n",
"\tValidation loss: 0.61374\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 8\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.68324\n",
"\tValidation loss: 1.75375\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.36222\n",
"\tValidation loss: 1.32231\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.17422\n",
"\tValidation loss: 1.20801\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.1057\n",
"\tValidation loss: 0.680838\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.0264\n",
"\tValidation loss: 0.984369\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.90949\n",
"\tValidation loss: 0.86687\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.734267\n",
"\tValidation loss: 0.983495\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.90845\n",
"\tValidation loss: 0.343099\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.949722\n",
"\tValidation loss: 0.927898\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.660181\n",
"\tValidation loss: 0.806514\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.581601\n",
"\tValidation loss: 0.929486\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.677291\n",
"\tValidation loss: 0.238451\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.627333\n",
"\tValidation loss: 0.800817\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.763266\n",
"\tValidation loss: 0.723536\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 9\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 0.775386\n",
"\tValidation loss: 0.707875\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.673706\n",
"\tValidation loss: 0.613081\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.622948\n",
"\tValidation loss: 0.638484\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.698392\n",
"\tValidation loss: 0.366911\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.619152\n",
"\tValidation loss: 0.65195\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.644017\n",
"\tValidation loss: 0.604418\n",
"Epoch: 43\n",
"Epoch: 44\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.725456\n",
"\tValidation loss: 0.642419\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.705424\n",
"\tValidation loss: 0.413693\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.557618\n",
"\tValidation loss: 0.640907\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.546577\n",
"\tValidation loss: 0.608255\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.639506\n",
"\tValidation loss: 0.64173\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.687607\n",
"\tValidation loss: 0.406511\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.505479\n",
"\tValidation loss: 0.641245\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.62756\n",
"\tValidation loss: 0.638775\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 10\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.09619\n",
"\tValidation loss: 0.908637\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.919987\n",
"\tValidation loss: 1.07118\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.894098\n",
"\tValidation loss: 0.941657\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.850006\n",
"\tValidation loss: 1.47137\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.956898\n",
"\tValidation loss: 0.84856\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.845975\n",
"\tValidation loss: 1.03952\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.933435\n",
"\tValidation loss: 0.896343\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.965738\n",
"\tValidation loss: 1.22259\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.955382\n",
"\tValidation loss: 0.773395\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.886383\n",
"\tValidation loss: 1.05286\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.976238\n",
"\tValidation loss: 0.821038\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.796923\n",
"\tValidation loss: 1.36117\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.90571\n",
"\tValidation loss: 0.813749\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.716779\n",
"\tValidation loss: 1.0163\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 11\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.11904\n",
"\tValidation loss: 2.0668\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.70595\n",
"\tValidation loss: 1.63197\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.40536\n",
"\tValidation loss: 1.48439\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.16229\n",
"\tValidation loss: 1.20174\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.25209\n",
"\tValidation loss: 0.910793\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.781264\n",
"\tValidation loss: 0.697028\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.707495\n",
"\tValidation loss: 0.979629\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.723453\n",
"\tValidation loss: 0.326824\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.655104\n",
"\tValidation loss: 0.579259\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.618374\n",
"\tValidation loss: 0.347346\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.3067\n",
"\tValidation loss: 0.597763\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.334479\n",
"\tValidation loss: 0.0922885\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.242681\n",
"\tValidation loss: 0.292515\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.422009\n",
"\tValidation loss: 0.287349\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 12\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.07485\n",
"\tValidation loss: 2.05673\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.71441\n",
"\tValidation loss: 1.80016\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.50978\n",
"\tValidation loss: 1.54465\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.28093\n",
"\tValidation loss: 1.70649\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.04177\n",
"\tValidation loss: 1.37963\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.04249\n",
"\tValidation loss: 1.0457\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.915971\n",
"\tValidation loss: 1.00001\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.859279\n",
"\tValidation loss: 1.62086\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.903003\n",
"\tValidation loss: 1.40421\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.31637\n",
"\tValidation loss: 0.964259\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.707548\n",
"\tValidation loss: 0.977897\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.66689\n",
"\tValidation loss: 0.836086\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.628363\n",
"\tValidation loss: 0.771519\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.427775\n",
"\tValidation loss: 0.391545\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 13\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.96478\n",
"\tValidation loss: 2.0232\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.42362\n",
"\tValidation loss: 1.43187\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.955573\n",
"\tValidation loss: 1.00049\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.595268\n",
"\tValidation loss: 0.395731\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.457691\n",
"\tValidation loss: 0.368375\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.507181\n",
"\tValidation loss: 0.380129\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.565933\n",
"\tValidation loss: 0.530777\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.419306\n",
"\tValidation loss: 0.0907823\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.415034\n",
"\tValidation loss: 0.307424\n",
"Epoch: 64\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.832884\n",
"\tValidation loss: 0.319421\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.254921\n",
"\tValidation loss: 0.460128\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.261401\n",
"\tValidation loss: 0.0720408\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.335938\n",
"\tValidation loss: 0.277187\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.0940465\n",
"\tValidation loss: 0.275292\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 14\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.93513\n",
"\tValidation loss: 1.92591\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.79169\n",
"\tValidation loss: 1.79609\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.7519\n",
"\tValidation loss: 1.78999\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.6906\n",
"\tValidation loss: 1.79725\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.71098\n",
"\tValidation loss: 1.66699\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.69356\n",
"\tValidation loss: 1.69907\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.59893\n",
"\tValidation loss: 1.75684\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.66752\n",
"\tValidation loss: 1.6827\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.54356\n",
"\tValidation loss: 1.52372\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.37366\n",
"\tValidation loss: 1.66381\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.38949\n",
"\tValidation loss: 1.56952\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.32237\n",
"\tValidation loss: 1.24872\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 1.35548\n",
"\tValidation loss: 1.271\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.978611\n",
"\tValidation loss: 1.46689\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 15\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.51894\n",
"\tValidation loss: 1.51713\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.39587\n",
"\tValidation loss: 1.39438\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.39498\n",
"\tValidation loss: 1.40152\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.39204\n",
"\tValidation loss: 1.40699\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.36054\n",
"\tValidation loss: 1.39373\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.39881\n",
"\tValidation loss: 1.37155\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.36003\n",
"\tValidation loss: 1.39549\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.38701\n",
"\tValidation loss: 1.36431\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.32804\n",
"\tValidation loss: 1.37424\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.37226\n",
"\tValidation loss: 1.35781\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.33349\n",
"\tValidation loss: 1.36294\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.351\n",
"\tValidation loss: 1.27252\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 1.35741\n",
"\tValidation loss: 1.33358\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 1.27142\n",
"\tValidation loss: 1.35104\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 16\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.47224\n",
"\tValidation loss: 1.53037\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.398\n",
"\tValidation loss: 1.39796\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.29688\n",
"\tValidation loss: 1.25525\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.24834\n",
"\tValidation loss: 1.38022\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.01439\n",
"\tValidation loss: 1.17662\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.09838\n",
"\tValidation loss: 1.25426\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.10821\n",
"\tValidation loss: 1.07391\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.13069\n",
"\tValidation loss: 1.23622\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.00809\n",
"\tValidation loss: 1.03255\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.09481\n",
"\tValidation loss: 1.20616\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.14692\n",
"\tValidation loss: 0.997377\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.10374\n",
"\tValidation loss: 1.07237\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.99194\n",
"\tValidation loss: 0.979406\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.982479\n",
"\tValidation loss: 1.12188\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 17\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 0.726126\n",
"\tValidation loss: 0.717924\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.680442\n",
"\tValidation loss: 0.708188\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.693506\n",
"\tValidation loss: 0.73121\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.706719\n",
"\tValidation loss: 0.784537\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.677641\n",
"\tValidation loss: 0.72004\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.676487\n",
"\tValidation loss: 0.689275\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.727653\n",
"\tValidation loss: 0.735775\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.694395\n",
"\tValidation loss: 0.679532\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.658741\n",
"\tValidation loss: 0.714005\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.70855\n",
"\tValidation loss: 0.702908\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.677652\n",
"\tValidation loss: 0.737967\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.648161\n",
"\tValidation loss: 0.823487\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.70273\n",
"\tValidation loss: 0.718944\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.737034\n",
"\tValidation loss: 0.696736\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 18\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 0.732534\n",
"\tValidation loss: 0.714979\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 0.678001\n",
"\tValidation loss: 0.691264\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 0.711304\n",
"\tValidation loss: 0.681295\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.67804\n",
"\tValidation loss: 0.68868\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.686062\n",
"\tValidation loss: 0.72617\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.65627\n",
"\tValidation loss: 0.689436\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.668023\n",
"\tValidation loss: 0.677626\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.741086\n",
"\tValidation loss: 0.672936\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.658086\n",
"\tValidation loss: 0.76492\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.72921\n",
"\tValidation loss: 0.693975\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.668003\n",
"\tValidation loss: 0.692102\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.628705\n",
"\tValidation loss: 0.662707\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.602274\n",
"\tValidation loss: 0.668545\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.593441\n",
"\tValidation loss: 0.707749\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 19\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 1.41358\n",
"\tValidation loss: 1.43216\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.43137\n",
"\tValidation loss: 1.41079\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.42335\n",
"\tValidation loss: 1.40029\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 1.37357\n",
"\tValidation loss: 1.3713\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 1.40403\n",
"\tValidation loss: 1.39853\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 1.38505\n",
"\tValidation loss: 1.41271\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 1.39842\n",
"\tValidation loss: 1.39978\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 1.38602\n",
"\tValidation loss: 1.34684\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 1.46978\n",
"\tValidation loss: 1.39647\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 1.35913\n",
"\tValidation loss: 1.38574\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 1.36556\n",
"\tValidation loss: 1.40434\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 1.3869\n",
"\tValidation loss: 1.29694\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 1.35454\n",
"\tValidation loss: 1.38766\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 1.36392\n",
"\tValidation loss: 1.40284\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n",
"Run task 20\n",
"Epoch: 1\n",
"Epoch: 2\n",
"Epoch: 3\n",
"Epoch: 4\n",
"Epoch: 5\n",
"Epoch: 6\n",
"Epoch: 7\n",
"\tStep: 7\n",
"\tTrain loss: 2.80582\n",
"\tValidation loss: 2.86694\n",
"Epoch: 8\n",
"Epoch: 9\n",
"Epoch: 10\n",
"Epoch: 11\n",
"Epoch: 12\n",
"Epoch: 13\n",
"Epoch: 14\n",
"\tStep: 15\n",
"\tTrain loss: 1.52916\n",
"\tValidation loss: 1.67154\n",
"Epoch: 15\n",
"Epoch: 16\n",
"Epoch: 17\n",
"Epoch: 18\n",
"Epoch: 19\n",
"Epoch: 20\n",
"Epoch: 21\n",
"\tStep: 23\n",
"\tTrain loss: 1.32422\n",
"\tValidation loss: 1.15923\n",
"Epoch: 22\n",
"Epoch: 23\n",
"Epoch: 24\n",
"Epoch: 25\n",
"Epoch: 26\n",
"Epoch: 27\n",
"Epoch: 28\n",
"\tStep: 31\n",
"\tTrain loss: 0.93362\n",
"\tValidation loss: 1.13054\n",
"Epoch: 29\n",
"Epoch: 30\n",
"Epoch: 31\n",
"Epoch: 32\n",
"Epoch: 33\n",
"Epoch: 34\n",
"Epoch: 35\n",
"\tStep: 7\n",
"\tTrain loss: 0.860604\n",
"\tValidation loss: 0.715872\n",
"Epoch: 36\n",
"Epoch: 37\n",
"Epoch: 38\n",
"Epoch: 39\n",
"Epoch: 40\n",
"Epoch: 41\n",
"Epoch: 42\n",
"\tStep: 15\n",
"\tTrain loss: 0.496623\n",
"\tValidation loss: 0.543028\n",
"Epoch: 43\n",
"Epoch: 44\n",
"Epoch: 45\n",
"Epoch: 46\n",
"Epoch: 47\n",
"Epoch: 48\n",
"Epoch: 49\n",
"\tStep: 23\n",
"\tTrain loss: 0.25702\n",
"\tValidation loss: 0.29978\n",
"Epoch: 50\n",
"Epoch: 51\n",
"Epoch: 52\n",
"Epoch: 53\n",
"Epoch: 54\n",
"Epoch: 55\n",
"Epoch: 56\n",
"\tStep: 31\n",
"\tTrain loss: 0.15449\n",
"\tValidation loss: 0.190065\n",
"Epoch: 57\n",
"Epoch: 58\n",
"Epoch: 59\n",
"Epoch: 60\n",
"Epoch: 61\n",
"Epoch: 62\n",
"Epoch: 63\n",
"\tStep: 7\n",
"\tTrain loss: 0.0990822\n",
"\tValidation loss: 0.103068\n",
"Epoch: 64\n",
"Epoch: 65\n",
"Epoch: 66\n",
"Epoch: 67\n",
"Epoch: 68\n",
"Epoch: 69\n",
"\tStep: 15\n",
"\tTrain loss: 0.0778447\n",
"\tValidation loss: 0.115714\n",
"Epoch: 70\n",
"Epoch: 71\n",
"Epoch: 72\n",
"Epoch: 73\n",
"Epoch: 74\n",
"Epoch: 75\n",
"Epoch: 76\n",
"\tStep: 23\n",
"\tTrain loss: 0.0445239\n",
"\tValidation loss: 0.0771226\n",
"Epoch: 77\n",
"Epoch: 78\n",
"Epoch: 79\n",
"Epoch: 80\n",
"Epoch: 81\n",
"Epoch: 82\n",
"Epoch: 83\n",
"\tStep: 31\n",
"\tTrain loss: 0.0353877\n",
"\tValidation loss: 0.0408647\n",
"Epoch: 84\n",
"Epoch: 85\n",
"Epoch: 86\n",
"Epoch: 87\n",
"Epoch: 88\n",
"Epoch: 89\n",
"Epoch: 90\n",
"\tStep: 7\n",
"\tTrain loss: 0.0252387\n",
"\tValidation loss: 0.0315219\n",
"Epoch: 91\n",
"Epoch: 92\n",
"Epoch: 93\n",
"Epoch: 94\n",
"Epoch: 95\n",
"Epoch: 96\n",
"Epoch: 97\n",
"\tStep: 15\n",
"\tTrain loss: 0.0284599\n",
"\tValidation loss: 0.0459488\n",
"Epoch: 98\n",
"Epoch: 99\n",
"Epoch: 100\n",
"\n"
]
},
{
"data": {
"text/html": [
"<table>\n",
"<tr><td>Task</td><td>Acc Train </td><td>Acc Validation</td></tr>\n",
"<tr><td>qa1 </td><td>0.9977777777777778 </td><td>0.99 </td></tr>\n",
"<tr><td>qa2 </td><td>0.46 </td><td>0.34 </td></tr>\n",
"<tr><td>qa3 </td><td>0.2822222222222222 </td><td>0.23 </td></tr>\n",
"<tr><td>qa4 </td><td>0.7288888888888889 </td><td>0.6 </td></tr>\n",
"<tr><td>qa5 </td><td>0.89 </td><td>0.82 </td></tr>\n",
"<tr><td>qa6 </td><td>0.5888888888888889 </td><td>0.55 </td></tr>\n",
"<tr><td>qa7 </td><td>0.7955555555555556 </td><td>0.69 </td></tr>\n",
"<tr><td>qa8 </td><td>0.7355555555555555 </td><td>0.72 </td></tr>\n",
"<tr><td>qa9 </td><td>0.6677777777777778 </td><td>0.66 </td></tr>\n",
"<tr><td>qa10</td><td>0.53 </td><td>0.56 </td></tr>\n",
"<tr><td>qa11</td><td>0.8922222222222222 </td><td>0.87 </td></tr>\n",
"<tr><td>qa12</td><td>0.8955555555555555 </td><td>0.8 </td></tr>\n",
"<tr><td>qa13</td><td>0.9344444444444444 </td><td>0.93 </td></tr>\n",
"<tr><td>qa14</td><td>0.6833333333333333 </td><td>0.46 </td></tr>\n",
"<tr><td>qa15</td><td>0.4111111111111111 </td><td>0.44 </td></tr>\n",
"<tr><td>qa16</td><td>0.5144444444444445 </td><td>0.53 </td></tr>\n",
"<tr><td>qa17</td><td>0.5255555555555556 </td><td>0.51 </td></tr>\n",
"<tr><td>qa18</td><td>0.53 </td><td>0.5 </td></tr>\n",
"<tr><td>qa19</td><td>0.32555555555555554</td><td>0.29 </td></tr>\n",
"<tr><td>qa20</td><td>1.0 </td><td>1.0 </td></tr>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import tabulate\n",
"\n",
"from IPython.display import HTML, display\n",
"\n",
"hparams.frequency_of_print_loss = 200\n",
"\n",
"head = [\"Task\", \"Acc Train\", \"Acc Validation\"]\n",
"table = []\n",
"\n",
"for id in range(1, 21):\n",
" print(\"Run task {}\".format(id))\n",
" table.append([\"qa{}\".format(id)]\n",
" + list(train_and_evaluate_task(\"qa{}\".format(id), hparams)))\n",
" print(\"\")\n",
"\n",
"display(HTML(tabulate.tabulate([head] + table, tablefmt=\"html\")))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}