This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Updated to work with TF 1.4
# Working example for my blog post at:
# http://danijar.com/variable-sequence-lengths-in-tensorflow/
import functools | |
import sets | |
import tensorflow as tf | |
from tensorflow import nn | |
def lazy_property(function): |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Fragment: feed `object` to `dog` when edible, otherwise let the dog
// destroy it. `dog` and `object` are defined elsewhere (not visible here).
// NOTE(review): `== true` is a loose comparison; kept as written so that
// behavior for non-boolean `edible` values is unchanged.
if (object.edible == true) {
    dog.eat(object);
} else {
    dog.tearToShreds(object);
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
# Create (if needed) and enter the language_model working directory.
# Assume you are in the my_project directory.
newdir="language_model"

# -p makes mkdir a no-op when the directory already exists,
# replacing the original explicit `[ ! -d ... ]` test.
mkdir -p "$newdir"

# Guard the cd: if it fails, stop instead of letting any subsequent
# commands run in the wrong directory (ShellCheck SC2164).
cd "$newdir" || exit 1
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os | |
import sys | |
import numpy as np | |
import tensorflow as tf | |
# From lm_1b | |
import language_model.lm_1b.data_utils as data_utils | |
from six.moves import xrange | |
from google.protobuf import text_format |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# For saving demo resources, use batch size 1 and step 1.
BATCH_SIZE = 1      # sentences processed per forward pass
NUM_TIMESTEPS = 1   # words fed to the model per step
MAX_WORD_LEN = 50   # maximum word length, in characters

# File paths for the pretrained lm_1b model assets.
vocab_file = "language_model/data/vocab-2016-09-10.txt"  # vocabulary list
save_dir = "language_model/output"                       # output directory
pbtxt = "language_model/data/graph-2016-09-10.pbtxt"     # GraphDef, text format
ckpt = "language_model/data/ckpt-*"                      # checkpoint shard glob
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Run the language model forward over `sentence`, one word id per step.
# NOTE(review): this definition is truncated by the scrape — the loop body
# continues past the visible lines, and `vocab` and `inputs` are defined
# elsewhere in the original file; do not edit logic from this view alone.
# NOTE(review): word_ids/char_ids are computed BEFORE '<S>' is prepended
# to `sentence`, so the start token never reaches the id lists — looks
# like an ordering bug; confirm against the original demo source.
# The trailing "| |" markers are table artifacts from the scrape, not code.
def forward(sentence): | |
  # Tokenize characters and words | |
  word_ids = [vocab.word_to_id(w) for w in sentence.split()] | |
  char_ids = [vocab.word_to_char_ids(w) for w in sentence.split()] | |
  # Ensure the sentence starts with the '<S>' sentence-start marker.
  if sentence.find('<S>') != 0: | |
    sentence = '<S> ' + sentence | |
  # Feed one word id per timestep (BATCH_SIZE = NUM_TIMESTEPS = 1).
  for i in xrange(len(word_ids)): | |
    inputs[0, 0] = word_ids[i] |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
use pyo3::prelude::*; | |
use pyo3::buffer::PyBuffer; | |
use std::thread; | |
use std::sync::mpsc; | |
use std::sync::Mutex; | |
use std::sync::Arc; | |
use std::net::{TcpListener, TcpStream}; | |
use std::io::prelude::*; |