// Before, with Sprockets:
//= require style
//= require tablesort
//= require tooltipster
//= require jquery-ui
// ...

// After, with Sass @imports:
@import 'variables';
@import 'overall';
@import 'type';
loaders: [
  // webpack 1 syntax: chained loaders apply right-to-left, so css-loader
  // resolves @import/url() first and style-loader injects the result.
  { test: /\.css$/, loader: "style-loader!css-loader" },
  { test: /\.scss$/, loaders: ["style", "css", "sass"] }
]
loaders: [
  ...
  // Inline small PNGs (under ~100 KB) as data URIs; copy JPGs and SVGs
  // through as separate files.
  { test: /\.png$/, loader: 'url-loader?limit=100000' },
  { test: /\.jpg$/, loader: 'file-loader' },
  { test: /\.svg$/, loader: 'file-loader' }
]
}
{
  ...
  output: {
    path: outputPath,
    publicPath: "https://cdn.com/production/js/", // hostname needs to match Rails config
    filename: '[name]-[chunkhash].js'
  },
  plugins: [
    new AssetsPlugin({ path: outputPath }),
    new webpack.optimize.UglifyJsPlugin(),
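Because [chunkhash] renames the bundle whenever its contents change, the consuming app can't hard-code the filename; AssetsPlugin exists to write a manifest mapping entry names to the current hashed files. A minimal sketch of the lookup, in Python for neutrality (the manifest path and shape here are assumptions, not taken from this config):

import json

# Assumed path: AssetsPlugin writes webpack-assets.json into outputPath.
MANIFEST_PATH = 'public/production/js/webpack-assets.json'

def asset_url(entry_name, ext='js'):
    """Look up the current hashed filename for a webpack entry point."""
    with open(MANIFEST_PATH) as f:
        manifest = json.load(f)
    # Assumed manifest shape: {"main": {"js": "main-<chunkhash>.js"}};
    # depending on plugin config the value may already include publicPath.
    return 'https://cdn.com/production/js/' + manifest[entry_name][ext]

print(asset_url('main'))  # e.g. https://cdn.com/production/js/main-abc123.js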
kevinrobinson / gen_word2vec.py
Created December 6, 2015 16:51
Built gen_word2vec.py, 3972c79
"""Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
kevinrobinson / dictionary.py
Last active December 7, 2015 18:48
Naive dictionary approach to representing words
# example input:
sentence = 'The quick brown fox jumped over the lazy dog.'

def tokenize(sentence):
    # drop the trailing period, lowercase, and split on single spaces
    return map(str.lower, sentence[:-1].split(' '))

# tokenize and normalize words, building the set of all vocabulary ever seen
words_set = {}
for word in tokenize(sentence):
    words_set[word] = True
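From here, each vocabulary word can be given a stable integer id so sentences become id sequences; a small illustrative extension (the word_ids and encode names are mine, not from the gist):

# Assign each vocabulary word a stable integer id (hypothetical extension
# of the snippet above).
word_ids = {word: i for i, word in enumerate(sorted(words_set))}

def encode(sentence):
    # represent a sentence as a list of vocabulary ids
    return [word_ids[word] for word in tokenize(sentence)]

print(encode('The quick brown fox jumped over the lazy dog.'))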
kevinrobinson / call_skipgram.py
Created December 7, 2015 19:34
Call the skipgram op to load training data
(words, counts, words_per_epoch, self._epoch, self._words, examples,
 labels) = word2vec.skipgram(filename=opts.train_data,
                             batch_size=opts.batch_size,
                             window_size=opts.window_size,
                             min_count=opts.min_count,
                             subsample=opts.subsample)
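The tuple on the left holds graph tensors, not data: nothing is read from opts.train_data until the graph runs. A minimal sketch of pulling one batch, assuming the TF 0.x Session API of the era (variable names are illustrative):

import tensorflow as tf

with tf.Session() as session:
    # Each run of the op advances through the training file and yields one
    # batch of (center word id, context word id) training pairs.
    example_ids, label_ids = session.run([examples, labels])
    print(example_ids[:8], label_ids[:8])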
REGISTER_OP("Skipgram")
    .Output("vocab_word: string")
    .Output("vocab_freq: int32")
    .Output("words_per_epoch: int64")
    .Output("current_epoch: int32")
    .Output("total_words_processed: int64")
    .Output("examples: int32")
    .Output("labels: int32")
    .Attr("filename: string")
    .Attr("batch_size: int")
lr = opts.learning_rate * tf.maximum(
    0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
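Stripped of the TensorFlow ops, this is a plain linear decay on the fraction of training words consumed, floored at 0.0001 of the initial rate; a quick sketch of the schedule (words_processed stands in for the evaluated self._words counter):

def decayed_lr(initial_lr, words_processed, words_to_train):
    # Linearly shrink the multiplier from 1.0 toward 0 as training consumes
    # words, but never below 0.0001 (0.01% of the initial rate).
    return initial_lr * max(0.0001, 1.0 - words_processed / float(words_to_train))

print(decayed_lr(0.2, 0, 1000))     # 0.2 at the start
print(decayed_lr(0.2, 500, 1000))   # 0.1 halfway through
print(decayed_lr(0.2, 1000, 1000))  # 2e-05 floor at the end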
# "Skipgram:4" is the fifth output of the Skipgram op registered above
# (total_words_processed), cast to float32 for the learning-rate formula.
node {
  name: "Cast_1"
  op: "Cast"
  input: "Skipgram:4"
  attr {
    key: "DstT"
    value {
      type: DT_FLOAT
    }
  }
}