#!/bin/bash
set -euo pipefail

# echo error
echo_error() {
    echo "ERROR: $1"
}

# removes trailing slashes for directory paths
import argparse
import cv2
import numpy as np

def normalize_kernel(kernel, k_width, k_height, scaling_factor=1.0):
    '''Zero-summing normalize kernel'''
    K_EPS = 1.0e-12
    # positive and negative sum of kernel values
    pos_range, neg_range = 0, 0
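    # --- plausible continuation (an assumption, not the original code) ---
    # accumulate the positive and negative mass of the kernel, then rescale
    # each side so the kernel sums to zero and the positive side sums to
    # scaling_factor.
    kernel = np.asarray(kernel, dtype=np.float64).reshape(k_height, k_width)
    for i in range(k_height):
        for j in range(k_width):
            if kernel[i, j] > 0:
                pos_range += kernel[i, j]
            else:
                neg_range -= kernel[i, j]
    if pos_range > K_EPS:
        kernel[kernel > 0] *= scaling_factor / pos_range
    if neg_range > K_EPS:
        kernel[kernel < 0] *= scaling_factor / neg_range
    return kernel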
import scipy.spatial.distance

# compare every stored image hash against the reference image
for image_name in image_hash_dict.keys():
    distance = scipy.spatial.distance.hamming(
        hash_hex_to_hash_array(image_hash_dict[image_name]),
        hash_hex_to_hash_array(image_hash_dict['images/burj-khalifa-3.jpg'])
    )
    print("{0:<30} {1}".format(image_name, distance))
import glob
import numpy as np

# read all image names in database directory
image_names = sorted(glob.glob('images/*.jpg'))

def hash_array_to_hash_hex(hash_array):
    # convert hash array of 0 or 1 to hash string in hex
    hash_array = np.array(hash_array, dtype=np.uint8)
    hash_str = ''.join(str(i) for i in 1 * hash_array.flatten())
    return hex(int(hash_str, 2))

def hash_hex_to_hash_array(hash_hex):
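    # plausible body (an assumption, not the original code): invert
    # hash_array_to_hash_hex by parsing the hex string back into a 0/1 array.
    # Note that leading zero bits are lost in the hex round trip; pad with
    # zfill to a fixed hash length if exact lengths matter.
    hash_str = bin(int(hash_hex, 16))[2:]
    return np.array([int(bit) for bit in hash_str], dtype=np.uint8)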
import numpy as np
import torch
import torch.nn.functional as F

def predict(x, temp):
    # sample the next word id from the temperature-scaled softmax over the logits
    probs = F.softmax(x / temp, dim=0)
    probs = np.squeeze(probs.detach().cpu().numpy())
    ind = np.random.choice(vocab_len, 1, p=probs)
    return ind[0]

# seed words for generation; ';' marks the end of a quote
generated_text = ['there', 'is', 'no', 'one', 'love']
curr_len = 0
embeds = []
is_end = word_to_int[';']
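# sketch of a plausible generation loop (an assumption, not the original code).
# It assumes a trained Quote_Generator `model` returning next-word logits of
# shape (batch, vocab_len), the vocabulary maps `word_to_int`/`int_to_word`,
# the training `window` length and the `device` chosen below.
max_len = 30
while curr_len < max_len:
    seq = [word_to_int[w] for w in generated_text[-window:]]
    x = torch.tensor(seq, dtype=torch.long, device=device).unsqueeze(0)
    logits = model(x).squeeze(0)
    next_id = predict(logits, temp=0.7)
    if next_id == is_end:
        break
    generated_text.append(int_to_word[next_id])
    curr_len += 1
print(' '.join(generated_text))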
import numpy as np
import torch.nn as nn

def get_batches_x(tot_seq, batch_size):
    # shuffle the sequence indices and yield (X, Y) mini-batches
    ind = np.random.permutation(tot_seq).tolist()
    for i in range(0, tot_seq, batch_size):
        batch_ids = ind[i:i+batch_size]
        yield X[batch_ids], Y[batch_ids]

class Quote_Generator(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_len):
        super(Quote_Generator, self).__init__()
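        # plausible rest of the module (an assumption, not the original code):
        # an embedding layer feeding an LSTM, with a linear head projecting the
        # last hidden state onto the vocabulary.
        self.embed = nn.Embedding(vocab_len, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_len)

    def forward(self, x):
        e = self.embed(x)              # (batch, seq_len, embed_size)
        out, _ = self.lstm(e)          # (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])  # next-word logits, (batch, vocab_len)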
# slide a fixed-size window over each quote to build (sequence, next word) pairs
window = 5  # max_seq_length
sequences, next_words = [], []
for quote in quotes:
    words = quote.split(' ')
    for i in range(0, len(words) - window + 1):
        sequences.append(words[i:i+window])
        if (i + window) < len(words):
            next_words.append(words[i+window])
        else:
            next_words.append(';')
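# plausible next step (an assumption, not the original code): build the
# vocabulary and turn the word windows and targets into integer arrays X and Y,
# which get_batches_x() samples from during training.
vocab = sorted({w for q in quotes for w in q.split(' ')} | {';'})
word_to_int = {w: i for i, w in enumerate(vocab)}
int_to_word = {i: w for w, i in word_to_int.items()}
vocab_len = len(vocab)
X = np.array([[word_to_int[w] for w in seq] for seq in sequences])
Y = np.array([word_to_int[w] for w in next_words])
tot_seq = len(X)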
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# project the first n_points word embeddings to 2-D with t-SNE and plot them
tsne = TSNE(n_components=2, perplexity=40, n_iter=300, metric='cosine')
n_points = 100
emb_tsne = tsne.fit_transform(emb[:n_points, :])
labels = list(word_to_int.keys())[:n_points]
x = emb_tsne[:, 0]
y = emb_tsne[:, 1]

plt.figure(figsize=(16, 16))
for i in range(n_points):
    plt.scatter(x[i], y[i])
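    # plausible continuation (an assumption): label each point with its word
    plt.annotate(labels[i], xy=(x[i], y[i]), fontsize=10)
plt.show()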
import torch
import torch.nn.functional as F

def f_x(x, x_max, alpha):
    # GloVe weighting function: (x / x_max)^alpha, capped at 1
    x = (x / x_max) ** alpha
    return torch.min(x, torch.ones_like(x)).to(device)

def weight_mse(w_x, x, log_x):
    # weighted least-squares loss between predictions and log co-occurrences
    loss = w_x * F.mse_loss(x, log_x, reduction='none')
    return torch.mean(loss).to(device)

def glove_train(glove):
    epochs = 100
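    # plausible rest of the training loop (an assumption, not the original
    # code): minimise the weighted least-squares GloVe objective
    # f(X_ij) * (prediction - log X_ij)^2 over co-occurrence pairs, drawing
    # mini-batches from get_batch() below. x_max and alpha follow the values
    # suggested in the GloVe paper.
    optimizer = torch.optim.Adam(glove.parameters(), lr=1e-3)
    x_max, alpha = 100.0, 0.75
    for epoch in range(epochs):
        total_loss = 0.0
        for w1, w2, occ in get_batch(batch_size=512):
            w1 = torch.as_tensor(w1, dtype=torch.long, device=device)
            w2 = torch.as_tensor(w2, dtype=torch.long, device=device)
            occ = torch.as_tensor(occ, dtype=torch.float32, device=device)
            optimizer.zero_grad()
            pred = glove(w1, w2)  # assumed to return w_i.w~_j + b_i + b~_j per pair
            loss = weight_mse(f_x(occ, x_max, alpha), pred, torch.log(occ))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print("epoch {}: loss {:.4f}".format(epoch, total_loss))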
def get_batch(batch_size):
    # shuffle co-occurrence pair indices and yield mini-batches of
    # (word index 1, word index 2, co-occurrence count)
    ind = np.random.permutation(occs.size).tolist()
    for i in range(0, tot_pairs, batch_size):
        batch_ids = ind[i:i+batch_size]
        yield p1[batch_ids], p2[batch_ids], occs[batch_ids]

device = None
if torch.cuda.is_available():
    device = torch.device("cuda:0")
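else:
    # plausible fallback (an assumption): run on CPU when no GPU is available
    device = torch.device("cpu")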