[ Launch: reddit base ] 75fc23ad5dda44094c9f by MLWhiz
[ Launch: reddit base ] 6633418 by enjalot
[ Launch: simple histogram ] 6631652 by enjalot
[ Launch: reddit api with JSONP ] 55bb07ffb4480a03af87 by MLWhiz
[ Launch: reddit api with JSONP ] 6605352 by enjalot
[ Launch: reddit base ] 2061bf3c8cc74f5dec1e by MLWhiz
[ Launch: reddit base ] 6633418 by enjalot
[ Launch: simple histogram ] 6631652 by enjalot
[ Launch: reddit base ] d9b9f14d25fc65db34c7 by MLWhiz
[ Launch: reddit base ] 2061bf3c8cc74f5dec1e by MLWhiz
[ Launch: reddit base ] 6633418 by enjalot
[ Launch: simple histogram ] 6631652 by enjalot
A polar area chart showing prominent Game of Thrones characters' on-screen time and number of episodes. Built using D3 with data from user "Sellsword" on the Game of Thrones forums.
Forked from Desmond Weindorf's Pen Polar Area Chart (Game of Thrones).
Forked from Desmond Weindorf's Pen Polar Area Chart (Game of Thrones).
A Pen by Rahul Agarwal on CodePen.
[ Launch: reddit base ] 23fed5f4f1fd0ada21ff by MLWhiz
[ Launch: reddit base ] 6633418 by enjalot
[ Launch: simple histogram ] 6631652 by enjalot
# NOTE(review): scraped fragment — the trailing " | |" on each line is an
# extraction artifact, the original indentation is lost, and the function
# body is truncated after the SpatialDropout1D layer (no further layers or
# return statement are visible in this chunk). Do not run as-is.
# Builds a Keras-style text model with two inputs: `features_input` for
# hand-engineered features and `inp` for token-id sequences fed through a
# frozen pretrained embedding. Presumably `maxlen`, `max_features` and
# `embedding_matrix` are module-level globals defined elsewhere — TODO confirm
# against the full notebook.
def get_model(features,clipvalue=1.,num_filters=40,dropout=0.1,embed_size=501): | |
features_input = Input(shape=(features.shape[1],)) | |
inp = Input(shape=(maxlen, )) | |
# Layer 1: Word2Vec Embeddings. | |
x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp) | |
# Layer 2: SpatialDropout1D(0.1) | |
x = SpatialDropout1D(dropout)(x) |
# NOTE(review): scraped fragment — " | |" markers are extraction artifacts,
# indentation is lost, and __init__ is truncated after the embedding layer
# (the rest of the constructor and the forward() method are not visible here).
# PyTorch module that starts with an nn.Embedding over the vocabulary;
# presumably the "_Meta" suffix means it also consumes hand-engineered meta
# features alongside text — verify against the full source. Relies on
# module-level globals `embedding_matrix`, `max_features`, `embed_size` —
# TODO confirm they are defined before this class is instantiated.
class Alex_NeuralNet_Meta(nn.Module): | |
def __init__(self,hidden_size,lin_size, embedding_matrix=embedding_matrix): | |
super(Alex_NeuralNet_Meta, self).__init__() | |
# Initialize some parameters for your model | |
self.hidden_size = hidden_size | |
drp = 0.1 | |
# Layer 1: Word2Vec Embeddings. | |
self.embedding = nn.Embedding(max_features, embed_size) |
# NOTE(review): scraped fragment — " | |" markers are extraction artifacts,
# indentation is lost, and the body is truncated at the fold-loop header
# (the per-fold training code is not visible in this chunk).
# Cross-validated training driver: seeds RNGs, allocates out-of-fold
# prediction buffers for train and test, then iterates StratifiedKFold
# splits. Relies on module-level globals `n_splits`, `SEED`, and a
# `seed_everything()` helper defined elsewhere — TODO confirm.
def pytorch_model_run_cv(x_train,y_train,features,x_test, model_obj, feats = False,clip = True): | |
seed_everything() | |
avg_losses_f = [] | |
avg_val_losses_f = [] | |
# matrix for the out-of-fold predictions | |
train_preds = np.zeros((len(x_train))) | |
# matrix for the predictions on the test set | |
test_preds = np.zeros((len(x_test))) | |
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED).split(x_train, y_train)) | |
for i, (train_idx, valid_idx) in enumerate(splits): |
# Some preprocessing that will be common to all the text classification
# methods you will see.
# Punctuation and special characters to handle during text cleaning; the
# tail of the list covers mojibake/symbols commonly found in scraped text.
# (Fixed: removed the " | |" scraping artifacts that made this block
# syntactically invalid, and restored conventional formatting.)
puncts = [
    ',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&',
    '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
    '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›',
    '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
    '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑',
    '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
    '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆',
    'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
    '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦',
    '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√',
]
def clean_text(x): | |
x = str(x) |