
mlruns/
├── 1
│   ├── 54a71ceb36394d94aeabe5190c33de6d
│   │   ├── artifacts
│   │   │   └── model
│   │   │       ├── MLmodel
│   │   │       ├── conda.yaml
│   │   │       └── model.pkl
│   │   ├── meta.yaml
│   │   ├── metrics
│   │   │   └── f1_score
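This is the layout MLflow's local tracking store creates once a run logs a metric and a model. A minimal sketch of the calls that produce it, assuming a scikit-learn model (the dataset, model and metric value are illustrative):

import mlflow
import mlflow.sklearn
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

X, y = make_classification(n_samples=200, random_state=0)
clf = LogisticRegression().fit(X, y)

with mlflow.start_run():
    # -> mlruns/<experiment_id>/<run_id>/metrics/f1_score
    mlflow.log_metric("f1_score", f1_score(y, clf.predict(X)))
    # -> mlruns/<experiment_id>/<run_id>/artifacts/model/{MLmodel, conda.yaml, model.pkl}
    mlflow.sklearn.log_model(clf, "model")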
HyperparameterHunterAssets/
├── Experiments
│   ├── Descriptions
│   │   ├── 0ff9842a-2be7-468e-9916-c4c4f252f6ea.json
│   │   ├── 14f02a59-0fa7-45c3-b9b1-9832e9f35921.json
[...]
│   ├── Heartbeats
│   │   ├── 0ff9842a-2be7-468e-9916-c4c4f252f6ea.log
│   │   ├── 14f02a59-0fa7-45c3-b9b1-9832e9f35921.log
[...]
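HyperparameterHunter writes the Descriptions and Heartbeats files automatically once an Environment points at the assets directory. A rough sketch, assuming train_df is a pandas DataFrame with a 'target' column; the exact keyword arguments vary a bit across HyperparameterHunter versions:

from hyperparameter_hunter import Environment, CVExperiment
from xgboost import XGBClassifier

env = Environment(
    train_dataset=train_df,  # assumed: DataFrame with a 'target' column
    results_path="HyperparameterHunterAssets",
    metrics=["f1_score"],
    cv_params=dict(n_splits=5, shuffle=True, random_state=42),
)
# Running the experiment creates the Descriptions/*.json and Heartbeats/*.log files
experiment = CVExperiment(
    model_initializer=XGBClassifier,
    model_init_params=dict(n_estimators=200),
)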
from mlflow.exceptions import MlflowException
import mlflow

mlflow.set_tracking_uri(mlflow_tracking_URI)
try:
    # create_experiment raises MlflowException if the experiment already exists
    mlflow.create_experiment(experiment_name)
except MlflowException:
    print("reusing experiment")
mlflow.set_experiment(experiment_name)
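With the experiment set, each training run can then be recorded under it. A minimal sketch with illustrative parameter names and values:

with mlflow.start_run():
    mlflow.log_param("lr", 0.01)
    mlflow.log_param("n_epochs", 10)
    mlflow.log_metric("f1_score", 0.93)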
def forward(self, users, items):
    # GMF: the interaction is the element-wise product of the embeddings
    user_emb = self.embeddings_user(users)
    item_emb = self.embeddings_item(items)
    prod = user_emb * item_emb
    preds = torch.sigmoid(self.out(prod))
    return preds
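The GMF forward pass above presupposes embedding tables and a linear output head defined in the constructor. A possible sketch, with the layer names taken from the forward method and the sizes purely illustrative:

import torch
from torch import nn

class GMF(nn.Module):
    def __init__(self, n_users, n_items, n_emb=8):
        super().__init__()
        self.embeddings_user = nn.Embedding(n_users, n_emb)
        self.embeddings_item = nn.Embedding(n_items, n_emb)
        self.out = nn.Linear(n_emb, 1)  # maps the element-wise product to a logit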
def forward(self, users, items):
    # MLP: concatenate the embeddings and pass them through an MLP
    user_emb = self.embeddings_user(users)
    item_emb = self.embeddings_item(items)
    emb_vector = torch.cat([user_emb, item_emb], dim=1)
    emb_vector = self.mlp(emb_vector)
    preds = torch.sigmoid(self.out(emb_vector))
    return preds
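Likewise, a possible constructor for the MLP model; the hidden-layer widths are illustrative:

from torch import nn

class MLP(nn.Module):
    def __init__(self, n_users, n_items, n_emb=16):
        super().__init__()
        self.embeddings_user = nn.Embedding(n_users, n_emb)
        self.embeddings_item = nn.Embedding(n_items, n_emb)
        self.mlp = nn.Sequential(
            nn.Linear(n_emb * 2, 32), nn.ReLU(),
            nn.Linear(32, 16), nn.ReLU(),
        )
        self.out = nn.Linear(16, 1)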
def forward(self, users, items):
    # NeuMF: a GMF branch and an MLP branch, each with its own embeddings
    mf_user_emb = self.mf_embeddings_user(users)
    mf_item_emb = self.mf_embeddings_item(items)
    mlp_user_emb = self.mlp_embeddings_user(users)
    mlp_item_emb = self.mlp_embeddings_item(items)
    mf_emb_vector = mf_user_emb * mf_item_emb
    mlp_emb_vector = torch.cat([mlp_user_emb, mlp_item_emb], dim=1)
    # The snippet was truncated here; the standard NeuMF head concatenates
    # the two branches and applies a final linear layer:
    emb_vector = torch.cat([mf_emb_vector, self.mlp(mlp_emb_vector)], dim=1)
    preds = torch.sigmoid(self.out(emb_vector))
    return preds
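And a possible constructor for the two-branch NeuMF forward pass; embedding sizes and the MLP width are illustrative, chosen so the concatenated vector matches the final linear layer:

from torch import nn

class NeuMF(nn.Module):
    def __init__(self, n_users, n_items, n_emb=8):
        super().__init__()
        self.mf_embeddings_user = nn.Embedding(n_users, n_emb)
        self.mf_embeddings_item = nn.Embedding(n_items, n_emb)
        self.mlp_embeddings_user = nn.Embedding(n_users, n_emb)
        self.mlp_embeddings_item = nn.Embedding(n_items, n_emb)
        self.mlp = nn.Sequential(nn.Linear(n_emb * 2, 16), nn.ReLU())
        self.out = nn.Linear(n_emb + 16, 1)  # GMF vector + MLP output, concatenated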
def forward(self, u, i, j):
    # NGCF-style propagation: stack the user and item embedding tables and
    # propagate them through the (folded) adjacency matrix, layer by layer
    ego_embeddings = torch.cat([self.u_embeddings, self.i_embeddings], 0)
    pred_embeddings = [ego_embeddings]
    for k in range(self.n_layers):
        temp_embed = []
        for f in range(self.n_fold):
            # sparse matmul with the f-th fold of the adjacency matrix
            temp_embed.append(torch.sparse.mm(self.A_fold_hat[f], ego_embeddings))
[...]
jrzaurin / lightgbm_focal_loss.py
Last active October 4, 2019 17:04
Focal Loss to be used with LightGBM
import numpy as np
from scipy.misc import derivative  # removed in SciPy >= 1.12

def focal_loss_lgb(y_pred, dtrain, alpha, gamma):
    # Focal loss objective for LightGBM: returns gradient and hessian
    a, g = alpha, gamma
    y_true = dtrain.label
    def fl(x, t):
        p = 1 / (1 + np.exp(-x))
        return -(a*t + (1-a)*(1-t)) * ((1 - (t*p + (1-t)*(1-p)))**g) * (t*np.log(p) + (1-t)*np.log(1-p))
    partial_fl = lambda x: fl(x, y_true)
    # first and second numerical derivatives of the loss w.r.t. the raw scores
    grad = derivative(partial_fl, y_pred, n=1, dx=1e-6)
    hess = derivative(partial_fl, y_pred, n=2, dx=1e-6)
    return grad, hess
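A minimal sketch of plugging the objective into training. LightGBM versions before 4.0 accept custom objectives via the fobj argument of lgb.train; the dataset and the alpha/gamma values are illustrative:

import lightgbm as lgb
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=500, weights=[0.9], random_state=0)
dtrain = lgb.Dataset(X, y)

focal_loss = lambda preds, d: focal_loss_lgb(preds, d, alpha=0.25, gamma=1.0)
model = lgb.train({"learning_rate": 0.1}, dtrain, num_boost_round=50, fobj=focal_loss)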
jrzaurin / focal_loss_eval_func_lightgbm.py
Last active October 4, 2019 17:04
Focal Loss Evaluation Function for LightGBM
import numpy as np

def focal_loss_lgb_eval_error(y_pred, dtrain, alpha, gamma):
    # Focal loss evaluation metric for LightGBM
    a, g = alpha, gamma
    y_true = dtrain.label
    p = 1 / (1 + np.exp(-y_pred))
    loss = -(a*y_true + (1-a)*(1-y_true)) * ((1 - (y_true*p + (1-y_true)*(1-p)))**g) * (y_true*np.log(p) + (1-y_true)*np.log(1-p))
    # (eval_name, eval_result, is_higher_better)
    return 'focal_loss', np.mean(loss), False
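Continuing the sketch above, the evaluation function plugs into feval. Note that with a custom objective LightGBM's predict returns raw scores, so the sigmoid must be applied manually; the validation data here is illustrative:

X_val, y_val = make_classification(n_samples=200, weights=[0.9], random_state=1)
dval = lgb.Dataset(X_val, y_val, reference=dtrain)

eval_error = lambda preds, d: focal_loss_lgb_eval_error(preds, d, alpha=0.25, gamma=1.0)
model = lgb.train({"learning_rate": 0.1}, dtrain, num_boost_round=50,
                  valid_sets=[dval], fobj=focal_loss, feval=eval_error)
preds = 1.0 / (1.0 + np.exp(-model.predict(X_val)))  # sigmoid on raw scores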