A naive PyTorch implementation of Alibaba's Piecewise-Linear Model (I'd call it a mixture of LRs), compared against a plain LR.
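For reference, my reading of the forward pass below (notation mine, not taken from the paper): the model predicts p(y=1|x) = sum_i softmax(fn0(x))_i * sigmoid(fn1(x))_i over m components, i.e. a softmax gate softly partitions the input space into m regions and each region gets its own logistic regression.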
#!/usr/bin/env python
import numpy as np
import pylab as pl
import torch as th
from torch import nn, optim
from torch.nn import functional as F
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
def load_data(n=500, low=-1.0, high=1.0, plot=False):
    # Sample n points uniformly from [low, high)^2 (assumes low < 0 < high).
    x = np.random.rand(n, 2) * (high - low)
    x -= np.abs(low)
    y = []
    for i, j in x:
        if j * i >= 0:
            # Same-sign quadrants: positive iff |i + j| <= 1.
            if -i - 1 <= j <= -i + 1:
                y.append(1)
            else:
                y.append(0)
        else:
            # Opposite-sign quadrants: positive iff |j - i| <= 1.
            if i - 1 <= j <= i + 1:
                y.append(1)
            else:
                y.append(0)
    y = np.array(y)
    if plot:
        pl.scatter(x[:, 0], x[:, 1], c=y)
        pl.show()
    return x, y
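# Note (my reading of the label rule above): the two branches together mark the
# diamond |x1| + |x2| <= 1 around the origin as the positive class, so the data
# is not linearly separable and a plain LR on raw features should be near chance.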
class MLR(nn.Module):
    """Mixture of logistic regressions: a softmax gate over m regions, one LR per region."""
    def __init__(self, d, m):
        super(MLR, self).__init__()
        self.fn0 = nn.Linear(d, m)  # gating weights
        self.fn1 = nn.Linear(d, m)  # per-region LR weights

    def forward(self, x):
        z0 = F.softmax(self.fn0(x), dim=1)  # region membership probabilities
        z1 = th.sigmoid(self.fn1(x))        # per-region LR predictions
        y_hat = th.sum(z0 * z1, dim=1)      # mixture prediction in (0, 1)
        return y_hat
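# Shape check: with the values used in __main__ below, MLR(d=2, m=5) maps a float
# tensor of shape (batch, 2) to a (batch,) tensor of probabilities, which
# F.binary_cross_entropy in bench() consumes directly.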
class LR(nn.Module):
    def __init__(self, d):
        super(LR, self).__init__()
        self.fn0 = nn.Linear(d, 2)

    def forward(self, x):
        z0 = F.softmax(self.fn0(x), dim=1)
        y_hat = z0[:, 1]  # probability of the positive class
        return y_hat
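# Note: a 2-way softmax over a linear layer is ordinary logistic regression,
# just written with two logits instead of a single sigmoid unit.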
def bench(x, y, m):
    n, d = x.shape
    k = int(n * 0.7)  # 70/30 train/eval split
    x_train = x[:k]
    y_train = y[:k]
    x_eval = x[k:]
    y_eval = y[k:]

    x_train = th.from_numpy(x_train).float()
    y_train = th.from_numpy(y_train).float()

    print('=' * 60, '\n', 'try MLR...')
    mlr = MLR(d, m)
    opt = optim.SGD(mlr.parameters(), lr=1e0, momentum=0.9)
    for i in range(20):
        opt.zero_grad()
        y_hat = mlr(x_train)
        loss = F.binary_cross_entropy(y_hat, y_train)
        loss.backward()
        print('loss = {}'.format(loss.item()))
        opt.step()
    with th.no_grad():
        y_eval_hat = mlr(th.from_numpy(x_eval).float())
    print('auc = {}'.format(metrics.roc_auc_score(y_eval, y_eval_hat.numpy())))

    print('=' * 60, '\n', 'try LR...')
    lr = LR(d)
    opt = optim.SGD(lr.parameters(), lr=1e0, momentum=0.9)
    for i in range(20):
        opt.zero_grad()
        y_hat = lr(x_train)
        loss = F.binary_cross_entropy(y_hat, y_train)
        loss.backward()
        print('loss = {}'.format(loss.item()))
        opt.step()
    with th.no_grad():
        y_eval_hat = lr(th.from_numpy(x_eval).float())
    print('auc = {}'.format(metrics.roc_auc_score(y_eval, y_eval_hat.numpy())))

    print('=' * 60, '\n', 'try LR with naively discretized features...')
    # Bucketize each raw feature into 0.2-wide bins and one-hot encode the bins,
    # so a linear model can fit a piecewise-constant decision surface.
    oh = OneHotEncoder()
    z = (x / 0.2).astype('int')
    z -= z.min(axis=0)
    z = oh.fit_transform(z).toarray()
    x_train = th.from_numpy(z[:k]).float()
    x_eval = z[k:]
    lr = LR(z.shape[1])
    opt = optim.SGD(lr.parameters(), lr=1e0, momentum=0.9)
    for i in range(20):
        opt.zero_grad()
        y_hat = lr(x_train)
        loss = F.binary_cross_entropy(y_hat, y_train)
        loss.backward()
        print('loss = {}'.format(loss.item()))
        opt.step()
    with th.no_grad():
        y_eval_hat = lr(th.from_numpy(x_eval).float())
    print('auc = {}'.format(metrics.roc_auc_score(y_eval, y_eval_hat.numpy())))
if __name__ == '__main__':
    x, y = load_data(20000)
    bench(x, y, 5)