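# Train a simple sparse-embedding MLP on MovieLens-1M (small_dataset=True,
# rating regression) or Criteo (small_dataset=False, binary click prediction),
# with one nn.Embedding table per sparse feature.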
from deepctr_torch.inputs import SparseFeat
import numpy as np
import torch
from torch import nn
import torch.utils.data as td
import torch.nn.functional as F
from tqdm import tqdm
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import prepare_data
dpath = os.getenv('BBPATH', '..')  # dataset root; defaults to the parent directory
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
small_dataset = False  # True: MovieLens-1M (regression); False: Criteo (binary)
print(device, small_dataset)

class SparseOnlyModel(torch.nn.Module):
    """An MLP over concatenated sparse-feature embeddings (no dense features)."""

    def __init__(self, feature_columns, hidden_size, binary=False):
        super(SparseOnlyModel, self).__init__()
        self.binary = binary
        # One embedding table per sparse feature; sparse=True yields sparse
        # gradients on lookup.
        self.embedding_tables = nn.ModuleList()
        # self.dram_tables = []  # leftover experiment: mirror tables in host DRAM
        input_size = 0
        for feature_column in feature_columns:
            self.embedding_tables.append(
                nn.Embedding(feature_column.vocabulary_size, feature_column.embedding_dim, sparse=True))
            # self.dram_tables.append(nn.Embedding(feature_column.vocabulary_size, feature_column.embedding_dim, sparse=True))
            input_size += feature_column.embedding_dim
        self.fc1 = nn.Linear(input_size, hidden_size[0])
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size[0], hidden_size[1])
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_size[1], hidden_size[2])
        self.relu3 = nn.ReLU()
        self.fc4 = nn.Linear(hidden_size[2], 1)
        if binary:
            self.sigmoid = nn.Sigmoid()

    # def init_dram(self):
    #     for i in range(len(self.embedding_tables)):
    #         self.dram_tables[i].to('cpu')
    #         # self.embedding_tables[i].to('cpu')
    def forward(self, x):
        # x: (batch, n_sparse_features) integer indices, one column per table.
        vecs = []
        for i, table in enumerate(self.embedding_tables):
            # self.dram_tables[i].weight.data[x[:, i]] = table.weight.data[x[:, i]].to('cpu')
            vecs.append(table(x[:, i]))
        x = torch.cat(vecs, 1)
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.fc2(x)
        x = self.relu2(x)
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        if self.binary:
            return self.sigmoid(x)
        return x
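
# Quick shape check (a sketch; the feature names and vocabulary sizes below
# are made up, not taken from the real datasets):
#   feats = [SparseFeat("user_id", vocabulary_size=100, embedding_dim=8),
#            SparseFeat("item_id", vocabulary_size=200, embedding_dim=8)]
#   m = SparseOnlyModel(feats, [512, 256, 64], binary=True)
#   out = m(torch.randint(0, 100, (4, 2)))  # -> shape (4, 1), values in (0, 1)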

def get_movielens():
    return prepare_data.build_movielens1m(path=dpath + "/movielens/ml-1m", cache_folder=dpath + "/.cache")

def get_criteo():
    return prepare_data.build_criteo(path=dpath + "/criteo/train.txt", cache_folder=dpath + "/.cache")

def generate_input():
    if small_dataset:
        feature_columns, _, raw_data, input_data, target = get_movielens()
    else:
        feature_columns, _, raw_data, input_data, target = get_criteo()
    y = raw_data[target].to_numpy()
    del raw_data  # free the raw frame early; Criteo is large
    feature_list = []
    x = []
    for feature_column in feature_columns:
        if isinstance(feature_column, SparseFeat):
            feature_list.append(feature_column)
            x.append(input_data[feature_column.embedding_name].to_numpy())
    # (n_features, n_samples) -> (n_samples, n_features); slice here
    # (e.g. x[:100000], y[:100000]) to train on a subset.
    x = np.array(x).T
    train_tensor_data = td.TensorDataset(torch.from_numpy(x), torch.from_numpy(y))
    return train_tensor_data, feature_list
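
# Each DataLoader batch drawn from this TensorDataset is a pair (x, y): x is
# an integer tensor of shape (batch, n_sparse_features), y holds the targets
# (ratings for MovieLens, 0/1 click labels for Criteo). For example:
#   ds, feats = generate_input()
#   x, y = next(iter(td.DataLoader(ds, batch_size=4)))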

def train(batch_size, epoch, device):
    train_tensor_data, feature_list = generate_input()
    train_loader = td.DataLoader(dataset=train_tensor_data, batch_size=batch_size)
    # MovieLens: rating regression; Criteo: binary click prediction.
    binary = not small_dataset
    model = SparseOnlyModel(feature_list, [512, 256, 64], binary).to(device)
    print(model)
    # model.init_dram()
    # sparse=True embeddings emit sparse gradients; plain SGD handles them,
    # but a dense optimizer like Adam would need torch.optim.SparseAdam.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    if small_dataset:
        loss_fn = F.mse_loss
    else:
        loss_fn = F.binary_cross_entropy
    for e in range(epoch):
        total_loss = 0.0
        with tqdm(enumerate(train_loader), total=len(train_loader)) as t:
            for index, (x, y) in t:
                optimizer.zero_grad()
                x = x.to(device)
                pred_y = model(x)
                # Match the model's (batch, 1) output shape to avoid silent
                # broadcasting in the loss.
                y = y.to(device).float().view(-1, 1)
                loss = loss_fn(pred_y, y)
                total_loss += loss.item()  # .item() avoids retaining the graph
                loss.backward()
                optimizer.step()
        print(e, ":", total_loss / len(train_loader))

if __name__ == "__main__":
    if small_dataset:
        train(2048, 5, device)
    else:
        train(8192, 5, device)
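
# Example invocation (a sketch; the data layout is an assumption based on the
# paths above -- BBPATH must contain movielens/ml-1m and criteo/train.txt,
# and the script name here is hypothetical):
#   BBPATH=/data/recsys python sparse_only_model.py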