def describe(x):
    """Print a quick summary of a tensor: its dtype string, shape, and values.

    Parameters
    ----------
    x : torch.Tensor
        Any object exposing ``.type()`` and ``.shape`` works, but this is
        written for torch tensors (``x.type()`` returns e.g.
        ``'torch.FloatTensor'``).

    Returns
    -------
    None -- output goes to stdout only.
    """
    # Original body had lost its indentation (SyntaxError); restored here.
    print("Type: {}".format(x.type()))
    print("Shape/size: {}".format(x.shape))
    print("Values: \n{}".format(x))
torch::DeviceType device_type;
if (torch::cuda::is_available()) {
std::cout << "Cuda available, running on GPU" << "\n";
device_type = torch::kCUDA;
} else {
std::cout << "Cuda NOT available, running on CPU" << "\n";
device_type = torch::kCPU;
}
torch::Device device(device_type);
module->to(torch::Device(device));
# Pick the first CUDA device when one is available, else stay on CPU, and
# move the model's parameters/buffers there. (The if/else bodies had lost
# their indentation — SyntaxError — restored here.)
if torch.cuda.is_available():
    dev = "cuda:0"
else:
    dev = "cpu"
device = torch.device(dev)
# NOTE(review): `model` is defined elsewhere in the file — verify it is an
# nn.Module (or anything with a .to(device) method) at the call site.
model.to(device)
def n_grams(text, n):
    """Return the list of n-grams (contiguous length-``n`` slices) of *text*.

    Works on any sliceable sequence: a string yields substrings, a list of
    tokens yields sublists.

    Parameters
    ----------
    text : sequence
        The string or token sequence to slice.
    n : int
        Gram length. When ``n > len(text)`` the range is empty and an empty
        list is returned.

    Returns
    -------
    list of n-grams, in order of occurrence.
    """
    # Body had lost its indentation (SyntaxError); restored here.
    return [text[i:i + n] for i in range(len(text) - n + 1)]
from nltk.tokenize import sent_tokenize

# Demo: split a two-sentence string into individual sentences with NLTK.
data = "Sentence one. Sentence two."
sentences = sent_tokenize(data)
from nltk.tokenize import word_tokenize

# Demo: split a sentence into word/punctuation tokens with NLTK.
data = "word one two three, four."
tokens = word_tokenize(data)