Script to fine-tune BERT for text classification
# REQUIREMENTS : pandas keras torch numpy transformers
"""
Strongly based on the article https://mccormickml.com/2019/07/22/BERT-fine-tuning/
by Chris McCormick
"""

import os
import time
import random
import argparse
import datetime

import pandas as pd
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup

def flat_accuracy(preds, labels):
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
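
# Toy illustration of flat_accuracy (made-up numbers, not executed):
#   flat_accuracy(np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]]), np.array([1, 0, 0]))
#   -> predictions [1, 0, 1] vs labels [1, 0, 0] -> accuracy 2/3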

def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    # Round to the nearest second.
    elapsed_rounded = int(round(elapsed))
    # Format as hh:mm:ss
    return str(datetime.timedelta(seconds=elapsed_rounded))

parser = argparse.ArgumentParser()
parser.add_argument("train", help="TSV with two columns: 'sentence' and 'label'")
parser.add_argument("test", help="TSV with two columns: 'sentence' and 'label'")
parser.add_argument("-e", "--epochs", type=int, default=5)
parser.add_argument("-b", "--batch_size", type=int, default=16)
parser.add_argument("-o", "--output_dir", default="./model_save/",
                    help="directory where the fine-tuned model and tokenizer are saved")
args = parser.parse_args()

if not os.path.exists(args.train) or not os.path.exists(args.test):
    raise FileNotFoundError("Train or test filepath is incorrect!")

# Number of training epochs (the BERT authors recommend between 2 and 4).
epochs = args.epochs

# The DataLoader needs to know the batch size for training, so I specify it here.
# For fine-tuning BERT on a specific task, the authors recommend a batch size of
# 16 or 32.
batch_size = args.batch_size

# Output directory (created at the end of training if it does not exist yet).
output_dir = args.output_dir
if os.path.exists(output_dir) and not os.path.isdir(output_dir):
    raise NotADirectoryError("{0} is not a directory".format(output_dir))
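
# Example invocation (the script filename is hypothetical, shown for illustration only):
#   python bert_text_classification.py train.tsv test.tsv -e 4 -b 16 -o ./model_save/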

df_train = pd.read_csv(args.train, sep="\t")
df_test = pd.read_csv(args.test, sep="\t")
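
# The TSV files read above are expected to look roughly like this
# (hypothetical example; header row required, integer labels starting at 0):
#   sentence<TAB>label
#   this movie was great<TAB>1
#   terrible acting and a weak plot<TAB>0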

# Use the GPU if one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")

# Load the multilingual BERT tokenizer.
print('Loading BERT tokenizer...')
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)

df_train["input_ids"] = df_train.sentence.apply(lambda x: tokenizer.encode(x, add_special_tokens=True))
df_test["input_ids"] = df_test.sentence.apply(lambda x: tokenizer.encode(x, add_special_tokens=True))

# Set the maximum sequence length to the length of the longest tokenized
# training sentence plus a small margin (`encode` already added the special tokens).
MAX_LEN = df_train.input_ids.apply(len).max() + 2
print('\nPadding/truncating all sentences to %d values...' % MAX_LEN)
print('\nPadding token: "{:}", ID: {:}'.format(tokenizer.pad_token, tokenizer.pad_token_id))

df_train["input_ids"] = pad_sequences(df_train.input_ids.values, maxlen=MAX_LEN, dtype="long", value=0, truncating="post", padding="post").tolist()
df_test["input_ids"] = pad_sequences(df_test.input_ids.values, maxlen=MAX_LEN, dtype="long", value=0, truncating="post", padding="post").tolist()

df_train["attention_mask"] = df_train.input_ids.apply(lambda x: [int(token_id > 0) for token_id in x])
df_test["attention_mask"] = df_test.input_ids.apply(lambda x: [int(token_id > 0) for token_id in x])

train_inputs = torch.tensor(np.array(df_train.input_ids.values.tolist()))
validation_inputs = torch.tensor(np.array(df_test.input_ids.values.tolist()))
train_labels = torch.tensor(np.array(df_train.label.values.tolist()))
validation_labels = torch.tensor(np.array(df_test.label.values.tolist()))
train_masks = torch.tensor(np.array(df_train.attention_mask.values.tolist()))
validation_masks = torch.tensor(np.array(df_test.attention_mask.values.tolist()))

# Create the DataLoader for the training set.
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

# Create the DataLoader for the validation set.
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)

# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
model = BertForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased",             # The 12-layer multilingual BERT model, cased vocab.
    num_labels=len(np.unique(df_train.label)),  # Number of output labels (2 for binary classification,
                                                # more for multi-class tasks).
    output_attentions=False,                    # Whether the model returns attention weights.
    output_hidden_states=False,                 # Whether the model returns all hidden states.
)

# Move the model to the selected device (GPU or CPU).
model.to(device)

optimizer = AdamW(model.parameters(),
                  lr=2e-5,  # args.learning_rate - default is 5e-5, the original notebook used 2e-5
                  eps=1e-8  # args.adam_epsilon - default is 1e-8
                  )

# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs

# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps=0,  # Default value in run_glue.py
                                            num_training_steps=total_steps)

# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)

# Store the average loss after each epoch so I can plot them.
loss_values = []

# For each epoch...
for epoch_i in range(0, epochs):

    # ========================================
    #               Training
    # ========================================

    # Perform one full pass over the training set.
    print("")
    print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
    print('Training...')

    # Measure how long the training epoch takes.
    t0 = time.time()

    # Reset the total loss for this epoch.
    total_loss = 0

    # Put the model into training mode.
    model.train()

    # For each batch of training data...
    for step, batch in enumerate(train_dataloader):

        # Progress update every 40 batches.
        if step % 40 == 0 and not step == 0:
            # Calculate elapsed time.
            elapsed = format_time(time.time() - t0)
            # Report progress.
            print('  Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))

        # Unpack this training batch from the dataloader.
        #
        # As I unpack the batch, I'll also copy each tensor to the device using
        # the `to` method.
        #
        # `batch` contains three pytorch tensors:
        #   [0]: input ids
        #   [1]: attention masks
        #   [2]: labels
        b_input_ids = batch[0].to(device)
        b_input_mask = batch[1].to(device)
        b_labels = batch[2].to(device)

        # Always clear any previously calculated gradients before performing a
        # backward pass. PyTorch doesn't do this automatically because
        # accumulating the gradients is "convenient while training RNNs".
        # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
        model.zero_grad()

        # Perform a forward pass (evaluate the model on this training batch).
        # This will return the loss (rather than the model output) because I
        # have provided the `labels`.
        # The documentation for this `model` function is here:
        # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
        outputs = model(b_input_ids,
                        token_type_ids=None,
                        attention_mask=b_input_mask,
                        labels=b_labels)

        # The call to `model` always returns a tuple, so I need to pull the
        # loss value out of the tuple.
        loss = outputs[0]

        # Accumulate the training loss over all of the batches so that I can
        # calculate the average loss at the end. `loss` is a Tensor containing a
        # single value; the `.item()` function just returns the Python value
        # from the tensor.
        total_loss += loss.item()

        # Perform a backward pass to calculate the gradients.
        loss.backward()

        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

        # Update parameters and take a step using the computed gradient.
        # The optimizer dictates the "update rule"--how the parameters are
        # modified based on their gradients, the learning rate, etc.
        optimizer.step()

        # Update the learning rate.
        scheduler.step()

    # Calculate the average loss over the training data.
    avg_train_loss = total_loss / len(train_dataloader)

    # Store the loss value for plotting the learning curve.
    loss_values.append(avg_train_loss)

    print("")
    print("  Average training loss: {0:.2f}".format(avg_train_loss))
    print("  Training epoch took: {:}".format(format_time(time.time() - t0)))

    # ========================================
    #               Validation
    # ========================================

    # After the completion of each training epoch, measure the performance on
    # the validation set.
    print("")
    print("Running Validation...")

    t0 = time.time()

    # Put the model in evaluation mode--the dropout layers behave differently
    # during evaluation.
    model.eval()

    # Tracking variables
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0

    # Evaluate data for one epoch
    for batch in validation_dataloader:

        # Move the batch to the device (GPU or CPU).
        batch = tuple(t.to(device) for t in batch)

        # Unpack the inputs from the dataloader.
        b_input_ids, b_input_mask, b_labels = batch

        # Telling the model not to compute or store gradients, saving memory and
        # speeding up validation.
        with torch.no_grad():
            # Forward pass, calculate logit predictions.
            # This will return the logits rather than the loss because we have
            # not provided labels.
            # token_type_ids is the same as the "segment ids", which
            # differentiates sentence 1 and 2 in 2-sentence tasks.
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            outputs = model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask)

        # Get the "logits" output by the model. The "logits" are the output
        # values prior to applying an activation function like the softmax.
        logits = outputs[0]

        # Move logits and labels to CPU.
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()

        # Calculate the accuracy for this batch of test sentences.
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)

        # Accumulate the total accuracy.
        eval_accuracy += tmp_eval_accuracy

        # Track the number of batches.
        nb_eval_steps += 1

    # Report the final accuracy for this validation run.
    print("  Accuracy: {0:.2f}".format(eval_accuracy / nb_eval_steps))
    print("  Validation took: {:}".format(format_time(time.time() - t0)))

print("")
print("Training complete!")

# Create the output directory if needed.
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

print("Saving model to %s" % output_dir)

# Save the trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`.
model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
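
# Minimal reload sketch (assuming the same transformers version), kept as
# comments so it does not run as part of training:
#
#   from transformers import BertForSequenceClassification, BertTokenizer
#   model = BertForSequenceClassification.from_pretrained(output_dir)
#   tokenizer = BertTokenizer.from_pretrained(output_dir)
#   model.to(device)
#   model.eval()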