Hub fairseq translate batched: a `FairseqHubInferer` helper that wraps a `torch.hub`-loaded fairseq model to run batched beam-search translation.
from typing import List, Iterator, Tuple
import copy

import torch


class FairseqHubInferer:
    """Runs batched inference on fairseq models loaded through torch.hub."""

    def __init__(self, *args, **kwargs):
        self.hub = torch.hub.load(*args, **kwargs)
        from fairseq import utils

        self.utils = utils
        # The effective maximum source length is the tightest bound across
        # the task and every model in the ensemble.
        self.max_positions = self.utils.resolve_max_positions(
            self.hub.task.max_positions(),
            *[model.max_positions() for model in self.hub.models],
        )

    def translate(
        self,
        sentences: List[str],
        n_best: int = 1,
        beam: int = 5,
        verbose: bool = False,
        **kwargs,
    ) -> List[List[str]]:
        """Translate sentences, returning up to n_best hypotheses per input."""
        tokenized_sentences = [self.hub.encode(s) for s in sentences]

        # Build the generator from the current hub args, overridden by any kwargs.
        gen_args = copy.copy(self.hub.args)
        gen_args.beam = beam
        for k, v in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.hub.task.build_generator(gen_args)

        results = []
        for batch in self.build_batches(tokenized_sentences):
            ids, src_tokens, src_lengths = batch
            src_tokens = src_tokens.to(self.hub.device)
            src_lengths = src_lengths.to(self.hub.device)
            sample = {
                "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
            }
            translations = self.hub.task.inference_step(
                generator, self.hub.models, sample
            )
            for iden, hypos in zip(ids.tolist(), translations):
                results.append((iden, hypos))

        # The batch iterator reorders sentences by length, so sort the
        # collected (id, hypotheses) pairs back into input order.
        outputs = []
        for _, hypos in sorted(results, key=lambda x: x[0]):
            hypotheses = []
            # Decode the top n_best predictions for this sentence.
            for hypo in hypos[: min(len(hypos), n_best)]:
                hypo_tokens = hypo["tokens"].int().cpu()
                hypotheses.append(self.hub.decode(hypo_tokens))
            outputs.append(hypotheses)
        return outputs

    def build_batches(
        self, tokens: List[torch.Tensor]
    ) -> Iterator[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """Group tokenized sentences into length-bucketed batches."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        itr = self.hub.task.get_batch_iterator(
            dataset=self.hub.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.hub.args.max_tokens,
            max_sentences=self.hub.args.max_sentences,
            max_positions=self.max_positions,
        ).next_epoch_itr(shuffle=False)
        for batch in itr:
            yield (
                batch["id"],
                batch["net_input"]["src_tokens"],
                batch["net_input"]["src_lengths"],
            )
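
A minimal usage sketch. This assumes the `pytorch/fairseq` torch.hub entry `transformer.wmt19.en-de.single_model` with its usual tokenizer/BPE kwargs; any fairseq translation model published on torch.hub should work the same way. The sentences and kwargs below are illustrative, not part of the original gist.

# Usage sketch (assumes the pytorch/fairseq WMT19 en-de hub model; swap in
# whichever fairseq translation model you actually use).
inferer = FairseqHubInferer(
    "pytorch/fairseq",
    "transformer.wmt19.en-de.single_model",
    tokenizer="moses",
    bpe="fastbpe",
)
translations = inferer.translate(
    ["Hello world!", "Machine translation is fun."],
    n_best=2,
    beam=5,
)
for hypotheses in translations:
    # Each entry is a best-first list of up to n_best translations
    # for the corresponding input sentence.
    print(hypotheses)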