Skip to content

Instantly share code, notes, and snippets.

@vuiseng9
Last active May 31, 2022 17:31
Show Gist options
  • Save vuiseng9/7908731ed83217574f4941d1ad30a9b2 to your computer and use it in GitHub Desktop.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging as log
import sys
import time
import numpy as np
from openvino.preprocess import PrePostProcessor
from openvino.runtime import AsyncInferQueue, Core, InferRequest, Layout, Type
import os
import torch
import torchvision
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
Resize,
ToTensor,
InterpolationMode
)
class AverageMeter:
    """Accumulate observations and expose the latest value, running sum,
    observation count, and running mean (``val``, ``sum``, ``count``, ``avg``)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic so the meter can be reused."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def parse_args() -> argparse.Namespace:
    """Parse and return command line arguments.

    Returns a Namespace with: ``model`` (str), ``input`` (str),
    ``device`` (str, default 'CPU'), ``b`` (int batch size, default 8)
    and ``quiet`` (bool).
    """
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    # fmt: off
    args.add_argument('-h', '--help', action='help',
                      help='Show this help message and exit.')
    args.add_argument('-m', '--model', type=str, required=True,
                      help='Required. Path to an .xml or .onnx file with a trained model.')
    args.add_argument('-i', '--input', required=True, type=str,
                      help='Required. Path to an image folder, assuming torchvision folder structures')
    args.add_argument('-d', '--device', type=str, default='CPU',
                      help='Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO: '
                           'is acceptable. The sample will look for a suitable plugin for device specified. '
                           'Default value is CPU.')
    args.add_argument('-b', default=8, type=int,
                      help='batch size, also number of parallel infer request')
    # BUGFIX: --quiet was registered on the bare parser instead of the
    # 'Options' group, so it did not appear alongside the other flags in
    # --help. Behavior of parsing is unchanged.
    args.add_argument('--quiet', action='store_true',
                      help='Optional. Suppress per-batch accuracy log lines.')
    # fmt: on
    return parser.parse_args()
def create_imagenet_valset_loader(dataset_dir, batch_size,
                                  n_worker=8,
                                  image_size=224,
                                  crop_pct=0.875,
                                  mode=InterpolationMode.BILINEAR,
                                  mean=(0.485, 0.456, 0.406),
                                  std=(0.229, 0.224, 0.225)):
    """Build a sequential (non-shuffled) DataLoader over ``<dataset_dir>/val``.

    Images are resized so that the later center crop keeps ``crop_pct`` of
    the resized frame, converted to tensors, and normalized with the given
    per-channel mean/std. Assumes the torchvision ImageFolder layout.
    """
    # e.g. 224 / 0.875 = 256: resize short side to 256, then crop 224x224.
    resize_target = int(image_size / crop_pct)
    val_transforms = Compose([
        Resize(resize_target, interpolation=mode),
        CenterCrop(image_size),
        ToTensor(),
        Normalize(mean=mean, std=std),
    ])
    val_dataset = torchvision.datasets.ImageFolder(
        os.path.join(dataset_dir, 'val'), val_transforms)
    return torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=n_worker,
        sampler=torch.utils.data.SequentialSampler(val_dataset),
        drop_last=False)
# Module-level accuracy accumulators: updated per image by the async
# inference callback below and read/reported in main(). Their .avg fields
# hold accuracy percentages because hits are recorded as 100 and misses as 0.
top1 = AverageMeter()
top5 = AverageMeter()
def completion_callback(infer_request: InferRequest, userdata):
    """Score one finished inference request against its ground truth.

    ``userdata`` carries the ground-truth class index passed to
    ``start_async``. Records a hit (100) or miss (0) into the module-level
    ``top1``/``top5`` meters, so their ``.avg`` reads as accuracy in percent.
    """
    predictions = next(iter(infer_request.results.values()))
    # Flatten the model output to a 1-D score vector over classes.
    probs = predictions.reshape(-1)
    # Indices of the five highest-scoring classes, best first.
    top_5 = np.argsort(probs)[::-1][:5]
    top5.update(100 if userdata in top_5 else 0)
    top1.update(100 if userdata == top_5[0] else 0)
def main() -> int:
    """Run asynchronous ImageNet validation of an OpenVINO model.

    Reads the CLI arguments, loads the model, streams the validation set
    through an AsyncInferQueue (one parallel request per batch element),
    and logs end-to-end time, Acc@1/Acc@5 and throughput.

    Returns 0 on success, -1 when the model is not single-input /
    single-output. Raises ValueError when --input is not a directory.
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    args = parse_args()
    batch_size = args.b
    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()
    # --------------------------- Step 2. Read a model --------------------------------------------------------------------
    log.info(f'Reading the model: {args.model}')
    # (.xml and .bin files) or (.onnx file)
    model = core.read_model(args.model)
    if len(model.inputs) != 1:
        log.error('Sample supports only single input topologies')
        return -1
    if len(model.outputs) != 1:
        log.error('Sample supports only single output topologies')
        return -1
    # --------------------------- Step 3. Set up dataloader --------------------------------------------------------------------
    if os.path.isdir(args.input):
        datadir = args.input  # assume torchvision datadir structure
    else:
        raise ValueError("Pls provide a torchvision-like image directory")
    val_loader = create_imagenet_valset_loader(datadir, batch_size=batch_size)
    # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
    ppp = PrePostProcessor(model)
    ppp.input().tensor() \
        .set_element_type(Type.f32)
    ppp.output().tensor().set_element_type(Type.f32)
    # Apply preprocessing, modifying the original 'model'
    model = ppp.build()
    # --------------------------- Step 5. Loading model to the device -----------------------------------------------------
    log.info('Loading the model to the plugin')
    compiled_model = core.compile_model(model, args.device)
    # --------------------------- Step 6. Create infer request queue ------------------------------------------------------
    # current design is that batch size matches number of jobs (specified by user)
    log.info('Starting inference in asynchronous mode')
    infer_queue = AsyncInferQueue(compiled_model, batch_size)
    infer_queue.set_callback(completion_callback)
    # --------------------------- Step 7. Do inference --------------------------------------------------------------------
    end = time.time()
    # BUGFIX: the original wrapped this loop in enumerate() and then reused
    # the same variable `i` for the per-image tensor in the inner zip,
    # shadowing the (otherwise unused) batch index. Descriptive names now.
    for input_, target in val_loader:
        np_inputs, np_targets = input_.numpy(), target.numpy()
        for image, label in zip(np_inputs, np_targets):
            infer_queue.start_async(
                inputs={0: np.expand_dims(image, axis=0)},
                userdata=label)
        # Drain the queue so meters are consistent before logging.
        infer_queue.wait_all()
        if not args.quiet:
            log.info("img_count {:6} | top1.avg: {:5.1f}, top5.avg: {:5.1f}".format(top1.count, top1.avg, top5.avg))
    e2e_elapse = time.time() - end
    # top1.count equals the number of images scored by the callback.
    throughput = top1.count / e2e_elapse
    log.info(
        'Final::: '
        '| E2E: {:.3f} s '
        '| Acc@1: {:>5.2f} % '
        '| Acc@5: {:>5.2f} % '
        '| TPT: {:6.2f} fps '
        '| N-InferReq: {}'.format(
            e2e_elapse, top1.avg, top5.avg, throughput, batch_size)
    )
    return 0
if __name__ == '__main__':
    # Script entry point: propagate main()'s return code as the exit status.
    raise SystemExit(main())
@vuiseng9
Copy link
Author

python ov-api2p0-async-infer.py \
    -d CPU \
    -b 16 \
    --quiet \
    -i /path/to/imgnet-1k \
    -m /path/to/openvino-onnx-or-ir

@vuiseng9
Copy link
Author

[ INFO ] Final::: | E2E: 11.829 s | Acc@1: 90.80 % | Acc@5: 98.60 % | TPT:  84.54 fps | N-InferReq: 1
[ INFO ] Final::: | E2E:  9.299 s | Acc@1: 90.80 % | Acc@5: 98.60 % | TPT: 107.54 fps | N-InferReq: 2
[ INFO ] Final::: | E2E:  8.109 s | Acc@1: 90.80 % | Acc@5: 98.60 % | TPT: 123.31 fps | N-InferReq: 4
[ INFO ] Final::: | E2E:  7.936 s | Acc@1: 90.80 % | Acc@5: 98.60 % | TPT: 126.00 fps | N-InferReq: 8
[ INFO ] Final::: | E2E:  6.857 s | Acc@1: 90.80 % | Acc@5: 98.60 % | TPT: 145.85 fps | N-InferReq: 16

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment