Last active
December 4, 2020 08:10
-
-
Save AsharFatmi/302abf70eac2b6651e35070bab74e9f9 to your computer and use it in GitHub Desktop.
TensorRT WPOD-Net license-plate (LP) detection inference script
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np | |
from PIL import Image | |
import cv2 | |
from src.keras_utils import reconstruct | |
import pycuda.driver as cuda | |
import pycuda.autoinit # For automatic creation and cleanup of CUDA context | |
import tensorrt as trt | |
import utils | |
# --- Configuration -----------------------------------------------------------
ENGINE_PATH = '/home/kingashar/YOLOV3-LP/new.engine'  # serialized TensorRT engine (ADJUST path)
CLASSES = ['LP']  # single detection class: license plate
INPUT_DATA_TYPE = np.float32  # dtype used for the host-side input/output buffers
LP_DIMENSIONS = (256, 96)  # unused in this snippet — presumably (width, height) of a rectified plate crop; confirm against reconstruct()
threshold = 0.95  # unused in this visible snippet — presumably a detection confidence cutoff
def im2single(I):
    """Convert a uint8 image array to float32 with values scaled to [0, 1].

    Parameters:
        I: numpy array of dtype uint8 (any shape).

    Returns:
        float32 array of the same shape, each value divided by 255.

    Raises:
        TypeError: if I is not uint8.  (The original used a bare `assert`,
        which is silently stripped when Python runs with -O; an explicit
        raise keeps the validation in optimized mode too.)
    """
    if I.dtype != np.uint8:
        raise TypeError('im2single expects a uint8 image, got dtype {}'.format(I.dtype))
    return I.astype('float32') / 255.
# --- Engine deserialization and buffer allocation ----------------------------
# Deserialize the prebuilt TensorRT engine from disk and create one
# execution context plus one CUDA stream for asynchronous transfers.
trt_logger = trt.Logger(trt.Logger.INFO)
runtime = trt.Runtime(trt_logger)
with open(ENGINE_PATH, "rb") as f:
    engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()
stream = cuda.Stream()
# Page-locked (pinned) host buffers sized to the engine's binding shapes:
# binding 0 is assumed to be the input, binding 1 the output.
# NOTE(review): get_binding_shape() is deprecated in newer TensorRT
# releases in favor of the tensor-name API — confirm against the TRT
# version actually in use.
host_in = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=INPUT_DATA_TYPE)
host_out = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=INPUT_DATA_TYPE)
# Device-side buffers mirroring the host buffers.  "devide" is a typo for
# "device", kept as-is because infer() references these exact names.
devide_in = cuda.mem_alloc(host_in.nbytes)
devide_out = cuda.mem_alloc(host_out.nbytes)
# --- Input preprocessing -----------------------------------------------------
INPUT_IMAGE_PATH = '/home/kingashar/Downloads/6205_0.jpg'  # ADJUST
image = cv2.imread(INPUT_IMAGE_PATH)
if image is None:
    # cv2.imread returns None (rather than raising) on a missing/unreadable
    # file; fail loudly instead of crashing later with a confusing error.
    raise FileNotFoundError('could not read image: {}'.format(INPUT_IMAGE_PATH))
# Normalize uint8 BGR pixels to float32 in [0, 1].
image_tensor = im2single(image)
# BUG FIX: the original resized the raw uint8 `image` here, silently
# discarding the im2single() normalization above — so the engine received
# 0-255 values instead of 0-1 (a likely cause of TRT output differing from
# the Keras reference).  Resize the normalized tensor instead.
# Note cv2.resize takes dsize as (width, height) -> output shape (845, 600, 3).
image_tensor = cv2.resize(image_tensor, (600, 845))
# Add a leading batch dimension.  (The original reshape swapped shape[0]
# and shape[1], mislabeling H and W; reshape preserves memory order — and
# infer() ravels the array anyway — so using the true shape is equivalent
# for inference and correct for the printout.)
image_tensor = image_tensor.reshape((1,) + image_tensor.shape)
print(image_tensor.shape)

import time
start = time.time()  # wall-clock timer started just before inference
def infer(img):
    """Run one synchronous inference pass through the TensorRT engine.

    img: numpy array whose flattened size matches the engine's input
        binding volume (it is ravel()ed into the pinned input buffer).

    Returns the module-level page-locked host output buffer (a flat
    float32 array).  NOTE: the same buffer is reused and overwritten by
    the next call; copy it if you need to keep the result.

    Relies on the module-level globals: context, stream, host_in,
    host_out, devide_in, devide_out.  The statement order below is
    significant — all transfers and the execution are enqueued on the
    same CUDA stream, then synchronize() blocks until copy-back is done.
    """
    bindings = [int(devide_in), int(devide_out)]  # device pointers for binding slots 0 and 1
    np.copyto(host_in, img.ravel())  # stage the input in pinned host memory
    cuda.memcpy_htod_async(devide_in, host_in, stream)
    context.execute_async(bindings=bindings, stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_out, devide_out, stream)
    stream.synchronize()  # wait for the async copy-back to complete
    return host_out
# --- Run inference and report -----------------------------------------------
out = infer(image_tensor)
Yr = np.squeeze(out)  # drop singleton dimensions from the raw network output
print('Input : {}'.format(INPUT_IMAGE_PATH))
print('Output: {}'.format(out))
print('Prediction: {}'.format(np.squeeze(out)))
print((time.time()-start)*1000)  # elapsed wall-clock milliseconds since `start`
# Dump the squeezed output to disk for offline inspection/comparison.
with open('/home/kingashar/YOLOV3-LP/yr.txt', 'w') as f:
    f.write('{}'.format(str(Yr)))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Thanks for sharing your work! I am using this script and the keras_inference one; the two give different outputs. If you can shed some light on how to get the correct output in TRT inference, it would be great and very helpful.
Thanks!