Zong Fan (zongfan2)

💭
Coding is life!
@zongfan2
zongfan2 / tic_tac_go.py
Last active May 13, 2021 11:28
Application of deep Q-learning to the tic-tac-toe game in PyTorch
"""
Modified from github repo: shakedzy/tic_tac_toe
"""
import numpy as np
import torch
import torch.nn as nn
import os
import time
import random
from collections import deque
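The preview stops at the imports. As a rough sketch of the deep Q-learning pieces those imports point to, the snippet below shows a small fully connected Q-network over the nine board cells, a replay buffer, and an epsilon-greedy action picker; the layer sizes, buffer length, and epsilon value are illustrative assumptions, not the gist's actual hyperparameters.

import random
from collections import deque

import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Maps a flattened 3x3 board to one Q-value per cell."""
    def __init__(self, n_cells=9, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_cells, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, n_cells),
        )

    def forward(self, x):
        return self.net(x)

# experience replay: (state, action, reward, next_state, done) tuples
replay_buffer = deque(maxlen=10000)

def select_action(q_net, state, epsilon=0.1):
    """Epsilon-greedy move selection; state is a flat NumPy array of 9 cells in {-1, 0, 1}."""
    if random.random() < epsilon:
        return random.randrange(9)
    with torch.no_grad():
        q_values = q_net(torch.from_numpy(state).float().unsqueeze(0))
    return int(q_values.argmax(dim=1).item())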
@zongfan2
zongfan2 / CBM3D.py
Created May 7, 2020 06:51
CBM3D algorithm for white Gaussian noise attenuation
# -*- coding: utf-8 -*-
import cv2
# import PSNR
import numpy as np
import pysnooper
cv2.setUseOptimized(True)
# Parameters initialization
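The preview ends before the algorithm itself. As a small, hedged sketch of the surrounding plumbing rather than CBM3D proper, the snippet below adds synthetic white Gaussian noise for testing, converts the image to a luminance-chrominance space (CBM3D performs block matching on the luminance channel and reuses the groups for the chroma channels), and computes PSNR in place of the commented-out PSNR import; the sigma value is an arbitrary example.

import cv2
import numpy as np

def add_gaussian_noise(img, sigma=25):
    """Simulate additive white Gaussian noise on a uint8 image."""
    noise = np.random.normal(0.0, sigma, img.shape)
    noisy = img.astype(np.float64) + noise
    return np.clip(noisy, 0, 255).astype(np.uint8)

def split_luma_chroma(img_bgr):
    """Transform to a luminance-chrominance space before denoising."""
    ycrcb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)
    y, cr, cb = cv2.split(ycrcb)
    return y, cr, cb

def psnr(reference, denoised):
    """Peak signal-to-noise ratio in dB for 8-bit images."""
    mse = np.mean((reference.astype(np.float64) - denoised.astype(np.float64)) ** 2)
    return 10.0 * np.log10(255.0 ** 2 / mse)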
@zongfan2
zongfan2 / opencv_heatmap.py
Created November 26, 2019 03:02
Generate a crowd heatmap with OpenCV and the heatmap package
import heatmap
import cv2
def use_heatmap(image, box_centers):
    hm = heatmap.Heatmap()
    img = hm.heatmap(box_centers, dotsize=200, size=(image.shape[1], image.shape[0]), opacity=128, area=((0, 0), (image.shape[1], image.shape[0])))
    return img
img = "/path/to/image.jpg"
centers = [(10, 20), (30, 40)]  # centers of the heatmap points
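The preview is truncated here. A plausible continuation (an assumption, since the gist's remaining lines are not shown) reads the image with OpenCV, renders the heatmap, and blends it over the frame; the conversion below assumes hm.heatmap() returns a PIL image in RGBA order, which depends on the heatmap package version.

import numpy as np

image = cv2.imread(img)
heat = use_heatmap(image, centers)
# assumption: the heatmap package returns a PIL image; convert it for OpenCV
heat_bgr = cv2.cvtColor(np.array(heat), cv2.COLOR_RGBA2BGR)
overlay = cv2.addWeighted(image, 0.6, heat_bgr, 0.4, 0)
cv2.imwrite("crowd_heatmap.jpg", overlay)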
@zongfan2
zongfan2 / onnx_tensorrt_backend_example.py
Created November 5, 2019 10:03
ONNX model inference with the onnx_tensorrt backend
import onnx
import argparse
import onnx_tensorrt.backend as backend
import numpy as np
import time
def main():
    parser = argparse.ArgumentParser(description="Onnx runtime engine.")
    parser.add_argument(
        "--onnx", default="/home/arkenstone/test_face_model/res50/mxnet_exported_mnet.onnx",
@zongfan2
zongfan2 / onnx_tensorrt_inference.py
Created November 4, 2019 09:14
Accelerated inference of an ONNX model with TensorRT
import tensorrt as trt
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import time
model_path = "model.onnx"
input_size = 32
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
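The preview stops after the logger. A condensed, hedged sketch of how such a script typically continues is below; it follows the TensorRT 7-style Python API (build_cuda_engine, binding-index calls) that matches this gist's era, assumes a single input and a single output binding, and reuses model_path and input_size from above. Newer TensorRT releases renamed several of these calls.

EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def build_engine(onnx_path):
    """Parse the ONNX file and build a TensorRT engine (TRT 7-style API)."""
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, TRT_LOGGER)
    builder.max_workspace_size = 1 << 28
    with open(onnx_path, "rb") as f:
        if not parser.parse(f.read()):
            raise RuntimeError("failed to parse ONNX model")
    return builder.build_cuda_engine(network)

def infer(engine, input_array):
    """Copy input to the GPU, run the engine, and copy the single output back."""
    context = engine.create_execution_context()
    h_input = np.ascontiguousarray(input_array, dtype=np.float32)
    h_output = np.empty(trt.volume(engine.get_binding_shape(1)), dtype=np.float32)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)
    stream = cuda.Stream()
    cuda.memcpy_htod_async(d_input, h_input, stream)
    context.execute_async_v2(bindings=[int(d_input), int(d_output)],
                             stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    stream.synchronize()
    return h_output

engine = build_engine(model_path)
result = infer(engine, np.random.rand(1, 3, input_size, input_size).astype(np.float32))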
@zongfan2
zongfan2 / image_client_for_trt_serving.py
Created November 1, 2019 14:34
Modified TensorRT Inference Server image client sample
import argparse
import numpy as np
import os
import tensorrtserver.api as tapi
import tensorrtserver.api.model_config_pb2 as model_config
import cv2
import queue as q
def model_dtype_to_np(model_dtype):
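The preview cuts off right at this helper. In the upstream image_client sample that this gist says it modifies, the function maps model_config protobuf dtypes to NumPy dtypes; a hedged reconstruction covering the common cases looks like this:

def model_dtype_to_np(model_dtype):
    """Translate a model_config DataType enum value to a NumPy dtype."""
    if model_dtype == model_config.TYPE_BOOL:
        return np.bool_
    if model_dtype == model_config.TYPE_UINT8:
        return np.uint8
    if model_dtype == model_config.TYPE_INT8:
        return np.int8
    if model_dtype == model_config.TYPE_INT32:
        return np.int32
    if model_dtype == model_config.TYPE_FP16:
        return np.float16
    if model_dtype == model_config.TYPE_FP32:
        return np.float32
    return None  # unsupported dtype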
@zongfan2
zongfan2 / deepstream_gstreamer_decoding_with_python.py
Created September 18, 2019 09:06
Use the NVIDIA DeepStream SDK GStreamer plugins to decode an H.264 video stream and output the decoded frames in Python
import subprocess as sp
import cv2
import numpy as np
cmd = ["gst-launch-1.0",
"rtspsrc", "location=rtsp://admin:[email protected]/Streaming/Channels/1", "latency=100", "!",
"queue", "!",
"rtph264depay", "!",
"h264parse", "!",
"nvv4l2decoder", "drop-frame-interval=2", "!",
@zongfan2
zongfan2 / ubuntu1804_dGPU_install_nv_deepstream.dockerfile
Last active December 22, 2021 12:46
Dockerfile to prepare DeepStream in Docker for NVIDIA dGPUs (e.g., Tesla T4, GeForce GTX 1080, RTX 2080)
FROM ubuntu:18.04 AS base
# install basic tools: vim, wget and gnupg
RUN apt-get update && apt-get install -y vim wget gnupg
# skip any interactive configuration prompts during installation
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get install -yq keyboard-configuration
# install gstreamer
RUN apt install -y \
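# The package list is cut off in the preview. A plausible continuation, following
# the GStreamer dependencies listed in the DeepStream dGPU setup guide (exact
# packages vary by DeepStream release), is shown below as an assumption:
    libgstreamer1.0-0 \
    gstreamer1.0-tools \
    gstreamer1.0-plugins-good \
    gstreamer1.0-plugins-bad \
    gstreamer1.0-plugins-ugly \
    gstreamer1.0-libav \
    libgstrtspserver-1.0-0 \
    libjansson4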
@zongfan2
zongfan2 / ffmpeg_python_with_gpu_acceleration.py
Created September 8, 2019 16:35
Use a pipe to read FFmpeg-decoded video frames with NVIDIA GPU hardware acceleration
import subprocess as sp
import cv2
import numpy as np
from PIL import Image
import tensorflow as tf
ffmpeg_cmd_1 = ["./ffmpeg", "-y",
"-hwaccel", "nvdec",
"-c:v", "h264_cuvid",
"-vsync", "0",
@zongfan2
zongfan2 / tf_model_post_training_quantization.py
Last active June 14, 2022 08:15
Post-training quantization of a TensorFlow model to float16
import tensorflow as tf
from tensorflow.core.framework import types_pb2, graph_pb2, attr_value_pb2
from tensorflow.tools.graph_transforms import TransformGraph
from google.protobuf import text_format
import numpy as np
# object detection api input and output nodes
input_name = "image_tensor"
output_names = ["detection_boxes", "detection_classes", "detection_scores", "num_detections"]
# Const should be float32 in object detection api during nms (see here: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/non-max-suppression-v4.html)
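The preview stops at the note about NMS constants. A minimal sketch of the kind of Const-node rewrite those imports suggest is shown below; the function name and keep-list are illustrative assumptions, and it deliberately skips updating the "T"/"dtype" attrs of the ops that consume each constant, which a full conversion also has to handle.

def convert_consts_to_fp16(graph_def, keep_fp32=()):
    """Return a copy of graph_def with float32 Const nodes rewritten to float16."""
    new_graph = graph_pb2.GraphDef()
    for node in graph_def.node:
        new_node = new_graph.node.add()
        new_node.CopyFrom(node)
        if node.op != "Const" or node.name in keep_fp32:
            continue
        if node.attr["dtype"].type != types_pb2.DT_FLOAT:
            continue
        # cast the stored tensor to float16 and write it back
        values = tf.make_ndarray(node.attr["value"].tensor).astype(np.float16)
        new_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=types_pb2.DT_HALF))
        new_node.attr["value"].CopyFrom(
            attr_value_pb2.AttrValue(tensor=tf.make_tensor_proto(values)))
    return new_graph

A frozen detection graph would then be converted with something like convert_consts_to_fp16(frozen_graph_def, keep_fp32=nms_const_names) before re-serializing it, where nms_const_names is a hypothetical list of the NMS constants mentioned above.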