Skip to content

Instantly share code, notes, and snippets.

View pra-dan's full-sized avatar
🐣
Working on Computer Vision & Gen-AI

Prashant Dandriyal pra-dan

🐣
Working on Computer Vision & Gen-AI
View GitHub Profile
# Blender scripting (bpy) fragment: grab a target object and the scene camera,
# then cache the target's X/Y location — presumably for camera-orbit math that
# follows in the full script. NOTE(review): this is a partial snippet; the rest
# of the script is not visible here.
import bpy
import os
from math import *       # wildcard imports kept as-is; trig helpers presumably used later — TODO confirm
from mathutils import *
# Set your own target here (the named object must exist in the current scene).
target = bpy.data.objects['Cube']
cam = bpy.data.objects['Camera']
# Cache the target's X/Y location components for later use.
t_loc_x = target.location.x
t_loc_y = target.location.y

Observations (for bigger bot)

  1. Related to Forward and Reverse Drive
# Drive codes
0x602: RIGHT DRIVE
0x601: LEFT DRIVE

# Acceleration Codes
Min V: 0xCC = 204
Max V: 0x6A = 106 # faster (lower raw value = higher speed)
{
"name": "POOPY",
"symbol": "poopy",
"description": "Test NFT",
"seller_fee_basis_points": 10,
"image": "https://github.com/matterport/Mask_RCNN/blob/master/images/2383514521_1fc8d7b0de_z.jpg",
"animation_url": "https://www.arweave.net/efgh1234?ext=mp4",
"external_url": "https://twitter.com/yellowpoophole",
"attributes": [
{
# Install DeepStream 5.1
- Flash JetPack 4.5.1 which comes installed with DeepStream 5.1.
# Install Prerequisites
## Install packages. Run `requirements.sh`
## Follow instructions given in `/opt/nvidia/deepstream/deepstream-5.1/sources/apps/sample_apps/deepstream_app/README` as follows:
sudo apt-get install libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
libgstrtspserver-1.0-dev libx11-dev
# Obtain inference engine using `trtexec`
@pra-dan
pra-dan / compare_onnx_files.py
Created November 11, 2025 11:03
Compare multiple ONNX models' output similarity. Uses polygraphy version `0.49.18`
# Compare the outputs of two ONNX models using Polygraphy's ONNX-Runtime
# backend. NOTE(review): fragment — the Comparator.run/compare calls that would
# consume these session builders are not visible in this excerpt.
from polygraphy.backend.onnxrt import OnnxrtRunner, SessionFromOnnx
from polygraphy.backend.trt import TrtRunner, EngineFromNetwork, NetworkFromOnnxPath
from polygraphy.comparator import Comparator, DataLoader
# Paths to the two models under comparison (exported with vs. without dynamo — TODO confirm).
model_path_1 = "yes_dynamo.onnx"
model_path_2 = "no_dynamo.onnx"
# Lazy session builders: sessions are constructed when the runners are activated.
build_onnxrt_session_1 = SessionFromOnnx(model_path_1)
build_onnxrt_session_2 = SessionFromOnnx(model_path_2)
# TensorRT engine path left disabled; comparison presumably runs ORT-vs-ORT here.
# build_engine = EngineFromNetwork(NetworkFromOnnxPath(model_path))