Capture a stereo image from two CSI cameras and re-stream it to RTSP using nvenc
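The script reads the number of cameras from a CAMERAS setting via python-decouple (an environment variable or a .env file next to the script) and pushes the encoded stream to an RTSP server at localhost:8554 under the path /happy, so a server that accepts published streams (for example rtsp-simple-server / MediaMTX) is assumed to be running there. A minimal .env, assuming a two-camera stereo rig:

CAMERAS=2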
# MIT License
# Copyright (c) 2019-2022 JetsonHacks
# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit using OpenCV
# Drivers for the camera and OpenCV are included in the base image
import cv2
import time
import numpy as np
from threading import Thread
from decouple import config
""" | |
gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera | |
Flip the image by setting the flip_method (most common values: 0 and 2) | |
display_width and display_height determine the size of each camera pane in the window on the screen | |
Default 1920x1080 displayd in a 1/4 size window | |
""" | |
CAMERAS = int(config("CAMERAS")) | |
images = [None] * CAMERAS | |
OUTPUT_IMAGE = None | |
RTSP_URL = "localhost:8554" | |


def grabFrames(camOrdinal):
    """Continuously read frames from one CSI camera into the shared images list."""
    global images
    print("Cam ordinal: " + str(camOrdinal))
    cap = cv2.VideoCapture(gstreamer_pipeline(
        flip_method=0, sensor_id=camOrdinal), cv2.CAP_GSTREAMER)
    while True:
        time.sleep(1)
        try:
            while cap.isOpened():
                time.sleep(1 / 20)
                ret, img = cap.read()
                if ret:
                    images[camOrdinal] = img
                else:
                    print("No frame")
        except Exception as e:
            # Re-open the capture if the camera source disconnects
            print("Video capture source disconnected, trying again: " + str(e))
            cap.release()
            cap = cv2.VideoCapture(gstreamer_pipeline(
                flip_method=0, sensor_id=camOrdinal), cv2.CAP_GSTREAMER)


def gstreamer_pipeline(
    sensor_id=0,
    capture_width=1280,
    capture_height=720,
    display_width=960,
    display_height=540,
    framerate=15,
    flip_method=0,
):
    return (
        "nvarguscamerasrc sensor-id=%d ! "
        "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            sensor_id,
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        )
    )
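
# For reference, with the default arguments above the generated pipeline string is
# roughly the following (values track whatever keyword arguments are passed in):
#   nvarguscamerasrc sensor-id=0 !
#   video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, framerate=(fraction)15/1 !
#   nvvidconv flip-method=0 !
#   video/x-raw, width=(int)960, height=(int)540, format=(string)BGRx !
#   videoconvert ! video/x-raw, format=(string)BGR ! appsink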


def render():
    """Combine the latest camera frames into a single stacked image for the encoder."""
    global images
    global OUTPUT_IMAGE
    frame_rate = 15
    prev = 0
    noImage = True
    while True:
        # Wait until every capture thread has delivered at least one frame
        while noImage:
            print("Waiting for frames")
            noImage = False
            time.sleep(1)
            for img in images:
                if img is None:
                    noImage = True
        time_elapsed = time.time() - prev
        if time_elapsed < 1. / frame_rate:
            time.sleep(1. / frame_rate - time_elapsed)
        prev = time.time()
        try:
            if len(images) == 2:
                # Stack the two frames vertically to form the stereo image
                vis = np.concatenate((images[0], images[1]), axis=0)
            else:
                vis = images[0]
            OUTPUT_IMAGE = vis
        except Exception as e:
            print("render failed, trying again: " + str(e))


def encoder():
    """Encode OUTPUT_IMAGE with the Jetson hardware encoder and push it to the RTSP server."""
    global OUTPUT_IMAGE
    rtsp_server = RTSP_URL + '/happy'  # push server (output server)
    frame_rate = 15
    prev = 0
    # Alternative encoder pipelines (kept for reference):
    # gst_out = 'appsrc ! videoconvert ! x264enc speed-preset=veryfast tune=zerolatency byte-stream=true threads=1 key-int-max=15 intra-refresh=true ! video/x-h264 ! rtspclientsink location=rtsp://' + rtsp_server + ' protocols=tcp'
    # gst_out = 'appsrc is-live=true ! videoconvert ! omxh264enc bitrate=12000000 ! video/x-h264, stream-format=byte-stream ! h264parse ! rtspclientsink location=rtsp://' + rtsp_server + ' protocols=tcp'
    # gst_out = 'appsrc is-live=1 ! videoconvert ! nvv4l2h264enc ! video/x-h264, stream-format=byte-stream ! h264parse ! rtspclientsink location=rtsp://' + rtsp_server + ' latency=400'
    # gst_out = 'appsrc is-live=true ! videoconvert ! omxh264enc bitrate=8000000 ! video/x-h264, stream-format=byte-stream ! h264parse ! rtspclientsink location=rtsp://' + rtsp_server + ' latency=400'
    gst_out = ('appsrc is-live=true ! videoconvert ! video/x-raw,format=RGBA ! nvvidconv ! '
               'nvv4l2h264enc iframeinterval=25 bitrate=3000000 ! '
               'video/x-h264, stream-format=byte-stream ! h264parse ! '
               'rtspclientsink location=rtsp://' + rtsp_server + ' latency=500')
    out = None
    try:
        while True:
            while OUTPUT_IMAGE is None:
                print("Encoder sleeping")
                time.sleep(1)
            if out is None:
                # Create the writer once the first combined frame fixes the output size
                print("Creating")
                width = OUTPUT_IMAGE.shape[1]
                height = OUTPUT_IMAGE.shape[0]
                out = cv2.VideoWriter(
                    gst_out, cv2.CAP_GSTREAMER, 0, 15, (width, height), True)
                if not out.isOpened():
                    raise Exception("can't open video writer")
            time_elapsed = time.time() - prev
            if time_elapsed < 1. / frame_rate:
                time.sleep(1. / frame_rate - time_elapsed)
            prev = time.time()
            out.write(OUTPUT_IMAGE)
    except Exception as e:
        print("encoder fail, trying again: " + str(e))


if __name__ == "__main__":
    # One capture thread per camera, plus a render thread and an encoder thread
    cameras = len(images)
    for cam in range(cameras):
        threadCam = Thread(target=grabFrames, args=(cam,))
        threadCam.start()
    threadRender = Thread(target=render, args=())
    threadRender.start()
    threadEncoder = Thread(target=encoder, args=())
    threadEncoder.start()
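
To check the published stream from another machine, the sketch below can be used. It assumes the RTSP server is reachable at localhost:8554 (adjust the host as needed) and that the local OpenCV build can open RTSP URLs, e.g. through its FFmpeg backend; press q to quit.

import cv2

cap = cv2.VideoCapture("rtsp://localhost:8554/happy")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("stereo", frame)   # top/bottom stacked camera pair
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()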