@jkjung-avt
Last active November 7, 2024 11:55
Capture and display video from an IP CAM, a USB webcam, or the Tegra X2/X1 onboard camera.
# --------------------------------------------------------
# Camera sample code for Tegra X2/X1
#
# This program can capture and display video from an
# IP CAM, a USB webcam, or the Tegra onboard camera.
# Refer to the following blog post for how to set up
# and run the code:
# https://jkjung-avt.github.io/tx2-camera-with-python/
#
# Written by JK Jung <[email protected]>
# --------------------------------------------------------


import sys
import argparse
import subprocess

import cv2


WINDOW_NAME = 'CameraDemo'


def parse_args():
    # Parse input arguments
    desc = 'Capture and display live camera video on Jetson TX2/TX1'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--rtsp', dest='use_rtsp',
                        help='use IP CAM (remember to also set --uri)',
                        action='store_true')
    parser.add_argument('--uri', dest='rtsp_uri',
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554',
                        default=None, type=str)
    parser.add_argument('--latency', dest='rtsp_latency',
                        help='latency in ms for RTSP [200]',
                        default=200, type=int)
    parser.add_argument('--usb', dest='use_usb',
                        help='use USB webcam (remember to also set --vid)',
                        action='store_true')
    parser.add_argument('--vid', dest='video_dev',
                        help='device # of USB webcam (/dev/video?) [1]',
                        default=1, type=int)
    parser.add_argument('--width', dest='image_width',
                        help='image width [1920]',
                        default=1920, type=int)
    parser.add_argument('--height', dest='image_height',
                        help='image height [1080]',
                        default=1080, type=int)
    args = parser.parse_args()
    return args


def open_cam_rtsp(uri, width, height, latency):
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! '
               'videoconvert ! appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_usb(dev, width, height):
    # We want to set width and height here, otherwise we could just do:
    #     return cv2.VideoCapture(dev)
    gst_str = ('v4l2src device=/dev/video{} ! '
               'video/x-raw, width=(int){}, height=(int){} ! '
               'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_cam_onboard(width, height):
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_window(width, height):
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, width, height)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')


def read_cam(cap):
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the window
            # If yes, terminate the program
            break
        _, img = cap.read()  # grab the next image frame from camera
        if show_help:
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('H') or key == ord('h'):  # toggle help message
            show_help = not show_help
        elif key == ord('F') or key == ord('f'):  # toggle fullscreen
            full_scrn = not full_scrn
            if full_scrn:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)


def main():
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))

    if args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri,
                            args.image_width,
                            args.image_height,
                            args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev,
                           args.image_width,
                           args.image_height)
    else:  # by default, use the Jetson onboard camera
        cap = open_cam_onboard(args.image_width,
                               args.image_height)

    if not cap.isOpened():
        sys.exit('Failed to open camera!')

    open_window(args.image_width, args.image_height)
    read_cam(cap)

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
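
Typical invocations, based on the argparse options above (the RTSP URI and the USB device number below are placeholders to adapt to your own setup):

# Jetson onboard camera (default)
python3 tegra-cam.py

# USB webcam at /dev/video1, 1280x720
python3 tegra-cam.py --usb --vid 1 --width 1280 --height 720

# IP camera over RTSP
python3 tegra-cam.py --rtsp --uri rtsp://192.168.1.64:554 --latency 200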
@tnaduc

tnaduc commented May 18, 2021

Thank you @jkjung-avt for posting this useful tip. I have followed your RTSP setup for an IP camera and it does work.
In fact the cap is open and I can grab frames from the cap object. However, I get the following warnings and somewhat intermittent performance issues with the capture:

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_caps_is_empty: assertion 'GST_IS_CAPS (caps)' failed

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_caps_truncate: assertion 'GST_IS_CAPS (caps)' failed

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_caps_fixate: assertion 'GST_IS_CAPS (caps)' failed

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_caps_get_structure: assertion 'GST_IS_CAPS (caps)' failed

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_structure_get_string: assertion 'structure != NULL' failed

(python3:10309): GStreamer-CRITICAL **: 10:25:13.291: gst_mini_object_unref: assertion 'mini_object != NULL' failed
NvMMLiteOpen : Block : BlockType = 261 
NVMEDIA: Reading vendor.tegra.display-size : status: 6 
NvMMLiteBlockCreate : Block : BlockType = 261 
Allocating new output: 1280x720 (x 12), ThumbnailMode = 0
OPENMAX: HandleNewStreamFormat: 3605: Send OMX_EventPortSettingsChanged: nFrameWidth = 1280, nFrameHeight = 720 
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (896) open OpenCV | GStreamer warning: unable to query duration of stream
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (933) open OpenCV | GStreamer warning: Cannot query video position: status=1, value=0, duration=-1

Is there anything I can do to get rid of those critical warnings?

The gst string is exactly the same as you suggested:

gst_str = ('rtspsrc location={} latency={} ! '
                        'rtph264depay ! h264parse ! omxh264dec ! '
                        'nvvidconv ! '
                        'video/x-raw, width=(int){}, height=(int){}, '
                        'format=(string)BGRx ! videoconvert ! '
                        'appsink').format(uri, latency, width, height)

@qazi0

qazi0 commented May 24, 2021

Hi @jkjung-avt
I had tested my TX2 camera with this script earlier (about 4-5 months ago) and it worked flawlessly. Now the camera fails to open with the script.
I can still successfully open it from the terminal using the command and pipeline you mentioned in this thread above:

gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=640, height=480, framerate=30/1, format=NV12' ! nvvidconv flip-method=2 ! nvegltransform ! nveglglessink -e

I haven't changed anything significant on my TX2 in these 4 months (by change I mean installing/uninstalling packages or a reflash of L4T), but now tegra-cam.py gives 'Failed to open camera!'.
I've also tried using the models from tensorrt_demos like trt_googlenet.py but they also fail to open the camera (cap.isOpened() returns False).

I've also already tried changing the gst_str to


gst_str = ('nvarguscamerasrc ! '
           'video/x-raw(memory:NVMM), width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)NV12 ! '
           'nvvidconv flip-method=2 ! '
           'nvegltransform ! '
           'videoconvert ! '
           'appsink').format(width, height)

But to no avail. Camera works flawlessly with gst-launch-1.0 in the terminal but doesn't open with the script.
Could you suggest some other fixes or possible reasons?

Here is my TX2 info from jtop:

Thanks a lot.

@jkjung-avt

@Siraj-Qazi Since the "gst-launch-1.0" command works on your TX2, I think you should check whether you've made any changes to your opencv library or python3 cv2 module. (For example, did you do sudo apt install libopencv-dev by accident?)

@qazi0

qazi0 commented May 28, 2021

@jkjung-avt I think I did install libopencv-dev during this time. So how do I fix it? Should I remove libopencv-dev and leave only the python3 module?

@jkjung-avt

Yes. Please remove "libopencv-dev". (sudo apt purge libopencv-dev)

@qazi0

qazi0 commented May 31, 2021

Hi @jkjung-avt, sorry I couldn't get back to you earlier.

I tried sudo apt purge libopencv-dev as you suggested, but it didn't work; same error. I also did sudo apt remove libopencv*, which also grabbed libopencv-python and uninstalled it, but the error still persists. Any more suggestions?

@jkjung-avt

@Siraj-Qazi The problem is likely that you've overridden the stock libopencv (installed by JetPack). You could either remove the non-working version of libopencv, or re-install it (e.g. build from source yourself) with proper GStreamer support.
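
A quick way to check which cv2 build Python is actually picking up, and whether it was compiled with GStreamer support, is a couple of lines like the following (a minimal sketch, not part of the original thread):

import cv2

print('cv2 version :', cv2.__version__)
print('cv2 location:', cv2.__file__)

# getBuildInformation() dumps the build configuration of the installed cv2;
# look for a "GStreamer: YES" entry in its Video I/O section.
for line in cv2.getBuildInformation().splitlines():
    if 'GStreamer' in line:
        print(line.strip())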

@qazi0

qazi0 commented Jun 1, 2021

@jkjung-avt Thanks a lot. I'll try to build from source with GStreamer and let you know :)

@zahidaMassin

Hi @Siraj-Qazi, @jkjung-avt, I just found the solution. It was just a GStreamer error; you just need to install OpenCV 3.4.6 for the Jetson Nano:
https://jkjung-avt.github.io/opencv-on-nano/

@qazi0

qazi0 commented Jun 8, 2021

Oh alright. Will get back as soon as I get time to build this. Thanks @zahidaMassin!

@han88

han88 commented Jun 11, 2021

Hi, is there a way to use tegra-cam.py on a PC for RTSP streaming? The USB camera works... Is there an alternative for omxh264dec, nvvidconv, etc.? Thanks.

@jkjung-avt

@han88 You might use "avdec_h264". Reference: https://github.com/jkjung-avt/tensorrt_demos/blob/master/utils/camera.py#L66-L72

    elif 'avdec_h264' in gst_elements:
        # Otherwise try to use the software decoder 'avdec_h264'
        # NOTE: in case resizing images is necessary, try adding
        #       a 'videoscale' into the pipeline
        gst_str = ('rtspsrc location={} latency={} ! '
                   'rtph264depay ! h264parse ! avdec_h264 ! '
                   'videoconvert ! appsink').format(uri, latency)
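
For a PC without the Jetson-specific OMX/NVMM elements, the quoted branch can be wrapped into a complete helper along these lines (a sketch assuming an OpenCV build with GStreamer support; the function name is mine, not taken from camera.py):

import cv2

def open_rtsp_software(uri, latency=200):
    # Software-decode RTSP pipeline for a desktop PC: 'avdec_h264'
    # replaces the Jetson-only 'omxh264dec' / 'nvvidconv' elements.
    # NOTE: in case resizing images is necessary, try adding a
    # 'videoscale' element before 'videoconvert'.
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! avdec_h264 ! '
               'videoconvert ! appsink').format(uri, latency)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)

# Example usage (placeholder URI):
#     cap = open_rtsp_software('rtsp://192.168.1.64:554', latency=200)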

@han88

han88 commented Jun 13, 2021

Hi, thanks. It gives me no more error messages, but unfortunately it still fails to open the camera, and now the USB one fails too... (it worked only once out of 10 times before). Maybe it's Ubuntu 20.04? I can't compile the YOLO examples with TensorRT 8.0 either...

@zahidaMassin

@han88 I had the same error. Just try to uninstall OpenCV and install it again; use jkjung-avt's tutorial to install OpenCV 3.4.6:
https://jkjung-avt.github.io/opencv-on-nano/

@gusarg81

Hi,

I am trying to use this script and I got:

python3 tegra_cam.py --usb --vid 0 --rtsp --uri rtsp://10.0.1.5:554
Called with args:
Namespace(image_height=1080, image_width=1920, rtsp_latency=200, rtsp_uri='rtsp://10.0.1.5:554', use_rtsp=True, use_usb=True, video_dev=0)
OpenCV version: 4.5.2
Failed to open camera!

My camera is a UVC USB module (Arducam B0205) which is working just fine, located in /dev/video0.

Any ideas? Thanks.

@jkjung-avt

@gusarg81 Since you are using the USB webcam (/dev/video0), you shouldn't have added "--rtsp --uri rtsp://10.0.1.5:554" in the command line.

@gusarg81

Yeah, sorry, that was a bad paste here; I was in fact typing it without --rtsp and --uri. The problem was the OpenCV installation (now fixed).

Also, in my case, since the camera supports both MJPEG and YUYV, the capture with this script was slow (YUYV at 1080p captures at most 5 fps, while MJPEG goes up to 30 fps).

To solve this, I used image/jpeg and jpegdec.
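
A pipeline along these lines asks v4l2src for the camera's MJPEG stream and decodes it with jpegdec (a sketch of that change; the caps, including the 30/1 framerate, are assumptions that depend on what the camera actually advertises):

import cv2

def open_cam_usb_mjpeg(dev, width, height):
    # Request MJPEG from the camera instead of raw YUYV, then decode it
    # with 'jpegdec' so appsink still receives raw frames for OpenCV.
    gst_str = ('v4l2src device=/dev/video{} ! '
               'image/jpeg, width=(int){}, height=(int){}, '
               'framerate=(fraction)30/1 ! '
               'jpegdec ! videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)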

@kuzmich9023

Hey! I am having a problem running the example. I tried to connect to the IP camera using the RTSP protocol, as in the article. An error popped up in the terminal:
[screenshot of the terminal error, 2021-08-07]

@kuzmich9023

kuzmich9023 commented Aug 8, 2021

I entered the command from the post you gave me. What else can I fix to make it work for me?
[screenshot of the terminal output, 2021-08-08]

@niteshgaba

niteshgaba commented Aug 8, 2021 via email

@kuzmich9023

Could you write an example? I'm just starting to learn Linux...

@glemarivero

glemarivero commented Nov 3, 2021

Quoting @bhavitvyamalik's earlier question:

Hi, is it possible to stream the onboard camera output of a Jetson Nano to 2 virtual camera sinks? I tried doing that with: gst-launch-1.0 -v nvarguscamerasrc ! 'video/x-raw(memory:NVMM), format=NV12, width=1920, height=1080, framerate=30/1' ! nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! videoconvert ! identity drop-allocation=1 ! 'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! v4l2sink device=/dev/video3 v4l2sink device=/dev/video4

but it isn't working. Essentially, I want to mirror my onboard camera stream to the device's web browser, for which I created 2 virtual camera devices, namely /dev/video3 and /dev/video4, but it only streams to a single virtual camera device (this command works in the case of a single sink).

In case @bhavitvyamalik (or anyone else) is still wondering how to achieve this, you need to use tee and queue:

First create the two devices: sudo modprobe v4l2loopback devices=2
gst-launch-1.0 -v nvarguscamerasrc ! 'video/x-raw(memory:NVMM), format=NV12, width=640, height=480, framerate=30/1' ! nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! videoconvert ! identity drop-allocation=1 ! 'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! tee name=t ! queue ! v4l2sink device=/dev/video1 t. ! queue ! v4l2sink device=/dev/video2
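
For reference, the same tee/queue pipeline can also be launched from Python by shelling out to gst-launch-1.0 (a small sketch; the /dev/video1 and /dev/video2 sinks match the command above and depend on what v4l2loopback created on your system):

import subprocess

# Same pipeline as the gst-launch-1.0 command above: one nvarguscamerasrc
# branch tee'd into two v4l2loopback sinks via separate queues.
pipeline = (
    "nvarguscamerasrc ! "
    "'video/x-raw(memory:NVMM), format=NV12, width=640, height=480, framerate=30/1' ! "
    "nvvidconv ! 'video/x-raw, width=640, height=480, format=I420, framerate=30/1' ! "
    "videoconvert ! identity drop-allocation=1 ! "
    "'video/x-raw, width=640, height=480, format=RGB, framerate=30/1' ! "
    "tee name=t ! queue ! v4l2sink device=/dev/video1 "
    "t. ! queue ! v4l2sink device=/dev/video2"
)

# shell=True so the quoted caps strings are parsed the same way as in a terminal.
subprocess.run('gst-launch-1.0 -v ' + pipeline, shell=True, check=True)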

@kuzmich9023

kuzmich9023 commented Nov 3, 2021 via email

@niteshgaba

@jkjung-avt: Could you please help me save the video instead of individual images?

@jkjung-avt

@niteshgaba Please refer to:

@niteshgaba

niteshgaba commented Dec 2, 2021

@jkjung-avt: Thanks for the links. Do you have any idea about known issues of OpenCV 4.1.1 with GStreamer? If I run the image capture for 5 seconds in a thread, the pipeline closes fine, but if it is run for a large number of seconds, it just stays open and does not close.

@jkjung-avt

Sorry. It does not ring a bell for me.

@niteshgaba

niteshgaba commented Dec 2, 2021 via email
