Script to record a series of images from a RealSense D400-series stereo camera using the Python API (pyrealsense2).
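Exactly one of the three mode flags must be passed per run; otherwise the script prints its help text and exits. Typical invocations (assuming the gist is saved as realsense_recorder.py; the filename and output path are illustrative):

python realsense_recorder.py --record_imgs --output_folder dataset/realsense/
python realsense_recorder.py --record_rosbag --output_folder dataset/realsense/
python realsense_recorder.py --playback_rosbag --output_folder dataset/realsense/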
# pyrealsense2 is required.
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
import pyrealsense2 as rs
import numpy as np
import cv2
import argparse
from os import makedirs
from os.path import exists, join
import shutil
import json
from enum import IntEnum

try:
    # Python 2 compatible
    input = raw_input
except NameError:
    pass

class Preset(IntEnum):
    Custom = 0
    Default = 1
    Hand = 2
    HighAccuracy = 3
    HighDensity = 4
    MediumDensity = 5
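
# Note: these values mirror librealsense's rs400_visual_preset enum for
# D400-series devices, so they can be passed to rs.option.visual_preset.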

def make_clean_folder(path_folder):
    if not exists(path_folder):
        makedirs(path_folder)
    else:
        user_input = input("%s not empty. Overwrite? (y/n) : " % path_folder)
        if user_input.lower() == 'y':
            shutil.rmtree(path_folder)
            makedirs(path_folder)
        else:
            exit()

def save_intrinsic_as_json(filename, frame, depth_scale=0.001):
    intrinsics = frame.profile.as_video_stream_profile().intrinsics
    with open(filename, 'w') as outfile:
        # The intrinsic matrix is serialized in column-major order,
        # matching Open3D's PinholeCameraIntrinsic JSON convention.
        json.dump(
            {
                'width': intrinsics.width,
                'height': intrinsics.height,
                'depth_scale': depth_scale,
                'intrinsic_matrix': [
                    intrinsics.fx, 0, 0, 0, intrinsics.fy, 0,
                    intrinsics.ppx, intrinsics.ppy, 1
                ]
            },
            outfile,
            indent=4)
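
# Example of the resulting JSON (values are illustrative; fx, fy, ppx, ppy
# are the actual calibration numbers reported by the device):
# {
#     "width": 1280,
#     "height": 720,
#     "depth_scale": 0.001,
#     "intrinsic_matrix": [fx, 0, 0, 0, fy, 0, ppx, ppy, 1]
# }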

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=
        "Realsense Recorder. Please select one of the optional arguments")
    parser.add_argument("--output_folder",
                        default='dataset/realsense/',
                        help="set output folder")
    parser.add_argument("--record_rosbag",
                        action='store_true',
                        help="Record the RGBD stream into realsense.bag")
    parser.add_argument(
        "--record_imgs",
        action='store_true',
        help="Record color and depth images into the output folder")
    parser.add_argument("--playback_rosbag",
                        action='store_true',
                        help="Play back a recorded realsense.bag file")
    args = parser.parse_args()

    # Exactly one mode flag must be set: output_folder always counts as one
    # truthy value, so a valid invocation yields exactly two non-False values.
    if sum(o is not False for o in vars(args).values()) != 2:
        parser.print_help()
        exit()
    path_output = args.output_folder
    path_depth = join(args.output_folder, "depth")
    path_color = join(args.output_folder, "color")
    if args.record_imgs:
        make_clean_folder(path_output)
        make_clean_folder(path_depth)
        make_clean_folder(path_color)

    path_bag = join(args.output_folder, "realsense.bag")
    if args.record_rosbag:
        if exists(path_bag):
            user_input = input("%s exists. Overwrite? (y/n) : " % path_bag)
            if user_input.lower() == 'n':
                exit()
    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    if args.record_imgs or args.record_rosbag:
        # Depth and color are both streamed at 1280 x 720, 6 fps.
        # rs.format.bgr8 matches OpenCV's channel order for visualization.
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 6)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 6)
        if args.record_rosbag:
            config.enable_record_to_file(path_bag)
    if args.playback_rosbag:
        config.enable_device_from_file(path_bag, repeat_playback=True)
    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Using preset HighAccuracy for recording
    if args.record_rosbag or args.record_imgs:
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Set a custom disparity shift
    device = profile.get_device()
    advnc_mode = rs.rs400_advanced_mode(device)
    depth_table_control_group = advnc_mode.get_depth_table()
    depth_table_control_group.disparityShift = 64
    advnc_mode.set_depth_table(depth_table_control_group)
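    # Note: a nonzero disparity shift moves the usable depth range closer to
    # the camera: the minimum sensing distance drops at the cost of reduced
    # maximum range. See Intel's D400 tuning guide for details.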
    # Get the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 1  # 1 meter
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to other frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)
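    # Alignment reprojects each depth pixel into the color camera's viewpoint
    # using the device's factory extrinsics, so depth_image[v, u] and
    # color_image[v, u] refer to the same scene point after processing.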

    # Streaming loop
    frame_count = 0
    try:
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            if args.record_imgs:
                if frame_count == 0:
                    save_intrinsic_as_json(
                        join(args.output_folder, "camera_intrinsic.json"),
                        color_frame, depth_scale)
                cv2.imwrite("%s/%06d.png" % (path_depth, frame_count),
                            depth_image)
                cv2.imwrite("%s/%06d.jpg" % (path_color, frame_count),
                            color_image)
                print("Saved color + depth image %06d" % frame_count)
                frame_count += 1
            # Remove background: set pixels further than clipping_distance,
            # or with no depth reading, to the background color
            grey_color = 0  # 0 renders the clipped background as black

            # depth image is 1 channel, color is 3 channels
            depth_image_3d = np.dstack(
                (depth_image, depth_image, depth_image))
            bg_removed = np.where((depth_image_3d > clipping_distance) |
                                  (depth_image_3d <= 0), grey_color,
                                  color_image)

            # Render images: alpha=0.09 scales the 16-bit depth so that
            # ~2.8 m saturates at 255 (at the default 1 mm depth unit)
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.09),
                cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))
            cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Recorder Realsense', images)
            key = cv2.waitKey(1)

            # if 'esc' or 'q' is pressed, break the loop and exit the program
            if key == 27 or key == ord('q'):
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()
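
The recorder writes 16-bit depth PNGs and 8-bit color JPEGs with matching frame indices, plus camera_intrinsic.json. A minimal sketch of turning one saved pair back into a point cloud, assuming Open3D is installed and the default output folder was used:

import open3d as o3d

# Load the intrinsics written by save_intrinsic_as_json
intrinsic = o3d.io.read_pinhole_camera_intrinsic(
    "dataset/realsense/camera_intrinsic.json")
color = o3d.io.read_image("dataset/realsense/color/000000.jpg")
depth = o3d.io.read_image("dataset/realsense/depth/000000.png")

# depth_scale=1000.0 matches the 0.001 m/unit scale recorded in the JSON
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
    color, depth, depth_scale=1000.0, convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
o3d.visualization.draw_geometries([pcd])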