@andrewssobral · Created October 21, 2018
demo_depth2color.py (Intel® RealSense™ SDK)
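## Requirements (assumed setup): an Intel RealSense camera plus the
## pyrealsense2, numpy, and opencv-python packages, e.g.:
##   pip install pyrealsense2 numpy opencv-python
## Press 'q' in the preview window to quit.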
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#####################################################
## Align Depth to Color ##
#####################################################
# First import the library
import pyrealsense2 as rs
import numpy as np
import cv2
import time
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
#config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
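# Note: bgr8 matches OpenCV's native channel order, so color frames can be
# passed to cv2.imshow without any conversion.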
# Start streaming
profile = pipeline.start(config)
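# pipeline.start() negotiates the requested streams with the device and
# returns a pipeline_profile, which exposes the active device below.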
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
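# The depth scale converts raw z16 units to meters. On D400-series cameras it
# is typically 0.001, i.e. a raw reading of 1000 corresponds to about 1 meter.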
print("Depth Scale is: " , depth_scale)
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 1 #1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
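# Dividing by the depth scale converts the threshold from meters into raw
# sensor units (e.g. 1 m / 0.001 = 1000), so it can be compared directly
# against the raw depth image in the loop below.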
# Create an align object
# rs.align allows us to perform alignment of depth frames to other frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
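# Under the hood, align.process() reprojects each depth pixel into the color
# camera's viewpoint using the calibrated intrinsics/extrinsics, so pixel
# (u, v) refers to the same scene point in both images.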
# Number of captured frames
num_frames = 0
# Start time
start = time.time()
# Streaming loop
try:
    while True:
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x480 depth image
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Remove background - Set pixels further than clipping_distance to grey
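        # Note: stacking the depth map to 3 channels lets the boolean mask
        # broadcast against the 3-channel BGR image; depth values of 0 mark
        # pixels with no valid depth data, so they are greyed out as well.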
        grey_color = 153
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image)) # depth image is 1 channel, color is 3 channels
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
        # Render images
        #depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        #images = np.hstack((bg_removed, depth_colormap))
        images = bg_removed
        cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Align Example', images)
        #cv2.waitKey(1)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        num_frames += 1
finally:
    pipeline.stop()
    # End time
    end = time.time()
    # Time elapsed
    seconds = end - start
    print("Time taken: {0} seconds".format(seconds))
    # Calculate frames per second
    fps = num_frames / seconds
    print("Estimated frames per second: {0}".format(fps))