With the latest depthai, you should just use the Camera node, which does undistortion by itself. Demo here: https://github.com/luxonis/depthai-python/blob/main/examples/StereoDepth/rgb_depth_aligned.py
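For reference, a minimal sketch of that Camera-node approach (this is my assumption of a recent depthai 2.x release where dai.node.Camera is available; the node reads the device calibration and undistorts on-device, so no warp mesh is needed):

#!/usr/bin/env python3
import cv2
import depthai as dai

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera)           # undistorts using on-device calibration
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)  # color camera socket
cam.setSize(1280, 720)                           # undistorted output size
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("video")
cam.video.link(xout.input)

with dai.Device(pipeline) as device:
    q = device.getOutputQueue("video")
    while True:
        cv2.imshow("undistorted", q.get().getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break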
""" | |
With latest depthai, you should just use Camera node, which does undistortion by itself. | |
Demo here: https://github.com/luxonis/depthai-python/blob/main/examples/StereoDepth/rgb_depth_aligned.py | |
""" | |
#!/usr/bin/env python3 | |
import cv2 | |
import numpy as np | |
import depthai as dai | |
# Weights to use when blending the depth/rgb images (should sum to 1.0)
rgbWeight = 0.4
depthWeight = 0.6
# Messages are paired by their sequence number, which the device stamps on every frame
msgs = dict()

def add_msg(msg, name, seq=None):
    if seq is None:
        seq = msg.getSequenceNum()
    seq = str(seq)
    if seq not in msgs:
        msgs[seq] = dict()
    msgs[seq][name] = msg
def get_msgs():
    global msgs
    seq_remove = []  # Sequence numbers to be removed from the dict
    for seq, syncMsgs in msgs.items():
        seq_remove.append(seq)  # Older (and current) seqs get dropped once a synced pair is found
        # Check if we have both the color and disparity frame with this sequence number
        if len(syncMsgs) == 2:  # rgb + depth
            for rm in seq_remove:
                del msgs[rm]
            return syncMsgs  # Return the synced messages
    return None
def updateBlendWeights(percent_rgb):
    """
    Update the rgb and depth weights used to blend depth/rgb image
    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global depthWeight
    global rgbWeight
    rgbWeight = float(percent_rgb) / 100.0
    depthWeight = 1.0 - rgbWeight
# Optional. If set (True), the ColorCamera is downscaled from 1080p to 720p.
# Otherwise (False), the aligned depth is automatically upscaled to 1080p
downscaleColor = True
fps = 30
# The disparity is computed at this resolution, then upscaled to the RGB resolution.
# 1080p scaled by 2/3 is 1280x720, which matches THE_720_P mono resolution exactly.
monoResolution = dai.MonoCameraProperties.SensorResolution.THE_720_P
camSocket = dai.CameraBoardSocket.RGB
def getMesh(calibData, ispSize):
    M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
    d1 = np.array(calibData.getDistortionCoefficients(camSocket))
    R1 = np.identity(3)
    mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)

    meshCellSize = 16
    mesh0 = []
    # Creates a subsampled mesh which will be loaded onto the device to undistort the image
    for y in range(mapX.shape[0] + 1):  # iterating over the height of the image
        if y % meshCellSize == 0:
            rowLeft = []
            for x in range(mapX.shape[1] + 1):  # +1 so the right edge is sampled too
                if x % meshCellSize == 0:
                    if y == mapX.shape[0] and x == mapX.shape[1]:  # bottom-right corner
                        rowLeft.append(mapX[y - 1, x - 1])
                        rowLeft.append(mapY[y - 1, x - 1])
                    elif y == mapX.shape[0]:  # bottom edge
                        rowLeft.append(mapX[y - 1, x])
                        rowLeft.append(mapY[y - 1, x])
                    elif x == mapX.shape[1]:  # right edge
                        rowLeft.append(mapX[y, x - 1])
                        rowLeft.append(mapY[y, x - 1])
                    else:
                        rowLeft.append(mapX[y, x])
                        rowLeft.append(mapY[y, x])
            if (mapX.shape[1] % meshCellSize) % 2 != 0:  # pad rows to an even number of values
                rowLeft.append(0)
                rowLeft.append(0)
            mesh0.append(rowLeft)

    mesh0 = np.array(mesh0)
    meshWidth = mesh0.shape[1] // 2
    meshHeight = mesh0.shape[0]
    mesh0.resize(meshWidth * meshHeight, 2)
    mesh = list(map(tuple, mesh0))
    return mesh, meshWidth, meshHeight
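# Aside (not part of the original gist): a hypothetical host-side check of the same
# undistortion. The warp mesh above is just a 16px-subsampled version of the dense
# remap tables, so cv2.remap with the full maps should closely match the device output.
def undistortOnHost(calibData, frame):
    M = np.array(calibData.getCameraIntrinsics(camSocket, frame.shape[1], frame.shape[0]))
    d = np.array(calibData.getDistortionCoefficients(camSocket))
    mapX, mapY = cv2.initUndistortRectifyMap(
        M, d, np.identity(3), M, (frame.shape[1], frame.shape[0]), cv2.CV_32FC1)
    return cv2.remap(frame, mapX, mapY, cv2.INTER_LINEAR)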
def create_pipeline(calibData):
    # Create pipeline
    pipeline = dai.Pipeline()
    queueNames = []

    # Define sources and outputs
    camRgb = pipeline.create(dai.node.ColorCamera)
    left = pipeline.create(dai.node.MonoCamera)
    right = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)

    rgbOut = pipeline.create(dai.node.XLinkOut)
    disparityOut = pipeline.create(dai.node.XLinkOut)

    rgbOut.setStreamName("rgb")
    queueNames.append("rgb")
    disparityOut.setStreamName("disp")
    queueNames.append("disp")
    # Properties
    camRgb.setBoardSocket(camSocket)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setFps(fps)
    if downscaleColor: camRgb.setIspScale(2, 3)  # 1080p -> 720p, to match the mono/depth resolution

    # ImageManip warps the ISP frame with the calibration-derived mesh to undistort it
    manip = pipeline.create(dai.node.ImageManip)
    mesh, meshWidth, meshHeight = getMesh(calibData, camRgb.getIspSize())
    manip.setWarpMesh(mesh, meshWidth, meshHeight)
    manip.setMaxOutputFrameSize(camRgb.getIspWidth() * camRgb.getIspHeight() * 3 // 2)  # YUV420 is 1.5 bytes/px
    camRgb.isp.link(manip.inputImage)
    left.setResolution(monoResolution)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)
    left.setFps(fps)
    right.setResolution(monoResolution)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    right.setFps(fps)

    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    # LR-check is required for depth alignment
    stereo.setLeftRightCheck(True)
    stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
    # Linking
    manip.out.link(rgbOut.input)  # undistorted color; link camRgb.isp instead for the raw ISP frame
    left.out.link(stereo.left)
    right.out.link(stereo.right)
    stereo.disparity.link(disparityOut.input)

    return pipeline
# Connect to device and start pipeline
with dai.Device() as device:
    calibData = device.readCalibration()
    pipeline = create_pipeline(calibData)
    device.startPipeline(pipeline)

    blendedWindowName = "rgb-depth"
    cv2.namedWindow(blendedWindowName)
    cv2.createTrackbar('RGB Weight %', blendedWindowName, int(rgbWeight * 100), 100, updateBlendWeights)
    while True:
        for name in ['rgb', 'disp']:
            msg = device.getOutputQueue(name).tryGet()
            if msg is not None:
                add_msg(msg, name)

        synced = get_msgs()
        if synced:
            frameRgb = synced["rgb"].getCvFrame()
            frameDisp = synced["disp"].getFrame()
            # 95 is StereoDepth's default max disparity; with extended/subpixel modes it differs.
            # If the stereo node is in scope, stereo.initialConfig.getMaxDisparity() avoids hardcoding it.
            maxDisparity = 95
            # Optional, extend range 0..95 -> 0..255 for a better visualisation
            frameDisp = (frameDisp * 255. / maxDisparity).astype(np.uint8)
            # Optional, apply false colorization
            frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_HOT)
            frameDisp = np.ascontiguousarray(frameDisp)

            # Need to have both frames in BGR format before blending
            if len(frameDisp.shape) < 3:
                frameDisp = cv2.cvtColor(frameDisp, cv2.COLOR_GRAY2BGR)
            blended = cv2.addWeighted(frameRgb, rgbWeight, frameDisp, depthWeight, 0)
            cv2.imshow(blendedWindowName, blended)

        if cv2.waitKey(1) == ord('q'):
            break