Skip to content

Instantly share code, notes, and snippets.

@whatisor
Created December 31, 2019 07:32
Show Gist options
  • Save whatisor/148fa6c3836b199deba81f57665c16fc to your computer and use it in GitHub Desktop.
RealsenseTool.hpp
#pragma once
#include <exception>
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace rs2;
/// Deprojects an image pixel into a 3D point (camera space, meters).
///
/// The depth value is taken as the average of valid readings inside a
/// symmetric (2k+1)x(2k+1) window around the pixel; readings outside
/// (0, 10) meters are treated as invalid and skipped. Near the image
/// border (within k pixels) only the single pixel is sampled.
///
/// @param frame  depth frame to sample distances from
/// @param u      pixel coordinate (clamped into the frame bounds)
/// @param k      averaging window radius in pixels (default 5)
/// @return       3D point; (0,0,0) when no valid depth was found,
///               since deprojecting distance 0 yields the origin
static cv::Point3d pixel2Point3D(const rs2::depth_frame& frame, cv::Point u, int k = 5)
{
float upixel[2]; // Pixel coordinate (rsutil expects float[2])
float upoint[3]; // Resulting 3D point (rsutil writes float[3])
// Clamp the requested pixel into the valid image rectangle.
upixel[0] = u.x;
upixel[1] = u.y;
upixel[0] = upixel[0] >= 0 ? upixel[0] : 0;
upixel[0] = upixel[0] < frame.get_width() ? upixel[0] : frame.get_width() - 1;
upixel[1] = upixel[1] >= 0 ? upixel[1] : 0;
upixel[1] = upixel[1] < frame.get_height() ? upixel[1] : frame.get_height() - 1;
const int px = static_cast<int>(upixel[0]);
const int py = static_cast<int>(upixel[1]);
// Note: one get_distance() API call per sampled pixel; acceptable here,
// but not recommended for per-pixel processing of whole frames.
double udist = 0;
size_t count = 0;
if (px <= k || px >= frame.get_width() - k
|| py <= k || py >= frame.get_height() - k) {
// Too close to the border for a full window - single sample.
udist = frame.get_distance(px, py);
count = 1;
}
else {
// Symmetric window: i, j in [-k, k] inclusive, centered on the pixel.
// In-bounds is guaranteed by the border check above.
for (int i = -k; i <= k; i++)
for (int j = -k; j <= k; j++) {
double d = frame.get_distance(px + i, py + j);
if (d > 0 && d < 10) { // reject zero (no data) and implausibly far readings
udist += d;
count++;
}
}
if (count)
udist = udist / count;
}
// Deproject from pixel to point in 3D using the stream calibration.
rs2_intrinsics intr = frame.get_profile().as<rs2::video_stream_profile>().get_intrinsics();
rs2_deproject_pixel_to_point(upoint, &intr, upixel, udist);
return cv::Point3d(upoint[0], upoint[1], upoint[2]);
}
// Convert rs2::frame to cv::Mat
/// Converts an rs2::frame to a cv::Mat.
///
/// For most formats the returned Mat is a zero-copy view over the frame's
/// buffer and is only valid while the frame is alive. RGB8 is the exception:
/// it is converted into a newly allocated BGR Mat, so the librealsense
/// frame buffer is never modified in place.
///
/// @param f  video frame (BGR8, RGB8, Z16, Y8 or DISPARITY32)
/// @return   Mat wrapping (or, for RGB8, copying) the frame data
/// @throws std::runtime_error on any other frame format
static cv::Mat frame_to_mat(const rs2::frame& f)
{
using namespace cv;
using namespace rs2;
auto vf = f.as<video_frame>();
const int w = vf.get_width();
const int h = vf.get_height();
// Query the format once instead of once per branch.
const rs2_format format = f.get_profile().format();
switch (format)
{
case RS2_FORMAT_BGR8:
return Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
case RS2_FORMAT_RGB8:
{
// Convert into a separate Mat: cvtColor(r, r, ...) would write the
// BGR result back into the frame's own buffer, corrupting it for
// any other consumer of this frame.
Mat rgb(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
Mat bgr;
cvtColor(rgb, bgr, COLOR_RGB2BGR);
return bgr;
}
case RS2_FORMAT_Z16:
return Mat(Size(w, h), CV_16UC1, (void*)f.get_data(), Mat::AUTO_STEP);
case RS2_FORMAT_Y8:
return Mat(Size(w, h), CV_8UC1, (void*)f.get_data(), Mat::AUTO_STEP);
case RS2_FORMAT_DISPARITY32:
return Mat(Size(w, h), CV_32FC1, (void*)f.get_data(), Mat::AUTO_STEP);
default:
throw std::runtime_error("Frame format is not supported yet!");
}
}
// Converts depth frame to a matrix of doubles with distances in meters
// Converts a depth frame to a CV_64F matrix of distances in meters.
static cv::Mat depth_frame_to_meters(const rs2::pipeline& pipe, const rs2::depth_frame& f)
{
using namespace cv;
using namespace rs2;
// Scale factor mapping the sensor's raw 16-bit depth units to meters,
// taken from the pipeline's active depth sensor.
const float depth_scale = pipe.get_active_profile()
.get_device()
.first<depth_sensor>()
.get_depth_scale();
Mat meters = frame_to_mat(f);
// Convert and scale in one pass: dst = src * depth_scale (as doubles).
meters.convertTo(meters, CV_64F, depth_scale);
return meters;
}
static void MapColor2IR(const Point &color, Point &depth, rs2_intrinsics &color_inst, rs2_intrinsics &depth_inst, rs2_extrinsics &extrincs) {
float pixes[2];
pixes[0] = color.x;
pixes[1] = color.y;
float colorPoint[3];
rs2_deproject_pixel_to_point(colorPoint, &color_inst, pixes, 1);
float depthPoint[3];
rs2_transform_point_to_point(depthPoint, &extrincs, colorPoint);
rs2_project_point_to_pixel(pixes, &depth_inst, depthPoint);
depth.x = pixes[0];
depth.y = pixes[1];
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment