Skip to content

Instantly share code, notes, and snippets.

@jherico
Last active August 29, 2015 13:59
Show Gist options
  • Save jherico/10440306 to your computer and use it in GitHub Desktop.
#include <OVR.h>
#undef new
#undef min
#undef max
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/noise.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtx/norm.hpp>
#include <opencv2/opencv.hpp>
// Shorthand for the SDK's per-eye enumeration and its two values.
typedef OVR::Util::Render::StereoEye StereoEye;
#define LEFT StereoEye::StereoEye_Left
#define RIGHT StereoEye::StereoEye_Right
template <typename Function>
void for_each_eye(Function function) {
for (StereoEye eye = StereoEye::StereoEye_Left;
eye <= StereoEye::StereoEye_Right;
eye = static_cast<StereoEye>(eye + 1)) {
function(eye);
}
}
// Invoke `f(x, y)` for every pixel of an image of the given `size`,
// iterating rows (y) in the outer loop and columns (x) in the inner loop.
template <typename Function>
void for_each_pixel(const glm::uvec2 & size, Function f) {
    for (size_t row = 0; row < size.y; ++row) {
        for (size_t col = 0; col < size.x; ++col) {
            f(col, row);
        }
    }
}
class DistortionHelper {
    // Distortion polynomial coefficients (the SDK's K values, pre-divided by
    // the distortion scale so results land inside the enlarged render target).
    float K[4];
    // Horizontal offset of the lens axis from the viewport center, in
    // normalized ([-1, 1]) screen units.
    float lensOffset;
    // Aspect ratio of a single eye's viewport (width / height).
    float eyeAspect;
    // Render-target scale factor recommended by the SDK's StereoConfig.
    float distortionScale;
    // Per-eye output size in pixels (half panel width, scaled up).
    glm::uvec2 desiredSize;
    glm::ivec2 centerPixel; // NOTE(review): never written or read — candidate for removal
    // For a texture coordinate on the *distorted* output image, return the
    // coordinate in the *undistorted* scene texture to sample color from.
    glm::vec2 findSceneTextureCoords(StereoEye eye, glm::vec2 texCoord) {
        // In order to calculate the distortion, we need to know the distance
        // between the lens axis point and the point we are rendering, so move
        // the coordinate from texture space ([0, 1]) into screen space ([-1, 1]).
        // (The original dead `static bool init` guard has been removed.)
        texCoord *= 2.0;
        texCoord -= 1.0;
        // Screen space isn't enough; we need rift space, which differs in two
        // ways.  First, it has homogeneous axis scales, so correct for any
        // difference between the X and Y viewport sizes by scaling the Y axis
        // by the aspect ratio.
        texCoord.y /= eyeAspect;
        // Second, the rift-space origin is where the lens axis intersects the
        // display panel, so offset the X coordinate to account for that
        // (mirrored between the two eyes).
        texCoord.x += (eye == LEFT) ? -lensOffset : lensOffset;
        // Only the *squared* distance is needed to evaluate the scaling
        // polynomial, which saves a square root.
        float rSq = glm::length2(texCoord);
        // Local renamed from `distortionScale` so it no longer shadows the
        // member of the same name.
        float scale = K[0] + rSq * (K[1] + rSq * (K[2] + rSq * K[3]));
        texCoord *= scale;
        // Undo the rift-space transform by applying the inverse operations in
        // reverse order, returning to texture space.
        texCoord.x -= (eye == LEFT) ? -lensOffset : lensOffset;
        texCoord.y *= eyeAspect;
        texCoord += 1.0;
        texCoord /= 2.0;
        // For the input coordinate on the distorted screen we now have the
        // undistorted source coordinate to pull the color from.
        return texCoord;
    }
public:
    // Derive all distortion parameters from the HMD description.
    DistortionHelper(const OVR::HMDInfo & ovrHmdInfo) {
        lensOffset = 1.0f
            - (2.0f * ovrHmdInfo.LensSeparationDistance
                / ovrHmdInfo.HScreenSize);
        eyeAspect = (ovrHmdInfo.HResolution / 2.0f) / ovrHmdInfo.VResolution;
        OVR::Util::Render::StereoConfig stereoConfig;
        stereoConfig.SetHMDInfo(ovrHmdInfo);
        const OVR::Util::Render::DistortionConfig & distortion =
            stereoConfig.GetDistortionConfig();
        distortionScale = stereoConfig.GetDistortionScale();
        desiredSize = glm::uvec2(glm::vec2(ovrHmdInfo.HResolution / 2, ovrHmdInfo.VResolution) * distortionScale);
        // Pre-divide the coefficients so the polynomial maps into the
        // enlarged target without a separate scaling pass.
        for (int i = 0; i < 4; ++i) {
            K[i] = (float)(distortion.K[i] / distortionScale);
        }
    }
    // Pixel under the lens axis for the given eye in the per-eye image.
    const glm::uvec2 getCenterPixel(StereoEye eye) const {
        glm::uvec2 result(desiredSize.x / 2, desiredSize.y / 2);
        result.x += desiredSize.x * lensOffset / 2.0 * (eye == LEFT ? 1 : -1);
        return result;
    }
    // Per-eye render-target size in pixels.
    const glm::uvec2 & getDesiredSize() const {
        return desiredSize;
    }
    // Build the inverse-distortion lookup maps consumed by cv::remap.
    void createDistortionMap(StereoEye eye, cv::Mat & map_x, cv::Mat & map_y) {
        // (Removed the unused `defScalar` local from the original.)
        map_x = cv::Mat(desiredSize.y, desiredSize.x, CV_32FC1);
        map_y = cv::Mat(desiredSize.y, desiredSize.x, CV_32FC1);
        // Sample at pixel centers rather than corners.
        glm::vec2 texCenterOffset = glm::vec2(0.5f) / glm::vec2(desiredSize);
        for_each_pixel(desiredSize, [&](size_t x, size_t y){
            glm::vec2 textureCoord = (glm::vec2(x, y) / glm::vec2(desiredSize)) + texCenterOffset;
            glm::vec2 distortedCoord = findSceneTextureCoords(eye, textureCoord);
            // Back to pixel units for cv::remap.
            distortedCoord *= desiredSize;
            map_x.at<float>(y, x) = distortedCoord.x;
            map_y.at<float>(y, x) = distortedCoord.y;
        });
    }
};
std::map<StereoEye, const char *> WINDOWS{ {
{ LEFT, "RiftLeft" },
{ RIGHT, "RiftRight" }
} };
class Rift {
public:
// Fill `out` with the device description of the first HMD found.
// Throws std::runtime_error when the SDK manager is null ("no SDK") or no
// HMD device can be created ("no HMD").
static void getHmdInfo(
const OVR::Ptr<OVR::DeviceManager> & ovrManager,
OVR::HMDInfo & out) {
if (!ovrManager) {
throw std::runtime_error("no SDK");
}
// NOTE(review): assigning through `*` appears to adopt the reference
// returned by CreateDevice() without adding an extra ref-count — confirm
// against OVR::Ptr semantics for this SDK version.
OVR::Ptr<OVR::HMDDevice> ovrHmd = *ovrManager->
EnumerateDevices<OVR::HMDDevice>().CreateDevice();
if (!ovrHmd) {
throw std::runtime_error("no HMD");
}
ovrHmd->GetDeviceInfo(&out);
// Release the device handle immediately; only the copied-out info is kept.
ovrHmd = nullptr;
}
};
class RiftManagerApp {
protected:
    OVR::Ptr<OVR::DeviceManager> ovrManager; // SDK device-manager handle
    OVR::HMDInfo ovrHmdInfo;                 // cached HMD description
    glm::uvec2 hmdNativeResolution;          // full panel resolution (both eyes)
    glm::ivec2 hmdDesktopPosition;           // panel position on the desktop
    glm::uvec2 eyeSize;                      // per-eye resolution (half the width)
    float eyeAspect{ 1 };
    float eyeAspectInverse{ 1 };
public:
    // Acquire the HMD via the OVR SDK and cache its geometry.
    // Throws (from Rift::getHmdInfo) if no SDK manager or HMD is available.
    RiftManagerApp() {
        ovrManager = *OVR::DeviceManager::Create();
        Rift::getHmdInfo(ovrManager, ovrHmdInfo);
        hmdNativeResolution = glm::ivec2(ovrHmdInfo.HResolution, ovrHmdInfo.VResolution);
        // BUG FIX: the original divided width by width (`.x / .x`), which made
        // eyeAspect a constant 0.5 regardless of the panel.  A single eye's
        // aspect is (half the panel width) / panel height.
        eyeAspect = ((float)hmdNativeResolution.x / (float)hmdNativeResolution.y) / 2.0f;
        eyeAspectInverse = 1.0f / eyeAspect;
        hmdDesktopPosition = glm::ivec2(ovrHmdInfo.DesktopX, ovrHmdInfo.DesktopY);
        eyeSize = hmdNativeResolution;
        eyeSize.x /= 2;
    }
};
class ShaderLookupDistort : public RiftManagerApp {
protected:
    std::map<StereoEye, cv::Mat> sceneTextures; // per-eye source imagery
    std::map<StereoEye, cv::Mat> lookupMapsX;   // cv::remap X lookup per eye
    std::map<StereoEye, cv::Mat> lookupMapsY;   // cv::remap Y lookup per eye
    DistortionHelper helper;
public:
    // Stand-in for a live camera feed; replace with images from your camera.
    cv::Mat getImageFromWebcam(StereoEye eye) {
        return cv::imread("/Users/bdavis/git/OculusRiftExamples/resources/images/webcam.png");
    }
    // Load the source image and precompute the distortion maps for each eye.
    ShaderLookupDistort() : helper(ovrHmdInfo) {
        for_each_eye([&](StereoEye eye) {
            sceneTextures[eye] = getImageFromWebcam(eye);
            helper.createDistortionMap(eye, lookupMapsX[eye], lookupMapsY[eye]);
        });
    }
    // Main display loop: distort each eye's image and show the combined
    // 1280x800 panel image until ESC is pressed.  Returns the exit code.
    int run() {
        // BUG FIX: the original created the same "Rift" window once per eye
        // inside a for_each_eye loop; one creation suffices.  Also use the
        // C++ API (cv::resizeWindow) instead of the legacy cvResizeWindow.
        cv::namedWindow("Rift", CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO);
        cv::resizeWindow("Rift", 1280, 800);
        cv::Mat riftOutput = cv::Mat::zeros(800, 1280, CV_8UC3);
        // Shrink the camera image so its FOV roughly matches the display.
        static const float scaleWebcamImage = 0.7f;
        do {
            for_each_eye([&](StereoEye eye) {
                cv::Mat sceneTexture = sceneTextures[eye];
                // Resize the webcam image to account for the FOV
                cv::resize(sceneTexture, sceneTexture, cv::Size(sceneTexture.cols * scaleWebcamImage, sceneTexture.rows * scaleWebcamImage));
                // Copy the resized webcam image into the pre-distortion buffer,
                // centering it under the lens axis and cropping as necessary.
                cv::Mat undistorted = cv::Mat::zeros(helper.getDesiredSize().y, helper.getDesiredSize().x, CV_8UC3);
                {
                    glm::uvec2 projectionCenter = helper.getCenterPixel(eye);
                    // Center the source image under the lens axis, and crop it
                    // if it goes past the undistorted border.
                    cv::Rect destRect, sourceRect;
                    {
                        // center
                        sourceRect.x = sourceRect.y = 0;
                        destRect.x = projectionCenter.x - sceneTexture.cols / 2;
                        destRect.y = projectionCenter.y - sceneTexture.rows / 2;
                        destRect.width = sceneTexture.cols;
                        destRect.height = sceneTexture.rows;
                        // Clip the left edge.
                        if (destRect.x < 0) {
                            destRect.width += destRect.x;
                            sourceRect.x = -destRect.x;
                            destRect.x = 0;
                        }
                        // Clip the right edge.
                        if (destRect.x + destRect.width > undistorted.cols) {
                            destRect.width -= (destRect.x + destRect.width) - undistorted.cols;
                        }
                        // BUG FIX: the original never clipped vertically, so
                        // copyTo would assert whenever the scaled source image
                        // is taller than the distortion buffer.  Clip the top
                        // and bottom edges symmetrically with the horizontal case.
                        if (destRect.y < 0) {
                            destRect.height += destRect.y;
                            sourceRect.y = -destRect.y;
                            destRect.y = 0;
                        }
                        if (destRect.y + destRect.height > undistorted.rows) {
                            destRect.height -= (destRect.y + destRect.height) - undistorted.rows;
                        }
                        sourceRect.width = destRect.width;
                        sourceRect.height = destRect.height;
                    }
                    // Copy from the matching portion of the source image.
                    sceneTexture(sourceRect).copyTo(undistorted(destRect));
                }
                // Create the distorted version of the image.
                cv::Mat distorted = cv::Mat::zeros(helper.getDesiredSize().y, helper.getDesiredSize().x, CV_8UC3);
                cv::remap(undistorted, distorted, lookupMapsX[eye], lookupMapsY[eye], CV_INTER_LINEAR);
                // Scale each eye down to its half of the physical 1280x800 panel.
                cv::resize(distorted, distorted, cv::Size(640, 800));
                distorted.copyTo(riftOutput(cv::Rect(eye == LEFT ? 0 : 640, 0, 640, 800)));
            });
            cv::imshow("Rift", riftOutput); // Show our image inside it.
        } while (27 != (0xff & cv::waitKey(15))); // loop until ESC (27)
        return 0;
    }
};
#ifdef WIN32
#define MAIN_DECL int __stdcall WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
#else
#define MAIN_DECL int main(int argc, char ** argv)
#endif
// Combine some macros together to create a single macro
// to launch a class containing a run method.
// BUG FIX: the original `return AppClass().run();` made the subsequent
// OVR::System::Destroy() and `return -1;` unreachable, so the SDK was never
// shut down.  Capture the result, tear down, then return it.
#define RUN_OVR_APP(AppClass) \
MAIN_DECL { \
OVR::System::Init(); \
int result = AppClass().run(); \
OVR::System::Destroy(); \
return result; \
}
// Entry point: expands to main()/WinMain() and runs the distortion demo.
RUN_OVR_APP(ShaderLookupDistort)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment