#include <iostream>
#include <vector>
#include <chrono>
#include <cmath>
#include <opencv2/world.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;
// Default settings (adjusted at runtime through the trackbars below)
int selectedImage = 0;
int pyrScale = 2;
int pyrDepth = 2;
int ofThreshold = 17;
auto ofScale = 4;      // Will be divided by 10
auto ofLevel = 2;
auto ofSize = 14;
auto ofIterations = 20;
auto ofPolyN = 5;
auto ofPolySigma = 6;  // Will be divided by 10
int useBlur = 1;
int blurOne = 0;
int blurScale = 3;
int blbSize = 5;

// Images used in the test
vector<Mat> tests;
Mat back;
////////////////////////////////////////////////////////////// Utilities ///////////////////////////////////////////////////////////////////////
template<typename T>
inline T pi() {
    const auto pi = acos(-T(1));
    return pi;
}
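// Histogram-equalizes the intensity of a colour image: the image is converted to
// YCrCb, equalizeHist() is applied to the luma (Y) channel only, and the result is
// converted back to BGR. Images with fewer than 3 channels yield an empty Mat.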
Mat equalizeIntensity(const Mat& inputImage) {
    if(inputImage.channels() >= 3) {
        Mat ycrcb;
        cvtColor(inputImage, ycrcb, CV_BGR2YCrCb);
        vector<Mat> channels;
        split(ycrcb, channels);
        equalizeHist(channels[0], channels[0]);
        Mat result;
        merge(channels, ycrcb);
        cvtColor(ycrcb, result, CV_YCrCb2BGR);
        return result;
    }
    return Mat();
}
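// RAII timer: records the wall-clock time at construction and prints the elapsed
// milliseconds when the object goes out of scope.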
struct ScopedTimer {
    chrono::time_point<chrono::system_clock> start, end;
    ScopedTimer() {
        start = chrono::system_clock::now();
    }
    ~ScopedTimer() {
        end = chrono::system_clock::now();
        chrono::duration<double, std::milli> elapsed_ms = end - start;
        cout << "elapsed time: " << elapsed_ms.count() << "ms" << endl;
    }
};
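// Keeps only the large blobs of a binary mask: every contour whose area exceeds
// areaSize is drawn filled into the output mask, everything else is dropped.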
Mat findBigBlobs(cv::Mat &src, int areaSize) {
    Mat temp(src.rows, src.cols, CV_8UC1);
    Mat dst(src.rows, src.cols, CV_8UC1, Scalar::all(0));
    src.copyTo(temp); // findContours modifies its input, so work on a copy
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    for(size_t i = 0; i < contours.size(); i++) {
        double area = contourArea(contours[i], false);
        if(area > areaSize) {
            // Keep this contour: draw it filled into the result mask
            drawContours(dst, contours, (int)i, Scalar(255), CV_FILLED, 8, hierarchy);
        }
    }
    return dst;
}
///////////////////////////////////////////////////////////////////////////////////////////////// Utilities end
// Expects grayscale (single-channel) Mats: localBack and localTest
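// Runs Farneback dense optical flow between the background and the test frame and
// encodes the result as an HSV image: hue = flow direction, saturation = 255,
// value = flow magnitude (saturated to 255 above ofThreshold).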
Mat OpticalFlow(Mat localBack, Mat localTest) {
    Mat result = localBack.clone(), flow;
    cvtColor(result, result, CV_GRAY2BGR);
    cvtColor(result, result, CV_BGR2HSV);
    calcOpticalFlowFarneback(localBack, localTest, flow, (float)ofScale / 10.0, ofLevel, ofSize, ofIterations, ofPolyN, (float)ofPolySigma / 10.0, 0);
    // Split the 2-channel flow field into x/y components and convert to polar form
    vector<Mat> channels(flow.channels());
    split(flow, channels);
    auto mag = channels[0].clone();
    auto ang = channels[0].clone();
    cartToPolar(channels[0], channels[1], mag, ang);
    normalize(mag, mag, 0, 255, NORM_MINMAX);
    for (auto j = 0; j < result.rows; ++j) {
        for (auto i = 0; i < result.cols; ++i) {
            auto & p = result.at<Vec3b>(j, i);
            auto m = ang.at<float>(j, i);
            p.val[0] = m * 180.0 / pi<float>() / 2.0;                                  // hue: flow direction
            p.val[1] = 255;                                                            // full saturation
            p.val[2] = mag.at<float>(j, i) > ofThreshold ? 255 : mag.at<float>(j, i);  // value: flow magnitude
        }
    }
    //cvtColor(result, result, CV_HSV2BGR);
    return result;
}
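// Coarse-to-fine consensus: optical flow is computed on a pyramid of progressively
// downscaled copies of the two frames, and a pixel is marked as moving (255) only
// if every pyramid level reports a strong flow magnitude there.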
Mat OpticalPyramid(Mat localBack, Mat localTest) {
    vector<Mat> results;
    auto result = localBack.clone();
    auto cpyBack = localBack.clone();
    auto cpyTest = localTest.clone();
    auto size = Size(cpyTest.cols, cpyTest.rows);
    // Compute flow at each pyramid level (level 0 is the original resolution)
    for(auto i = 0; i < pyrDepth; ++i) {
        size /= (i != 0) ? pyrScale : 1;
        resize(cpyBack, cpyBack, size, 0, 0, INTER_AREA);
        resize(cpyTest, cpyTest, size, 0, 0, INTER_AREA);
        auto flow = OpticalFlow(cpyBack, cpyTest);
        results.push_back(flow.clone());
    }
    // Bring every level back to the original resolution
    size = Size(result.cols, result.rows);
    for(auto i = pyrDepth - 1; i > 0; --i) {
        resize(results[i], results[i], size, 0, 0, INTER_AREA);
    }
    // A pixel counts as moving only if all levels agree on a strong flow value
    for (auto j = 0; j < result.rows; ++j) {
        for (auto i = 0; i < result.cols; ++i) {
            auto fill = false;
            for(auto img : results) {
                fill = img.at<Vec3b>(j, i).val[2] > 200;
                if(!fill) {
                    break;
                }
            }
            result.at<uchar>(j, i) = fill ? 255 : 0;
        }
    }
    return result;
}
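// Undistorts a frame with a pinhole camera model: the focal length is derived from
// an assumed 60-degree field of view, and the five distortion coefficients are taken
// from the k1..k5 trackbars (each mapped from [0, 1000] to [-0.5, 0.5]).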
//TODO: decide to remove
int k1 = 500;
int k2 = 500;
int k3 = 500;
int k4 = 500;
int k5 = 500;

Mat Undistort(Mat current) {
    Mat result;
    auto fov = 60;
    auto x = current.cols / 2;
    auto y = current.rows / 2;
    // http://answers.opencv.org/question/17076/conversion-focal-distance-from-mm-to-pixels/?answer=17180#post-id-17180
    auto focalLengthX = x / tan(fov * 0.5 * pi<float>() / 180.0);
    auto focalLengthY = y / tan(fov * 0.5 * pi<float>() / 180.0);
    cv::Mat camera_matrix = (cv::Mat_<double>(3, 3) << focalLengthX, 0, x, 0, focalLengthY, y, 0, 0, 1);
    // http://stackoverflow.com/a/34024057/1973207
    Mat distortionCoefficients = (Mat1d(1, 5) << (float)(k1 - 500) / 1000.0, (float)(k2 - 500) / 1000.0, (float)(k3 - 500) / 1000.0, (float)(k4 - 500) / 1000.0, (float)(k5 - 500) / 1000.0);
    undistort(current, result, camera_matrix, distortionCoefficients);
    return result;
}
// Main logic: undistort -> grayscale -> optional blur -> pyramid optical flow -> blob filter -> overlay
void Update(int, void*) {
    pyrScale = (pyrScale <= 0) ? 1 : pyrScale;
    try {
        auto test = Undistort(tests[selectedImage].clone());
        auto uiTest = test.clone();
        cvtColor(test, test, CV_BGR2GRAY);
        auto localBack = Undistort(back.clone());
        auto localTest = test.clone();
        // Blur step
        if(useBlur) {
            auto blursize = Size(blurScale, blurScale);
            blur(localBack, localBack, blursize);
            if(!blurOne) {
                blur(localTest, localTest, blursize);
            }
        }
        // Optical flow (timed)
        Mat result, blobs;
        {
            ScopedTimer t;
            result = OpticalPyramid(localBack, localTest);
            blobs = findBigBlobs(result, blbSize * 1000);
        }
        // Colour the motion mask: large blobs get one hue, remaining motion another
        cvtColor(result, result, CV_GRAY2BGR);
        cvtColor(result, result, CV_BGR2HSV);
        for (auto j = 0; j < result.rows; ++j) {
            for (auto i = 0; i < result.cols; ++i) {
                auto & pix = result.at<Vec3b>(j, i);
                auto & segment = blobs.at<uchar>(j, i);
                if(segment > 200) {
                    pix.val[1] = 255;
                    pix.val[0] = 185;
                }
                else if(pix.val[2] > 200) {
                    pix.val[1] = 255;
                    pix.val[0] = 125;
                }
            }
        }
        cvtColor(result, result, CV_HSV2BGR);
        // Blend the overlay with the original frame and show it
        addWeighted(uiTest, 0.9, result, 0.9, 0.0, result);
        imshow("View", result);
    } catch (std::exception &e) {
        cout << e.what() << endl;
    }
}
int main() {
    auto maxImages = 5;
    cout << "This is a C++ OpenCV 3.2 Optical Flow Test." << endl
         << "It looks for a file named back.png and [frame0.png .. frame" << maxImages << ".png] in the current folder!" << endl;
    // Load images
    back = imread("./back.png", CV_LOAD_IMAGE_ANYCOLOR);
    if(back.empty()) {
        cout << "please provide image: ./back.png" << endl;
        cin.get();
        return 0;
    }
    back = equalizeIntensity(back);
    for(int i = 0; i <= maxImages; ++i) {
        auto currentName = string("./frame") + to_string(i) + ".png";
        auto current = imread(currentName, CV_LOAD_IMAGE_ANYCOLOR);
        if(current.empty()) {
            cout << "please provide image: " << currentName << endl;
            cin.get();
            return 0;
        }
        current = equalizeIntensity(current);
        tests.push_back(current);
    }
    // Prepare for optical flow
    cvtColor(back, back, CV_BGR2GRAY);
    // UI
    auto name = "Optical Flow Test";
    namedWindow(name, CV_WINDOW_FREERATIO);
    resizeWindow(name, 400, 600);
    createTrackbar("image:", name, &selectedImage, maxImages, &Update);
    createTrackbar("threshold:", name, &ofThreshold, 45, &Update);
    createTrackbar("blob ksize:", name, &blbSize, 30, &Update);
    createTrackbar("OF Scale:", name, &ofScale, 9, &Update);
    createTrackbar("OF Level:", name, &ofLevel, 5, &Update);
    createTrackbar("OF Size:", name, &ofSize, 65, &Update);
    createTrackbar("OF Iterations:", name, &ofIterations, 40, &Update);
    createTrackbar("OF Poly N:", name, &ofPolyN, 30, &Update);
    createTrackbar("OF Poly Sigma:", name, &ofPolySigma, 100, &Update);
    createTrackbar("Use Blur", name, &useBlur, 1, &Update);
    createTrackbar("B only BG:", name, &blurOne, 1, &Update);
    createTrackbar("B Scale:", name, &blurScale, 20, &Update);
    createTrackbar("Pyr depth:", name, &pyrDepth, 5, &Update);
    createTrackbar("Pyr scale:", name, &pyrScale, 6, &Update);
    createTrackbar("dist K1:", name, &k1, 1000, &Update);
    createTrackbar("dist K2:", name, &k2, 1000, &Update);
    createTrackbar("dist K3:", name, &k3, 1000, &Update);
    createTrackbar("dist K4:", name, &k4, 1000, &Update);
    createTrackbar("dist K5:", name, &k5, 1000, &Update);
    Update(0, 0);
    waitKey();
    cin.get();
    return 0;
}
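
// Build note (a sketch, not part of the original gist): assuming a system-wide
// OpenCV 3.2 install that ships a pkg-config file, something along these lines
// should compile the file above (saved here under the hypothetical name main.cpp):
//   g++ -std=c++11 main.cpp -o optical_flow_test `pkg-config --cflags --libs opencv`
// For an OpenCV build packaged as the single "world" library, link against
// opencv_world320 instead of the individual modules.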