A simple video stabilizer in OpenCV, based on goodFeaturesToTrack, calcOpticalFlowPyrLK and estimateRigidTransform.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>

#include <iostream>

using namespace cv;
using namespace std;

class Tracker {
    vector<Point2f> trackedFeatures;
    Mat             prevGray;

public:
    bool        freshStart;
    Mat_<float> rigidTransform;

    Tracker() : freshStart(true) {
        rigidTransform = Mat::eye(3,3,CV_32FC1); // affine 2x3 stored in a 3x3 matrix
    }

    void processImage(Mat& img) {
        Mat gray; cvtColor(img,gray,CV_BGR2GRAY);
        vector<Point2f> corners;

        // Replenish the feature pool when it runs low.
        if(trackedFeatures.size() < 200) {
            goodFeaturesToTrack(gray,corners,300,0.01,10);
            cout << "found " << corners.size() << " features\n";
            for (size_t i = 0; i < corners.size(); ++i) {
                trackedFeatures.push_back(corners[i]);
            }
        }

        if(!prevGray.empty()) {
            // Track the features from the previous frame into the current one.
            vector<uchar> status; vector<float> errors;
            calcOpticalFlowPyrLK(prevGray,gray,trackedFeatures,corners,status,errors,Size(10,10));

            // If too many features were lost, reset the tracker and start fresh.
            if(countNonZero(status) < status.size() * 0.8) {
                cout << "cataclysmic error\n";
                rigidTransform = Mat::eye(3,3,CV_32FC1);
                trackedFeatures.clear();
                prevGray.release();
                freshStart = true;
                return;
            } else
                freshStart = false;

            // Estimate the frame-to-frame rigid (similarity) transform and accumulate it.
            // estimateRigidTransform may fail and return an empty matrix; skip the update then.
            Mat_<float> newRigidTransform = estimateRigidTransform(trackedFeatures,corners,false);
            if(!newRigidTransform.empty()) {
                Mat_<float> nrt33 = Mat_<float>::eye(3,3);
                newRigidTransform.copyTo(nrt33.rowRange(0,2));
                rigidTransform *= nrt33;
            }

            // Keep only the features that were successfully tracked.
            trackedFeatures.clear();
            for (size_t i = 0; i < status.size(); ++i) {
                if(status[i]) {
                    trackedFeatures.push_back(corners[i]);
                }
            }
        }

        // Visualize the tracked features.
        for (size_t i = 0; i < trackedFeatures.size(); ++i) {
            circle(img,trackedFeatures[i],3,Scalar(0,0,255),CV_FILLED);
        }

        gray.copyTo(prevGray);
    }
};

int main() {
    VideoCapture vc;
    vc.open("myvideo.mp4");

    Mat frame,orig,orig_warped;
    Tracker tracker;

    while(vc.isOpened()) {
        vc >> frame;
        if(frame.empty()) break;

        frame.copyTo(orig);
        tracker.processImage(orig);

        // Warp the frame with the inverse of the accumulated motion to stabilize it.
        Mat invTrans = tracker.rigidTransform.inv(DECOMP_SVD);
        warpAffine(orig,orig_warped,invTrans.rowRange(0,2),Size());

        imshow("orig",orig_warped);
        int c = waitKey(0);
        if(c == ' ') {
            if(waitKey()==27) break;
        } else if(c == 27) break;
    }
    vc.release();
}
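Note that estimateRigidTransform is no longer available in OpenCV 4, so the listing above only builds against OpenCV 2.x/3.x headers as written. The fragment below is a minimal sketch, not part of the original gist, of how the estimation step could be swapped for estimateAffinePartial2D from the calib3d module, assuming an OpenCV 4.x build; the helper name accumulateMotion is made up for illustration.

// Minimal sketch, assuming OpenCV 4.x (where estimateRigidTransform no longer exists).
// estimateAffinePartial2D (opencv2/calib3d.hpp) returns a 2x3 CV_64F matrix,
// or an empty Mat when estimation fails.
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

using namespace cv;
using namespace std;

// Fold the frame-to-frame motion between 'prev' and 'curr' feature positions into
// the accumulated 3x3 transform, mirroring the update done in Tracker::processImage.
void accumulateMotion(const vector<Point2f>& prev, const vector<Point2f>& curr,
                      Mat_<float>& rigidTransform) {
    Mat newRigid = estimateAffinePartial2D(prev, curr); // 2x3, CV_64F
    if (newRigid.empty()) return;                       // estimation failed; keep previous accumulation
    Mat_<float> nrt33 = Mat_<float>::eye(3, 3);
    Mat newRigidF;
    newRigid.convertTo(newRigidF, CV_32F);              // match the float accumulator
    newRigidF.copyTo(nrt33.rowRange(0, 2));
    rigidTransform *= nrt33;
}

estimateAffinePartial2D solves for the same 4-degree-of-freedom similarity (rotation, uniform scale, translation) that estimateRigidTransform computed with fullAffine=false, but it returns doubles and an empty matrix on failure, hence the conversion and the guard.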
Hi!
Same question. I could open the video and display the interest points. Inside the window the black space constantly changes, but it doesn't really stabilize my video. I use my phone camera as the source with only small movement, so according to the video you posted it should work. Any ideas? Thanks