Skip to content

Instantly share code, notes, and snippets.

@sikang99
Last active March 3, 2025 13:45
Show Gist options
  • Save sikang99/431011fdd4aa07c6b126fc6a790105d1 to your computer and use it in GitHub Desktop.
Video Stabilization

Video Stabilization

  • AIS : AI Image Stabilization
  • DIS : Digital Image Stabilization
  • EIS : Electronic Image Stabilization
  • HIS : Hybrid Image Stabilization = EIS + OIS
  • OIS : Optical Image Stabilization

Articles

Information

Videos

Papers

Open Source

@sikang99
Copy link
Author

sikang99 commented Nov 22, 2023

import cv2
import numpy as np

class SimpleStabilization:
    """Translation-only video stabilizer based on sparse optical flow.

    Tracks Shi-Tomasi corner features from frame to frame with the
    Lucas-Kanade pyramidal optical-flow method, estimates the mean
    translation between consecutive frames, and warps each frame by the
    inverse of that translation to cancel camera shake.
    """

    def __init__(self):
        # Grayscale version of the previously processed frame (None until
        # the first frame has been seen).
        self.prev_gray = None
        # Feature points tracked in prev_gray, shape (N, 1, 2) float32,
        # or None when (re-)detection is needed.
        self.prev_keypoints = None
        # Shi-Tomasi corner-detection parameters.
        self.feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
        # Lucas-Kanade parameters; hoisted here because they are
        # loop-invariant (the original rebuilt this dict every call).
        self.lk_params = dict(winSize=(15, 15), maxLevel=2,
                              criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    def stabilize(self, frame, motion_data):
        """Return a translation-stabilized copy of *frame*.

        Args:
            frame: BGR image (numpy array) as returned by cv2.VideoCapture.
            motion_data: accepted for interface compatibility; unused.

        Returns:
            The warped (stabilized) frame, or the input frame unchanged
            when tracking is not yet possible (first frames, lost track).
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # First frame ever: nothing to compare against yet.
        if self.prev_gray is None:
            self.prev_gray = gray
            self.prev_keypoints = cv2.goodFeaturesToTrack(gray, mask=None, **self.feature_params)
            return frame

        # BUG FIX: the original only detected features inside an
        # `if self.prev_keypoints is not None:` branch and reset the
        # keypoints to None on every other path, so features were NEVER
        # detected and the stabilizer always returned the raw frame.
        # Re-detect here whenever we have no (or too few) points.
        if self.prev_keypoints is None or len(self.prev_keypoints) < 4:
            self.prev_keypoints = cv2.goodFeaturesToTrack(gray, mask=None, **self.feature_params)
            self.prev_gray = gray
            return frame

        keypoints, status, _error = cv2.calcOpticalFlowPyrLK(
            self.prev_gray, gray, self.prev_keypoints, None, **self.lk_params)

        if keypoints is not None and status is not None:
            # Keep only the successfully tracked points (status == 1),
            # in both the new and the previous frame so they correspond.
            tracked = status.ravel() == 1
            good_new = keypoints[tracked]
            good_old = self.prev_keypoints[tracked]

            if len(good_new) >= 4:
                # BUG FIX: the original averaged the absolute keypoint
                # POSITIONS, not their motion, which would translate the
                # frame by the feature location. The inter-frame motion
                # is the mean displacement new - old.
                flow = (good_new - good_old).reshape(-1, 2)
                dx = float(np.mean(flow[:, 0]))
                dy = float(np.mean(flow[:, 1]))

                # Advance tracking state to the current frame.
                self.prev_gray = gray
                self.prev_keypoints = good_new.reshape(-1, 1, 2)

                # 2x3 affine matrix applying the inverse translation
                # (the original comment mislabeled this as 3x3).
                translation_matrix = np.float32([[1, 0, -dx], [0, 1, -dy]])
                return cv2.warpAffine(frame, translation_matrix,
                                      (frame.shape[1], frame.shape[0]))

        # Tracking failed: advance the reference frame (the original left
        # it stale) and force feature re-detection on the next call.
        self.prev_gray = gray
        self.prev_keypoints = None
        return frame

# Initialize video capture (replace '0' with the appropriate camera index)
cap = cv2.VideoCapture(0)

# Initialize the video stabilization algorithm
stabilizer = SimpleStabilization()

try:
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # BUG FIX: the original `continue`d on a failed read, which spins
        # in a tight infinite loop once the camera disconnects or the
        # stream ends. Break out instead.
        if not ret:
            print("Error: Could not read frame.")
            break

        # Apply stabilization algorithm (motion_data is unused by
        # SimpleStabilization; kept for interface compatibility)
        stabilized_frame = stabilizer.stabilize(frame, motion_data=None)

        # Display the resulting frame
        cv2.imshow('Stabilized Video', stabilized_frame)

        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the capture and close windows even if the loop raised
    cap.release()
    cv2.destroyAllWindows()

@sikang99
Copy link
Author

# import required libraries
from vidgear.gears.stabilizer import Stabilizer
import cv2

# Open the unstabilized source clip (use a camera index like 0 for live input)
stream = cv2.VideoCapture("UnstabilizedTest10sec.mp4")

# Create the stabilizer with explicit tuning; Stabilizer() with no
# arguments would use the library defaults.
stab = Stabilizer(smoothing_radius=30, crop_n_zoom=True, border_size=5, logging=True)

# Process the clip frame by frame until it is exhausted or 'q' is pressed
while True:
    grabbed, frame = stream.read()

    # End of file (or read failure): stop processing
    if not grabbed:
        break

    # Feed the current frame through the stabilizer
    result = stab.stabilize(frame)

    # The stabilizer returns None while its smoothing window is still
    # filling up during initialization — skip those frames
    if result is None:
        continue

    # {do something with the stabilized frame here}

    # Render the stabilized output
    cv2.imshow("Stabilized Frame", result)

    # Quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# Tear down: close the display, free stabilizer internals, then the stream
cv2.destroyAllWindows()
stab.clean()
stream.release()

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment