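# Googly-eyes virtual webcam (notebook cell): capture the real webcam with
# OpenCV, overlay googly eyes that track MediaPipe Face Mesh landmarks, and
# stream the result to a virtual camera via pyvirtualcam.
# Dependencies (package names assumed from the imports below):
#   pip install opencv-python numpy mediapipe pyvirtualcam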
import time
import math

import numpy as np
import pyvirtualcam
import cv2

# Use MediaPipe Face Mesh for robust, real-time facial landmarks
try:
    import mediapipe as mp
except ImportError as e:
    raise ImportError(
        "This cell now uses MediaPipe. Install with: pip install mediapipe"
    ) from e

mp_face_mesh = mp.solutions.face_mesh

# Global handle for FaceMesh so we can close it on shutdown
_face_mesh = None


def _clamp(val: float, lo: float, hi: float) -> float:
    return max(lo, min(hi, val))


def _draw_googly_eye(
    img: np.ndarray,
    center: tuple[int, int],
    eye_radius: int,
    pupil_offset: tuple[float, float],
) -> None:
    """Draw a single googly eye at center with given radius and pupil offset.

    - White sclera: filled circle
    - Black border
    - Pupil: dark circle limited to within the sclera (with a small margin)
    """
    x, y = int(center[0]), int(center[1])
    r = int(max(4, eye_radius))
    # White sclera
    cv2.circle(img, (x, y), r, (255, 255, 255), -1, lineType=cv2.LINE_AA)
    # Slight border
    cv2.circle(img, (x, y), r, (0, 0, 0), 2, lineType=cv2.LINE_AA)
    # Pupil size and movement range
    pupil_r = max(3, int(r * 0.35))
    max_offset = r - pupil_r - 2  # keep the pupil inside the white
    # Apply offset and clamp to circle boundary
    dx, dy = pupil_offset
    mag = math.hypot(dx, dy)
    if mag > 1e-6:
        scale = _clamp(max_offset / mag, 0.0, 1.0)
        dx *= scale
        dy *= scale
    px = int(round(x + dx))
    py = int(round(y + dy))
    cv2.circle(img, (px, py), pupil_r, (30, 30, 30), -1, lineType=cv2.LINE_AA)
    # Small highlight for a fun effect
    cv2.circle(img, (px - pupil_r // 3, py - pupil_r // 3), max(1, pupil_r // 4), (230, 230, 230), -1, lineType=cv2.LINE_AA)
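

# Worked example of the pupil clamp above (numbers derived from the code, not
# part of the original gist): with eye_radius = 20, pupil_r = int(20 * 0.35) = 7
# and max_offset = 20 - 7 - 2 = 11, so a tracked iris offset of (30, 0) is
# scaled by 11/30 to land at (11, 0), keeping the pupil inside the sclera.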


def edit_frame(frame: np.ndarray, t: float) -> np.ndarray:
    """
    Edit hook: flip horizontally, run MediaPipe Face Mesh, and overlay googly eyes
    that track the eye centers and iris landmarks.

    Args:
        frame: BGR uint8 image from OpenCV with shape (H, W, 3)
        t: elapsed time in seconds since start (float)

    Returns:
        BGR uint8 image with same shape as input.
    """
    out = cv2.flip(frame, 1)  # mirror first so overlays read correctly
    if _face_mesh is None:
        return out
    h, w = out.shape[:2]
    # MediaPipe expects RGB and a non-writeable array for speed
    rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
    rgb.flags.writeable = False
    results = _face_mesh.process(rgb)
    rgb.flags.writeable = True
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Helper to map a landmark index to pixel coords
            def lm_xy(idx: int) -> tuple[int, int]:
                lm = face_landmarks.landmark[idx]
                return int(lm.x * w), int(lm.y * h)

            # Landmark indices (Face Mesh with refine_landmarks=True)
            # Right eye (subject's right): outer corner 33, inner 133, iris center 468
            # Left eye (subject's left): outer corner 263, inner 362, iris center 473
            r_outer = np.array(lm_xy(33), dtype=np.float32)
            r_inner = np.array(lm_xy(133), dtype=np.float32)
            r_iris = np.array(lm_xy(468), dtype=np.float32)
            l_outer = np.array(lm_xy(263), dtype=np.float32)
            l_inner = np.array(lm_xy(362), dtype=np.float32)
            l_iris = np.array(lm_xy(473), dtype=np.float32)
            # Eye centers and radii
            r_center = (r_outer + r_inner) * 0.5
            l_center = (l_outer + l_inner) * 0.5
            # Approximate eye radius as 1.2 x half the corner-to-corner eye width
            r_radius = max(6.0, 1.2 * (np.linalg.norm(r_outer - r_inner) * 0.5))
            l_radius = max(6.0, 1.2 * (np.linalg.norm(l_outer - l_inner) * 0.5))
            # Pupil offset relative to center (pixels)
            r_offset = (r_iris - r_center)
            l_offset = (l_iris - l_center)
            # Optional wobble to add 'googly' randomness
            wobble = 0.3 * math.sin(6.0 * t)
            r_offset = r_offset * (1.0 + wobble)
            l_offset = l_offset * (1.0 + wobble)
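            # (sin(6.0 * t) cycles at roughly 1 Hz, so the tracked offset is
            # scaled by +/-30% for a jittery, googly feel.)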
            # Draw eyes
            _draw_googly_eye(out, (int(r_center[0]), int(r_center[1])), int(r_radius), (float(r_offset[0]), float(r_offset[1])))
            _draw_googly_eye(out, (int(l_center[0]), int(l_center[1])), int(l_radius), (float(l_offset[0]), float(l_offset[1])))
    return out
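

# edit_frame() above is the user-editable hook: the streaming loop below calls
# it once per captured frame, so swapping its body out is all that is needed
# for a different per-frame effect.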

# Select the input camera device (0 is usually the built-in webcam)
camera_index = 0

# Initialize camera capture
cap = cv2.VideoCapture(camera_index)
if not cap.isOpened():
    raise RuntimeError(f"Failed to open camera index {camera_index}")

# Read a single frame to determine size and to ensure the camera works
ok, frame = cap.read()
if not ok or frame is None:
    cap.release()
    raise RuntimeError("Failed to read initial frame from the camera")
h, w = frame.shape[:2]

# FPS can be 0/NaN on some systems; default to 30 if unknown
# (cap.get always returns a float, so checking <= 0 and NaN is sufficient)
fps = cap.get(cv2.CAP_PROP_FPS)
if fps <= 0 or math.isnan(fps):
    fps = 30.0
print(f"Input camera opened: index={camera_index}, size={w}x{h}, fps≈{fps:.1f}")

try:
    # Create MediaPipe Face Mesh once; this cell runs at module scope, so the
    # module-level _face_mesh can be assigned without a `global` statement
    _face_mesh = mp_face_mesh.FaceMesh(
        static_image_mode=False,
        refine_landmarks=True,  # enables iris landmarks (468-477)
        max_num_faces=5,
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5,
    )
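    # Note: pyvirtualcam requires a virtual-camera backend to be installed
    # (e.g. OBS Virtual Camera on Windows/macOS, v4l2loopback on Linux) and
    # raises an error when constructing Camera() if none is found.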
    # Create virtual camera using the same size and fps as the real camera
    with pyvirtualcam.Camera(width=w, height=h, fps=int(round(fps))) as cam:
        print(f"Using virtual camera: {cam.device}")
        print("Streaming frames… Interrupt the kernel (stop button) to end.")
        t0 = time.time()
        while True:
            ok, frame = cap.read()
            if not ok or frame is None:
                continue
            # Apply user-editable hook
            t = time.time() - t0
            frame = edit_frame(frame, t)
            # Convert BGR (OpenCV) -> RGB (pyvirtualcam default)
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            cam.send(frame_rgb)
            cam.sleep_until_next_frame()
except KeyboardInterrupt:
    print("Stopping stream.")
finally:
    # Cleanup
    if _face_mesh is not None:
        _face_mesh.close()
        _face_mesh = None
    cap.release()
    print("Camera released and resources cleaned up.")