Vibecode to blur multiple people using AI in Python
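Requires `opencv-python`, `numpy`, and `ultralytics`, plus the model files named in the configuration: an Ultralytics YOLO checkpoint (`yolo12x.pt`; official checkpoint names are usually fetched automatically by `ultralytics` on first use) and OpenCV's res10 SSD face detector (`deploy.prototxt` and `res10_300x300_ssd_iter_140000.caffemodel`, distributed with OpenCV's DNN samples). Put `image.jpeg` next to the script and run it; the result is written to `blurred-image.jpeg`, prefixed with `debug-` when `DEBUG` is on.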
import cv2
import numpy as np
from ultralytics import YOLO
# --- Configuration ---
DEBUG = True
IMAGE_PATH = "image.jpeg"
OUTPUT_PATH = f"{'debug-' if DEBUG else ''}blurred-image.jpeg"
YOLO_MODEL_PATH = "yolo12x.pt"
FACE_PROTO_PATH = "deploy.prototxt"
FACE_MODEL_PATH = "res10_300x300_ssd_iter_140000.caffemodel"
SCALES = [0.5, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5]  # Try more if needed
YOLO_CONF = 0.005  # Lower confidence catches more people, but may increase false positives
YOLO_IOU = 0.05  # Lower IoU threshold for YOLO's internal NMS allows more overlapping boxes
YOLO_NMS = 0.99  # Higher NMS threshold keeps more boxes after multi-scale merging
FACE_CONF = 0.15
FACE_PERSON_IOU = 0.1  # Faces overlapping a person box by more than this IoU are skipped (already blurred)
def box_iou(boxA, boxB):
    """Intersection-over-Union of two [x1, y1, x2, y2] boxes."""
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    interArea = max(0, xB - xA) * max(0, yB - yA)
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    union = boxAArea + boxBArea - interArea
    if union == 0:
        return 0.0
    return interArea / float(union)
# --- Load image ---
image = cv2.imread(IMAGE_PATH)
if image is None:
    raise IOError(f"Cannot load image: {IMAGE_PATH}")
h, w = image.shape[:2]
# --- Load YOLO model ---
model = YOLO(YOLO_MODEL_PATH)
# --- Multi-scale YOLO person detection ---
person_boxes = []
person_scores = []
for scale in SCALES:
    if DEBUG:
        print(f"Scale {scale}:")
    scaled_img = cv2.resize(image, (int(w * scale), int(h * scale)))
    results = model(scaled_img, conf=YOLO_CONF, iou=YOLO_IOU)
    for result in results:
        for box in result.boxes:
            cls_id = int(box.cls[0])
            if model.names[cls_id].lower() == "person":
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                # Scale coordinates back to the original image
                x1 = int(x1 / scale)
                y1 = int(y1 / scale)
                x2 = int(x2 / scale)
                y2 = int(y2 / scale)
                conf = float(box.conf[0])
                person_boxes.append([x1, y1, x2, y2])
                person_scores.append(conf)
    if DEBUG:
        print(f"  Person boxes so far (all scales): {len(person_boxes)}")
# --- Apply Non-Maximum Suppression (NMS) to person boxes ---
# cv2.dnn.NMSBoxes expects boxes as [x, y, width, height], so convert from [x1, y1, x2, y2]
nms_boxes = [[x1, y1, x2 - x1, y2 - y1] for x1, y1, x2, y2 in person_boxes]
indices = cv2.dnn.NMSBoxes(
    nms_boxes, person_scores, score_threshold=YOLO_CONF, nms_threshold=YOLO_NMS
)
if len(indices) > 0:
    indices = np.array(indices).flatten()
    final_person_boxes = [person_boxes[i] for i in indices]
else:
    final_person_boxes = []
if DEBUG:
    print(f"After NMS: {len(final_person_boxes)} person boxes.")
# --- Face detection using OpenCV DNN ---
face_net = cv2.dnn.readNetFromCaffe(FACE_PROTO_PATH, FACE_MODEL_PATH)
blob = cv2.dnn.blobFromImage(
    cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0)
)
face_net.setInput(blob)
detections = face_net.forward()
if DEBUG:
    print("Detections shape:", detections.shape)
    for i in range(detections.shape[2]):
        print(f"Detection {i}: confidence={detections[0, 0, i, 2]}")
face_boxes = []
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > FACE_CONF:
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        x1, y1, x2, y2 = box.astype("int")
        x1, y1, x2, y2 = max(x1, 0), max(y1, 0), min(x2, w), min(y2, h)
        # Only add if not significantly overlapping a detected person box
        is_inside = False
        for px1, py1, px2, py2 in final_person_boxes:
            if box_iou([x1, y1, x2, y2], [px1, py1, px2, py2]) > FACE_PERSON_IOU:
                is_inside = True
                break
        if not is_inside:
            face_boxes.append([x1, y1, x2, y2])
if DEBUG:
    print(f"Face boxes (outside person): {len(face_boxes)}")
# --- Blur detected regions (debug mode also draws boxes) ---
output_img = image.copy()
for x1, y1, x2, y2 in final_person_boxes:
    person = image[y1:y2, x1:x2]
    if person.size > 0:
        blurred = cv2.GaussianBlur(person, (51, 51), 0)
        output_img[y1:y2, x1:x2] = blurred
        if DEBUG:
            # Draw after blurring so the rectangle is not overwritten
            cv2.rectangle(
                output_img, (x1, y1), (x2, y2), (0, 255, 0), 3
            )  # Green rectangle with thickness 3 for people
for x1, y1, x2, y2 in face_boxes:
    face = image[y1:y2, x1:x2]
    if face.size > 0:
        blurred = cv2.GaussianBlur(face, (51, 51), 0)
        output_img[y1:y2, x1:x2] = blurred
        if DEBUG:
            cv2.rectangle(
                output_img, (x1, y1), (x2, y2), (255, 0, 0), 2
            )  # Blue for faces
# --- Save output ---
cv2.imwrite(OUTPUT_PATH, output_img)
print(f"Output saved as {OUTPUT_PATH}")
print(
    f"People detected: {len(final_person_boxes)} | Faces detected (outside person boxes): {len(face_boxes)}"
)