@nsdevaraj
Last active October 9, 2025 03:34
detect face
import React, { useRef, useEffect, useState } from 'react';
import * as faceapi from '@vladmandic/face-api';
function App() {
const videoRef = useRef(null);
const canvasRef = useRef(null);
const [photo, setPhoto] = useState(null);
const [validation, setValidation] = useState({});
const [modelsLoaded, setModelsLoaded] = useState(false);
const [modelsLoading, setModelsLoading] = useState(true);
const [error, setError] = useState(null);
const [liveValidation, setLiveValidation] = useState({});
const validatingRef = useRef(false); // guards against overlapping live-validation runs
useEffect(() => {
const loadModels = async () => {
const modelPaths = [
'/models',
`${window.location.origin}/models`,
'./models',
'https://raw.githubusercontent.com/vladmandic/face-api/master/model'
];
let modelsLoaded = false;
let lastError = null;
for (const MODEL_URL of modelPaths) {
console.log('Attempting to load models from:', MODEL_URL);
try {
// Test if models directory is accessible
const testResponse = await fetch(`${MODEL_URL}/ssd_mobilenetv1_model-weights_manifest.json`);
if (!testResponse.ok) {
throw new Error(`HTTP ${testResponse.status}: ${testResponse.statusText}`);
}
// Load all models
await Promise.all([
faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL),
faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
faceapi.nets.ageGenderNet.loadFromUri(MODEL_URL)
]);
console.log('Face-api models loaded successfully from:', MODEL_URL);
modelsLoaded = true;
break;
} catch (err) {
console.warn(`Failed to load models from ${MODEL_URL}:`, err.message);
lastError = err;
continue;
}
}
if (modelsLoaded) {
setModelsLoaded(true);
setModelsLoading(false);
} else {
console.error('All model loading attempts failed. Last error:', lastError);
setError(`Failed to load face detection models. Please check your internet connection and try refreshing the page.`);
setModelsLoading(false);
}
};
loadModels();
navigator.mediaDevices.getUserMedia({ video: true })
.then(stream => {
if (videoRef.current) {
videoRef.current.srcObject = stream;
}
})
.catch(err => {
setError('Failed to access webcam. Ensure permissions are granted.');
});
return () => {
if (videoRef.current && videoRef.current.srcObject) {
videoRef.current.srcObject.getTracks().forEach(track => track.stop());
}
};
}, []); // Run once on mount: load models and open the camera
// Real-time validation: poll every 2 seconds once models are loaded.
// Kept in a separate effect so its re-runs never tear down the camera stream or reload models.
useEffect(() => {
if (!modelsLoaded) return;
const validationInterval = setInterval(() => {
performLiveValidation();
}, 2000); // Check every 2 seconds
return () => clearInterval(validationInterval);
}, [modelsLoaded]);
const performLiveValidation = async () => {
const video = videoRef.current;
// Skip if the stream is not ready yet (videoWidth is 0) or a previous run is still in flight
if (!video || video.videoWidth === 0 || validatingRef.current) return;
validatingRef.current = true;
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
const results = await validatePhoto(canvas, false);
setLiveValidation(results);
validatingRef.current = false;
};
const takePhoto = async () => {
if (!modelsLoaded) {
setError('Models not loaded yet.');
return;
}
const video = videoRef.current;
const canvas = canvasRef.current;
if (!video || !canvas) return;
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
const dataUrl = canvas.toDataURL('image/png');
setPhoto(dataUrl);
await validatePhoto(canvas, true);
};
const validatePhoto = async (canvas, setValidationState = true) => {
let results = {};
// Face detection
const detections = await faceapi.detectAllFaces(canvas)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender();
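// Each result combines the bounding box (detection.detection.box), 68-point
// landmarks, per-expression confidences (detection.expressions.neutral, ...) and
// an estimated age/gender; the checks below rely on all four.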
if (detections.length === 0) {
results.faceVisible = false;
// Still check background even without face detection
results.plainBackground = checkPlainBackgroundSimple(canvas);
} else if (detections.length > 1) {
results.faceVisible = true;
results.oneFace = false;
// Use the first detected face for background check
const box = detections[0].detection.box;
results.plainBackground = checkPlainBackground(canvas, box);
} else {
const detection = detections[0];
results.faceVisible = true;
results.oneFace = true;
// Face size - more lenient thresholds
const box = detection.detection.box;
const faceArea = box.width * box.height;
const imageArea = canvas.width * canvas.height;
const areaPercentage = (faceArea / imageArea) * 100;
const heightPercentage = (box.height / canvas.height) * 100;
const widthPercentage = (box.width / canvas.width) * 100;
// Adjusted: Broader area (10-50%), height (40-90%), width (30-80%)
// Passes if area in range OR (height and width in range) OR height alone
const areaValid = areaPercentage >= 10 && areaPercentage <= 50;
const heightValid = heightPercentage >= 40 && heightPercentage <= 90;
const widthValid = widthPercentage >= 30 && widthPercentage <= 80;
results.faceSize = areaValid || (heightValid && widthValid) || heightValid;
// Debug logging
console.log('Face size validation:', {
areaPercentage: areaPercentage.toFixed(1),
heightPercentage: heightPercentage.toFixed(1),
widthPercentage: widthPercentage.toFixed(1),
areaValid,
heightValid,
widthValid,
finalResult: results.faceSize
});
// Centered (within 10% of center)
const centerX = box.x + box.width / 2;
const centerY = box.y + box.height / 2;
results.centered = Math.abs(centerX - canvas.width / 2) < canvas.width * 0.1 &&
Math.abs(centerY - canvas.height / 2) < canvas.height * 0.1;
// Not baby (age > 12, arbitrary)
results.notBaby = detection.age > 12;
// No coverings (approximate: if landmarks detected successfully and neutral expression confidence high)
results.noCoverings = detection.landmarks && detection.expressions.neutral > 0.5;
// Well-lit
results.wellLit = isWellLit(canvas);
// Not blurry
results.notBlurry = !isBlurry(canvas);
// Plain background
results.plainBackground = checkPlainBackground(canvas, box);
// Not pixelated (assume ok if resolution > 300x300)
results.notPixelated = canvas.width > 300 && canvas.height > 300;
}
// Original/not edited: assumed true since captured live
results.original = true;
if (setValidationState) {
setValidation(results);
}
return results;
};
const isWellLit = (canvas) => {
const ctx = canvas.getContext('2d');
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
let totalBrightness = 0;
for (let i = 0; i < imageData.data.length; i += 4) {
const r = imageData.data[i];
const g = imageData.data[i + 1];
const b = imageData.data[i + 2];
totalBrightness += (r + g + b) / 3;
}
const avgBrightness = totalBrightness / (imageData.data.length / 4);
return avgBrightness > 100; // Threshold 0-255
};
const isBlurry = (canvas) => {
const ctx = canvas.getContext('2d');
const w = canvas.width;
const h = canvas.height;
const imageData = ctx.getImageData(0, 0, w, h);
// Grayscale
const gray = new Uint8Array(w * h);
for (let i = 0; i < w * h; i++) {
const idx = i * 4;
gray[i] = (imageData.data[idx] * 0.299 + imageData.data[idx + 1] * 0.587 + imageData.data[idx + 2] * 0.114);
}
// Laplacian
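// (4-neighbour kernel [[0,1,0],[1,-4,1],[0,1,0]]: sharp edges produce large
// responses, so a low variance of the response over the image indicates blur)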
const lap = new Float32Array(w * h);
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
const idx = y * w + x;
lap[idx] = -4 * gray[idx] + gray[idx - 1] + gray[idx + 1] + gray[idx - w] + gray[idx + w];
}
}
// Variance (ignoring borders)
let mean = 0;
let count = (w - 2) * (h - 2);
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
mean += lap[y * w + x];
}
}
mean /= count;
let variance = 0;
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
variance += Math.pow(lap[y * w + x] - mean, 2);
}
}
variance /= count;
return variance < 100; // Low variance = blurry (adjust threshold as needed)
};
const checkPlainBackground = (canvas, box) => {
const ctx = canvas.getContext('2d');
const w = canvas.width;
const h = canvas.height;
const imageData = ctx.getImageData(0, 0, w, h);
let rSum = 0, gSum = 0, bSum = 0, count = 0;
let rSq = 0, gSq = 0, bSq = 0;
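// Track sums and sums of squares so per-channel variance falls out of a single
// pass as E[x^2] - E[x]^2.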
// Sample background pixels (excluding face area)
for (let y = 0; y < h; y++) {
for (let x = 0; x < w; x++) {
if (x < box.x || x > box.x + box.width || y < box.y || y > box.y + box.height) {
const idx = (y * w + x) * 4;
const r = imageData.data[idx];
const g = imageData.data[idx + 1];
const b = imageData.data[idx + 2];
rSum += r;
gSum += g;
bSum += b;
rSq += r * r;
gSq += g * g;
bSq += b * b;
count++;
}
}
}
if (count === 0) return false;
const rMean = rSum / count;
const gMean = gSum / count;
const bMean = bSum / count;
const rVar = (rSq / count) - rMean * rMean;
const gVar = (gSq / count) - gMean * gMean;
const bVar = (bSq / count) - bMean * bMean;
const totalVariance = rVar + gVar + bVar;
// Stricter background check - only very plain or very light backgrounds
const avgBrightness = (rMean + gMean + bMean) / 3;
// Only two criteria - passes if either is met
const isVeryPlain = totalVariance < 5000; // Very uniform background
const isVeryLight = avgBrightness > 120; // Very bright
// Pass if: (very plain) OR (very light regardless of variance)
const result = isVeryPlain || isVeryLight;
console.log('Background check:', {
totalVariance: totalVariance.toFixed(0),
avgBrightness: avgBrightness.toFixed(0),
isVeryPlain,
isVeryLight,
result
});
return result;
};
const checkPlainBackgroundSimple = (canvas) => {
const ctx = canvas.getContext('2d');
const w = canvas.width;
const h = canvas.height;
const imageData = ctx.getImageData(0, 0, w, h);
let rSum = 0, gSum = 0, bSum = 0, count = 0;
let rSq = 0, gSq = 0, bSq = 0;
const borderSize = Math.min(w, h) * 0.1;
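// With no face box available, sample a border frame (10% of the short side) as a
// stand-in for the background region.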
for (let y = 0; y < h; y++) {
for (let x = 0; x < w; x++) {
if (x < borderSize || x > w - borderSize || y < borderSize || y > h - borderSize) {
const idx = (y * w + x) * 4;
const r = imageData.data[idx];
const g = imageData.data[idx + 1];
const b = imageData.data[idx + 2];
rSum += r;
gSum += g;
bSum += b;
rSq += r * r;
gSq += g * g;
bSq += b * b;
count++;
}
}
}
if (count === 0) return true;
const rMean = rSum / count;
const gMean = gSum / count;
const bMean = bSum / count;
const rVar = (rSq / count) - rMean * rMean;
const gVar = (gSq / count) - gMean * gMean;
const bVar = (bSq / count) - bMean * bMean;
const totalVariance = rVar + gVar + bVar;
// Stricter background check - same logic as main function
const avgBrightness = (rMean + gMean + bMean) / 3;
// Only two criteria - passes if either is met
const isVeryPlain = totalVariance < 5000; // Very uniform background
const isVeryLight = avgBrightness > 110; // Very bright (slightly lower for edge sampling)
// Pass if: (very plain) OR (very light regardless of variance)
const result = isVeryPlain || isVeryLight;
console.log('Simple background check:', {
totalVariance: totalVariance.toFixed(0),
avgBrightness: avgBrightness.toFixed(0),
isVeryPlain,
isVeryLight,
result
});
return result;
};
const allConditionsMet = (validationData = validation) => {
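// Fields that were never set (e.g. oneFace when no face was detected) are
// undefined, hence falsy, and correctly fail the check.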
return validationData.original &&
validationData.notBaby &&
validationData.faceVisible &&
validationData.oneFace &&
validationData.wellLit &&
validationData.notBlurry &&
validationData.plainBackground &&
validationData.centered &&
validationData.faceSize &&
validationData.noCoverings &&
validationData.notPixelated;
};
const downloadPhoto = () => {
if (photo) {
const link = document.createElement('a');
link.download = `passport-photo-${new Date().toISOString().slice(0, 19).replace(/:/g, '-')}.png`;
link.href = photo;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
}
};
const recapturePhoto = () => {
setPhoto(null);
setValidation({});
};
const getVideoBorderStyle = () => {
if (!modelsLoaded || Object.keys(liveValidation).length === 0) {
return { border: '3px solid #ccc' };
}
const isValid = allConditionsMet(liveValidation);
return {
border: `3px solid ${isValid ? '#4CAF50' : '#f44336'}`,
borderRadius: '8px'
};
};
const getInstructionStyle = (condition) => {
if (Object.keys(liveValidation).length === 0) {
return {};
}
let isValid = false;
switch (condition) {
case 'original':
isValid = liveValidation.original;
break;
case 'notBaby':
isValid = liveValidation.notBaby;
break;
case 'faceVisible':
isValid = liveValidation.faceVisible && liveValidation.wellLit;
break;
case 'noCoverings':
isValid = liveValidation.noCoverings;
break;
case 'plainBackground':
isValid = liveValidation.plainBackground;
break;
case 'quality':
isValid = liveValidation.notBlurry && liveValidation.wellLit && liveValidation.notPixelated;
break;
case 'centered':
isValid = liveValidation.faceVisible && liveValidation.centered;
break;
case 'faceSize':
isValid = liveValidation.faceSize;
break;
default:
return {};
}
return {
color: isValid ? '#4CAF50' : '#f44336',
fontWeight: isValid ? 'normal' : 'bold'
};
};
return (
<div style={{ textAlign: 'center', padding: '20px' }}>
<h1>Passport-Style Photo Capture</h1>
{error && <p style={{ color: 'red' }}>{error}</p>}
<canvas ref={canvasRef} style={{ display: 'none' }} />
{!photo ? (
<div style={{
display: 'flex',
justifyContent: 'center',
alignItems: 'flex-start',
gap: '40px',
maxWidth: '1200px',
margin: '0 auto'
}}>
<div style={{ flex: '0 0 auto' }}>
<video ref={videoRef} autoPlay muted width="640" height="480" style={getVideoBorderStyle()} />
<br />
<button
onClick={takePhoto}
disabled={!modelsLoaded}
style={{
marginTop: '15px',
padding: '12px 24px',
fontSize: '16px',
backgroundColor: modelsLoaded ? '#2196F3' : '#ccc',
color: 'white',
border: 'none',
borderRadius: '8px',
cursor: modelsLoaded ? 'pointer' : 'not-allowed'
}}
>
{modelsLoading ? 'Loading Models...' : modelsLoaded ? 'Capture Photo' : 'Models Failed to Load'}
</button>
</div>
<div style={{
flex: '1',
textAlign: 'left',
maxWidth: '400px',
padding: '20px',
backgroundColor: '#f9f9f9',
borderRadius: '12px',
border: '1px solid #e0e0e0'
}}>
<h2 style={{ marginTop: '0', color: '#333' }}>Instructions</h2>
<ul style={{ lineHeight: '1.6', paddingLeft: '20px' }}>
<li style={getInstructionStyle('original')}>Please use an original photo of yourself & not an edited/photoshopped one</li>
<li style={getInstructionStyle('notBaby')}>Your picture should look like how you look now & not the baby version of you 😁</li>
<li style={getInstructionStyle('faceVisible')}>Make sure your face is clearly visible and well-lit.</li>
<li style={getInstructionStyle('noCoverings')}>Avoid sunglasses, hats, or anything that covers the face.</li>
<li style={getInstructionStyle('plainBackground')}>A plain/light-colored background is best.</li>
<li style={getInstructionStyle('quality')}>The photo should not be blurry, dark, or pixelated.</li>
<li>A passport-size style photo works best.</li>
<li style={getInstructionStyle('faceSize')}>Ensure your face is appropriately sized (not too close or too far).</li>
</ul>
<div style={{
marginTop: '15px',
padding: '12px',
backgroundColor: '#e3f2fd',
borderRadius: '8px',
border: '1px solid #2196F3'
}}>
<h4 style={{ margin: '0 0 8px 0', color: '#1976D2', fontSize: '14px' }}>💡 Tip</h4>
<p style={{ margin: '0', fontSize: '13px', color: '#333', lineHeight: '1.4' }}>
Preview the video to ensure your full face is visible and centered before capturing.
</p>
</div>
{Object.keys(liveValidation).length > 0 && (
<div style={{
marginTop: '20px',
padding: '15px',
backgroundColor: allConditionsMet(liveValidation) ? '#e8f5e8' : '#ffeaea',
borderRadius: '8px',
border: `2px solid ${allConditionsMet(liveValidation) ? '#4CAF50' : '#f44336'}`
}}>
<h3 style={{ margin: '0 0 10px 0', color: '#333' }}>Live Validation Status</h3>
<p style={{
color: allConditionsMet(liveValidation) ? '#4CAF50' : '#f44336',
fontWeight: 'bold',
fontSize: '16px',
margin: '0'
}}>
{allConditionsMet(liveValidation) ? '✅ Ready to capture!' : '❌ Adjust your position'}
</p>
</div>
)}
</div>
</div>
) : (
<div style={{ maxWidth: '800px', margin: '0 auto' }}>
<h2 style={{ marginBottom: '20px' }}>Photo Preview</h2>
<div style={{
marginBottom: '30px',
border: `4px solid ${allConditionsMet() ? '#4CAF50' : '#f44336'}`,
borderRadius: '12px',
display: 'inline-block',
padding: '8px',
backgroundColor: 'white'
}}>
<img
src={photo}
alt="Captured Photo"
width="640"
height="480"
style={{ borderRadius: '8px' }}
/>
</div>
<div style={{ marginBottom: '30px' }}>
<button
onClick={downloadPhoto}
style={{
backgroundColor: '#4CAF50',
color: 'white',
padding: '15px 30px',
border: 'none',
borderRadius: '8px',
fontSize: '18px',
cursor: 'pointer',
marginRight: '20px',
fontWeight: 'bold'
}}
>
📥 Download Photo
</button>
<button
onClick={recapturePhoto}
style={{
backgroundColor: '#2196F3',
color: 'white',
padding: '15px 30px',
border: 'none',
borderRadius: '8px',
fontSize: '18px',
cursor: 'pointer',
fontWeight: 'bold'
}}
>
📷 Re-capture
</button>
</div>
<div style={{
padding: '20px',
backgroundColor: allConditionsMet() ? '#e8f5e8' : '#ffeaea',
borderRadius: '12px',
border: `2px solid ${allConditionsMet() ? '#4CAF50' : '#f44336'}`,
marginBottom: '20px'
}}>
{allConditionsMet() ? (
<div>
<h3 style={{ color: '#4CAF50', margin: '0 0 10px 0' }}>✅ Perfect! All conditions met</h3>
<p style={{ color: '#333', margin: '0' }}>Your photo meets all passport requirements and is ready to download.</p>
</div>
) : (
<div>
<h3 style={{ color: '#f44336', margin: '0 0 10px 0' }}>❌ Some conditions not met</h3>
<p style={{ color: '#333', margin: '0' }}>Please re-capture to ensure your photo meets all requirements.</p>
</div>
)}
</div>
<div style={{
textAlign: 'left',
backgroundColor: '#f9f9f9',
padding: '20px',
borderRadius: '12px',
border: '1px solid #e0e0e0'
}}>
<h3 style={{ marginTop: '0', color: '#333' }}>Validation Details</h3>
<ul style={{ listStyleType: 'none', paddingLeft: '0', lineHeight: '1.8', color: '#000' }}>
<li style={{ color: '#000' }}>Original & not edited/photoshopped: {validation.original ? '✅ Yes (live capture)' : '❌ No'}</li>
<li style={{ color: '#000' }}>Looks like current you (not baby version): {validation.notBaby ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Face clearly visible and well-lit: {validation.faceVisible && validation.wellLit ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Avoid sunglasses, hats, or face coverings: {validation.noCoverings ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Plain/light-colored background: {validation.plainBackground ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Not blurry, dark, or pixelated: {validation.notBlurry && validation.wellLit && validation.notPixelated ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Full face visible and centered: {validation.faceVisible && validation.centered ? '✅ Yes' : '❌ No'}</li>
<li style={{ color: '#000' }}>Face is appropriately sized: {validation.faceSize ? '✅ Yes' : '❌ No'}</li>
</ul>
</div>
</div>
)}
</div>
);
}
export default App;
// Instructions to set up the React app:
// 1. Run: npx create-react-app photo-capture-app
// 2. cd photo-capture-app
// 3. Run: npm install @vladmandic/face-api
// 4. Download the face-api models from https://github.com/vladmandic/face-api/tree/master/model
// and place them in a new folder: public/models/
// 5. Replace src/App.js with the App component code in this gist.
// 6. Optionally, update src/App.css or index.css for styling if needed.
// 7. Run: npm start
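// For reference (an assumption based on the vladmandic/face-api model repo): each
// network ships as a <name>-weights_manifest.json plus binary weight shards, e.g.
// public/models/ssd_mobilenetv1_model-weights_manifest.json -- the same manifest
// file the loader probes for above.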
// The app will access the webcam, allow capturing a photo, and validate it against the conditions using face detection and image analysis.
// Note: The validations are automated where possible, with arbitrary thresholds that can be adjusted. Some conditions (e.g., no hats/sunglasses) are approximated.
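// As the note above says, the thresholds are arbitrary. One possible refactor (a
// sketch, not part of the original gist; the object and field names below are
// illustrative assumptions) is to collect them in a single tunable place:
const VALIDATION_THRESHOLDS = {
minAvgBrightness: 100, // isWellLit: mean pixel brightness (0-255)
blurVarianceFloor: 100, // isBlurry: Laplacian variance below this = blurry
plainBackgroundVariance: 5000, // checkPlainBackground: summed RGB variance cap
minAge: 12, // notBaby: estimated age must exceed this
minNeutralExpression: 0.5 // noCoverings: neutral-expression confidence
};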
import React, { useRef, useEffect, useState } from 'react';
import * as faceapi from '@vladmandic/face-api';
function App() {
const videoRef = useRef(null);
const canvasRef = useRef(null);
const [photo, setPhoto] = useState(null);
const [validation, setValidation] = useState({});
const [modelsLoaded, setModelsLoaded] = useState(false);
const [error, setError] = useState(null);
useEffect(() => {
const loadModels = async () => {
const MODEL_URL = process.env.PUBLIC_URL + '/models';
try {
await faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL);
await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
await faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
await faceapi.nets.ageGenderNet.loadFromUri(MODEL_URL);
setModelsLoaded(true);
} catch (err) {
setError('Failed to load models. Ensure models are in public/models/.');
}
};
loadModels();
navigator.mediaDevices.getUserMedia({ video: true })
.then(stream => {
if (videoRef.current) {
videoRef.current.srcObject = stream;
}
})
.catch(err => {
setError('Failed to access webcam. Ensure permissions are granted.');
});
return () => {
if (videoRef.current && videoRef.current.srcObject) {
videoRef.current.srcObject.getTracks().forEach(track => track.stop());
}
};
}, []);
const takePhoto = async () => {
if (!modelsLoaded) {
setError('Models not loaded yet.');
return;
}
const video = videoRef.current;
const canvas = canvasRef.current;
if (!video || !canvas) return;
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
const dataUrl = canvas.toDataURL('image/png');
setPhoto(dataUrl);
await validatePhoto(canvas);
};
const validatePhoto = async (canvas) => {
let results = {};
// Face detection
const detections = await faceapi.detectAllFaces(canvas)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender();
if (detections.length === 0) {
results.faceVisible = false;
} else if (detections.length > 1) {
results.faceVisible = true;
results.oneFace = false;
} else {
const detection = detections[0];
results.faceVisible = true;
results.oneFace = true;
// Face size (70-80% of frame area, lenient to 60-90%)
const box = detection.detection.box;
const faceArea = box.width * box.height;
const imageArea = canvas.width * canvas.height;
const facePercentage = (faceArea / imageArea) * 100;
results.faceSize = facePercentage >= 60 && facePercentage <= 90;
// Centered (within 10% of center)
const centerX = box.x + box.width / 2;
const centerY = box.y + box.height / 2;
results.centered = Math.abs(centerX - canvas.width / 2) < canvas.width * 0.1 &&
Math.abs(centerY - canvas.height / 2) < canvas.height * 0.1;
// Not baby (age > 12, arbitrary)
results.notBaby = detection.age > 12;
// No coverings (approximate: if landmarks detected successfully and neutral expression confidence high)
results.noCoverings = detection.landmarks && detection.expressions.neutral > 0.5;
// Well-lit
results.wellLit = isWellLit(canvas);
// Not blurry
results.notBlurry = !isBlurry(canvas);
// Plain background
results.plainBackground = checkPlainBackground(canvas, box);
// Not pixelated (assume ok if resolution > 300x300)
results.notPixelated = canvas.width > 300 && canvas.height > 300;
}
// Original/not edited: assumed true since captured live
results.original = true;
setValidation(results);
};
const isWellLit = (canvas) => {
const ctx = canvas.getContext('2d');
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
let totalBrightness = 0;
for (let i = 0; i < imageData.data.length; i += 4) {
const r = imageData.data[i];
const g = imageData.data[i + 1];
const b = imageData.data[i + 2];
totalBrightness += (r + g + b) / 3;
}
const avgBrightness = totalBrightness / (imageData.data.length / 4);
return avgBrightness > 100; // Threshold 0-255
};
const isBlurry = (canvas) => {
const ctx = canvas.getContext('2d');
const w = canvas.width;
const h = canvas.height;
const imageData = ctx.getImageData(0, 0, w, h);
// Grayscale
const gray = new Uint8Array(w * h);
for (let i = 0; i < w * h; i++) {
const idx = i * 4;
gray[i] = (imageData.data[idx] * 0.299 + imageData.data[idx + 1] * 0.587 + imageData.data[idx + 2] * 0.114);
}
// Laplacian
const lap = new Float32Array(w * h);
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
const idx = y * w + x;
lap[idx] = -4 * gray[idx] + gray[idx - 1] + gray[idx + 1] + gray[idx - w] + gray[idx + w];
}
}
// Variance (ignoring borders)
let mean = 0;
let count = (w - 2) * (h - 2);
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
mean += lap[y * w + x];
}
}
mean /= count;
let variance = 0;
for (let y = 1; y < h - 1; y++) {
for (let x = 1; x < w - 1; x++) {
variance += Math.pow(lap[y * w + x] - mean, 2);
}
}
variance /= count;
return variance < 100; // Low variance = blurry (adjust threshold as needed)
};
const checkPlainBackground = (canvas, box) => {
const ctx = canvas.getContext('2d');
const w = canvas.width;
const h = canvas.height;
const imageData = ctx.getImageData(0, 0, w, h);
let rSum = 0, gSum = 0, bSum = 0, count = 0;
let rSq = 0, gSq = 0, bSq = 0;
for (let y = 0; y < h; y++) {
for (let x = 0; x < w; x++) {
if (x < box.x || x > box.x + box.width || y < box.y || y > box.y + box.height) {
const idx = (y * w + x) * 4;
const r = imageData.data[idx];
const g = imageData.data[idx + 1];
const b = imageData.data[idx + 2];
rSum += r;
gSum += g;
bSum += b;
rSq += r * r;
gSq += g * g;
bSq += b * b;
count++;
}
}
}
if (count === 0) return false;
const rMean = rSum / count;
const gMean = gSum / count;
const bMean = bSum / count;
const rVar = (rSq / count) - rMean * rMean;
const gVar = (gSq / count) - gMean * gMean;
const bVar = (bSq / count) - bMean * bMean;
const totalVariance = rVar + gVar + bVar;
return totalVariance < 1000; // Low variance = plain/uniform (adjust as needed)
};
const allConditionsMet = () => {
return validation.original &&
validation.notBaby &&
validation.faceVisible &&
validation.oneFace &&
validation.wellLit &&
validation.notBlurry &&
validation.plainBackground &&
validation.centered &&
validation.faceSize &&
validation.noCoverings &&
validation.notPixelated;
};
return (
<div style={{ textAlign: 'center' }}>
<h1>Passport-Style Photo Capture</h1>
{error && <p style={{ color: 'red' }}>{error}</p>}
<video ref={videoRef} autoPlay muted width="640" height="480" />
<br />
<button onClick={takePhoto} disabled={!modelsLoaded}>Capture Photo</button>
<canvas ref={canvasRef} style={{ display: 'none' }} />
{photo && (
<>
<h2>Preview</h2>
<img src={photo} alt="Captured Photo" width="640" height="480" />
<h2>Validation Results</h2>
<ul style={{ listStyleType: 'none' }}>
<li>Original & not edited/photoshopped: {validation.original ? '✅ Yes (live capture)' : '❌ No'}</li>
<li>Looks like current you (not baby version): {validation.notBaby ? '✅ Yes' : '❌ No'}</li>
<li>Face clearly visible and well-lit: {validation.faceVisible && validation.wellLit ? '✅ Yes' : '❌ No'}</li>
<li>Avoid sunglasses, hats, or face coverings: {validation.noCoverings ? '✅ Yes' : '❌ No'}</li>
<li>Plain/light-colored background: {validation.plainBackground ? '✅ Yes' : '❌ No'}</li>
<li>Not blurry, dark, or pixelated: {validation.notBlurry && validation.wellLit && validation.notPixelated ? '✅ Yes' : '❌ No'}</li>
<li>Full face visible and centered: {validation.faceVisible && validation.centered ? '✅ Yes' : '❌ No'}</li>
<li>Face covers about 70–80% of the frame: {validation.faceSize ? '✅ Yes' : '❌ No'}</li>
</ul>
{allConditionsMet() ? (
<p style={{ color: 'green' }}>All conditions met! You can use this photo.</p>
) : (
<p style={{ color: 'red' }}>Some conditions not met. Please retake the photo.</p>
)}
</>
)}
{!photo && (
<div>
<h2>Instructions</h2>
<ul>
<li>Please use an original photo of yourself & not an edited/photoshopped one</li>
<li>Your picture should look like how you look now & not the baby version of you 😁</li>
<li>Make sure your face is clearly visible and well-lit.</li>
<li>Avoid sunglasses, hats, or anything that covers the face.</li>
<li>A plain/light-colored background is best.</li>
<li>The photo should not be blurry, dark, or pixelated.</li>
<li>Preview it to ensure your full face is visible and centered.</li>
<li>A passport-size style photo works best.</li>
<li>Ensure your face covers about 70–80% of the frame.</li>
</ul>
</div>
)}
</div>
);
}
export default App;
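// For completeness, a minimal entry-point sketch (assuming the standard
// create-react-app layout with React 18; this belongs in src/index.js, which is
// why it is shown commented out here rather than as live code):
// import React from 'react';
// import ReactDOM from 'react-dom/client';
// import App from './App';
// ReactDOM.createRoot(document.getElementById('root')).render(<App />);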