@nicksheffield
Last active March 29, 2018 04:01
Facial Recognition + webcam + canvas
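Note: this demo depends on the experimental FaceDetector interface from the Shape Detection API. At the time of writing it only works in Chrome with the "Experimental Web Platform features" flag enabled (chrome://flags/#enable-experimental-web-platform-features), and getUserMedia requires a secure context (https or localhost).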
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>FACE</title>
  <style>
    /* the raw webcam feed and the offscreen buffer stay hidden; only #canvas is visible */
    video { display: none; }
    #canvas2 { display: none; }
  </style>
</head>
<body>
  <video id="video"></video>
  <canvas id="canvas" width="640" height="480"></canvas>
  <canvas id="canvas2" width="640" height="480"></canvas>
  <br>
  <button id="play_btn">Play</button>
  <!-- TweenMax (GSAP) smooths the jittery detector output between frames -->
  <script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/1.20.4/TweenMax.min.js"></script>
  <script>
    // geometry helpers: angle between two points (in degrees), straight-line
    // distance, and a vector for stepping `s` pixels along angle `a`
    const at = {
      angle: (a, b) => Math.atan2(b.y - a.y, b.x - a.x) / Math.PI * 180,
      dist: (a, b) => Math.sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)),
      step: (a, s) => ({ x: s * Math.cos(a * Math.PI / 180), y: s * Math.sin(a * Math.PI / 180) })
    }
    var play_btn = document.querySelector('#play_btn')
    var video = document.querySelector('#video')
    var canvas = document.querySelector('#canvas')   // visible output canvas
    var canvas2 = document.querySelector('#canvas2') // hidden buffer the detector reads from
    var ctx = canvas.getContext('2d')
    var ctx2 = canvas2.getContext('2d')
    // FaceDetector is experimental; guard the constructor so the rest of the
    // page still works in browsers where it is unavailable
    var detector = 'FaceDetector' in window ? new FaceDetector() : null
    // tweened state for each facial feature (`a` is an angle in degrees)
    var face = {
      data: null,
      head: { x: 0, y: 0, r: 0, scale: 0 },
      eye1: { x: 0, y: 0, r: 0, a: 0, scale: 0, side: 'none' },
      eye2: { x: 0, y: 0, r: 0, a: 0, scale: 0, side: 'none' },
      mouth: { x: 0, y: 0, a: 0, scale: 0 }
    }
    var smoothness = 0.1 // tween duration in seconds; lower tracks faster
    ctx.translate(-0.5, 0.5) // half-pixel nudge so 1px strokes render crisper
    var isPlaying = (el) => el.currentTime > 0 && el.paused === false && el.ended === false
    play_btn.addEventListener('click', function(event) {
      if (isPlaying(video)) {
        play_btn.innerHTML = 'Play'
        video.pause()
      } else {
        play_btn.innerHTML = 'Pause'
        video.play()
      }
    })
    // ask for the webcam; the stream is piped into the hidden <video> element
    navigator.mediaDevices.getUserMedia({ audio: false, video: { width: 640, height: 480 } })
      .then(stream => {
        video.srcObject = stream
      })
      .catch(err => {
        console.error('getUserMedia', err)
      })
    // poll the detector against the hidden buffer canvas; detect() is async and
    // slower than the render loop, so the latest result is cached on face.data
    setInterval(() => {
      if (detector && isPlaying(video)) {
        detector.detect(canvas2).then(val => {
          if (val[0]) {
            face.data = val[0]
          }
        })
      }
    }, 1000 / 60)
    // clamp n to a lower bound (a floor, despite the name)
    const min = (n, min = 0) => n < min ? min : n
    // run cb with the canvas origin moved to (x, y) and rotated by `a` degrees
    const rotate = (a, x, y, cb) => {
      ctx.save()
      ctx.translate(x, y)
      ctx.rotate(a * Math.PI / 180)
      cb()
      ctx.restore()
    }
    const renderHead = head => {
      // the yellow head circle is traced but deliberately left unfilled
      // ctx.globalCompositeOperation = 'overlay'
      ctx.fillStyle = 'yellow'
      ctx.beginPath()
      ctx.arc(head.x, head.y, head.r, 0, 2 * Math.PI, false)
      // ctx.fill()
      ctx.closePath()
      // ctx.globalCompositeOperation = 'source-over'
    }
    const renderEye = eye => {
      var scale = eye.scale
      rotate(eye.a, eye.x, eye.y, () => {
        // main eye circle
        ctx.fillStyle = 'white'
        ctx.strokeStyle = 'black'
        ctx.lineWidth = 3 / scale
        ctx.beginPath()
        ctx.arc(0, 0, min(eye.r) / scale, 0, 2 * Math.PI, false)
        ctx.closePath()
        ctx.fill()
        ctx.stroke()
        // pupil circle
        ctx.fillStyle = 'black' // eye.side === 'left' ? 'red' : 'blue'
        ctx.beginPath()
        ctx.arc(0, 0, min(eye.r / 2) / scale, 0, 2 * Math.PI, false)
        ctx.closePath()
        ctx.fill()
        // eyebrow
        ctx.strokeStyle = 'red'
        ctx.lineWidth = 10 / scale
        ctx.beginPath()
        if (eye.side === 'left') {
          ctx.moveTo(-20 / scale, -30 / scale)
          ctx.lineTo(20 / scale, -20 / scale)
        } else {
          ctx.moveTo(20 / scale, -30 / scale)
          ctx.lineTo(-20 / scale, -20 / scale)
        }
        ctx.closePath()
        ctx.stroke()
      })
    }
    const renderMouth = mouth => {
      var scale = mouth.scale
      ctx.lineCap = 'round'
      rotate(mouth.a, mouth.x, mouth.y, () => {
        // lower semicircle for the smile
        ctx.fillStyle = 'white'
        ctx.strokeStyle = 'black'
        ctx.lineWidth = 3 / scale
        ctx.beginPath()
        ctx.arc(0, -15 / scale, 50 / scale, 0, Math.PI, false)
        ctx.closePath()
        ctx.fill()
        ctx.stroke()
      })
    }
    function render() {
      if (isPlaying(video)) {
        ctx.clearRect(0, 0, canvas.width, canvas.height)
        // copy the current video frame into the hidden buffer, then draw that
        // buffer onto the visible canvas
        ctx2.drawImage(video, 0, 0)
        ctx.drawImage(canvas2, 0, 0)
        if (!face.data) return
        var w = face.data.boundingBox.right - face.data.boundingBox.left
        var h = face.data.boundingBox.bottom - face.data.boundingBox.top
        var scale = 100 / w / 0.5 // i.e. 200 / w: drawn sizes are divided by this, so features stay proportional to the face width
        // ease the cartoon head towards the detected bounding box
        TweenMax.to(face.head, smoothness, {
          x: face.data.boundingBox.x + (w / 2),
          y: face.data.boundingBox.y + (h / 2),
          r: w / 2,
          scale
        })
        // the gist assumes the detector lists the left eye first; note that
        // newer drafts of the Shape Detection API expose an array (`locations`)
        // rather than a single `location` per landmark
        face.data.landmarks.forEach((landmark, i) => {
          let loc = landmark.location
          var r = 20
          if (landmark.type === 'eye' && i === 0) {
            TweenMax.to(face.eye1, smoothness, {
              x: loc.x,
              y: loc.y,
              side: 'left',
              a: at.angle(face.eye1, face.eye2), // tilt the eyes along the eye line
              r,
              scale
            })
          }
          if (landmark.type === 'eye' && i === 1) {
            TweenMax.to(face.eye2, smoothness, {
              x: loc.x,
              y: loc.y,
              side: 'right',
              a: at.angle(face.eye1, face.eye2),
              r,
              scale
            })
          }
          if (landmark.type === 'mouth') {
            TweenMax.to(face.mouth, smoothness, {
              x: loc.x,
              y: loc.y,
              a: at.angle(face.eye1, face.eye2), // keep the mouth level with the eyes
              scale
            })
          }
        })
        renderHead(face.head)
        renderEye(face.eye1)
        renderEye(face.eye2)
        renderMouth(face.mouth)
      }
    }
    // draw loop, synced to the display refresh rate
    ;(function loop() {
      render()
      requestAnimationFrame(loop)
    })()
  </script>
</body>
</html>
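To try it: enable the flag mentioned above, open the page in Chrome from a secure context (https or localhost), allow camera access, and press Play.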