Skip to content

Instantly share code, notes, and snippets.

@amatiasq
Last active June 16, 2018 13:40
Show Gist options
  • Save amatiasq/077105993c8bdbe7b4268b6035ba98ba to your computer and use it in GitHub Desktop.
Pose detection with TensorFlow
<!-- Demo: estimate a human pose on an uploaded image with TensorFlow.js PoseNet. -->
<!DOCTYPE html>
<html>
<head>
<title>Pose detection</title>
<!-- TensorFlow.js runtime and the PoseNet model, loaded as browser globals from unpkg. -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<script src="https://unpkg.com/@tensorflow-models/posenet"></script>
<style>
/* Render the preview image semi-transparent. */
#preview {
opacity: 0.25;
}
</style>
</head>
<body>
<!-- Image picker ("Subir imagen" = "Upload image"); accepts png/gif/jpeg. -->
<input type="file" id="upload" value="Subir imagen" accept="image/x-png,image/gif,image/jpeg">
<img id="preview" />
<!-- Hidden until a pose is estimated; the image plus skeleton overlay are drawn here. -->
<canvas style="display: none"></canvas>
<script>
const $img = document.querySelector('#preview');
const $upload = document.querySelector('#upload');
const $canvas = document.querySelector('canvas');
const context = $canvas.getContext('2d');

// PoseNet inference parameters: image scaled to 50% before inference,
// output stride 16, input not mirrored.
const scaleFactor = 0.5;
const outputStride = 16;
const flipHorizontal = false;

// Assigned once the model finishes loading; used by onImageLoad.
let net;

// Load the PoseNet model, then wire up the upload -> preview -> estimate pipeline.
(async () => {
  net = await posenet.load();
  // NOTE(review): the <input> is never initially `disabled` in the markup,
  // so this is a no-op — consider adding `disabled` to the element.
  $upload.removeAttribute('disabled');
  $upload.addEventListener('change', onInputChange);
  $img.addEventListener('load', onImageLoad);
})().catch((err) => console.error('Failed to initialize PoseNet', err));
// ^ the IIFE's promise was previously floating: a model-load failure
//   would be silently swallowed.
/**
 * Reads the selected image file as a data: URL and shows it in the preview
 * <img>. The image's own `load` event then triggers pose estimation
 * (see onImageLoad).
 * @param {Event} event - `change` event from the file input.
 */
async function onInputChange(event) {
  const { files } = event.target;
  if (!files || !files.length) {
    console.warn('No files selected');
    return;
  }

  // Hide any previous result while the new image loads.
  $canvas.style.display = 'none';

  const [file] = files;
  const reader = new FileReader();

  reader.addEventListener('load', () => {
    $img.style.display = 'block';
    $img.src = reader.result;
  });
  // Previously a failed read was silently ignored, leaving the UI stuck.
  reader.addEventListener('error', () => {
    console.error('Could not read the selected file', reader.error);
  });

  reader.readAsDataURL(file);
}
/**
 * Runs PoseNet on the previewed image, then draws the image onto the canvas
 * with confident keypoints (score > 0.4) and skeleton segments overlaid.
 * Replaces the previous 12 copy-pasted `line(...)` calls with a data-driven
 * table of keypoint pairs (same pairs, same order).
 */
async function onImageLoad() {
  // Pairs of keypoint names connected by skeleton segments.
  const skeleton = [
    ['leftShoulder', 'rightShoulder'],
    ['leftHip', 'rightHip'],
    ['leftElbow', 'leftWrist'],
    ['leftShoulder', 'leftElbow'],
    ['leftShoulder', 'leftHip'],
    ['leftHip', 'leftKnee'],
    ['leftKnee', 'leftAnkle'],
    ['rightElbow', 'rightWrist'],
    ['rightShoulder', 'rightElbow'],
    ['rightShoulder', 'rightHip'],
    ['rightHip', 'rightKnee'],
    ['rightKnee', 'rightAnkle'],
  ];

  const parts = {};
  const result = await net.estimateSinglePose(
    $img,
    scaleFactor,
    flipHorizontal,
    outputStride,
  );

  // Swap the <img> for a same-sized canvas holding the image + overlay.
  $canvas.style.display = 'block';
  $canvas.width = $img.width;
  $canvas.height = $img.height;
  $img.style.display = 'none';
  context.drawImage($img, 0, 0);

  console.log('result', result);

  // Mark confident keypoints and remember their positions by part name.
  for (const { part, position, score } of result.keypoints) {
    if (score > 0.4) {
      circle(position);
      parts[part] = position;
    }
  }

  // line() is a no-op when either endpoint was below the confidence cut.
  for (const [from, to] of skeleton) {
    line(parts[from], parts[to]);
  }
}
/** Paint a small filled red dot marking a detected keypoint. */
function circle(position) {
  const RADIUS = 5;
  const FULL_TURN = 2 * Math.PI;
  context.beginPath();
  context.arc(position.x, position.y, RADIUS, 0, FULL_TURN);
  context.closePath();
  context.fillStyle = 'red';
  context.fill();
}
/**
 * Connect two keypoints with a red segment. Does nothing when either
 * endpoint is missing (its keypoint fell below the confidence threshold).
 */
function line(from, to) {
  const bothPresent = Boolean(from) && Boolean(to);
  if (!bothPresent) {
    return;
  }
  context.beginPath();
  context.moveTo(from.x, from.y);
  context.lineTo(to.x, to.y);
  context.closePath();
  context.strokeStyle = 'red';
  context.stroke();
}
</script>
</body>
</html>
<!-- Demo: live pose estimation on the webcam feed with TensorFlow.js PoseNet. -->
<!DOCTYPE html>
<html>
<head>
<title>Pose detection</title>
<!-- TensorFlow.js runtime and the PoseNet model, loaded as browser globals from unpkg. -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<script src="https://unpkg.com/@tensorflow-models/posenet"></script>
<style>
body {
margin: 0;
background-color: black;
}
/* Stack the canvas overlay directly on top of the video feed. */
video, canvas {
position: absolute;
top: 0;
left: 0;
}
</style>
</head>
<body>
<!-- Both are resized to the viewport by the init script below. -->
<video width="300" height="300"></video>
<canvas></canvas>
<script>
const $video = document.querySelector('video');
const $canvas = document.querySelector('canvas');
const context = $canvas.getContext('2d');

// PoseNet inference parameters: frame scaled to 50% before inference,
// output stride 16, input not mirrored.
const scaleFactor = 0.5;
const outputStride = 16;
const flipHorizontal = false;

// Set to true (by clicking anywhere) to end the capture loop.
let stop = false;
// Assigned once the model finishes loading; used by capture().
let net;

// Load PoseNet, attach the webcam stream, size the video/canvas pair to the
// viewport, then start the per-frame capture loop.
(async () => {
  net = await posenet.load();
  $video.srcObject = await getUserVideo();
  $video.width = $canvas.width = window.innerWidth;
  $video.height = $canvas.height = window.innerHeight;
  await $video.play(); // play() returns a promise that can reject
  requestAnimationFrame(capture);
  document.addEventListener('click', () => stop = true);
})().catch((err) => console.error('Failed to initialize pose capture', err));
// ^ the IIFE's promise was previously floating: model-load failures and
//   denied camera permissions were silently swallowed.
/**
 * Request a front-facing camera stream (video only, no audio).
 * Uses the standard promise-based `navigator.mediaDevices.getUserMedia`;
 * the callback-style `navigator.getUserMedia` used previously is deprecated
 * and absent from modern browsers, which also makes the manual
 * `new Promise` wrapper unnecessary.
 * @returns {Promise<MediaStream>}
 */
async function getUserVideo() {
  return navigator.mediaDevices.getUserMedia({
    audio: false,
    video: { facingMode: 'user' },
  });
}
/**
 * Estimate the pose on the current video frame, draw confident keypoints
 * (score > 0.4) and skeleton segments onto the overlay canvas, then schedule
 * the next frame unless the user clicked to stop.
 * Replaces the previous 12 copy-pasted `line(...)` calls with a data-driven
 * table of keypoint pairs (same pairs, same order) and drops a line of
 * commented-out dead code.
 */
async function capture() {
  // Pairs of keypoint names connected by skeleton segments.
  const skeleton = [
    ['leftShoulder', 'rightShoulder'],
    ['leftHip', 'rightHip'],
    ['leftElbow', 'leftWrist'],
    ['leftShoulder', 'leftElbow'],
    ['leftShoulder', 'leftHip'],
    ['leftHip', 'leftKnee'],
    ['leftKnee', 'leftAnkle'],
    ['rightElbow', 'rightWrist'],
    ['rightShoulder', 'rightElbow'],
    ['rightShoulder', 'rightHip'],
    ['rightHip', 'rightKnee'],
    ['rightKnee', 'rightAnkle'],
  ];

  const parts = {};
  const result = await net.estimateSinglePose(
    $video,
    scaleFactor,
    flipHorizontal,
    outputStride,
  );

  // Wipe the previous frame's overlay before drawing the new one.
  context.clearRect(0, 0, $canvas.width, $canvas.height);
  console.log('result', result);

  // Mark confident keypoints and remember their positions by part name.
  for (const { part, position, score } of result.keypoints) {
    if (score > 0.4) {
      circle(position);
      parts[part] = position;
    }
  }

  // line() is a no-op when either endpoint was below the confidence cut.
  for (const [from, to] of skeleton) {
    line(parts[from], parts[to]);
  }

  if (!stop) {
    requestAnimationFrame(capture);
  }
}
/** Paint a small filled red dot marking a detected keypoint. */
function circle(position) {
  const RADIUS = 5;
  const FULL_TURN = 2 * Math.PI;
  context.beginPath();
  context.arc(position.x, position.y, RADIUS, 0, FULL_TURN);
  context.closePath();
  context.fillStyle = 'red';
  context.fill();
}
/**
 * Connect two keypoints with a red segment. Does nothing when either
 * endpoint is missing (its keypoint fell below the confidence threshold).
 */
function line(from, to) {
  const bothPresent = Boolean(from) && Boolean(to);
  if (!bothPresent) {
    return;
  }
  context.beginPath();
  context.moveTo(from.x, from.y);
  context.lineTo(to.x, to.y);
  context.closePath();
  context.strokeStyle = 'red';
  context.stroke();
}
</script>
</body>
</html>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment