@agusibrahim
Created July 8, 2025 02:42
detection
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Roboflow + Replit Example</title>
<!-- Load inferencejs, the library that loads and runs our computer vision model in the browser -->
<script src="https://cdn.jsdelivr.net/npm/inferencejs"></script>
<link rel="stylesheet" href="/static/styles.css" />
<script>
// Replace the key below with your Roboflow publishable key
var publishable_key = "rf_dwNi6VPD8Eh9nzPQaUfRa4YyO4z2";
// Change these values to set your model name, version, and the
// confidence threshold that must be met for the model to return
// a prediction.
// We recommend setting a low CONFIDENCE_THRESHOLD value while testing
// and filtering out predictions by confidence level using the
//"Prediction Confidence %" filter
const CONFIDENCE_THRESHOLD = 0.1;
const MODEL_NAME = "indonesia-id-card-and-face-xoztw";
const MODEL_VERSION = 1;
</script>
</head>
<body>
<main>
<h1>Roboflow + Replit Web Example 🚀</h1>
<p id="loading">Loading...</p>
<!-- The canvas below is required -->
<section class="infer-widget">
<canvas width="640" height="480" id="video_canvas"></canvas>
</section>
<section id="settings">
<label for="confidence">Prediction Confidence %</label><br>
<input type="range" min="1" max="100" value="60" class="slider" id="confidence">
</section>
<section class="links">
<a href="https://blog.roboflow.com/getting-started-with-roboflow/" class="styled-button">Build a Custom Model</a>
<a href="https://universe.roboflow.com" class="styled-button styled-button-turqouise">Explore 50k+
Models and Datasets</a>
</section>
<!-- Load our application logic -->
<script src="/static/script.js"></script>
</main>
</body>
</html>
// Application logic (loaded by index.html above as /static/script.js).
// To display predictions, this app has:
// 1. A video element that shows the feed from the user's webcam
// 2. A canvas that sits over the video and shows predictions
// When the page loads, the user is asked for webcam permission.
// After permission is granted, the model initializes and starts making predictions.
// On the first prediction, an initialization step in detectFrame()
// prepares the canvas on which predictions are drawn.
var bounding_box_colors = {};
var user_confidence = 0.6;
// Update the colors in this list to set the bounding box colors
var color_choices = [
"#C7FC00",
"#FF00FF",
"#8622FF",
"#FE0056",
"#00FFCE",
"#FF8000",
"#00B7EB",
"#FFFF00",
"#0E7AFE",
"#FFABAB",
"#0000FF",
"#CCCCCC",
];
var canvas_painted = false;
var canvas = document.getElementById("video_canvas");
var ctx = canvas.getContext("2d");
const inferEngine = new inferencejs.InferenceEngine();
var modelWorkerId = null;
var video;
function detectFrame() {
  // On first run, position the canvas over the video.
  // On every run, run inference on the current video frame and
  // draw bounding boxes for the returned predictions on the canvas.
  if (!modelWorkerId) return requestAnimationFrame(detectFrame);

  inferEngine.infer(modelWorkerId, new inferencejs.CVImage(video)).then(function(predictions) {
    if (!canvas_painted) {
      var video_start = document.getElementById("video1");
      // Position the canvas directly over the video element
      canvas.style.top = video_start.offsetTop + "px";
      canvas.style.left = video_start.offsetLeft + "px";
      canvas.style.position = "absolute";
      video_start.style.display = "block";
      canvas.style.display = "block";
      canvas_painted = true;

      var loading = document.getElementById("loading");
      loading.style.display = "none";
    }
    requestAnimationFrame(detectFrame);

    ctx.clearRect(0, 0, canvas.width, canvas.height);
    if (video) {
      drawBoundingBoxes(predictions, ctx);
    }
  });
}
function drawBoundingBoxes(predictions, ctx) {
  // For each prediction, choose or assign a bounding box color, then
  // convert the center-based bbox coordinates to a top-left origin so
  // the box is drawn exactly around the prediction.
  // If you want to do anything else with predictions, start from this
  // function. For example, you could display them on the web page,
  // check off items on a list, or store predictions somewhere
  // (see the sketch after this function).
  for (var i = 0; i < predictions.length; i++) {
    var prediction = predictions[i];
    var confidence = prediction.confidence;

    // Skip predictions below the user-selected confidence threshold
    if (confidence < user_confidence) {
      continue;
    }

    if (prediction.class in bounding_box_colors) {
      ctx.strokeStyle = bounding_box_colors[prediction.class];
    } else {
      var color = color_choices[Math.floor(Math.random() * color_choices.length)];
      ctx.strokeStyle = color;
      // Remove the color from the list so each class gets a unique color
      color_choices.splice(color_choices.indexOf(color), 1);
      bounding_box_colors[prediction.class] = color;
    }

    // Predictions report the box center; convert to top-left coordinates
    var x = prediction.bbox.x - prediction.bbox.width / 2;
    var y = prediction.bbox.y - prediction.bbox.height / 2;
    var width = prediction.bbox.width;
    var height = prediction.bbox.height;

    ctx.lineWidth = 4;
    ctx.strokeRect(x, y, width, height);

    ctx.fillStyle = ctx.strokeStyle;
    ctx.font = "25px Arial";
    ctx.fillText(prediction.class + " " + Math.round(confidence * 100) + "%", x, y - 10);
  }
}
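// A minimal sketch of doing something with predictions besides drawing them:
// append each prediction above the user's threshold to a list on the page.
// This assumes a hypothetical <ul id="predictions_list"> element, which the
// index.html above does not include; add one if you want this output.
function listPredictions(predictions) {
  var list = document.getElementById("predictions_list");
  if (!list) return; // the element is optional; skip if it isn't on the page
  list.innerHTML = "";
  for (var i = 0; i < predictions.length; i++) {
    if (predictions[i].confidence < user_confidence) continue;
    var item = document.createElement("li");
    item.textContent = predictions[i].class + " (" + Math.round(predictions[i].confidence * 100) + "%)";
    list.appendChild(item);
  }
}
// To use it, call listPredictions(predictions) next to
// drawBoundingBoxes(predictions, ctx) inside detectFrame().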
function webcamInference() {
  // Ask for webcam permission, then run the main application.
  var loading = document.getElementById("loading");
  loading.style.display = "block";

  navigator.mediaDevices
    .getUserMedia({ video: { facingMode: "environment" } })
    .then(function(stream) {
      video = document.createElement("video");
      video.srcObject = stream;
      video.id = "video1";
      // Hide the video until the stream is ready
      video.style.display = "none";
      video.setAttribute("playsinline", "");
      document.getElementById("video_canvas").after(video);

      video.onloadedmetadata = function() {
        video.play();
      };

      // Once the video is playing, size the video and canvas.
      // The canvas keeps the native video resolution so prediction
      // coordinates map 1:1, while both are displayed at 640x480.
      video.onplay = function() {
        var height = video.videoHeight;
        var width = video.videoWidth;

        video.width = width;
        video.height = height;
        video.style.width = 640 + "px";
        video.style.height = 480 + "px";

        canvas.width = width;
        canvas.height = height;
        canvas.style.width = 640 + "px";
        canvas.style.height = 480 + "px";

        document.getElementById("video_canvas").style.display = "block";
      };

      // Load the Roboflow model using the publishable_key, model name,
      // and version set in index.html
      inferEngine.startWorker(MODEL_NAME, MODEL_VERSION, publishable_key, [{ scoreThreshold: CONFIDENCE_THRESHOLD }])
        .then((id) => {
          modelWorkerId = id;
          // Start the inference loop
          detectFrame();
        });
    })
    .catch(function(err) {
      console.log(err);
    });
}
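// A minimal sketch of releasing the webcam when you are done, assuming you
// keep a reference to the MediaStream returned by getUserMedia (the code
// above does not; you would need to store `stream` in a wider-scoped
// variable). Stopping every track turns off the camera and frees the device.
function stopWebcam(stream) {
  if (!stream) return;
  stream.getTracks().forEach(function(track) {
    track.stop();
  });
}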
function changeConfidence() {
  user_confidence = document.getElementById("confidence").value / 100;
}
document.getElementById("confidence").addEventListener("input", changeConfidence);
webcamInference();