WebGPU Simple Textured Quad - Import Canvas (speed check, update 100 textures, no mips)

view on jsgist

@import url(https://webgpufundamentals.org/webgpu/resources/webgpu-lesson.css);
html, body {
  margin: 0;        /* remove the default margin */
  height: 100%;     /* make the html,body fill the page */
}
canvas {
  display: block;   /* make the canvas act like a block */
  width: 100%;      /* make the canvas fill its container */
  height: 100%;
}
#info {
  position: absolute;
  left: 0;
  top: 0;
  background-color: black;
  color: white;
  margin: 0;
  padding: 0.5em;
}
<canvas></canvas>
<pre id="info"></pre>
// WebGPU Simple Textured Quad - Import Canvas
// from https://webgpufundamentals.org/webgpu/webgpu-simple-textured-quad-import-canvas.html
// see https://webgpufundamentals.org/webgpu/lessons/webgpu-utils.html#wgpu-matrix
import {mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js';
import GUI from 'https://webgpufundamentals.org/3rdparty/muigui-0.x.module.js';
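// Speed check: every frame, redraw a 2048x2048 2D canvas and copy it into
// `copyCount` of 100 textures (no mip generation), then draw one quad per texture.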
async function main() {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
fail('need a browser that supports WebGPU');
return;
}
// Get a WebGPU context from the canvas and configure it
const canvas = document.querySelector('canvas');
const context = canvas.getContext('webgpu');
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
});
const module = device.createShaderModule({
label: 'our hardcoded textured quad shaders',
code: `
struct OurVertexShaderOutput {
@builtin(position) position: vec4f,
@location(0) texcoord: vec2f,
};
struct Uniforms {
matrix: mat4x4f,
};
@group(0) @binding(2) var<uniform> uni: Uniforms;
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> OurVertexShaderOutput {
let pos = array(
vec3f(-0.5, 0.5,-0.5),
vec3f( 0.5, 0.5,-0.5),
vec3f(-0.5, 0.5, 0.5),
vec3f(-0.5, 0.5, 0.5),
vec3f( 0.5, 0.5,-0.5),
vec3f( 0.5, 0.5, 0.5),
);
var vsOutput: OurVertexShaderOutput;
let xyz = pos[vertexIndex];
vsOutput.position = uni.matrix * vec4f(xyz, 1.0);
vsOutput.texcoord = (xyz.xz + 0.5) * vec2f(1, 50);
return vsOutput;
}
@group(0) @binding(0) var ourSampler: sampler;
@group(0) @binding(1) var ourTexture: texture_2d<f32>;
@fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
return textureSample(ourTexture, ourSampler, fsInput.texcoord);
}
`,
});
const pipeline = device.createRenderPipeline({
label: 'hardcoded textured quad pipeline',
layout: 'auto',
vertex: {
module,
},
fragment: {
module,
targets: [{ format: presentationFormat }],
},
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth24plus',
},
});
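// Mip levels needed to reach 1x1: 1 + floor(log2(largest dimension)).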
const numMipLevels = (...sizes) => {
const maxSize = Math.max(...sizes);
return 1 + Math.log2(maxSize) | 0;
};
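// Copy a canvas/image/video into mip level 0 of the texture, then regenerate
// the rest of the mip chain if the texture has more than one level.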
function copySourceToTexture(device, texture, source, {flipY} = {}) {
device.queue.copyExternalImageToTexture(
{ source, flipY, },
{ texture },
{ width: source.width, height: source.height },
);
if (texture.mipLevelCount > 1) {
generateMips(device, texture);
}
}
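// Create an rgba8unorm texture sized to the source. RENDER_ATTACHMENT is included
// because generateMips renders into each mip level.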
function createTextureFromSource(device, source, options = {}) {
const texture = device.createTexture({
format: 'rgba8unorm',
mipLevelCount: options.mips ? numMipLevels(source.width, source.height) : 1,
size: [source.width, source.height],
usage: GPUTextureUsage.TEXTURE_BINDING |
GPUTextureUsage.COPY_DST |
GPUTextureUsage.RENDER_ATTACHMENT,
});
copySourceToTexture(device, texture, source, options);
return texture;
}
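// Generate mipmaps on the GPU by rendering each mip level from the level above it.
// The shader module, sampler, and per-format pipelines are created lazily and cached.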
const generateMips = (() => {
let sampler;
let module;
const pipelineByFormat = {};
return function generateMips(device, texture) {
if (!module) {
module = device.createShaderModule({
label: 'textured quad shaders for mip level generation',
code: `
struct VSOutput {
@builtin(position) position: vec4f,
@location(0) texcoord: vec2f,
};
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> VSOutput {
let pos = array(
vec2f( 0.0, 0.0), // center
vec2f( 1.0, 0.0), // right, center
vec2f( 0.0, 1.0), // center, top
// 2nd triangle
vec2f( 0.0, 1.0), // center, top
vec2f( 1.0, 0.0), // right, center
vec2f( 1.0, 1.0), // right, top
);
var vsOutput: VSOutput;
let xy = pos[vertexIndex];
vsOutput.position = vec4f(xy * 2.0 - 1.0, 0.0, 1.0);
vsOutput.texcoord = vec2f(xy.x, 1.0 - xy.y);
return vsOutput;
}
@group(0) @binding(0) var ourSampler: sampler;
@group(0) @binding(1) var ourTexture: texture_2d<f32>;
@fragment fn fs(fsInput: VSOutput) -> @location(0) vec4f {
return textureSample(ourTexture, ourSampler, fsInput.texcoord);
}
`,
});
sampler = device.createSampler({
minFilter: 'linear',
});
}
if (!pipelineByFormat[texture.format]) {
pipelineByFormat[texture.format] = device.createRenderPipeline({
label: 'mip level generator pipeline',
layout: 'auto',
vertex: {
module,
},
fragment: {
module,
targets: [{ format: texture.format }],
},
});
}
const pipeline = pipelineByFormat[texture.format];
const encoder = device.createCommandEncoder({
label: 'mip gen encoder',
});
let width = texture.width;
let height = texture.height;
let baseMipLevel = 0;
while (width > 1 || height > 1) {
width = Math.max(1, width / 2 | 0);
height = Math.max(1, height / 2 | 0);
const bindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: sampler },
{ binding: 1, resource: texture.createView({baseMipLevel, mipLevelCount: 1}) },
],
});
++baseMipLevel;
const renderPassDescriptor = {
label: 'our basic canvas renderPass',
colorAttachments: [
{
view: texture.createView({baseMipLevel, mipLevelCount: 1}),
loadOp: 'clear',
storeOp: 'store',
},
],
};
const pass = encoder.beginRenderPass(renderPassDescriptor);
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.draw(6); // call our vertex shader 6 times
pass.end();
}
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
};
})();
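// A 2048x2048 2D canvas is the upload source. update2DCanvas redraws it with the
// current time so its contents change every frame.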
const size = 2048;
const half = size / 2;
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = size;
ctx.canvas.height = size;
const hsl = (h, s, l) => `hsl(${h * 360 | 0}, ${s * 100}%, ${l * 100 | 0}%)`;
function update2DCanvas(time) {
time *= 0.0001;
ctx.fillStyle = hsl(time + 0.5, 1, 0.125);
ctx.fillRect(0, 0, size, size);
ctx.font = `${size * 0.5 | 0}px monospace`;
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
ctx.save();
ctx.translate(half, half);
const num = 20;
const t = (time * 1000).toFixed(0);
ctx.scale(0.1, 0.1);
for (let i = 0; i < num; ++i) {
ctx.fillStyle = hsl(i / num * 0.4 + time, 1, i % 2 * 0.5);
ctx.fillText(t, 0, 0);
ctx.translate(0, size / num * 0.5);
ctx.scale(1.1, 1.1);
}
ctx.restore();
}
update2DCanvas(0);
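// Create 100 textures, each initialized from the canvas, with no mip chain.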
const maxTextures = 100;
const textures = new Array(maxTextures).fill(0).map(_ => createTextureFromSource(device, ctx.canvas, {mips: false}));
// offsets to the various uniform values in float32 indices
const kMatrixOffset = 0;
const objectInfos = [];
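// One sampler, uniform buffer, and bind group per texture; the sampler's
// mag/min/mipmap filters vary with the low 3 bits of i.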
for (let j = 0; j < maxTextures; j += 8) {
for (let i = 0; i < 8; ++i) {
if (i + j >= maxTextures) {
break;
}
const sampler = device.createSampler({
addressModeU: 'repeat',
addressModeV: 'repeat',
magFilter: (i & 1) ? 'linear' : 'nearest',
minFilter: (i & 2) ? 'linear' : 'nearest',
mipmapFilter: (i & 4) ? 'linear' : 'nearest',
});
// create a buffer for the uniform values
const uniformBufferSize =
16 * 4; // matrix is 16 32bit floats (4bytes each)
const uniformBuffer = device.createBuffer({
label: 'uniforms for quad',
size: uniformBufferSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// create a typedarray to hold the values for the uniforms in JavaScript
const uniformValues = new Float32Array(uniformBufferSize / 4);
const matrix = uniformValues.subarray(kMatrixOffset, 16);
const bindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: sampler },
{ binding: 1, resource: textures[i + j].createView() },
{ binding: 2, resource: { buffer: uniformBuffer }},
],
});
// Save the data we need to render this object.
objectInfos.push({
bindGroup,
matrix,
uniformValues,
uniformBuffer,
});
}
}
const renderPassDescriptor = {
label: 'our basic canvas renderPass',
colorAttachments: [
{
// view: <- to be filled out when we render
clearValue: [0.3, 0.3, 0.3, 1],
loadOp: 'clear',
storeOp: 'store',
},
],
depthStencilAttachment: {
// view: undefined, // Assigned later
depthClearValue: 1.0,
depthLoadOp: 'clear',
depthStoreOp: 'store',
},
};
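// Rolling average of the last 60 frame times, displayed as fps in the #info element.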
let ticks = [{time: 0, deltaTime: 1/60}];
let deltaAvg = 0;
const info = document.querySelector('#info');
const settings = {
copyCount: 40,
};
const gui = new GUI();
gui.add(settings, 'copyCount', 1, maxTextures, 1);
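// The depth texture is (re)created in render() whenever the canvas size changes.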
let depthTexture;
function render(time) {
let last = ticks[ticks.length - 1];
let deltaCur = time - last.time;
deltaAvg = ((deltaAvg * ticks.length) + deltaCur) / (ticks.length + 1);
ticks.push({time: time, deltaTime: deltaCur});
if (ticks.length > 60) {
deltaAvg = ((deltaAvg * ticks.length) - ticks[0].deltaTime) / (ticks.length - 1);
ticks.shift();
}
info.textContent = `fps: ${(1000 / deltaAvg).toFixed(0)}`;
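// The work being measured: redraw the 2D canvas and upload it to the first copyCount textures.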
update2DCanvas(time);
for (let i = 0; i < settings.copyCount; ++i) {
copySourceToTexture(device, textures[i], ctx.canvas);
}
const fov = 60 * Math.PI / 180; // 60 degrees in radians
const aspect = canvas.clientWidth / canvas.clientHeight;
const zNear = 1;
const zFar = 2000;
const projectionMatrix = mat4.perspective(fov, aspect, zNear, zFar);
const cameraPosition = [0, 0, 2];
const up = [0, 1, 0];
const target = [0, 0, 0];
const viewMatrix = mat4.lookAt(cameraPosition, target, up);
const viewProjectionMatrix = mat4.multiply(projectionMatrix, viewMatrix);
// Get the current texture from the canvas context and
// set it as the texture to render to.
const canvasTexture = context.getCurrentTexture();
renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
if (!depthTexture || depthTexture.width !== canvasTexture.width || depthTexture.height !== canvasTexture.height) {
depthTexture?.destroy();
depthTexture = device.createTexture({
size: [canvasTexture.width, canvasTexture.height],
format: 'depth24plus',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
renderPassDescriptor.depthStencilAttachment.view = depthTexture.createView();
}
const encoder = device.createCommandEncoder({
label: 'render quad encoder',
});
const pass = encoder.beginRenderPass(renderPassDescriptor);
pass.setPipeline(pipeline);
const ss = [
{ x: -1, y: 1, zRot: 0, /*magFilter: gl.NEAREST, minFilter: gl.NEAREST, */ },
{ x: 0, y: 1, zRot: 0, /*magFilter: gl.LINEAR, minFilter: gl.LINEAR, */ },
{ x: 1, y: 1, zRot: 0, /*magFilter: gl.LINEAR, minFilter: gl.NEAREST_MIPMAP_NEAREST, */ },
{ x: -1, y: -1, zRot: 1, /*magFilter: gl.LINEAR, minFilter: gl.LINEAR_MIPMAP_NEAREST, */ },
{ x: 0, y: -1, zRot: 1, /*magFilter: gl.LINEAR, minFilter: gl.NEAREST_MIPMAP_LINEAR, */ },
{ x: 1, y: -1, zRot: 1, /*magFilter: gl.LINEAR, minFilter: gl.LINEAR_MIPMAP_LINEAR, */ },
];
const xSpacing = 1.2;
const ySpacing = 0.7;
const zDepth = 50;
for (let j = 0; j < objectInfos.length; j += 6) {
ss.forEach(function(s, i) {
if (j + i >= objectInfos.length) {
return;
}
const {bindGroup, matrix, uniformBuffer, uniformValues} = objectInfos[j + i];
mat4.translate(viewProjectionMatrix, [s.x * xSpacing + j * 0.1, s.y * ySpacing, -zDepth * 0.5], matrix);
mat4.rotateZ(matrix, s.zRot * Math.PI, matrix);
mat4.scale(matrix, [1, 1, zDepth], matrix);
// copy the values from JavaScript to the GPU
device.queue.writeBuffer(uniformBuffer, 0, uniformValues);
pass.setBindGroup(0, bindGroup);
pass.draw(6); // call our vertex shader 6 times
});
}
pass.end();
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
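// Keep the canvas's drawing buffer matched to its displayed size, clamped to the
// device's maximum 2D texture dimension.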
const observer = new ResizeObserver(entries => {
for (const entry of entries) {
const canvas = entry.target;
const width = entry.contentBoxSize[0].inlineSize;
const height = entry.contentBoxSize[0].blockSize;
canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
}
});
observer.observe(canvas);
let texNdx = 0;  // clicking the canvas cycles a texture index (unused elsewhere in this speed-check variant)
canvas.addEventListener('click', () => {
texNdx = (texNdx + 1) % textures.length;
});
}
function fail(msg) {
// eslint-disable-next-line no-alert
alert(msg);
}
main();
{"name":"WebGPU Simple Textured Quad - Import Canvas (speed check, update 100 textures, no mips)","settings":{},"filenames":["index.html","index.css","index.js"]}