Created
April 10, 2025 20:20
-
-
Save greggman/976e81130668d089b719aad08411f903 to your computer and use it in GitHub Desktop.
WebGPU Cube (with depth texture visualization as texture_depth_2d with linear filtering with textureGather)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* Make the page itself borderless and full-height so the canvas can fill it. */
html,
body {
  height: 100%;
  margin: 0;
}

/* The WebGPU render target: fill the viewport; block display removes the
   inline-element baseline gap under the canvas. */
canvas {
  display: block;
  width: 100%;
  height: 100%;
}

/* Full-viewport error overlay (starts hidden via an inline style in the HTML;
   fail() in index.js reveals it and fills in the message text). */
#fail {
  position: fixed;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;

  /* center the message both ways */
  display: flex;
  align-items: center;
  justify-content: center;

  background: red;
  color: white;
  font-family: monospace;
  font-size: 16pt;
  font-weight: bold;
  text-align: center;
}
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<!-- WebGPU render target; stretched to the viewport by CSS and kept at the
     right resolution by a ResizeObserver in index.js. -->
<canvas></canvas>
<!-- Error overlay: hidden by default; fail() in index.js clears the inline
     display and writes the message into .content. -->
<div id="fail" style="display: none">
  <div class="content"></div>
</div>
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// WebGPU Cube | |
/* global GPUBufferUsage */ | |
/* global GPUTextureUsage */ | |
import {vec3, mat3, mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js'; | |
import GUI from 'https://muigui.org/dist/0.x/muigui.module.js'; | |
/**
 * Renders a rotating lit cube into a depth texture (via the normal depth
 * attachment), then visualizes that depth texture in the lower-left quadrant
 * of the canvas, manually bilinearly filtered with textureGather (depth
 * textures can not use filtering samplers directly).
 */
async function main() {
  // WebGPU may be unavailable; both requests are optional-chained so we can
  // show a readable failure message instead of throwing.
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need webgpu');
    return;
  }
  // Surface validation / out-of-memory errors that would otherwise be silent.
  device.addEventListener('uncapturederror', e => console.error(e.error.message));

  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  // fix: getPreferredCanvasFormat() takes no arguments per the WebGPU spec;
  // the adapter that was previously passed here was silently ignored.
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    alphaMode: "opaque",
    format: presentationFormat,
    device,
  });

  // Shader for the rotating, directionally-lit, textured cube.
  const litShaderModule = device.createShaderModule({code: `
  struct VSUniforms {
    worldViewProjection: mat4x4f,
    worldInverseTranspose: mat4x4f,
  };
  @group(0) @binding(0) var<uniform> vsUniforms: VSUniforms;

  struct MyVSInput {
      @location(0) position: vec4f,
      @location(1) normal: vec3f,
      @location(2) texcoord: vec2f,
  };

  struct MyVSOutput {
    @builtin(position) position: vec4f,
    @location(0) normal: vec3f,
    @location(1) texcoord: vec2f,
  };

  @vertex
  fn myVSMain(v: MyVSInput) -> MyVSOutput {
    var vsOut: MyVSOutput;
    vsOut.position = vsUniforms.worldViewProjection * v.position;
    vsOut.normal = (vsUniforms.worldInverseTranspose * vec4f(v.normal, 0.0)).xyz;
    vsOut.texcoord = v.texcoord;
    return vsOut;
  }

  struct FSUniforms {
    lightDirection: vec3f,
  };

  @group(0) @binding(1) var<uniform> fsUniforms: FSUniforms;
  @group(0) @binding(2) var diffuseSampler: sampler;
  @group(0) @binding(3) var diffuseTexture: texture_2d<f32>;

  @fragment
  fn myFSMain(v: MyVSOutput) -> @location(0) vec4f {
    var diffuseColor = textureSample(diffuseTexture, diffuseSampler, v.texcoord);
    var a_normal = normalize(v.normal);
    var l = dot(a_normal, fsUniforms.lightDirection) * 0.5 + 0.5;
    return vec4f(diffuseColor.rgb * l, diffuseColor.a);
  }
  `});

  // Shader for the depth-visualization quad. Depth textures only allow
  // non-filtering samplers, so bilinear filtering is done by hand via
  // textureGather + two mixes.
  const planeShaderModule = device.createShaderModule({code: `
  struct Uniforms {
    matrix: mat4x4f,
    texMatrix: mat3x3f,
  };

  struct MyVSOutput {
    @builtin(position) position: vec4f,
    @location(1) texcoord: vec2f,
  };

  @vertex
  fn myVSMain(@builtin(vertex_index) vNdx: u32) -> MyVSOutput {
    let points = array(vec2f(0, 0), vec2f(0, 1), vec2f(1, 0), vec2f(1, 1));
    var vsOut: MyVSOutput;
    let p = points[vNdx];
    vsOut.position = uni.matrix * vec4f(p, 0, 1);
    vsOut.texcoord = p;
    return vsOut;
  }

  @group(0) @binding(0) var<uniform> uni: Uniforms;
  @group(0) @binding(1) var s: sampler;
  @group(0) @binding(2) var diffuseTexture: texture_depth_2d;

  fn linearlyFilterDepthTexture(t: texture_depth_2d, normalizedTextureCoord: vec2f) -> f32 {
    let tSize = textureDimensions(t);
    let texelCoord = normalizedTextureCoord * vec2f(tSize) - 0.5;
    // load the 4 texels
    let texel = textureGather(t, s, normalizedTextureCoord);
    // +-+-+
    // |A|B|
    // +-+-+
    // |R|G|
    // +-+-+
    // blend horizontally
    let top = mix(texel.a, texel.b, fract(texelCoord.x));
    let bot = mix(texel.r, texel.g, fract(texelCoord.x));
    // blend vertically
    return mix(top, bot, fract(texelCoord.y));
  }

  @fragment
  fn myFSMain(v: MyVSOutput) -> @location(0) vec4f {
    let d = linearlyFilterDepthTexture(diffuseTexture, (uni.texMatrix * vec3f(v.texcoord, 1)).xy);
    return vec4f(d, d, d, 1);
  }
  `});

  // Uniform buffers + CPU-side staging arrays for the cube shader.
  const vUniformBufferSize = 2 * 16 * 4; // 2 mat4s * 16 floats per mat * 4 bytes per float
  const fUniformBufferSize = 3 * 4 + 4;  // 1 vec3 * 3 floats per vec3 * 4 bytes per float + pad
  const vsUniformBuffer = device.createBuffer({
    size: vUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const fsUniformBuffer = device.createBuffer({
    size: fUniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const vsUniformValues = new Float32Array(vUniformBufferSize / 4); // 2 mat4s
  const worldViewProjection = vsUniformValues.subarray(0, 16);
  const worldInverseTranspose = vsUniformValues.subarray(16, 32);
  const fsUniformValues = new Float32Array(fUniformBufferSize / 4); // 1 vec3
  const lightDirection = fsUniformValues.subarray(0, 3);

  // Uniforms for the depth-visualization quad: a mat4 clip-space matrix plus
  // a mat3 texture matrix (mat3x3f occupies 12 floats due to WGSL alignment).
  const planeUniformBuffer = device.createBuffer({
    size: (16 + 12) * 4,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const planeUniformValues = new Float32Array(planeUniformBuffer.size / 4);
  const matrix = planeUniformValues.subarray(0, 16);
  const texMatrix = planeUniformValues.subarray(16, 28);

  // Create a GPU buffer pre-filled with `data` (typed array) for `usage`.
  function createBuffer(device, data, usage) {
    const buffer = device.createBuffer({
      size: data.byteLength,
      usage,
      mappedAtCreation: true,
    });
    const dst = new data.constructor(buffer.getMappedRange());
    dst.set(data);
    buffer.unmap();
    return buffer;
  }

  // Cube geometry: 24 vertices (4 per face, so normals/texcoords stay flat),
  // 36 indices (2 triangles per face).
  const positions = new Float32Array([1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1]);
  const normals = new Float32Array([1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1]);
  const texcoords = new Float32Array([1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]);
  const indices = new Uint16Array([0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23]);
  const positionBuffer = createBuffer(device, positions, GPUBufferUsage.VERTEX);
  const normalBuffer = createBuffer(device, normals, GPUBufferUsage.VERTEX);
  const texcoordBuffer = createBuffer(device, texcoords, GPUBufferUsage.VERTEX);
  const indicesBuffer = createBuffer(device, indices, GPUBufferUsage.INDEX);

  // A tiny 2x2 checker-ish texture for the cube faces.
  const tex = device.createTexture({
    size: [2, 2, 1],
    format: 'rgba8unorm',
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST,
  });
  device.queue.writeTexture(
      { texture: tex },
      new Uint8Array([
        255, 255, 128, 255,
        128, 255, 255, 255,
        255, 128, 255, 255,
        255, 128, 128, 255,
      ]),
      { bytesPerRow: 8, rowsPerImage: 2 },
      { width: 2, height: 2 },
  );
  const sampler = device.createSampler({
    magFilter: 'nearest',
    minFilter: 'nearest',
  });

  // Pipeline for the cube. layout: 'auto' derives the bind group layout from
  // the shader, so cubeBindGroup below uses getBindGroupLayout(0).
  const litPipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module: litShaderModule,
      buffers: [
        // position
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 0, offset: 0, format: 'float32x3'},
          ],
        },
        // normals
        {
          arrayStride: 3 * 4, // 3 floats, 4 bytes each
          attributes: [
            {shaderLocation: 1, offset: 0, format: 'float32x3'},
          ],
        },
        // texcoords
        {
          arrayStride: 2 * 4, // 2 floats, 4 bytes each
          attributes: [
            {shaderLocation: 2, offset: 0, format: 'float32x2',},
          ],
        },
      ],
    },
    fragment: {
      module: litShaderModule,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'triangle-list',
      cullMode: 'back',
    },
    depthStencil: {
      depthWriteEnabled: true,
      depthCompare: 'less',
      format: 'depth24plus',
    },
  });

  const depthSampler = device.createSampler({
    magFilter: 'nearest',
    minFilter: 'nearest',
  });

  // Explicit layout for the depth-visualization pipeline: depth textures may
  // only be bound with a 'non-filtering' sampler and sampleType: 'depth',
  // which layout: 'auto' would not infer for textureGather usage.
  const planeBindGroup0Layout = device.createBindGroupLayout({
    entries: [
      {
        binding: 0,
        visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.VERTEX,
        buffer: {
          type: "uniform",
        },
      },
      {
        binding: 1,
        visibility: GPUShaderStage.FRAGMENT,
        sampler: {
          type: "non-filtering",
        },
      },
      {
        binding: 2,
        visibility: GPUShaderStage.FRAGMENT,
        texture: {
          sampleType: "depth",
        },
      },
    ],
  });
  const planePipelineLayout = device.createPipelineLayout({
    bindGroupLayouts: [ planeBindGroup0Layout ],
  });
  const planePipeline = device.createRenderPipeline({
    layout: planePipelineLayout,
    vertex: {
      module: planeShaderModule,
    },
    fragment: {
      module: planeShaderModule,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'triangle-strip',
    },
  });

  const cubeBindGroup = device.createBindGroup({
    layout: litPipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: vsUniformBuffer } },
      { binding: 1, resource: { buffer: fsUniformBuffer } },
      { binding: 2, resource: sampler },
      { binding: 3, resource: tex.createView() },
    ],
  });

  const renderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
    depthStencilAttachment: {
      // view: undefined, // Assigned later
      depthClearValue: 1.0,
      depthLoadOp: 'clear',
      depthStoreOp: 'store',
    },
  };

  // We need a separate render pass because we're rendering to the depth
  // texture in the first pass.
  const depthRenderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        loadOp: 'load',
        storeOp: 'store',
      },
    ],
  };

  // GUI-adjustable zoom for the depth visualization's texture matrix.
  const s = {
    zoom: 1,
  };
  const gui = new GUI();
  gui.add(s, 'zoom', 1, 10);

  // (Re)created whenever the canvas size changes.
  let depthTexture;
  let planeBindGroup;

  function render(time) {
    time *= 0.001; // ms -> seconds

    // Cube matrices + light.
    const projection = mat4.perspective(45 * Math.PI / 180, canvas.clientWidth / canvas.clientHeight, 3, 8);
    const eye = [1, 3, -4];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    const view = mat4.lookAt(eye, target, up);
    const viewProjection = mat4.multiply(projection, view);
    const world = mat4.rotationY(time);
    mat4.transpose(mat4.inverse(world), worldInverseTranspose);
    mat4.multiply(viewProjection, world, worldViewProjection);
    vec3.normalize([1, 8, -10], lightDirection);
    device.queue.writeBuffer(vsUniformBuffer, 0, vsUniformValues);
    device.queue.writeBuffer(fsUniformBuffer, 0, fsUniformValues);

    // fix: stray double semicolon removed.
    const canvasTexture = context.getCurrentTexture();
    // Keep the depth texture the same size as the canvas; rebuild the bind
    // group when it changes since it references the texture view.
    if (!depthTexture || depthTexture.width !== canvasTexture.width || depthTexture.height !== canvasTexture.height) {
      depthTexture?.destroy();
      depthTexture = device.createTexture({
        size: canvasTexture, // canvasTexture has width, height, and depthOrArrayLayers properties
        format: 'depth24plus',
        usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
      });
      planeBindGroup = device.createBindGroup({
        layout: planePipeline.getBindGroupLayout(0),
        entries: [
          { binding: 0, resource: { buffer: planeUniformBuffer } },
          { binding: 1, resource: depthSampler },
          { binding: 2, resource: depthTexture.createView() },
        ],
      });
    }

    // fix: reuse canvasTexture instead of a redundant second
    // context.getCurrentTexture() call (it returns the same texture this frame).
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
    renderPassDescriptor.depthStencilAttachment.view = depthTexture.createView();
    depthRenderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    const encoder = device.createCommandEncoder();

    // Pass 1: draw the cube (fills the depth texture as a side effect).
    {
      const pass = encoder.beginRenderPass(renderPassDescriptor);
      pass.setPipeline(litPipeline);
      pass.setBindGroup(0, cubeBindGroup);
      pass.setVertexBuffer(0, positionBuffer);
      pass.setVertexBuffer(1, normalBuffer);
      pass.setVertexBuffer(2, texcoordBuffer);
      pass.setIndexBuffer(indicesBuffer, 'uint16');
      pass.drawIndexed(indices.length);
      pass.end();
    }

    // Place the visualization quad in the lower-left quarter of the canvas
    // (pixel-space ortho matrix) and zoom into the center of the depth
    // texture via the 2D texture matrix.
    mat4.ortho(0, canvas.width, canvas.height, 0, -1, 1, matrix);
    mat4.translate(matrix, [0, canvas.height * 3 / 4, 0], matrix);
    mat4.scale(matrix, [canvas.width / 4, canvas.height / 4, 1], matrix);
    mat3.translation([0.5, 0.5], texMatrix);
    mat3.scale(texMatrix, [1 / s.zoom, 1 / s.zoom], texMatrix);
    mat3.translate(texMatrix, [-0.5, -0.5], texMatrix);
    device.queue.writeBuffer(planeUniformBuffer, 0, planeUniformValues);

    // Pass 2: visualize the depth texture written by pass 1.
    {
      const pass = encoder.beginRenderPass(depthRenderPassDescriptor);
      pass.setPipeline(planePipeline);
      pass.setBindGroup(0, planeBindGroup);
      pass.draw(4);
      pass.end();
    }

    device.queue.submit([encoder.finish()]);
    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  // Keep the canvas backing-store resolution in sync with its displayed size,
  // clamped to the device's maximum texture dimension.
  const observer = new ResizeObserver(entries => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
  });
  observer.observe(canvas);
}
/**
 * Reveal the full-screen #fail overlay and show `msg` inside its
 * .content element.
 */
function fail(msg) {
  const overlay = document.querySelector('#fail');
  overlay.querySelector('.content').textContent = msg;
  overlay.style.display = ''; // clear the inline display:none from the HTML
}
main(); | |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{"name":"WebGPU Cube (with depth texture visualization as texture_depth_2d with linear filtering with textureGather)","settings":{},"filenames":["index.html","index.css","index.js"]} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment