Skip to content

Instantly share code, notes, and snippets.

@sylefeb
Last active March 12, 2024 17:09
Show Gist options
  • Select an option

  • Save sylefeb/75a68ed071f0d36dfed2f0d4fad53b4f to your computer and use it in GitHub Desktop.

Select an option

Save sylefeb/75a68ed071f0d36dfed2f0d4fad53b4f to your computer and use it in GitHub Desktop.
WebGPU test 1
/**
 * WebGPU compute demo: box-filter smoothing (moving average).
 *
 * Uploads N f32 values, runs one compute invocation per output element
 * that averages a (2K+1)-wide window with edge clamping, copies the
 * result to a mappable buffer, reads it back, and logs it.
 *
 * Browser-only: requires WebGPU support; calls fail() and returns
 * otherwise. No return value; output goes to the console.
 */
async function main()
{
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }
  const module = device.createShaderModule({
    label: 'compute module',
    code: `
struct Params {
N : u32,
K : u32,
}
@group(0) @binding(0) var<uniform> param : Params;
@group(0) @binding(1) var<storage, read_write> data_out: array<f32>;
@group(0) @binding(2) var<storage, read> data_in : array<f32>;
@compute @workgroup_size(1) fn computeSomething(
@builtin(global_invocation_id) gid: vec3<u32>,
@builtin(local_invocation_id) lid: vec3<u32>
) {
var sum : f32 = 0.0;
for (var i = - i32(param.K) ; i <= i32(param.K); i++) {
sum = sum + data_in[ min(i32(param.N)-1,max(0,i32(gid.x) + i)) ];
}
data_out[gid.x] = sum / f32(2*param.K+1);
}
`,
  });
  // Explicit bind group layout matching the three shader bindings above.
  const piplayout_group0 = device.createBindGroupLayout({
    label: 'piplayout_group0',
    entries: [
      {
        binding: 0,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'uniform' },
      },
      {
        binding: 1,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'storage' },
      },
      {
        binding: 2,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'read-only-storage' },
      },
    ],
  });
  const pipeline = device.createComputePipeline({
    label: 'compute pipeline',
    layout: device.createPipelineLayout({
      bindGroupLayouts: [piplayout_group0],
    }),
    compute: {
      module,
      entryPoint: 'computeSomething',
    },
  });
  // FIX: N and K were implicit globals (no declaration); declare them locally.
  const N = 256 + 1; // pad the array with a first entry (unused)
  const K = 5;       // half-width of the averaging window
  // GPU output buffer (one f32 per non-padding element)
  const data_out = device.createBuffer({
    label: 'data_out buffer',
    size: (N - 1 /*padding*/) * 4 /*f32*/,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
  });
  // GPU input buffer
  const data_in = device.createBuffer({
    label: 'data_in buffer',
    size: N * 4 /*f32*/,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
  });
  // place some initial data inside
  const input = new Float32Array(N);
  input[0] = 0; // padding
  // FIX: the original loop ran n < N, so its last write (input[1 + (N-1)]
  // === input[N]) was out of range and silently dropped — typed arrays
  // ignore out-of-bounds writes. Fill exactly indices 1..N-1.
  for (let n = 0; n < N - 1; ++n) {
    input[1 + n] = n % 2;
  }
  // NOTE(review): the shader indexes data_in[gid.x + i] with no +1 offset
  // for the padding slot, so the clamped index 0 reads the padding value.
  // Confirm the intended padding scheme (compare the pairwise-sum example,
  // whose shader does use gid.x + 1).
  // copy our input data from CPU to GPU
  device.queue.writeBuffer(data_in, 0, input);
  // create a buffer on the GPU to get a copy of the results
  const resultBuffer = device.createBuffer({
    label: 'result buffer',
    size: (N - 1 /*padding*/) * 4 /*f32*/,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  // create a buffer for the uniform parameters (two u32: N, K)
  const params = device.createBuffer({
    label: 'uniform buffer',
    size: 2 * 4 /*u32*/,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  console.log(params);
  device.queue.writeBuffer(params, 0, new Uint32Array([N, K]));
  // Setup a bindGroup to tell the shader which
  // buffers to use for the computation
  const bindGroup = device.createBindGroup({
    label: 'bindGroup',
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: params } },
      { binding: 1, resource: { buffer: data_out } },
      { binding: 2, resource: { buffer: data_in } },
    ],
  });
  // Encode commands to do the computation
  const encoder = device.createCommandEncoder({
    label: 'encoder',
  });
  const pass = encoder.beginComputePass({
    label: 'compute pass',
  });
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.dispatchWorkgroups(N - 1 /*padding*/); // one invocation per output element
  pass.end();
  // Encode a command to copy the results to a mappable buffer.
  encoder.copyBufferToBuffer(data_out, 0, resultBuffer, 0, resultBuffer.size);
  // Finish encoding and submit the commands
  const commandBuffer = encoder.finish();
  device.queue.submit([commandBuffer]);
  // Read the results (mapAsync resolves once the GPU work is done)
  await resultBuffer.mapAsync(GPUMapMode.READ);
  const result = new Float32Array(resultBuffer.getMappedRange().slice());
  resultBuffer.unmap();
  console.log('result', result);
}
/** Report an unrecoverable startup problem to the user via a blocking alert. */
function fail(message) {
  // eslint-disable-next-line no-alert
  alert(message);
}

main();
/**
 * Minimal WebGPU compute demo: each invocation writes its own global
 * invocation index (as f32) into an output buffer, which is copied to a
 * mappable buffer, read back, and logged.
 *
 * Browser-only: requires WebGPU support; calls fail() and returns
 * otherwise. No return value; output goes to the console.
 */
async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }
  const module = device.createShaderModule({
    label: 'compute module',
    code: `
struct Params {
N : u32,
}
@group(0) @binding(0) var<uniform> param: Params;
@group(0) @binding(1) var<storage, read_write> data_out: array<f32>;
@compute @workgroup_size(1) fn computeSomething(
@builtin(global_invocation_id) gid: vec3<u32>,
@builtin(local_invocation_id) lid: vec3<u32>
) {
data_out[gid.x] = f32(gid.x);
}
`,
  });
  // Explicit bind group layout matching the two shader bindings above.
  const piplayout_group0 = device.createBindGroupLayout({
    label: 'piplayout_group0',
    entries: [
      {
        binding: 0,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'uniform' },
      },
      {
        binding: 1,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'storage' },
      },
    ],
  });
  const pipeline = device.createComputePipeline({
    label: 'compute pipeline',
    layout: device.createPipelineLayout({
      bindGroupLayouts: [piplayout_group0],
    }),
    compute: {
      module,
      entryPoint: 'computeSomething',
    },
  });
  // FIX: N was an implicit global with no declaration (and no semicolon).
  const N = 64;
  // GPU output buffer
  const data_out = device.createBuffer({
    label: 'data_out buffer',
    size: N * 4 /*f32*/,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
  });
  // create a buffer on the GPU to get a copy of the results
  const resultBuffer = device.createBuffer({
    label: 'result buffer',
    size: N * 4 /*f32*/,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  // create a buffer for the uniform parameters (one u32: N)
  const params = device.createBuffer({
    label: 'uniform buffer',
    size: 4,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  console.log(params);
  device.queue.writeBuffer(params, 0, new Uint32Array([N]));
  // Setup a bindGroup to tell the shader which
  // buffers to use for the computation
  const bindGroup = device.createBindGroup({
    label: 'bindGroup',
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: params } },
      { binding: 1, resource: { buffer: data_out } },
    ],
  });
  // Encode commands to do the computation
  const encoder = device.createCommandEncoder({
    label: 'encoder',
  });
  const pass = encoder.beginComputePass({
    label: 'compute pass',
  });
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.dispatchWorkgroups(N); // one invocation per output element
  pass.end();
  // Encode a command to copy the results to a mappable buffer.
  encoder.copyBufferToBuffer(data_out, 0, resultBuffer, 0, resultBuffer.size);
  // Finish encoding and submit the commands
  const commandBuffer = encoder.finish();
  device.queue.submit([commandBuffer]);
  // Read the results (mapAsync resolves once the GPU work is done)
  await resultBuffer.mapAsync(GPUMapMode.READ);
  const result = new Float32Array(resultBuffer.getMappedRange().slice());
  resultBuffer.unmap();
  console.log('result', result);
}
/** Report an unrecoverable startup problem to the user via a blocking alert. */
function fail(message) {
  // eslint-disable-next-line no-alert
  alert(message);
}

main();
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<!-- FIX: a <title> element is required in <head> for valid HTML. -->
<title>WebGPU compute test</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1 id="header"></h1>
<!-- Loads the WebGPU demo script; it runs main() on load. -->
<script src="class_1.js"></script>
</body>
</html>
/**
 * WebGPU compute demo: pairwise sums over a padded input array.
 *
 * The input buffer holds N f32 values where index 0 is an unused padding
 * slot; the shader computes data_out[j] = data_in[j+1] + data_in[j], so
 * the padding guarantees the i-1 access never goes below index 0. The
 * result is copied to a mappable buffer, read back, and logged.
 *
 * Browser-only: requires WebGPU support; calls fail() and returns
 * otherwise. No return value; output goes to the console.
 */
async function main()
{
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }
  const module = device.createShaderModule({
    label: 'compute module',
    code: `
struct Params {
N : u32,
}
@group(0) @binding(0) var<uniform> param : Params;
@group(0) @binding(1) var<storage, read_write> data_out: array<f32>;
@group(0) @binding(2) var<storage, read> data_in : array<f32>;
@compute @workgroup_size(1) fn computeSomething(
@builtin(global_invocation_id) gid: vec3<u32>,
@builtin(local_invocation_id) lid: vec3<u32>
) {
var i : u32 = gid.x + 1;
data_out[gid.x] = data_in[i] + data_in[i-1 /*always >= 0*/];
}
`,
  });
  // Explicit bind group layout matching the three shader bindings above.
  const piplayout_group0 = device.createBindGroupLayout({
    label: 'piplayout_group0',
    entries: [
      {
        binding: 0,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'uniform' },
      },
      {
        binding: 1,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'storage' },
      },
      {
        binding: 2,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'read-only-storage' },
      },
    ],
  });
  const pipeline = device.createComputePipeline({
    label: 'compute pipeline',
    layout: device.createPipelineLayout({
      bindGroupLayouts: [piplayout_group0],
    }),
    compute: {
      module,
      entryPoint: 'computeSomething',
    },
  });
  // FIX: N was an implicit global with no declaration (and no semicolon).
  const N = 64 + 1; // pad the array with a first entry (unused)
  // GPU output buffer (one f32 per non-padding element)
  const data_out = device.createBuffer({
    label: 'data_out buffer',
    size: (N - 1 /*padding*/) * 4 /*f32*/,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
  });
  // GPU input buffer
  const data_in = device.createBuffer({
    label: 'data_in buffer',
    size: N * 4 /*f32*/,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
  });
  // place some initial data inside
  const input = new Float32Array(N);
  input[0] = 0; // padding
  // FIX: the original loop ran n < N, so its last write (input[1 + (N-1)]
  // === input[N]) was out of range and silently dropped — typed arrays
  // ignore out-of-bounds writes — leaving the last data element at 0.
  // Fill exactly indices 1..N-1.
  for (let n = 0; n < N - 1; ++n) {
    input[1 + n] = n;
  }
  // copy our input data from CPU to GPU
  device.queue.writeBuffer(data_in, 0, input);
  // create a buffer on the GPU to get a copy of the results
  const resultBuffer = device.createBuffer({
    label: 'result buffer',
    size: (N - 1 /*padding*/) * 4 /*f32*/,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  // create a buffer for the uniform parameters (one u32: N)
  const params = device.createBuffer({
    label: 'uniform buffer',
    size: 4,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  console.log(params);
  device.queue.writeBuffer(params, 0, new Uint32Array([N]));
  // Setup a bindGroup to tell the shader which
  // buffers to use for the computation
  const bindGroup = device.createBindGroup({
    label: 'bindGroup',
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: params } },
      { binding: 1, resource: { buffer: data_out } },
      { binding: 2, resource: { buffer: data_in } },
    ],
  });
  // Encode commands to do the computation
  const encoder = device.createCommandEncoder({
    label: 'encoder',
  });
  const pass = encoder.beginComputePass({
    label: 'compute pass',
  });
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.dispatchWorkgroups(N - 1 /*padding*/); // one invocation per output element
  pass.end();
  // Encode a command to copy the results to a mappable buffer.
  encoder.copyBufferToBuffer(data_out, 0, resultBuffer, 0, resultBuffer.size);
  // Finish encoding and submit the commands
  const commandBuffer = encoder.finish();
  device.queue.submit([commandBuffer]);
  // Read the results (mapAsync resolves once the GPU work is done)
  await resultBuffer.mapAsync(GPUMapMode.READ);
  const result = new Float32Array(resultBuffer.getMappedRange().slice());
  resultBuffer.unmap();
  console.log('result', result);
}
/** Report an unrecoverable startup problem to the user via a blocking alert. */
function fail(message) {
  // eslint-disable-next-line no-alert
  alert(message);
}

main();
/* Demo page style: dark full-viewport page with amber text, content
   centered in both axes via flexbox. */
body {
background: black;
color: #fcbe24;
padding: 0 24px;
margin: 0;
height: 100vh;
/* center the page's single <h1> horizontally and vertically */
display: flex;
justify-content: center;
align-items: center;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.