@sebbbi
Created October 21, 2024 10:14
macOS Dawn WebGPU triangle (messy, mashed-together code). Works with the October 2024 WebGPU API.
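Build sketch (not part of the gist; assumptions: the source is saved as main.mm, Dawn was built as a single library named libwebgpu_dawn, and DAWN_DIR points at its install prefix; adjust to your setup):

clang++ -std=c++20 main.mm -I$DAWN_DIR/include -L$DAWN_DIR/lib -lwebgpu_dawn -framework Cocoa -framework QuartzCore -framework Metal -o webgpu_triangle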
#import <Cocoa/Cocoa.h>
#import <QuartzCore/CAMetalLayer.h>
#import <Metal/Metal.h>
#include <cstdlib>
#include <cstring> // strlen, used by createStringView below
#include <iostream>
#include <webgpu/webgpu.h>
// Custom delegate class to handle window close events
@interface WindowDelegate : NSObject <NSWindowDelegate>
@property (nonatomic, assign) BOOL shouldExit;
@end
@implementation WindowDelegate
// This method is called when the window is about to close
- (void)windowWillClose:(NSNotification *)notification {
std::cout << "Window is closing..." << std::endl;
self.shouldExit = YES;
}
@end
@interface MetalView : NSView
@property (nonatomic, strong) CAMetalLayer *metalLayer;
@end
@implementation MetalView
- (instancetype)initWithFrame:(NSRect)frameRect {
self = [super initWithFrame:frameRect];
if (self) {
[self setupMetalLayer];
}
return self;
}
- (void)setupMetalLayer {
// Create the Metal layer
self.wantsLayer = YES;
self.layer = [CAMetalLayer layer];
self.metalLayer = (CAMetalLayer *)self.layer;
// Set up Metal device for the layer
id<MTLDevice> metalDevice = MTLCreateSystemDefaultDevice();
self.metalLayer.device = metalDevice;
// Set the pixel format
self.metalLayer.pixelFormat = MTLPixelFormatBGRA8Unorm;
// Set other properties (optional)
self.metalLayer.contentsScale = [NSScreen mainScreen].backingScaleFactor;
self.metalLayer.framebufferOnly = YES;
}
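// Called by AppKit when the view needs a backing layer (wantsLayer == YES);
// returning a CAMetalLayer keeps the backing layer Metal-compatible.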
- (CALayer *)makeBackingLayer {
return [CAMetalLayer layer];
}
@end
static char const triangle_vert_wgsl[] = R"(
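// Hard-coded triangle: positions are indexed by vertex_index and the color is
// derived from the position, so no vertex buffers are needed.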
struct VertexOut {
@location(0) vCol : vec3<f32>,
@builtin(position) Position : vec4<f32>
}
@vertex
fn main(
@builtin(vertex_index) VertexIndex : u32
) -> VertexOut {
var pos = array<vec2f, 3>(
vec2(0.0, 0.5),
vec2(-0.5, -0.5),
vec2(0.5, -0.5)
);
var output : VertexOut;
output.Position = vec4(pos[VertexIndex], 0.0, 1.0);
output.vCol = vec3(pos[VertexIndex] + vec2(0.5, 0.5), 1.0);
return output;
}
)";
static char const triangle_frag_wgsl[] = R"(
@fragment
fn main(@location(0) vCol : vec3<f32>) -> @location(0) vec4<f32> {
return vec4<f32>(vCol, 1.0);
}
)";
WGPUStringView createStringView(const char* str) {
WGPUStringView stringView;
stringView.length = strlen(str);
stringView.data = str;
return stringView;
}
int main(int argc, const char * argv[]) {
std::cout << "START" << std::endl;
@autoreleasepool {
NSApplication *app = [NSApplication sharedApplication];
// Create the window
NSRect frame = NSMakeRect(0, 0, 800, 600);
NSWindow *window = [[NSWindow alloc] initWithContentRect:frame
styleMask:(NSWindowStyleMaskTitled |
NSWindowStyleMaskClosable |
NSWindowStyleMaskResizable)
backing:NSBackingStoreBuffered
defer:NO];
[window setTitle:@"Metal Window"];
[window makeKeyAndOrderFront:nil];
// Create and set MetalView
MetalView *metalView = [[MetalView alloc] initWithFrame:frame];
[window setContentView:metalView];
// Create and set the window delegate
WindowDelegate *delegate = [[WindowDelegate alloc] init];
[window setDelegate:delegate];
// WebGPU...
WGPUInstanceDescriptor instanceDescriptor{};
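// timedWaitAnyEnable allows timed waits on futures (wgpuInstanceWaitAny);
// the code below never waits on a future, so this is optional here.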
instanceDescriptor.features.timedWaitAnyEnable = true;
WGPUDawnTogglesDescriptor toggles = {}; // zero-init so unused pointers are null
toggles.chain.next = nullptr;
toggles.chain.sType = WGPUSType_DawnTogglesDescriptor;
toggles.disabledToggleCount = 0;
toggles.enabledToggleCount = 1;
const char* toggleName = "enable_immediate_error_handling";
toggles.enabledToggles = &toggleName;
instanceDescriptor.nextInChain = &toggles.chain;
WGPUInstance instance = wgpuCreateInstance(&instanceDescriptor);
if (instance == nullptr) {
std::cerr << "Instance creation failed!\n";
return EXIT_FAILURE;
}
// Adapter
WGPURequestAdapterOptions options = {};
WGPUAdapter adapter = nullptr;
auto onAdapterRequestEnded = [](WGPURequestAdapterStatus status,
WGPUAdapter adapter, const char *message,
void *userdata) {
if (status != WGPURequestAdapterStatus_Success) {
std::cerr << "Failed to get an adapter: " << message << std::endl;
return;
}
*static_cast<WGPUAdapter *>(userdata) = adapter;
};
wgpuInstanceRequestAdapter(instance, &options, onAdapterRequestEnded, &adapter);
// TODO: NEEDS ASYNC WAIT ON EMSCRIPTEN HERE!
// With native Dawn this legacy entry point delivers the callback before the code
// below runs (which the rest of the program relies on); a browser/Emscripten build
// would need a real asynchronous wait here instead.
if (adapter == nullptr) {
std::cerr << "Adapter request failed!\n";
return EXIT_FAILURE;
}
WGPUSurfaceSourceMetalLayer metalLayerDesc = {
.chain = {
.next = NULL,
.sType = WGPUSType_SurfaceSourceMetalLayer,
},
.layer = metalView.metalLayer,
};
WGPUSurfaceDescriptor surfaceDesc = {
.nextInChain = &metalLayerDesc.chain,
};
WGPUSurface windowSurface = wgpuInstanceCreateSurface(instance, &surfaceDesc);
WGPUAdapterInfo info{};
WGPUStatus status = wgpuAdapterGetInfo(adapter, &info);
if (status != WGPUStatus_Success) {
std::cerr << "Failed to get adapter info!\n";
return EXIT_FAILURE;
}
std::cout << "VendorID: " << std::hex << info.vendorID << std::dec << "\n";
std::cout << "Vendor: " << info.vendor << "\n";
std::cout << "Architecture: " << info.architecture << "\n";
std::cout << "DeviceID: " << std::hex << info.deviceID << std::dec << "\n";
std::cout << "Name: " << info.device << "\n";
std::cout << "Driver description: " << info.description << "\n";
// Device
WGPUDeviceDescriptor deviceDesc = {};
deviceDesc.nextInChain = nullptr;
deviceDesc.requiredFeatureCount = 0; // we do not require any specific feature
deviceDesc.requiredLimits = nullptr; // we do not require any specific limit
deviceDesc.defaultQueue.nextInChain = nullptr;
deviceDesc.deviceLostCallback = [](WGPUDeviceLostReason reason, char const* message, void* /* pUserData */) {
std::cout << "Device lost: reason " << reason;
if (message) std::cout << " (" << message << ")";
std::cout << std::endl;
};
auto onDeviceRequestEnded = [](WGPURequestDeviceStatus status, WGPUDevice device, char const * message, void * pUserData) {
WGPUDevice& userData = *reinterpret_cast<WGPUDevice*>(pUserData);
if (status == WGPURequestDeviceStatus_Success) {
userData = device;
} else {
std::cout << "Could not get WebGPU device: " << message << std::endl;
}
};
WGPUDevice device = nullptr;
wgpuAdapterRequestDevice(
adapter,
&deviceDesc,
onDeviceRequestEnded,
(void*)&device
);
// TODO: NEEDS ASYNC WAIT ON EMSCRIPTEN HERE!
if (device == nullptr) {
std::cerr << "Device request failed!\n";
return EXIT_FAILURE;
}
std::cout << "Got device: " << device << std::endl;
auto onDeviceError = [](WGPUErrorType type, char const* message, void* /* pUserData */) {
std::cout << "Uncaptured device error: type " << type;
if (message) std::cout << " (" << message << ")";
std::cout << std::endl;
};
wgpuDeviceSetUncapturedErrorCallback(device, onDeviceError, nullptr /* pUserData */);
// Queue
WGPUQueue queue = wgpuDeviceGetQueue(device);
auto onQueueWorkDone = [](WGPUQueueWorkDoneStatus status, void* /* pUserData */) {
std::cout << "Queued work finished with status: " << status << std::endl;
};
wgpuQueueOnSubmittedWorkDone(queue, onQueueWorkDone, nullptr /* pUserData */);
WGPUSurfaceConfiguration config = {};
config.nextInChain = nullptr;
config.width = 800;
config.height = 600;
WGPUTextureFormat surfaceFormat = WGPUTextureFormat_BGRA8Unorm;
config.format = surfaceFormat;
config.usage = WGPUTextureUsage_RenderAttachment;
config.device = device;
config.presentMode = WGPUPresentMode_Fifo;
wgpuSurfaceConfigure(windowSurface, &config);
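// Note: the surface is configured once for the initial 800x600 window; handling
// window resizes would require calling wgpuSurfaceConfigure again with the new size.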
// Pipeline layout (empty) and shader modules
WGPUPipelineLayoutDescriptor layoutDesc = {};
layoutDesc.bindGroupLayoutCount = 0;
WGPUPipelineLayout pipelineLayout = wgpuDeviceCreatePipelineLayout(device, &layoutDesc);
WGPUShaderModule vertMod;
{
WGPUShaderSourceWGSL wgsl = {};
wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
wgsl.code = createStringView(triangle_vert_wgsl);
WGPUShaderModuleDescriptor desc = {};
desc.nextInChain = reinterpret_cast<WGPUChainedStruct*>(&wgsl);
vertMod = wgpuDeviceCreateShaderModule(device, &desc);
}
WGPUShaderModule fragMod;
{
WGPUShaderSourceWGSL wgsl = {};
wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
wgsl.code = createStringView(triangle_frag_wgsl);
WGPUShaderModuleDescriptor desc = {};
desc.nextInChain = reinterpret_cast<WGPUChainedStruct*>(&wgsl);
fragMod = wgpuDeviceCreateShaderModule(device, &desc);
}
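// Render pipeline: the hard-coded triangle needs no vertex buffers or bind groups,
// just the two shader modules, a triangle-list topology and a single BGRA8 color
// target with no blending or depth buffer.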
WGPURenderPipelineDescriptor desc = {};
desc.layout = pipelineLayout;
desc.vertex.module = vertMod;
desc.vertex.entryPoint = createStringView("main");
WGPUColorTargetState colorDesc = {};
colorDesc.format = WGPUTextureFormat_BGRA8Unorm;
colorDesc.blend = nullptr;
colorDesc.writeMask = WGPUColorWriteMask_All;
WGPUFragmentState fragmentState = {};
fragmentState.module = fragMod;
fragmentState.entryPoint = createStringView("main");
fragmentState.targetCount = 1;
fragmentState.targets = &colorDesc;
desc.fragment = &fragmentState;
desc.primitive.topology = WGPUPrimitiveTopology_TriangleList;
desc.multisample.count = 1;
desc.multisample.mask = 0xFFFFFFFF;
WGPURenderPipeline pipeline = wgpuDeviceCreateRenderPipeline(device, &desc);
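// The pipeline holds references to what it needs, so the layout and shader modules
// can be released right away.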
wgpuPipelineLayoutRelease(pipelineLayout);
wgpuShaderModuleRelease(fragMod);
wgpuShaderModuleRelease(vertMod);
// Frame loop
while (!delegate.shouldExit) {
@autoreleasepool {
// Poll Cocoa events
NSEvent *event = [app nextEventMatchingMask:NSEventMaskAny
untilDate:nil
inMode:NSDefaultRunLoopMode
dequeue:YES];
if (event) {
[app sendEvent:event];
[app updateWindows];
}
// WebGPU Draw...
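// Per frame: acquire the surface texture, record one render pass that clears the
// target and draws the triangle, submit the command buffer, then present.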
WGPUSurfaceTexture surfaceTexture;
wgpuSurfaceGetCurrentTexture(windowSurface, &surfaceTexture);
if (surfaceTexture.status == WGPUSurfaceGetCurrentTextureStatus_Success) {
WGPUTextureViewDescriptor viewDescriptor = {}; // zero-init so the label stays null
viewDescriptor.nextInChain = nullptr;
viewDescriptor.format = wgpuTextureGetFormat(surfaceTexture.texture);
viewDescriptor.dimension = WGPUTextureViewDimension_2D;
viewDescriptor.baseMipLevel = 0;
viewDescriptor.mipLevelCount = 1;
viewDescriptor.baseArrayLayer = 0;
viewDescriptor.arrayLayerCount = 1;
viewDescriptor.aspect = WGPUTextureAspect_All;
viewDescriptor.usage = WGPUTextureUsage_RenderAttachment;
WGPUTextureView targetView = wgpuTextureCreateView(surfaceTexture.texture, &viewDescriptor);
WGPUCommandEncoderDescriptor encoderDesc = {};
encoderDesc.nextInChain = nullptr;
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, &encoderDesc);
WGPURenderPassDescriptor renderPassDesc = {};
renderPassDesc.nextInChain = nullptr;
WGPURenderPassColorAttachment renderPassColorAttachment = {};
renderPassColorAttachment.depthSlice = WGPU_DEPTH_SLICE_UNDEFINED;
renderPassColorAttachment.view = targetView;
renderPassColorAttachment.loadOp = WGPULoadOp_Clear;
renderPassColorAttachment.storeOp = WGPUStoreOp_Store;
renderPassColorAttachment.clearValue = WGPUColor{ 0.9, 0.5, 0.3, 1.0 };
renderPassDesc.colorAttachmentCount = 1;
renderPassDesc.colorAttachments = &renderPassColorAttachment;
WGPURenderPassEncoder renderPass = wgpuCommandEncoderBeginRenderPass(encoder, &renderPassDesc);
wgpuRenderPassEncoderSetPipeline(renderPass, pipeline);
wgpuRenderPassEncoderDraw(renderPass, 3, 1, 0, 0);
wgpuRenderPassEncoderEnd(renderPass);
wgpuRenderPassEncoderRelease(renderPass);
WGPUCommandBufferDescriptor cmdBufferDescriptor = {};
cmdBufferDescriptor.nextInChain = nullptr;
WGPUCommandBuffer command = wgpuCommandEncoderFinish(encoder, &cmdBufferDescriptor);
wgpuCommandEncoderRelease(encoder); // release encoder after it's finished
// Finally submit the command queue
wgpuQueueSubmit(queue, 1, &command);
wgpuCommandBufferRelease(command);
wgpuSurfacePresent(windowSurface);
wgpuTextureViewRelease(targetView);
} else {
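// e.g. the surface became outdated/lost (typically after a resize); a more
// complete app would reconfigure the surface here before trying again.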
std::cout << "Surface texture not available..." << std::endl;
}
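// Pump Dawn's deferred work and callbacks (e.g. the queue work-done callback above).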
wgpuDeviceTick(device);
wgpuInstanceProcessEvents(instance);
}
}
wgpuRenderPipelineRelease(pipeline);
wgpuSurfaceUnconfigure(windowSurface);
wgpuSurfaceRelease(windowSurface);
wgpuQueueRelease(queue);
wgpuDeviceRelease(device);
wgpuAdapterRelease(adapter);
wgpuInstanceRelease(instance);
// Exit the application
std::cout << "EXIT" << std::endl;
[app terminate:nil];
}
return 0;
}