
@nickelpro
Last active May 28, 2019 04:25
I hate Vulkan
//Basically this:
//https://github.com/KhronosGroup/Vulkan-Docs/wiki/Synchronization-Examples
//Holy shit this function got out of control
int build_and_copy_buf(
  vk_buffer_t *local_buf,
  void *data,
  VkDeviceSize size,
  VkBufferUsageFlags usage,
  VkAccessFlags dst_access,
  VmaAllocator allocator,
  vk_commandpools_t cpools
) {
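  //Host-visible staging buffer to serve as the transfer source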
  VkBufferCreateInfo buf_info = {
    .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
    .pNext = NULL,
    .flags = 0,
    .size = size,
    .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
    .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    .queueFamilyIndexCount = 0,
    .pQueueFamilyIndices = NULL
  };
  VmaAllocationCreateInfo vma_info = {
    .flags = 0,
    //Guaranteed to be HOST_VISIBLE and HOST_COHERENT
    .usage = VMA_MEMORY_USAGE_CPU_ONLY,
    .requiredFlags = 0,
    .preferredFlags = 0,
    .memoryTypeBits = 0,
    .pool = VK_NULL_HANDLE,
    .pUserData = NULL
  };
  //ToDo: Create a static buffer for these transfers and leave it mapped
  //instead of constantly reallocating and mapping a new one every time we do
  //a transfer. Consider VMA_ALLOCATION_CREATE_MAPPED_BIT
  vk_buffer_t host_buf;
  VkResult err = vmaCreateBuffer(
    allocator,
    &buf_info,
    &vma_info,
    &host_buf.handle,
    &host_buf.alloc,
    NULL
  );
  if(err != VK_SUCCESS) goto borked;
  void *host_mem;
  err = vmaMapMemory(allocator, host_buf.alloc, &host_mem);
  if(err != VK_SUCCESS) {
    //Only the staging buffer exists at this point, clean it up directly
    vmaDestroyBuffer(allocator, host_buf.handle, host_buf.alloc);
    goto borked;
  }
  memcpy(host_mem, data, size);
  vmaUnmapMemory(allocator, host_buf.alloc);
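  //Reuse the same create infos for the device-local destination buffer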
  buf_info.usage = usage | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  vma_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
  err = vmaCreateBuffer(
    allocator,
    &buf_info,
    &vma_info,
    &local_buf->handle,
    &local_buf->alloc,
    NULL
  );
  if(err != VK_SUCCESS) {
    vmaDestroyBuffer(allocator, host_buf.handle, host_buf.alloc);
    goto borked;
  }
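  //Record the copy on a one-shot command buffer from the transfer pool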
  vk_combuf_t xfr_combuf = {
    .dev = cpools.logicdev.handle,
    .q = cpools.logicdev.xfr_q,
    .cpool = cpools.xfr_handle,
  };
  if(get_command_buffer(&xfr_combuf, 1)) {
    err = VK_RESULT_MAX_ENUM;
    goto borked_buffers;
  }
  vkCmdCopyBuffer(
    xfr_combuf.buf,
    host_buf.handle,
    local_buf->handle,
    1,
    &(const VkBufferCopy) {
      .srcOffset = 0,
      .dstOffset = 0,
      .size = size
    }
  );
  VkSemaphore semaphore;
  VkFence fence;
  //Function scope so the error labels at the bottom can free it
  vk_combuf_t gfx_combuf;
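  //One queue family: end and flush the command buffer, nothing else needed.
  //Separate transfer and graphics families: the buffer needs a queue family
  //ownership transfer, a release barrier on the transfer queue paired with
  //an acquire barrier on the graphics queue, per the synchronization
  //examples linked above.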
  if(cpools.logicdev.single_qfam) {
    err = vkEndCommandBuffer(xfr_combuf.buf);
    if(err != VK_SUCCESS) goto borked_xfrbuf;
    if(flush_command_buffer(xfr_combuf)) {
      err = VK_RESULT_MAX_ENUM;
      goto borked_buffers;
    }
  } else {
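    //Release half of the ownership transfer, on the transfer queue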
    vkCmdPipelineBarrier(
      xfr_combuf.buf,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0, 0, NULL, 1,
      &(const VkBufferMemoryBarrier) {
        .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
        .pNext = NULL,
        .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
        .dstAccessMask = 0,
        .srcQueueFamilyIndex = cpools.logicdev.xfr_qfam,
        .dstQueueFamilyIndex = cpools.logicdev.gfx_qfam,
        .buffer = local_buf->handle,
        .offset = 0,
        .size = size
      },
      0, NULL
    );
    err = vkEndCommandBuffer(xfr_combuf.buf);
    if(err != VK_SUCCESS) goto borked_xfrbuf;
    err = vkCreateSemaphore(
      cpools.logicdev.handle,
      &(const VkSemaphoreCreateInfo) {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0
      },
      NULL,
      &semaphore
    );
    if(err != VK_SUCCESS) goto borked_xfrbuf;
    err = vkQueueSubmit(
      cpools.logicdev.xfr_q,
      1,
      &(const VkSubmitInfo) {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = NULL,
        .waitSemaphoreCount = 0,
        .pWaitSemaphores = NULL,
        .pWaitDstStageMask = NULL,
        .commandBufferCount = 1,
        .pCommandBuffers = &xfr_combuf.buf,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &semaphore
      },
      VK_NULL_HANDLE
    );
    if(err != VK_SUCCESS) goto borked_semaphore;
    gfx_combuf = (vk_combuf_t) {
      .dev = cpools.logicdev.handle,
      .q = cpools.logicdev.gfx_q,
      .cpool = cpools.gfx_handle,
    };
    if(get_command_buffer(&gfx_combuf, 1)) {
      err = VK_RESULT_MAX_ENUM;
      goto borked_semaphore;
    }
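    //Acquire half of the ownership transfer, on the graphics queue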
    vkCmdPipelineBarrier(
      gfx_combuf.buf, //The acquire barrier must be recorded on the graphics
                      //queue's command buffer, not the transfer queue's
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
      0, 0, NULL, 1,
      &(const VkBufferMemoryBarrier) {
        .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
        .pNext = NULL,
        .srcAccessMask = 0,
        .dstAccessMask = dst_access,
        .srcQueueFamilyIndex = cpools.logicdev.xfr_qfam,
        .dstQueueFamilyIndex = cpools.logicdev.gfx_qfam,
        .buffer = local_buf->handle,
        .offset = 0,
        .size = size
      },
      0, NULL
    );
    err = vkEndCommandBuffer(gfx_combuf.buf);
    if(err != VK_SUCCESS) goto borked_gfxbuf;
    err = vkCreateFence(
      cpools.logicdev.handle,
      &(const VkFenceCreateInfo) {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0
      },
      NULL,
      &fence
    );
    if(err != VK_SUCCESS) goto borked_gfxbuf;
    err = vkQueueSubmit(
      cpools.logicdev.gfx_q,
      1,
      &(const VkSubmitInfo) {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = NULL,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = &semaphore,
        //Each wait semaphore needs a corresponding wait stage mask
        .pWaitDstStageMask = (const VkPipelineStageFlags[]) {
          VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
        },
        .commandBufferCount = 1,
        .pCommandBuffers = &gfx_combuf.buf,
        .signalSemaphoreCount = 0,
        .pSignalSemaphores = NULL
      },
      fence
    );
    if(err != VK_SUCCESS) goto borked_fence;
    err = vkWaitForFences(gfx_combuf.dev, 1, &fence, VK_TRUE, UINT64_MAX);
    if(err != VK_SUCCESS) goto borked_fence;
    vkFreeCommandBuffers(xfr_combuf.dev, xfr_combuf.cpool, 1, &xfr_combuf.buf);
    vkFreeCommandBuffers(gfx_combuf.dev, gfx_combuf.cpool, 1, &gfx_combuf.buf);
    vkDestroyFence(cpools.logicdev.handle, fence, NULL);
    vkDestroySemaphore(cpools.logicdev.handle, semaphore, NULL);
  }
  vmaDestroyBuffer(allocator, host_buf.handle, host_buf.alloc);
  return VGFX_SUCCESS;
borked_fence:
  vkDestroyFence(cpools.logicdev.handle, fence, NULL);
borked_gfxbuf:
  //Free the graphics command buffer; the fall-through to borked_xfrbuf
  //below handles the transfer one
  vkFreeCommandBuffers(gfx_combuf.dev, gfx_combuf.cpool, 1, &gfx_combuf.buf);
borked_semaphore:
  vkDestroySemaphore(cpools.logicdev.handle, semaphore, NULL);
borked_xfrbuf:
  vkFreeCommandBuffers(xfr_combuf.dev, xfr_combuf.cpool, 1, &xfr_combuf.buf);
borked_buffers:
  vmaDestroyBuffer(allocator, host_buf.handle, host_buf.alloc);
  vmaDestroyBuffer(allocator, local_buf->handle, local_buf->alloc);
borked:
  if(err != VK_RESULT_MAX_ENUM)
    log_error("Failed to build and copy local buffer on: %s", vkr2str(err));
  return VGFX_FAIL;
}
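
//Hedged usage sketch, not part of the original gist: uploading a small
//vertex buffer through build_and_copy_buf. Assumes an initialized
//VmaAllocator and vk_commandpools_t from this codebase's setup path; the
//function name and vertex data below are hypothetical.
static int upload_example_verts(VmaAllocator allocator,
                                vk_commandpools_t cpools) {
  static const float verts[] = {
     0.0f, -0.5f,
     0.5f,  0.5f,
    -0.5f,  0.5f
  };
  vk_buffer_t vert_buf;
  return build_and_copy_buf(
    &vert_buf,                           //Device-local result on success
    (void *) verts,                      //Host data to stage and copy
    sizeof(verts),                       //Bytes to copy
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,   //Final usage of the buffer
    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, //First access on the graphics queue
    allocator,
    cpools
  );
}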