// Draft for manual creation of deferred command queues.
// There are three basic requirements for the API:
// 1. Allow a command queue to be populated from an external source (not the flecs API)
// 2. Record operations and take control over how they are flushed
// 3. Allow operations to be pushed to different queues
//
// All requirements must work in multithreaded applications and in applications with multiple synchronization points.
//
// High level design:
// - Application can create a defer context. A single defer context contains a command queue per thread.
// - A command queue can be merged automatically or manually
// - A command queue can have a custom flush callback which gets invoked at each sync point
// - A command queue can be populated either through the regular API, or manually by adding command elements to the queue
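//
// The draft does not specify the layout of a command element. Purely as an
// illustration (an assumption, not part of the proposal), a single
// ecs_defer_op_t could contain roughly the following:
//
//   typedef struct ecs_defer_op_t {
//       int32_t kind;            // e.g. add / remove / set
//       ecs_entity_t entity;     // entity the command applies to
//       ecs_entity_t component;  // component id involved in the command
//       void *value;             // optional payload, e.g. for set commands
//   } ecs_defer_op_t;
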
typedef struct thread_ctx {
    ecs_world_t *world;
    int defer_ctx;
} thread_ctx;

// Worker thread: records commands through the regular API while running
// systems, scoped by ecs_defer_push / ecs_defer_pop between sync points
void* worker_thread(thread_ctx *ctx) {
    while (true) {
        wait_for_main();
        ecs_defer_push(ctx->world, ctx->defer_ctx);
        // RUN SYSTEMS
        ecs_defer_pop(ctx->world, ctx->defer_ctx);
        signal_ready();
    }
}

// Network thread: populates its command queue manually by appending received
// command elements (requirement 1)
void* network_thread(thread_ctx *ctx) {
    ecs_defer_op_t commands[MAX_COMMANDS];
    while (true) {
        int received = recv(commands, sizeof(ecs_defer_op_t), MAX_COMMANDS);
        for (int i = 0; i < received; i ++) {
            ecs_defer_append(ctx->world, ctx->defer_ctx, &commands[i]);
        }
    }
}
// Flush callback: iterates each thread's command queue for the defer context
// and flushes the recorded commands (requirement 2)
void flush_queue(ecs_world_t *world, int defer_ctx) {
    ecs_vector_t *command_queue;
    for (int i = 0; i < ecs_get_threads(world); i ++) {
        command_queue = ecs_defer_context_get_ops(world, defer_ctx, i);
        int j, count = ecs_vector_count(command_queue);
        ecs_defer_op_t *ops = ecs_vector_first(command_queue, ecs_defer_op_t);
        for (j = 0; j < count; j ++) {
            ecs_defer_command_flush(world, &ops[j]);
        }
    }
}
int main(void) {
    ecs_world_t *world = ecs_init();

    // Allocate stages for 4 threads
    ecs_set_user_threads(world, 4);

    // Create two defer contexts, one for regular commands & one for network commands
    int auto_ctx = ecs_new_defer_context(world, flush_queue);
    int network_ctx = ecs_new_defer_context(world, flush_queue);

    // Don't automatically merge the network command queue
    ecs_defer_context_set_automerge(world, network_ctx, false);

    // Start two threads for running regular systems, pass the context for regular commands
    thread_new(worker_thread, &(thread_ctx){ecs_get_thread(world, 0), auto_ctx});
    thread_new(worker_thread, &(thread_ctx){ecs_get_thread(world, 1), auto_ctx});

    // Start two network threads, pass the context for network commands
    thread_new(network_thread, &(thread_ctx){ecs_get_thread(world, 2), network_ctx});
    thread_new(network_thread, &(thread_ctx){ecs_get_thread(world, 3), network_ctx});

    while (true) {
        ecs_frame_begin(world);
        ecs_staging_begin(world);

        signal_workers();
        wait_for_workers();

        ecs_staging_end(world); // Flush auto_ctx for all threads
        ecs_frame_end(world);

        // Flush commands in the network queue for all network threads.
        // Omitting locking for brevity
        ecs_defer_context_flush(world, network_ctx);
    }

    ecs_fini(world);
}
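
// Note on the design as sketched above: because automerge is disabled for
// network_ctx, ecs_staging_end only flushes the queues of auto_ctx. Commands
// appended by the network threads are only applied when the application
// explicitly calls ecs_defer_context_flush, here after ecs_frame_end, so
// applying network commands is decoupled from the worker sync points.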