Skip to content

Instantly share code, notes, and snippets.

@wheremyfoodat
Created July 21, 2025 20:22
Show Gist options
  • Save wheremyfoodat/4976a45094c5d5be228c3fe311b382b5 to your computer and use it in GitHub Desktop.
Save wheremyfoodat/4976a45094c5d5be228c3fe311b382b5 to your computer and use it in GitHub Desktop.
Jupyter Notebook for running CUDA kernels on Google Colab
Display the source blob
Display the rendered blob
Raw
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
// The notebook expects to load this CUDA kernel from the root of your Google Drive.
#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <cuda_runtime.h>
// 64-byte constant-memory staging buffer for the greeting string.
// Filled from the host with cudaMemcpyToSymbol() before the kernel runs;
// read by every thread of the `welcome` kernel.
__constant__ char d_message[64];
// Copy the greeting out of constant memory into the device buffer `msg`,
// one character per thread. Expected launch: <<<1, length>>> with
// length <= 64 and `msg` sized to at least the launch width.
// The guard keeps any thread of a wider launch from reading past the
// 64-byte d_message array or writing past `msg`.
__global__ void welcome(char* msg) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 64) {  // 64 == sizeof(d_message)
        msg[idx] = d_message[idx];
    }
}
// Report the most recent CUDA runtime error (if any) to stderr,
// prefixed with `label` so the failing step can be identified.
// Note: cudaGetLastError() also clears the sticky error state.
void printErrors(const char* label) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        return;
    }
    std::fprintf(stderr, "%s: %s\n", label, cudaGetErrorString(status));
}
// Entry point: stages a greeting string in constant memory, launches the
// `welcome` kernel (one thread per character) to copy it into a device
// buffer, copies the result back to the host, and prints it.
// Returns 0 on success, EXIT_FAILURE if the host allocation fails.
int main() {
    std::printf("Hello CUDA from CPU\n");

    const char message[] = "Hello CUDA from GPU!";
    // +1 so the NUL terminator travels with the string.
    const int length = static_cast<int>(std::strlen(message)) + 1;

    // Allocate host and device buffers for the round-tripped string.
    char* h_msg = static_cast<char*>(std::malloc(length * sizeof(char)));
    if (h_msg == nullptr) {
        std::fprintf(stderr, "Host allocation failed\n");
        return EXIT_FAILURE;
    }
    char* d_msg = nullptr;
    cudaMalloc(&d_msg, length * sizeof(char));
    printErrors("cudaMalloc failed");

    // Copy message to constant memory so the kernel can read it.
    cudaMemcpyToSymbol(d_message, message, length);
    printErrors("cudaMemcpyToSymbol failed");

    // One block, one thread per character (length <= 64, the d_message size).
    welcome<<<1, length>>>(d_msg);
    printErrors("Kernel launch failed");

    // Copy result back to host; cudaMemcpy blocks until the kernel is done.
    cudaMemcpy(h_msg, d_msg, length * sizeof(char), cudaMemcpyDeviceToHost);
    printErrors("Device->Host memcpy failed");
    h_msg[length - 1] = '\0';  // defensive: guarantee NUL termination

    std::printf("%s\n", h_msg);
    std::printf("Exiting kernel\n");

    // Cleanup
    std::free(h_msg);
    cudaFree(d_msg);
    return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment