import numpy as np
from numba import cuda, float64
# Define the Hadamard gate matrix.
# Numba embeds global NumPy arrays referenced inside a CUDA kernel as
# compile-time constants, so the kernel below can read this array directly.
hadamard = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
# Define a kernel that applies the Hadamard gate to each qubit in parallel
@cuda.jit
def hadamard_gate_kernel(qubits, out):
    tx = cuda.threadIdx.x
    ty = cuda.blockIdx.x
    bw = cuda.blockDim.x
    idx = tx + ty * bw
    # Guard against threads beyond the number of qubits (rows), not the
    # total element count, to avoid out-of-bounds reads
    if idx < out.shape[0]:
        qubit = qubits[idx]
        result = cuda.local.array(2, dtype=float64)
        result[0] = hadamard[0, 0] * qubit[0] + hadamard[0, 1] * qubit[1]
        result[1] = hadamard[1, 0] * qubit[0] + hadamard[1, 1] * qubit[1]
        out[idx, 0] = result[0]
        out[idx, 1] = result[1]
# Host-side wrapper that launches the CUDA kernel
def apply_hadamard_gate_on_gpu(qubits):
    # Allocate memory on the device
    d_qubits = cuda.to_device(qubits.astype(np.float64))
    # Allocate the output from the float64 device copy so the result is not
    # truncated if the input happens to be an integer array
    d_out = cuda.device_array_like(d_qubits)
    # Configure the kernel launch
    threads_per_block = 256
    blocks_per_grid = (qubits.shape[0] + (threads_per_block - 1)) // threads_per_block
    # Launch the kernel
    hadamard_gate_kernel[blocks_per_grid, threads_per_block](d_qubits, d_out)
    # Copy the result back to the host
    return d_out.copy_to_host()
# Test the GPU-accelerated function
if __name__ == '__main__':
    # Check for a usable CUDA device before touching the device list
    if not cuda.is_available():
        print("No GPU found.")
    else:
        # List the available GPUs
        for i, gpu in enumerate(cuda.gpus):
            print(f"GPU {i}: {gpu.name.decode('utf-8')}")

        # Print information about the current CUDA device
        current_device = cuda.get_current_device()
        print("Current CUDA device:", current_device.name.decode('utf-8'))

        N = 1000000
        qubits = np.array([[1, 0] for _ in range(N)], dtype=np.float64)  # Initialize qubits to |0⟩
        result = apply_hadamard_gate_on_gpu(qubits)
        print(result)
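        # Optional sanity check -- a minimal sketch added here, not part of the
        # original gist. Assuming the kernel is correct, the GPU output should
        # match a host-side NumPy matrix product with the same Hadamard matrix.
        expected = qubits @ hadamard.T
        print("Matches CPU reference:", np.allclose(result, expected))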