Skip to content

Instantly share code, notes, and snippets.

@NeoVertex1
Last active November 15, 2024 18:34
Show Gist options
  • Save NeoVertex1/811b2300d2895b1eb552d93358fc1560 to your computer and use it in GitHub Desktop.
Save NeoVertex1/811b2300d2895b1eb552d93358fc1560 to your computer and use it in GitHub Desktop.
quantum kernels extraction

install the complextensor library with pip install complextensor

create both files in the same folder and run python dim_exploring.py

The code should generate a `tensor_data` folder containing a few `.pkl` files. You can extract the data from them and feed it to an AI; a plotting example is included below.

P.S. You need the complextensor library to plot this data, but AIs such as JuliusAI can handle it, since they can install and use the complextensor library themselves.

example data extraction below

Note: the code currently breaks at the 1024 x 1024 step, but it will still save the 512 x 512 kernel; a fix is in progress.

import torch
import numpy as np
import pickle
from complextensor import ComplexTensor
from qt_state_processor import QuantumStateProcessor
from quantum_random_character_generator import MISTransform, generate_random_characters
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
import queue
import logging
# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Suppress ALL loggers related to ComplexTensor so its DEBUG output does not
# drown out this module's own messages.
for logger_name in logging.Logger.manager.loggerDict.keys():
    if isinstance(logger_name, str) and 'complextensor' in logger_name.lower():
        logging.getLogger(logger_name).setLevel(logging.CRITICAL)

# Set device to MPS if available, otherwise fall back to CPU.
# Fix: use is_available() rather than is_built() — a PyTorch build can include
# MPS support while the current machine cannot actually run it, and is_built()
# would then select a device on which every tensor op fails.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
cpu_device = torch.device("cpu")
class TensorFieldEvolver:
    """
    Grows a complex-valued tensor field from 1x1 up through power-of-two
    dimensions, modulating it with quantum-random character data at each
    step and snapshotting ("collapsing") the field to disk every other
    doubling. Relies on the module-level `device`/`cpu_device` globals for
    tensor placement.
    """

    def __init__(self, max_dim_exp: int = 10):
        """
        Initializes the TensorFieldEvolver from 0.35 bits (0D) through evolving dimensions.

        Args:
            max_dim_exp (int): Maximum exponent for final dimension size (e.g., 10 for 2^10).
        """
        self.max_dim = 2 ** max_dim_exp
        self.qsp = QuantumStateProcessor(n_qubits=1)
        # Start with a 1x1 field; it is doubled on every evolution step.
        self.tensor_field = self._initialize_tensor_field(1)
        self.transform = MISTransform(alpha=1.3 + 0.5j, beta=1.7 + 0.8j)
        self.kernels = []  # ComplexTensor snapshots captured at collapse points
        os.makedirs("tensor_data", exist_ok=True)

    def _initialize_tensor_field(self, dim: int) -> ComplexTensor:
        """
        Return a dim x dim ComplexTensor with unit-scale real noise and a
        much smaller (x0.01) imaginary noise component.
        """
        real_part = torch.randn(dim, dim, device=device, dtype=torch.float32)
        imag_part = torch.randn(dim, dim, device=device, dtype=torch.float32) * 0.01
        return ComplexTensor(real_part, imag_part)

    def evolve_tensor_field(self):
        """
        Evolve the tensor field through powers of 2 dimensions.

        Per dimension: generate `current_dim` quantum-random characters,
        split them into alternating CPU/GPU batches, apply the MIS transform
        batch-by-batch in a thread pool, optionally snapshot the field, then
        embed it in the top-left corner of a noise-filled tensor of twice
        the size.
        """
        current_dim = 1
        collapse_counter = 1
        while current_dim <= self.max_dim:
            print(f"Evolving tensor field at dimension: {current_dim}")
            # Generate QRNG data for transformation modulation on CPU
            quantum_state = self.qsp.create_superposition(alpha=1 / np.sqrt(2), beta=1 / np.sqrt(2)).to(cpu_device, dtype=torch.float32)
            qrng_data = generate_random_characters(quantum_state, transform=self.transform, length=current_dim)
            # Split QRNG data into CPU and GPU chunks: even chunk indices go
            # to the CPU queue, odd ones to the GPU queue.
            batch_size = min(2048, current_dim)
            chunks = [(start, min(start + batch_size, current_dim)) for start in range(0, current_dim, batch_size)]
            cpu_queue = queue.Queue()
            gpu_queue = queue.Queue()
            for i, (start, end) in enumerate(chunks):
                if i % 2 == 0:
                    cpu_queue.put((start, end, qrng_data[start:end]))  # CPU processing
                else:
                    gpu_queue.put((start, end, qrng_data[start:end]))  # GPU processing
            # Start threads for CPU and GPU processing
            with ThreadPoolExecutor() as executor:
                futures = []
                # Submit CPU tasks (start/end offsets are not needed by the worker)
                for _ in range(cpu_queue.qsize()):
                    _start, _end, batch_data = cpu_queue.get()
                    futures.append(executor.submit(self._apply_transformations, batch_data, cpu_device))
                # Submit GPU tasks
                for _ in range(gpu_queue.qsize()):
                    _start, _end, batch_data = gpu_queue.get()
                    futures.append(executor.submit(self._apply_transformations, batch_data, device))
                # NOTE(review): each future transforms its own copy of the FULL
                # field, and each completed result overwrites self.tensor_field,
                # so only the last-completed batch survives. Preserved as-is to
                # keep published outputs reproducible — confirm whether this is
                # intended before "fixing" it.
                for future in as_completed(futures):
                    transformed_tensor = future.result()
                    self.tensor_field = transformed_tensor
            # Save kernel data at wave collapse points. collapse_counter starts
            # at 1, so snapshots land at dims 2, 8, 32, 128, 512, ...
            if collapse_counter % 2 == 0:
                with open(f"tensor_data/kernel_dim_{current_dim}.pkl", "wb") as f:
                    pickle.dump(self.tensor_field, f)
                self.kernels.append(self.tensor_field)
                print(f"Kernel at dimension {current_dim} saved as 'kernel_dim_{current_dim}.pkl'")
            # Expand tensor field for next dimension increment (power of 2)
            new_dim = current_dim * 2
            new_real_part = torch.randn(new_dim, new_dim, device=device, dtype=torch.float32)
            new_imag_part = torch.randn(new_dim, new_dim, device=device, dtype=torch.float32) * 0.01
            expanded_tensor = ComplexTensor(new_real_part, new_imag_part)
            # Embed the current tensor field within the expanded tensor
            expanded_tensor.real[:current_dim, :current_dim] = self.tensor_field.real
            expanded_tensor.imag[:current_dim, :current_dim] = self.tensor_field.imag
            self.tensor_field = expanded_tensor
            # Increment dimension and collapse counter
            current_dim = new_dim
            collapse_counter += 1
        # Final save of all kernels and tensor field data
        self.save_final_data()

    def _apply_transformations(self, batch_data, device):
        """
        Apply one MIS transform per character in `batch_data` to a copy of
        the current field on the given device (MPS or CPU).

        Each character maps to a modulation parameter in [0, 1] via
        (ord(char) % 32) / 31. Returns the transformed field on the CPU.
        """
        modulation_params = torch.tensor(
            [(ord(char) % 32) / 31.0 for char in batch_data], device=device, dtype=torch.float32
        )
        local_tensor_field = self.tensor_field.to(device)
        for param in modulation_params:
            local_tensor_field = self.transform(local_tensor_field, t=param)
        return local_tensor_field.to(cpu_device)  # Move result back to CPU to avoid excess GPU memory use

    def save_final_data(self):
        """
        Saves all kernels and the final tensor field under `tensor_data/`.

        The kernels have different shapes (one per collapse dimension), so
        they are stored in an object-dtype array; the original
        `np.array(list_of_ragged_arrays)` raises ValueError on modern NumPy.
        """
        kernel_data = [torch.stack([k.real, k.imag], dim=0) for k in self.kernels]
        ragged = np.empty(len(kernel_data), dtype=object)
        for i, k in enumerate(kernel_data):
            ragged[i] = k.cpu().numpy()
        np.save("tensor_data/kernels.npy", ragged, allow_pickle=True)
        print("All kernels saved in 'tensor_data/kernels.npy'")
        # Save the final tensor field
        np.save("tensor_data/final_tensor_real.npy", self.tensor_field.real.cpu().numpy())
        np.save("tensor_data/final_tensor_imag.npy", self.tensor_field.imag.cpu().numpy())
        print("Final tensor field saved as 'final_tensor_real.npy' and 'final_tensor_imag.npy'")
# Example usage
if __name__ == "__main__":
    # max_dim_exp=10 targets a final dimension of 2**10 = 1024; raise it for
    # higher dimensions.
    field_evolver = TensorFieldEvolver(max_dim_exp=10)
    field_evolver.evolve_tensor_field()
# You can extract far more data from these kernels; this is just a starting point.
import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from complextensor import ComplexTensor
import pickle
# Load the two pickled kernels produced by dim_exploring.py.
def _load_kernel(path):
    # One-shot helper so each file handle is closed right after reading.
    with open(path, 'rb') as fh:
        return pickle.load(fh)

kernel_32 = _load_kernel('kernel_dim_32.pkl')
kernel_512 = _load_kernel('kernel_dim_512.pkl')

# Element-wise magnitude |z| = sqrt(re^2 + im^2) of each kernel.
magnitude_32 = torch.sqrt(kernel_32.real ** 2 + kernel_32.imag ** 2)
magnitude_512 = torch.sqrt(kernel_512.real ** 2 + kernel_512.imag ** 2)

# Side-by-side heatmaps of the two magnitude maps.
plt.figure(figsize=(15, 5))
for position, dim, magnitude in ((121, 32, magnitude_32), (122, 512, magnitude_512)):
    plt.subplot(position)
    sns.heatmap(magnitude.numpy(), cmap='viridis')
    plt.title(f'Magnitude of Kernel {dim}x{dim}')
plt.tight_layout()
plt.show()
def print_tensor_stats(name, tensor, magnitude):
    """
    Print a summary of a complex kernel: shape, count of non-NaN entries in
    the real and imaginary parts, and the max/min finite magnitude.

    Args:
        name: Label used in the report header.
        tensor: Object exposing `.shape`, `.real`, `.imag` (e.g. ComplexTensor).
        magnitude: Tensor of element-wise magnitudes for `tensor`.
    """
    print(f"\n{name} Statistics:")
    print(f"Shape: {tensor.shape}")
    # "Non- values" in the original was a garbled "Non-NaN values".
    print(f"Non-NaN values (real): {torch.sum(~torch.isnan(tensor.real)).item()}")
    print(f"Non-NaN values (imag): {torch.sum(~torch.isnan(tensor.imag)).item()}")
    finite_magnitudes = magnitude[~torch.isnan(magnitude)]
    if finite_magnitudes.numel() == 0:
        # Guard: torch.max/torch.min raise on an empty tensor, which happens
        # when every magnitude is NaN.
        print("Max magnitude: n/a (all values are NaN)")
        print("Min magnitude: n/a (all values are NaN)")
    else:
        print(f"Max magnitude: {torch.max(finite_magnitudes).item():.2e}")
        print(f"Min magnitude: {torch.min(finite_magnitudes).item():.2e}")
# Report shape / NaN-count / magnitude summaries for both loaded kernels.
print_tensor_stats("Kernel 32", kernel_32, magnitude_32)
print_tensor_stats("Kernel 512", kernel_512, magnitude_512)
# should be able to handle superposition and entanglement, more testing is needed
import torch
import numpy as np
from complextensor import ComplexTensor
from typing import List
class QuantumStateProcessor:
    """
    Minimal n-qubit state helper built on ComplexTensor: basis states,
    two-amplitude superpositions, Bell states, sampling-based measurement,
    entanglement entropy, and an all-qubit Hadamard gate.

    States are vectors of length 2**n_qubits. Every constructor here only
    populates the real part, so all amplitudes produced are real-valued.
    """
    def __init__(self, n_qubits: int):
        # Number of qubits; state vectors have 2**n_qubits amplitudes.
        self.n_qubits = n_qubits
        self.state_size = 2 ** n_qubits
        # NOTE(review): selects CUDA when available, unlike the evolver
        # script which targets MPS — confirm the intended device policy.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Cached basis states |0...00> and |0...01>.
        self.zero_state = self._create_basis_state(0)
        self.one_state = self._create_basis_state(1)
    def _create_basis_state(self, state: int) -> ComplexTensor:
        """Return the computational basis state |state> as a one-hot vector."""
        real = torch.zeros(self.state_size, device=self.device)
        real[state] = 1.0
        return ComplexTensor(real)
    def create_superposition(self, alpha: float, beta: float) -> ComplexTensor:
        """
        Return the normalized state alpha|0> + beta|1> (remaining amplitudes 0).

        alpha and beta are real weights; normalization divides by
        sqrt(alpha**2 + beta**2), so alpha == beta == 0 divides by zero.
        """
        norm_factor = (alpha**2 + beta**2) ** 0.5
        alpha, beta = alpha / norm_factor, beta / norm_factor
        real = torch.tensor([alpha, beta] + [0.0] * (self.state_size - 2), device=self.device)
        return ComplexTensor(real)
    def create_bell_state(self, bell_type: int = 0) -> ComplexTensor:
        """
        Return one of the four Bell states by index (0..3).

        For n_qubits == 2 the populated indices 0..3 correspond to
        |00>, |01>, |10>, |11>: type 0/1 are (|00> +/- |11>)/sqrt(2) and
        type 2/3 are (|01> +/- |10>)/sqrt(2). Indexing real[3] requires
        state_size >= 4, i.e. n_qubits >= 2.

        Raises:
            ValueError: if bell_type is not in 0..3.
        """
        real = torch.zeros(self.state_size, device=self.device)
        if bell_type == 0:
            real[0] = 1 / np.sqrt(2)
            real[3] = 1 / np.sqrt(2)
        elif bell_type == 1:
            real[0] = 1 / np.sqrt(2)
            real[3] = -1 / np.sqrt(2)
        elif bell_type == 2:
            real[1] = 1 / np.sqrt(2)
            real[2] = 1 / np.sqrt(2)
        elif bell_type == 3:
            real[1] = 1 / np.sqrt(2)
            real[2] = -1 / np.sqrt(2)
        else:
            raise ValueError("Bell state type must be between 0 and 3")
        return ComplexTensor(real)
    def measure_state(self, state: ComplexTensor, n_samples: int = 100000) -> torch.Tensor:
        """
        Sample basis-state indices with probability proportional to
        |amplitude|**2 and return them as a CPU tensor of length n_samples.

        torch.multinomial only needs non-negative weights, so unnormalized
        states still sample in the right proportions.
        """
        probabilities = state.abs().to(self.device)**2
        measurements = torch.multinomial(probabilities, n_samples, replacement=True).to("cpu")
        return measurements
    def get_entanglement_entropy(self, state: ComplexTensor, partition: int) -> float:
        """
        Von Neumann entropy (base 2) of the reduced state on the first
        `partition` qubits.

        Eigenvalues <= 1e-10 are dropped to avoid log2(0) on numerically
        zero eigenvalues.
        """
        shape = [2] * self.n_qubits
        # NOTE(review): state.forward() presumably returns the underlying
        # torch tensor — confirm against the complextensor API.
        state_reshaped = state.forward().view(shape).to(self.device)
        rho_A = self._partial_trace(state_reshaped, partition)
        eigenvalues = torch.linalg.eigvalsh(rho_A)
        eigenvalues = eigenvalues[eigenvalues > 1e-10]
        entropy = -torch.sum(eigenvalues * torch.log2(eigenvalues)).item()
        return entropy
    def _partial_trace(self, state: torch.Tensor, partition: int) -> torch.Tensor:
        """
        Trace out the last (n_qubits - partition) qubits.

        Reshapes the state into a (dim_A, dim_B) matrix S and returns
        S @ S^dagger, which is the reduced density matrix on subsystem A.
        """
        n_traced = self.n_qubits - partition
        dims_A = [2] * partition
        dims_B = [2] * n_traced
        state = state.reshape(self._prod(dims_A), self._prod(dims_B))
        rho = torch.mm(state, state.t().conj()).to(self.device)
        return rho
    def _prod(self, iterable):
        """Product of the elements of `iterable` (1 for an empty iterable)."""
        result = 1
        for x in iterable:
            result *= x
        return result
    def apply_hadamard(self, state: ComplexTensor) -> ComplexTensor:
        """
        Apply the Hadamard gate to every qubit.

        Builds H^(tensor n) via repeated Kronecker products, then multiplies
        the real and imaginary parts separately — valid because H is real.
        """
        h_matrix = torch.tensor([[1, 1], [1, -1]], dtype=torch.float32, device=self.device) / torch.sqrt(torch.tensor(2.0, dtype=torch.float32))
        full_h_matrix = h_matrix
        for _ in range(self.n_qubits - 1):
            full_h_matrix = torch.kron(full_h_matrix, h_matrix)
        transformed_real = full_h_matrix.to(dtype=torch.float32) @ state.real.to(dtype=torch.float32)
        transformed_imag = full_h_matrix.to(dtype=torch.float32) @ state.imag.to(dtype=torch.float32)
        return ComplexTensor(transformed_real, transformed_imag)
import torch
import numpy as np
import string
import logging
from complextensor import ComplexTensor
# Configure logging: module logger at DEBUG with a timestamped console handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
_console = logging.StreamHandler()
_console.setLevel(logging.DEBUG)
_console.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(_console)
# Suppress ALL loggers related to ComplexTensor
for _name in logging.Logger.manager.loggerDict:
    if isinstance(_name, str) and 'complextensor' in _name.lower():
        logging.getLogger(_name).setLevel(logging.CRITICAL)
# MISTransform: Morphing Infinity Spiral Transform implementation
class MISTransform:
    """Morphing Infinity Spiral Transform implementation.

    Assuming ComplexTensor's `*` and `.exp()` implement complex
    multiplication and exponentiation, __call__ computes
    z -> z**Re(alpha) * exp(i * Im(t * (log z)**Re(beta))) element-wise.
    Note that only the REAL parts of alpha and beta are used (see below).
    """
    def __init__(self, alpha: complex = 1.0, beta: complex = 2.0):
        # Transform exponents. NOTE(review): complex values are accepted
        # (and passed elsewhere in this file), but complex_power() receives
        # only alpha.real / beta.real — the imaginary parts are ignored.
        self.alpha = alpha
        self.beta = beta
        logger.debug(f"Initialized MISTransform with alpha={alpha}, beta={beta}")
    def complex_power(self, z: ComplexTensor, exponent: float) -> ComplexTensor:
        """Compute z^exponent using exp(log) method.

        log z is assembled as log|z| + i*arg(z); the 1e-10 offset keeps the
        logarithm finite at z = 0.

        Raises:
            TypeError: if z is not a ComplexTensor.
        """
        if not isinstance(z, ComplexTensor):
            raise TypeError("Input must be a ComplexTensor")
        log_z = ComplexTensor(
            torch.log(z.abs() + 1e-10),
            z.angle()
        )
        exponent_tensor = ComplexTensor(
            torch.full_like(z.real, exponent),
            torch.zeros_like(z.imag)
        )
        return (log_z * exponent_tensor).exp()
    def __call__(self, z: ComplexTensor, t: float) -> ComplexTensor:
        """Apply MIS transformation.

        power_term = z**Re(alpha); rotation = Im(t * (log z)**Re(beta));
        rotation_term = cos(rotation) + i*sin(rotation), i.e. a pure phase
        e^{i*rotation}. Any exception is logged and re-raised unchanged.
        """
        try:
            power_term = self.complex_power(z, float(self.alpha.real))
            log_z = ComplexTensor(
                torch.log(z.abs() + 1e-10),
                z.angle()
            )
            log_z_beta = self.complex_power(log_z, float(self.beta.real))
            phase_tensor = ComplexTensor(
                torch.full_like(z.real, t),
                torch.zeros_like(z.imag)
            )
            rotation = (log_z_beta * phase_tensor).imag
            rotation_term = ComplexTensor(
                torch.cos(rotation),
                torch.sin(rotation)
            )
            return power_term * rotation_term
        except Exception as e:
            logger.error(f"Error in MIS transformation: {str(e)}")
            raise
# Step 1: Define critical values for tensor stability
# NOTE(review): the provenance of these specific constants is not explained
# anywhere in this file — presumably tuned empirically; confirm before changing.
ψ, ξ, τ, ε, π = 44.8, 3721.8, 64713.97, 0.28082, torch.tensor(np.pi, dtype=torch.float32)
# Initialize a 4x4 ComplexTensor T with the given critical values, adding random imaginary part.
# The real part is symmetric, which is what makes torch.linalg.eigh applicable
# to T.real in encode_state below.
real_part = torch.tensor([
    [ψ, ε, 0, π],
    [ε, ξ, τ, 0],
    [0, τ, π, ε],
    [π, 0, ε, ψ]
], dtype=torch.float32)
imag_part = torch.randn_like(real_part) * 0.01  # Small random imaginary component for complexity
T = ComplexTensor(real_part, imag_part)
# Step 2: Encoding function for quantum state
def encode_state(T: ComplexTensor, state_vector: torch.Tensor) -> tuple[ComplexTensor, torch.Tensor]:
    """
    Return the eigenbasis of T.real (as a ComplexTensor with zero imaginary
    part) together with the eigenvalues.

    T.real is symmetric by construction above, so eigh applies.

    NOTE(review): despite its name, `state_vector` is currently NOT used —
    the "encoded state" returned is just the eigenvector matrix. Preserved
    as-is because the downstream QRNG output depends on this behavior;
    confirm whether a projection onto the eigenbasis was intended.

    Args:
        T: ComplexTensor whose real part supplies the eigenbasis.
        state_vector: Accepted for interface compatibility; presently unused.

    Returns:
        (eigenvector matrix as ComplexTensor, eigenvalues tensor).
    """
    eigvals, eigvecs = torch.linalg.eigh(T.real)
    # eigvecs is already a torch.Tensor; re-wrapping it with torch.tensor()
    # (as the original did) triggers a copy-construct warning and an extra copy.
    eigvecs_tensor = ComplexTensor(eigvecs, torch.zeros_like(eigvecs))
    return eigvecs_tensor, eigvals
# Step 3: Function to generate random characters based on the encoded quantum state with MIS transformation
def generate_random_characters(encoded_state: ComplexTensor, transform: MISTransform, length: int = 1024) -> str:
    """
    Generate `length` pseudo-random printable characters from an encoded state.

    For each step i, the state is run through the MIS transform with phase
    t = i/length; the transformed magnitudes and phases are folded into a
    single integer that indexes into string.printable.
    """
    alphabet = string.printable  # All printable ASCII characters
    n_chars = len(alphabet)
    produced = []
    for step in range(length):
        # Apply the MIS transformation with a step-dependent phase.
        stepped_state = transform(encoded_state, t=float(step) / length)
        # Fold magnitude and phase into one deterministic pseudo-random index.
        mags = stepped_state.abs().cpu().numpy()
        angles = stepped_state.angle().cpu().numpy()
        index = int(np.sum(mags * np.abs(np.sin(angles))) * 1e5) % n_chars
        produced.append(alphabet[index])
    return ''.join(produced)
# Example usage
if __name__ == "__main__":
    # Transform parameters chosen for added structural complexity.
    mis = MISTransform(alpha=1.3 + 0.5j, beta=1.7 + 0.8j)
    # A widely-varying random test state (note: encode_state currently
    # ignores its state_vector argument).
    test_state = torch.randn(4) * 10
    basis, _ = encode_state(T, test_state)
    # Emit a random character stream derived from the encoded basis.
    random_characters = generate_random_characters(basis, mis)
    print(f"Generated Quantum Random Character String: {random_characters}")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment