|
import torch |
|
import numpy as np |
|
import pickle |
|
from complextensor import ComplexTensor |
|
from qt_state_processor import QuantumStateProcessor |
|
from quantum_random_character_generator import MISTransform, generate_random_characters |
|
import os |
|
from concurrent.futures import ThreadPoolExecutor, as_completed |
|
import queue |
|
import logging |
|
|
|
# Configure logging
# Root logger at DEBUG so all module output is visible during evolution runs.
logging.basicConfig(level=logging.DEBUG)

# Module-level logger; currently unused in this file but available to methods.
logger = logging.getLogger(__name__)
|
|
|
# Silence every already-registered logger whose name mentions ComplexTensor,
# so its chatter does not drown out this module's DEBUG output.
for _name in list(logging.Logger.manager.loggerDict):
    if isinstance(_name, str) and 'complextensor' in _name.lower():
        logging.getLogger(_name).setLevel(logging.CRITICAL)
|
|
|
# Set device to MPS if available, otherwise fallback to CPU.
# Use is_available() rather than is_built(): is_built() only reports that
# PyTorch was compiled with MPS support, so on a machine without a usable
# Metal device the old check selected "mps" and tensor creation failed later.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

# QRNG generation and half of the transform batches always run on the CPU.
cpu_device = torch.device("cpu")
|
|
|
class TensorFieldEvolver:
    """Evolve a complex-valued tensor field through power-of-2 dimensions.

    Starting from a 1x1 ComplexTensor, each iteration modulates the field
    with QRNG-derived parameters (batches alternated between CPU and GPU
    workers), checkpoints the field as a "kernel" every other step, and
    embeds it into a freshly randomized tensor of twice the dimension,
    stopping once 2**max_dim_exp is exceeded.
    """

    def __init__(self, max_dim_exp: int = 10):
        """
        Initializes the TensorFieldEvolver from 0.35 bits (0D) through evolving dimensions.

        Args:
            max_dim_exp (int): Maximum exponent for final dimension size (e.g., 10 for 2^10).
        """
        self.max_dim = 2 ** max_dim_exp
        self.qsp = QuantumStateProcessor(n_qubits=1)
        self.tensor_field = self._initialize_tensor_field(1)  # Start with 1x1 tensor
        self.transform = MISTransform(alpha=1.3 + 0.5j, beta=1.7 + 0.8j)
        self.kernels = []  # ComplexTensor checkpoints captured at collapse points
        os.makedirs("tensor_data", exist_ok=True)

    def _initialize_tensor_field(self, dim: int) -> ComplexTensor:
        """
        Initializes the tensor field with the given dimension.

        The imaginary part is scaled by 0.01 so the field starts nearly
        real-valued.
        """
        real_part = torch.randn(dim, dim, device=device, dtype=torch.float32)
        imag_part = torch.randn(dim, dim, device=device, dtype=torch.float32) * 0.01
        return ComplexTensor(real_part, imag_part)

    def evolve_tensor_field(self):
        """
        Evolve the tensor field through powers of 2 dimensions.

        Side effects: pickles the field to tensor_data/kernel_dim_<d>.pkl at
        every even collapse step and, at the end, saves all kernels plus the
        final field via save_final_data().
        """
        current_dim = 1
        collapse_counter = 1

        while current_dim <= self.max_dim:
            print(f"Evolving tensor field at dimension: {current_dim}")

            # Generate QRNG data for transformation modulation on CPU
            quantum_state = self.qsp.create_superposition(alpha=1 / np.sqrt(2), beta=1 / np.sqrt(2)).to(cpu_device, dtype=torch.float32)
            qrng_data = generate_random_characters(quantum_state, transform=self.transform, length=current_dim)

            # Split QRNG data into CPU and GPU chunks (even-indexed chunks go
            # to the CPU queue, odd-indexed to the GPU queue)
            batch_size = min(2048, current_dim)
            chunks = [(start, min(start + batch_size, current_dim)) for start in range(0, current_dim, batch_size)]
            cpu_queue = queue.Queue()
            gpu_queue = queue.Queue()

            for i, (start, end) in enumerate(chunks):
                if i % 2 == 0:
                    cpu_queue.put((start, end, qrng_data[start:end]))  # CPU processing
                else:
                    gpu_queue.put((start, end, qrng_data[start:end]))  # GPU processing

            # Start threads for CPU and GPU processing
            with ThreadPoolExecutor() as executor:
                futures = []

                # Submit CPU tasks
                for _ in range(cpu_queue.qsize()):
                    start, end, batch_data = cpu_queue.get()
                    futures.append(executor.submit(self._apply_transformations, batch_data, cpu_device))

                # Submit GPU tasks
                for _ in range(gpu_queue.qsize()):
                    start, end, batch_data = gpu_queue.get()
                    futures.append(executor.submit(self._apply_transformations, batch_data, device))

                # Collect results from parallel execution.
                # NOTE(review): every future transforms its own copy of the
                # whole field, and this loop overwrites self.tensor_field with
                # each result as it completes — so whichever future finishes
                # last wins and the other batches' work is discarded. Confirm
                # this is intended rather than an accumulation bug.
                for future in as_completed(futures):
                    transformed_tensor = future.result()
                    self.tensor_field = transformed_tensor

            # Save kernel data at wave collapse points (every other step)
            if collapse_counter % 2 == 0:
                with open(f"tensor_data/kernel_dim_{current_dim}.pkl", "wb") as f:
                    pickle.dump(self.tensor_field, f)
                self.kernels.append(self.tensor_field)
                print(f"Kernel at dimension {current_dim} saved as 'kernel_dim_{current_dim}.pkl'")

            # Expand tensor field for next dimension increment (power of 2)
            new_dim = current_dim * 2
            new_real_part = torch.randn(new_dim, new_dim, device=device, dtype=torch.float32)
            new_imag_part = torch.randn(new_dim, new_dim, device=device, dtype=torch.float32) * 0.01
            expanded_tensor = ComplexTensor(new_real_part, new_imag_part)

            # Embed the current tensor field within the expanded tensor.
            # NOTE(review): _apply_transformations returns the field on the
            # CPU while expanded_tensor lives on `device`; verify this
            # cross-device slice assignment is accepted on MPS.
            expanded_tensor.real[:current_dim, :current_dim] = self.tensor_field.real
            expanded_tensor.imag[:current_dim, :current_dim] = self.tensor_field.imag
            self.tensor_field = expanded_tensor

            # Increment dimension and collapse counter
            current_dim = new_dim
            collapse_counter += 1

        # Final save of all kernels and tensor field data
        self.save_final_data()

    def _apply_transformations(self, batch_data, device):
        """
        Apply transformations in parallel, using either GPU (MPS) or CPU based on device.

        Each character of batch_data is mapped to a modulation parameter in
        [0, 1] and fed sequentially to self.transform over a device-local
        copy of the field.
        """
        modulation_params = torch.tensor(
            [(ord(char) % 32) / 31.0 for char in batch_data], device=device, dtype=torch.float32
        )
        local_tensor_field = self.tensor_field.to(device)
        for param in modulation_params:
            local_tensor_field = self.transform(local_tensor_field, t=param)
        return local_tensor_field.to(cpu_device)  # Move result back to CPU to avoid excess GPU memory use

    def save_final_data(self):
        """
        Saves all kernels and final tensor field data as `.npy` files.
        """
        kernel_data = [torch.stack([k.real, k.imag], dim=0) for k in self.kernels]
        # Kernels are captured at different dimensions (2x2, 8x8, 32x32, ...),
        # so the list is ragged: a plain np.array(...) over it raises
        # ValueError on NumPy >= 1.24. Build an explicit object array and
        # save with allow_pickle so the ragged structure round-trips.
        kernel_array = np.empty(len(kernel_data), dtype=object)
        for i, k in enumerate(kernel_data):
            kernel_array[i] = k.cpu().numpy()
        np.save("tensor_data/kernels.npy", kernel_array, allow_pickle=True)
        print("All kernels saved in 'tensor_data/kernels.npy'")

        # Save the final tensor field
        np.save("tensor_data/final_tensor_real.npy", self.tensor_field.real.cpu().numpy())
        np.save("tensor_data/final_tensor_imag.npy", self.tensor_field.imag.cpu().numpy())
        print("Final tensor field saved as 'final_tensor_real.npy' and 'final_tensor_imag.npy'")
|
|
|
# Example usage
if __name__ == "__main__":
    # Drive a full run from 1x1 up to 1024x1024 (2**10); raise max_dim_exp
    # for higher dimensions.
    field_evolver = TensorFieldEvolver(max_dim_exp=10)
    field_evolver.evolve_tensor_field()