Last active
January 22, 2024 15:46
-
-
Save graylan0/58e7f440f69db7e745735b7fe4575463 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import asyncio
import base64
import io
import random
import sys

import httpx
import numpy as np
import pennylane as qml
import requests
from PIL import Image, ImageTk
# Number of qubits in the circuit; also the required length of `params`.
num_qubits = 6

# Statevector simulator backing the QNode below.
dev = qml.device('default.qubit', wires=num_qubits)


@qml.qnode(dev)
def quantum_circuit(params):
    """Prepare an entangled 6-qubit state and return its statevector.

    Each wire gets a Hadamard followed by an RY rotation by ``params[i]``,
    then neighboring wires are entangled with a linear CNOT chain.

    Args:
        params: Sequence of ``num_qubits`` rotation angles in radians.

    Returns:
        Complex statevector of length ``2 ** num_qubits`` (``qml.state()``).
    """
    for wire in range(num_qubits):
        qml.Hadamard(wires=wire)
        qml.RY(params[wire], wires=wire)
    # Entangling chain: 0-1, 1-2, ..., 4-5.
    for wire in range(num_qubits - 1):
        qml.CNOT(wires=[wire, wire + 1])
    return qml.state()
def mixed_state_to_color_code(mixed_state):
    """Map a 6-element non-negative weight vector to an RGB hex color.

    The vector is normalized to sum to 1; the first two entries drive the
    red channel, the next two green, and the remainder blue.

    Args:
        mixed_state: Array-like of non-negative real weights (length >= 5).

    Returns:
        Hex color string such as ``'#7f7f00'``.
    """
    probs = np.asarray(mixed_state, dtype=float)
    total = probs.sum()
    if total <= 0:
        # Degenerate input: return black instead of dividing by zero
        # (the original produced NaN and then crashed in int()).
        return '#000000'
    norm = probs / total
    # Clamp each channel so float rounding can never push it past 255.
    red = min(int(norm[:2].sum() * 255), 255)
    green = min(int(norm[2:4].sum() * 255), 255)
    blue = min(int(norm[4:].sum() * 255), 255)
    return f'#{red:02x}{green:02x}{blue:02x}'
def generate_image_from_quantum_data(quantum_data):
    """Render an image via a local Stable Diffusion API, colored by a quantum state.

    The (possibly complex) statevector is first converted to Born-rule
    probabilities, mapped to a dominant color, and a txt2img request is
    sent to a locally running AUTOMATIC1111 webui endpoint.

    Args:
        quantum_data: Statevector amplitudes (complex) or a probability vector.

    Returns:
        ``PIL.Image.Image`` decoded from the first generated image.

    Raises:
        Exception: if the API responds with a non-200 status code.
    """
    # Amplitudes are complex; |amp|**2 gives real probabilities.  Passing the
    # raw complex state to the color mapper would crash in int().
    probabilities = np.abs(np.asarray(quantum_data)) ** 2
    color_code = mixed_state_to_color_code(probabilities)
    prompt = f"Generate an image with predominant color {color_code}"
    url = 'http://127.0.0.1:7860/sdapi/v1/txt2img'
    # Use native JSON types: the webui API expects booleans/numbers,
    # not the strings "false"/"0.7"/"7"/"true".
    payload = {
        "prompt": prompt,
        "steps": 121,
        "seed": random.randrange(sys.maxsize),
        "enable_hr": False,
        "denoising_strength": 0.7,
        "cfg_scale": 7,
        "width": 366,
        "height": 856,
        "restore_faces": True,
    }
    response = requests.post(url, json=payload, timeout=300)
    if response.status_code != 200:
        raise Exception(f"Error generating image: {response.status_code}")
    image_data = response.json()['images'][0]
    # The API may return bare base64 or a "data:image/png;base64," data URL;
    # [-1] handles both (the original [1] raised IndexError on bare base64).
    base64_payload = image_data.split(",", 1)[-1]
    return Image.open(io.BytesIO(base64.b64decode(base64_payload)))
def encode_image_to_base64(image):
    """Serialize a PIL image to a base64-encoded JPEG string.

    Args:
        image: ``PIL.Image.Image`` to encode.

    Returns:
        ASCII base64 string of the JPEG bytes.
    """
    buffered = io.BytesIO()
    # JPEG has no alpha channel; convert first so an RGBA input does not
    # raise OSError from Pillow's JPEG encoder.
    image.convert("RGB").save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
async def analyze_image_with_gpt4_vision(base64_image):
    """Send a base64 JPEG to the OpenAI vision chat endpoint, return the JSON reply.

    Args:
        base64_image: Base64-encoded JPEG data (no data-URL prefix).

    Returns:
        Decoded JSON response from the chat completions API.
    """
    async with httpx.AsyncClient(timeout=60) as client:
        response = await client.post(
            "https://api.openai.com/v1/chat/completions",
            # NOTE(review): load the key from the environment; never ship a
            # hard-coded credential placeholder.
            headers={"Authorization": "Bearer YOUR_OPENAI_API_KEY"},
            json={
                "model": "gpt-4-vision-preview",
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": "Describe this image."},
                            # Vision inputs must use the "image_url" part type
                            # with a data URL; the bare {"type": "image"} form
                            # is rejected by the API.
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{base64_image}"
                                },
                            },
                        ],
                    }
                ],
                # gpt-4-vision-preview truncates aggressively unless
                # max_tokens is set explicitly.
                "max_tokens": 300,
            },
        )
        return response.json()
async def integrated_quantum_image_analysis(params):
    """Run the full pipeline: quantum circuit -> image -> GPT-4 vision analysis.

    Args:
        params: Rotation angles for the quantum circuit (length ``num_qubits``).

    Returns:
        JSON analysis result from the vision model.
    """
    quantum_state = quantum_circuit(params)
    image = generate_image_from_quantum_data(quantum_state)
    base64_image = encode_image_to_base64(image)
    return await analyze_image_with_gpt4_vision(base64_image)
if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects.
    params = np.random.random(num_qubits) * np.pi
    analysis_result = asyncio.run(integrated_quantum_image_analysis(params))
    print(analysis_result)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment