@vgel
Created May 6, 2025 22:53
import math
import os

from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

question = "Generate a random string of the letters A and B, no talk just go."

model1, client1 = "gpt-4o-mini", OpenAI()
model2, client2 = "deepseek-chat", OpenAI(
    api_key=os.environ["DEEPSEEK_API_KEY"],
    base_url="https://api.deepseek.com/",
)

print()
print(question)
print()

for model, client in ((model1, client1), (model2, client2)):
    # Run the same prompt twice per model, keeping only the per-token logprob
    # data so the two trials can be compared position-by-position below.
    responses = [
        (
            client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "user", "content": question},
                ],
                temperature=1.0,
                max_tokens=3,
                logprobs=True,
                top_logprobs=3,
            )
            .choices[0]
            .logprobs.content
        )
        for _ in range(2)
    ]

    print(f"{model + ' trial 1':<34} // {model} trial 2")
    print("-" * 35 + "//" + "-" * 35)
    for lp1, lp2 in zip(responses[0], responses[1]):
        # Sort each token position's top-3 candidates by logprob, highest first.
        top_logprobs_1 = sorted(lp1.top_logprobs, key=lambda t: t.logprob, reverse=True)
        top_logprobs_2 = sorted(lp2.top_logprobs, key=lambda t: t.logprob, reverse=True)
        for tlp1, tlp2 in zip(top_logprobs_1, top_logprobs_2):
            # exp(logprob) converts the log-probability back to a probability.
            print(
                f"- {tlp1.token!r:<10} | {math.exp(tlp1.logprob) * 100:>6.2f}% | {tlp1.logprob:>9.5f} /"
                f"/ {tlp2.token!r:<10} | {math.exp(tlp2.logprob) * 100:>6.2f}% | {tlp2.logprob:>9.5f}"
            )
    print()
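
Not part of the original gist, but a minimal sketch of the environment setup the script appears to assume: a .env file read by load_dotenv that supplies both API keys. DEEPSEEK_API_KEY is read explicitly in the code; OPENAI_API_KEY is the variable the OpenAI client picks up by default. The key values below are placeholders.

# .env (assumed layout; placeholder values)
OPENAI_API_KEY=sk-...
DEEPSEEK_API_KEY=sk-...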