Skip to content

Instantly share code, notes, and snippets.

@madaan
Created January 12, 2024 16:58
Show Gist options
  • Save madaan/22a9475797f983d6aa2fdd6c55d0ff79 to your computer and use it in GitHub Desktop.
MWE (minimal working example) for using the Gemini API. The code has been tested with v0.3.2 of the google-generativeai library.
# MWE for using the Gemini api. The code has been tested with v0.3.2.
import functools
import random
import time

import google.generativeai as genai
# Pin the SDK version this MWE was written against. NOTE(review): `assert`
# is stripped under `python -O`; fine for a demo, raise explicitly in prod.
assert genai.__version__ == '0.3.2'
# NOTE(review): placeholder key — replace with a real key (ideally loaded
# from an environment variable) before running.
genai.configure(api_key="YOUR_KEY_HERE!")
# Shared client for the 'gemini-pro' text model, used by call_gemini_api.
model = genai.GenerativeModel('gemini-pro')
# from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb
def retry_with_exponential_backoff(
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 3,
    errors: tuple = (Exception,),  # tuple of exception classes that trigger a retry
):
    """Wrap *func* so failing calls are retried with exponential backoff.

    Adapted from the OpenAI cookbook's rate-limit handling example.

    Args:
        func: The callable to wrap.
        initial_delay: Seconds to base the first backoff delay on.
        exponential_base: Multiplier applied to the delay after each failure.
        jitter: If True, add a random factor to each delay to avoid
            synchronized retries (thundering herd).
        max_retries: Number of retries before giving up.
        errors: Exception classes that should trigger a retry; anything
            else propagates immediately.

    Returns:
        A wrapper with the same signature as *func*. On success it returns
        *func*'s result; after exhausting retries it returns ``None``
        (deliberately swallowing the final error, as in the original MWE).
    """
    @functools.wraps(func)  # preserve func's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        num_retries = 0
        delay = initial_delay
        while True:
            try:
                return func(*args, **kwargs)
            except errors as e:
                num_retries += 1
                if num_retries > max_retries:
                    print(f"Maximum number of retries ({max_retries}) exceeded.")
                    return None  # give up quietly rather than re-raising
                # Grow the delay geometrically; `jitter` (bool) multiplies the
                # random term by 1 or 0, enabling/disabling the randomness.
                delay *= exponential_base * (1 + jitter * random.random())
                print(f"Retrying in {delay:.2f} seconds due to error: {e}")
                time.sleep(delay)

    return wrapper
@retry_with_exponential_backoff
def call_gemini_api(prompt, max_tokens=300, temperature=0.0):
    """Send *prompt* to the Gemini model and return the generated text.

    Args:
        prompt: The text prompt to send to the model.
        max_tokens: Upper bound on the number of tokens to generate.
        temperature: Sampling temperature; 0.0 for deterministic output.

    Returns:
        The generated text, or ``None`` if all retries are exhausted
        (see retry_with_exponential_backoff).
    """
    config = genai.types.GenerationConfig(
        candidate_count=1,
        max_output_tokens=max_tokens,
        temperature=temperature,
    )
    result = model.generate_content(prompt, generation_config=config)
    return result.text
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment