Created
April 12, 2025 01:05
-
-
Save rahimnathwani/41e5bc475163cd5ea4382226c213df90 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env -S uv run --script | |
# /// script | |
# requires-python = ">=3.11" | |
# dependencies = [ | |
# "python-dotenv", | |
# "openai", | |
# ] | |
# /// | |
import os | |
from dotenv import load_dotenv | |
from openai import OpenAI | |
from datetime import datetime | |
import uuid | |
import readline | |
def main():
    """Interactively generate a full-length novel with the OpenAI Responses API.

    Chapters are appended to an output file and every raw model response is
    appended to a separate debug file; both filenames are unique per run.
    """
    load_dotenv()  # loads OPENAI_API_KEY (etc.) from a local .env, if present
    client = OpenAI()
    # Date + random UUID keeps output files from different runs from colliding.
    base_filename = f"{datetime.now().strftime('%Y-%m-%d')}-{uuid.uuid4()}"
    debug_filename = f"debug-{base_filename}.txt"
    output_filename = f"out-{base_filename}.txt"
    # -----------------------------------------
    # Helper functions
    # -----------------------------------------
def append_output(text: str):
    """Append *text* to the cumulative novel output file, padded by blank lines."""
    with open(output_filename, "a", encoding="utf-8") as out_file:
        out_file.write("\n\n" + text + "\n\n")
def append_debug(title: str, text: str):
    """Append *text* to the debug log under a '--- title ---' banner."""
    banner = "\n\n--- " + title + " ---\n\n"
    with open(debug_filename, "a", encoding="utf-8") as log_file:
        log_file.write(banner + text + "\n")
# Running token/word totals, accumulated by update_token_usage() per request.
total_input_tokens = 0
total_output_tokens = 0
total_tokens = 0
cumulative_novel_words = 0  # total words of chapter text written so far
story_summary_so_far = ""  # rolling "story so far" fed into each chapter prompt
MIN_WORDS = 50000  # minimum length before wrap-up may begin
MAX_WORDS = 80000  # hard stop for the whole novel
# Threshold at which the next chapter is forced to be the final one.
# With the defaults above: max(55000, 72000) == 72000.
FINISH_WORDS = max(int(MIN_WORDS * 1.1), int(MAX_WORDS * 0.9))
def update_token_usage(response):
    """Keep a running total of tokens used, for logging/tracking purposes.

    Reads the Responses API usage object off *response* and adds its counts
    to the enclosing main()'s totals, printing per-request and cumulative
    figures as it goes.
    """
    nonlocal total_input_tokens, total_output_tokens, total_tokens
    usage = response.usage
    # Defensive: treat a missing usage object (or missing/zero fields) as 0.
    in_tokens = usage.input_tokens if usage and usage.input_tokens else 0
    out_tokens = usage.output_tokens if usage and usage.output_tokens else 0
    all_tokens = usage.total_tokens if usage and usage.total_tokens else 0
    total_input_tokens += in_tokens
    total_output_tokens += out_tokens
    total_tokens += all_tokens
    print(
        f"Request usage: Input={in_tokens}, Output={out_tokens}, Total={all_tokens}"
    )
    print(
        f"Cumulative usage so far: Input={total_input_tokens}, Output={total_output_tokens}, Total={total_tokens}\n"
    )
def maybe_proceed():
    """
    Optionally prompt the user if usage is extremely high.
    Only ask once total_tokens >= 850k; otherwise, proceed automatically.
    (The original comment said 500k, but the code has always checked 850000.)
    """
    if total_tokens >= 850000:
        answer = input("Proceed with the next step? (y/n): ").strip().lower()
        return answer == "y"
    return True
def count_words(text: str) -> int:
    """Count the whitespace-delimited words in *text*."""
    words = text.split()
    return len(words)
def separate_chapter_and_summary(raw_output: str):
    """
    Split model output at the FIRST triple dash (---): everything before it
    is the chapter text, everything after it is the end-of-chapter summary.

    Returns (chapter_text, summary_text); when no delimiter is present the
    whole output (unstripped) is treated as chapter text and summary is "".
    """
    chapter_part, delimiter, summary_part = raw_output.partition("---")
    if not delimiter:
        # No '---' anywhere in the output.
        return raw_output, ""
    return chapter_part.strip(), summary_part.strip()
# ----------------------------------------- | |
# AI call wrapper functions | |
# ----------------------------------------- | |
def get_system_guidelines(hard_choice: str, story_description: str):
    """
    Ask the model to produce a set of 'core rules' or 'principles'
    for either (A) Hard Magic, (B) One-Rule Hard SF, or (C) Comedic Middle
    School Novel, referencing appropriate guidelines for each genre.

    Args:
        hard_choice: "A", "B", or anything else (falls through to C).
        story_description: the user's free-form story concept.

    Returns:
        The model's guideline text (also appended to the debug log).
    """
    if hard_choice.upper().strip() == "A":
        system_type = "Hard Magic system"
        system_prompt = (
            "You are to design a Hard Magic system, ensuring clear, explicit rules that do not change, "
            "and which will be explained to the reader. Emphasize the constraints, costs, or trade-offs of using magic, "
            "and how these can generate plot tension without ever violating the stated rules. Follow Sanderson's First Law: "
            "'An author's ability to solve conflict with magic is directly proportional to how well the reader understands it.' "
            "Design at least one or two main rules or laws of magic that will remain consistent throughout the story. "
        )
    elif hard_choice.upper().strip() == "B":
        system_type = "One-Rule Hard SF scenario"
        system_prompt = (
            "You are to design a 'One-Rule Hard SF' scenario, in which a single major scientific or technological change "
            "deviates from our known reality, and everything else is explored logically and consistently. "
            "Stick to that single 'big lie' or big idea, do not add new speculative elements haphazardly. "
            "Follow the principles of Hard SF by ensuring the scenario and its consequences remain internally consistent, "
            "and avoid introducing random new powers or breaks from established logic. "
        )
    else:  # Option C
        # Label made consistent with the docstring ("Comedic Middle School Novel").
        system_type = "Comedic Middle School Novel"
        system_prompt = (
            "You are to design a comedic novel aimed at middle school readers, that balances humor with authentic experiences. "
            # Fixed typo: "hilariouscomedic" -> "hilarious comedic".
            "Focus on relatable characters, challenges, and hilarious comedic situations that arise. "
            "The story must have tension and uncertainty, but the humor should be light and the tone should be humorous. "
            "The story must have some education value, either academic or life lessons or moral or cultural lessons. "
        )
    # Combined prompt
    prompt_text = f"""
Using the user's story concept below, propose a concise set of core guidelines for a {system_type}.
Story Concept:
\"\"\"{story_description}\"\"\"
{system_prompt}
First, restate the single core principle or rule set (for Hard Magic or One-Rule SF).
Then list 3-5 bullet points about how these rules create tension, challenges, and possibilities in the world.
Finally, offer a brief note on how you'd ensure no 'rule breaking' arises spontaneously.
"""
    print("Generating system guidelines...")
    response = client.responses.create(
        model=selected_model,
        input=prompt_text,
    )
    system_guidelines = response.output_text
    append_debug("System Guidelines", system_guidelines)
    update_token_usage(response)
    return system_guidelines
def get_outline(story_description: str, system_guidelines: str):
    """
    Ask the model to write a detailed outline for a novel based on the
    description and system rules.

    Returns the outline text (also appended to the debug log).
    """
    print("Generating outline ...")  # was an f-string with no placeholders
    prompt_text = (
        "Write a detailed outline for a full-length novel (80k-110k words) "
        "incorporating the user's story concept and the system guidelines below. "
        "Ensure the outline maintains consistency with the chosen genre style.\n\n"
        f"STORY CONCEPT:\n{story_description}\n\n"
        f"SYSTEM GUIDELINES:\n{system_guidelines}\n\n"
        "Your outline must highlight major story arcs, subplots, key characters, and world-building details that respect the chosen genre's conventions."
    )
    response = client.responses.create(
        model=selected_model,
        input=prompt_text,
    )
    outline_text = response.output_text
    print("Outline generated.")  # was an f-string with no placeholders
    append_debug("Outline", outline_text)
    update_token_usage(response)
    return outline_text
def get_subplots(outline_text: str, system_guidelines: str):
    """
    Based on an existing outline, request deeper subplots, arcs, and
    world-building details while reinforcing Hard Magic or One-Rule Hard SF
    constraints.

    Returns the subplot/arc text (also appended to the debug log).
    """
    print("Generating subplots ...")  # was an f-string with no placeholders
    prompt_text = (
        "Using the outline provided, go deeper and design subplots, story arcs, themes, "
        "character developments, and relevant world-building. Always reaffirm how these arcs "
        "adhere to the system guidelines, never contradicting or introducing random exceptions.\n\n"
        f"OUTLINE:\n{outline_text}\n\n"
        f"SYSTEM GUIDELINES:\n{system_guidelines}\n\n"
    )
    response = client.responses.create(
        model=selected_model,
        input=prompt_text,
    )
    subplots_text = response.output_text
    print("Subplots generated.")  # was an f-string with no placeholders
    append_debug("Subplots", subplots_text)
    update_token_usage(response)
    return subplots_text
def write_chapter(
    chapter_number: int,
    outline_text: str,
    subplots_text: str,
    story_summary_so_far: str,
    system_guidelines: str,
    wrap_instructions: str,
):
    """
    Request the AI to write the next chapter and return the raw model output
    (chapter text plus end-of-chapter summary, '---' separated).
    Emphasize consistent application of the chosen genre style throughout.

    Args:
        chapter_number: 1-based chapter index, interpolated into the prompt.
        outline_text / subplots_text: planning material from earlier steps.
        story_summary_so_far: rolling summary of everything written so far.
        system_guidelines: genre rules generated at the start of the run.
        wrap_instructions: pacing directive (continue / start wrapping / finish).

    Returns:
        The raw output text; token usage is accumulated as a side effect.
    """
    # NOTE: the literal "\n" sequences below are escape sequences inside the
    # f-string, so they render as real newlines in the prompt.
    chapter_prompt = f"""
You are writing **Chapter {chapter_number}** of a novel that must adhere to the following guidelines:
{system_guidelines}
**Instructions for Chapter {chapter_number}**:
- Aim for a large chunk of text (~10k words if possible).
- Rich description, character thoughts, and consistent world-building.
- Maintain the appropriate tone and style for the chosen genre.
- {wrap_instructions}
**Relevant Information**:
- Outline:\n{outline_text}
- Expanded Details:\n{subplots_text}
- Story So Far:\n{story_summary_so_far}
**Markdown Format Requirement**:
Please output the chapter in this format exactly. Use triple dashes (---) to separate the novel text from the summary:
# Chapter {chapter_number}
(Write the complete chapter text here, in markdown.)
# End of Chapter Summary
## Major Events:
(bulleted list)
## Open/Ongoing Arcs:
(bulleted list)
## Character States:
(bulleted list)
Remember:
- Show incremental progression of the story while maintaining the appropriate tone and style.
- Keep character development and plot progression consistent with the chosen genre.
"""
    print(f"Writing chapter {chapter_number}...")
    response = client.responses.create(
        model=selected_model,
        input=chapter_prompt,
    )
    raw_output = response.output_text
    print(f"Chapter {chapter_number} written.")
    append_debug(f"Raw Output Chapter {chapter_number}", raw_output)
    update_token_usage(response)
    return raw_output
def merge_summaries(
    old_summary: str,
    chapter_summary: str,
    chapter_number: int,
    system_guidelines: str,
) -> str:
    """
    Combine the existing story summary with the new chapter's summary in a
    cohesive way. Also confirm in the merged summary that no rules have been
    inadvertently broken.

    Returns the merged summary text (also appended to the debug log).
    """
    summary_prompt = f"""
You are maintaining a 'Story So Far' summary for a novel with these system guidelines:
{system_guidelines}
Please merge the existing summary with the end-of-chapter summary in a cohesive, concise way, ensuring no contradictions.
**Existing Overall Summary**:
{old_summary}
**New Chapter {chapter_number} Summary**:
{chapter_summary}
Include:
- Major events
- Open or ongoing arcs
- Current state of each main character
- Brief check: confirm or note if any events might appear to break the established rules.
If so, highlight them so it can be corrected. Otherwise, confirm consistency is maintained.
Return the merged summary in plain text.
"""
    print("Merging summaries...")  # was an f-string with no placeholders
    response = client.responses.create(
        model=selected_model,
        input=summary_prompt,
    )
    new_summary = response.output_text
    print("Summaries merged.")  # was an f-string with no placeholders
    append_debug("Summary", new_summary)
    update_token_usage(response)
    return new_summary
def check_arcs_resolved(story_summary: str, system_guidelines: str):
    """
    Ask the model if major arcs are resolved in a satisfying way and if the
    system rules remain consistent.

    Returns:
        (arcs_resolved, rules_consistent) — each True when the model's reply
        contains "YES_ARCS" / "YES_RULES" respectively.
        (The original docstring claimed a single bool; a 2-tuple is returned.)
    """
    arcs_prompt = f"""
We have the following 'Story So Far' summary:
{story_summary}
System Guidelines:
{system_guidelines}
Question 1: Have all major story arcs been resolved in a satisfying way?
If yes, respond with "YES_ARCS" only.
If no, respond with "NO_ARCS" only.
Question 2: Have all Hard Magic/One-Rule Hard SF constraints been respected (no unapproved breaks in logic)?
If yes, respond with "YES_RULES" only.
If no, respond with "NO_RULES" only.
"""
    print("Checking if arcs are resolved and rules remain intact...")  # was an f-string with no placeholders
    response = client.responses.create(
        model=selected_model,
        input=arcs_prompt,
    )
    arcs_answer = response.output_text.strip().upper()
    print(f"Arcs/Rules check complete. Full answer: {arcs_answer}")
    update_token_usage(response)
    # Just a simple check to see if "YES_ARCS" and "YES_RULES" appear
    arcs_resolved = "YES_ARCS" in arcs_answer
    rules_consistent = "YES_RULES" in arcs_answer
    return arcs_resolved, rules_consistent
# -----------------------------------------
# Main script flow
# -----------------------------------------
# Prompt user to select a model
model_choice = input(
    "Choose a model:\n1) gpt-4.5-preview\n2) o3-mini\nEnter 1 or 2: "
).strip()
if model_choice == "2":
    selected_model = "o3-mini"
else:
    # Anything other than "2" (including garbage input) falls back to gpt-4.5-preview.
    selected_model = "gpt-4.5-preview"
# A) Prompt user for Hard Magic or One-Rule Hard SF
choice_input = input(
    "Choose your story type:\n\nA) Hard Magic\n\nB) One-Rule Hard Sci-Fi\n\nC) Comedic Novel\n\nEnter A, B, or C: "
).strip()
if choice_input.upper() not in ["A", "B", "C"]:
    print("Invalid selection. Defaulting to Hard Magic (A).")
    choice_input = "A"
# B) Prompt the user for a description of the story
story_description = input("Describe your story in detail: ")
# 1) Generate system guidelines
system_guidelines = get_system_guidelines(choice_input, story_description)
if not maybe_proceed():
    return
# 2) Outline
outline_text = get_outline(story_description, system_guidelines)
if not maybe_proceed():
    return
# 3) Subplots, arcs, etc.
subplots_text = get_subplots(outline_text, system_guidelines)
if not maybe_proceed():
    return
# 4) Chapter-writing loop: write a chapter, fold its summary into the rolling
#    "story so far", and stop on word limits, resolved arcs, or user choice.
chapter_number = 1
while True:
    last_chapter = False
    # Progress is measured against MIN_WORDS, so it can exceed 100%.
    progress_percentage = (cumulative_novel_words / MIN_WORDS) * 100
    if cumulative_novel_words >= MIN_WORDS:
        print(
            f"Reached or exceeded {MIN_WORDS} words. We are past the minimum word count."
        )
        wrap_instructions = (
            "We have reached (or exceeded) the minimum word count for the book. "
            "If the story arcs are nearly complete, begin wrapping up. Otherwise, continue. "
            "Do not add any new characters or plotlines at this point. Ensure all Hard Magic/One-Rule SF constraints remain intact."
        )
        # Past FINISH_WORDS the wrap-up directive above is overwritten with a
        # hard "this is the last chapter" instruction.
        if cumulative_novel_words >= FINISH_WORDS:
            print(
                f"Reached or exceeded {FINISH_WORDS} words. This is going to be the final chapter."
            )
            wrap_instructions = (
                "We have reached (or exceeded) the maximum word count for the book. "
                "THIS MUST BE THE LAST CHAPTER. "
                "Please wrap up the story and resolve all major arcs and plotlines. "
                "Do not add any new characters or plotlines, as this is the final chapter. "
                "Reinforce the Hard Magic/One-Rule SF logic in the final resolution."
            )
            last_chapter = True
    else:
        wrap_instructions = (
            f"We are at approximately {progress_percentage:.1f}% of our target word count. "
            "If the main arcs are further along than this percentage, please add more scenes, details, or dialogue, "
            "while consistently applying the established system guidelines."
        )
    # Write the chapter
    raw_chapter_output = write_chapter(
        chapter_number,
        outline_text,
        subplots_text,
        story_summary_so_far,
        system_guidelines,
        wrap_instructions,
    )
    # Separate the chapter text from the summary
    chapter_text, chapter_summary = separate_chapter_and_summary(raw_chapter_output)
    # Save the "cleaned" chapter text (summary stays out of the novel file)
    append_output(chapter_text)
    # Update word count
    chapter_word_count = count_words(chapter_text)
    cumulative_novel_words += chapter_word_count
    print(
        f"[INFO] Chapter {chapter_number} word count: {chapter_word_count}. "
        f"Cumulative novel words: {cumulative_novel_words}.\n"
    )
    # Merge the new summary into the story so far
    story_summary_so_far = merge_summaries(
        story_summary_so_far, chapter_summary, chapter_number, system_guidelines
    )
    # Check for max word limit
    if cumulative_novel_words >= MAX_WORDS:
        print(
            f"[INFO] Reached or exceeded max word limit ({MAX_WORDS}). Stopping.\n"
        )
        break
    # Check if arcs are resolved and rules are intact (only if we've passed the min word count)
    if cumulative_novel_words >= MIN_WORDS:
        arcs_resolved, rules_okay = check_arcs_resolved(
            story_summary_so_far, system_guidelines
        )
        if arcs_resolved:
            print(
                "[INFO] Model indicates all major arcs are resolved after reaching the minimum target."
            )
            if rules_okay:
                print(
                    "[INFO] Also, the model indicates system rules remain intact. Stopping.\n"
                )
                break
            else:
                # Arcs done but a rules inconsistency was flagged: let the user
                # decide whether to write another chapter to repair it.
                print(
                    "[WARN] The model indicates a possible rules inconsistency. You may want to revise or continue.\n"
                )
                answer = (
                    input("Continue to another chapter to fix rules? (y/n): ")
                    .strip()
                    .lower()
                )
                if answer != "y":
                    break
    # The chapter that was just written was told to be the final one.
    if last_chapter:
        print(
            f"[INFO] Reached or exceeded {FINISH_WORDS} words. The AI model was told this is the final chapter.\n"
            f"The final summary is:\n{story_summary_so_far}\n"
        )
        break
    if not maybe_proceed():
        print("[INFO] User decided to stop at this point. Stopping.\n")
        break
    chapter_number += 1
if __name__ == "__main__":
    # Run the interactive generator only when executed as a script, not on import.
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment